Diffstat (limited to 'net')
-rw-r--r--net/802/garp.c4
-rw-r--r--net/8021q/vlan.c8
-rw-r--r--net/8021q/vlan_core.c2
-rw-r--r--net/8021q/vlan_dev.c18
-rw-r--r--net/9p/trans_rdma.c1
-rw-r--r--net/Kconfig8
-rw-r--r--net/Makefile2
-rw-r--r--net/appletalk/ddp.c2
-rw-r--r--net/atm/br2684.c1
-rw-r--r--net/atm/common.c30
-rw-r--r--net/atm/lec.c6
-rw-r--r--net/atm/mpc.c32
-rw-r--r--net/atm/mpoa_caches.c20
-rw-r--r--net/atm/proc.c10
-rw-r--r--net/atm/signaling.c2
-rw-r--r--net/atm/svc.c62
-rw-r--r--net/ax25/af_ax25.c8
-rw-r--r--net/bluetooth/Kconfig13
-rw-r--r--net/bluetooth/af_bluetooth.c6
-rw-r--r--net/bluetooth/bnep/core.c8
-rw-r--r--net/bluetooth/bnep/netdev.c20
-rw-r--r--net/bluetooth/cmtp/cmtp.h2
-rw-r--r--net/bluetooth/cmtp/core.c4
-rw-r--r--net/bluetooth/hci_core.c27
-rw-r--r--net/bluetooth/hci_sysfs.c34
-rw-r--r--net/bluetooth/hidp/core.c10
-rw-r--r--net/bluetooth/hidp/hidp.h4
-rw-r--r--net/bluetooth/l2cap.c1115
-rw-r--r--net/bluetooth/rfcomm/sock.c8
-rw-r--r--net/bluetooth/rfcomm/tty.c2
-rw-r--r--net/bluetooth/sco.c31
-rw-r--r--net/bridge/Kconfig6
-rw-r--r--net/bridge/br.c2
-rw-r--r--net/bridge/br_device.c131
-rw-r--r--net/bridge/br_fdb.c9
-rw-r--r--net/bridge/br_forward.c51
-rw-r--r--net/bridge/br_if.c14
-rw-r--r--net/bridge/br_input.c12
-rw-r--r--net/bridge/br_ioctl.c2
-rw-r--r--net/bridge/br_multicast.c690
-rw-r--r--net/bridge/br_netfilter.c263
-rw-r--r--net/bridge/br_netlink.c8
-rw-r--r--net/bridge/br_notify.c11
-rw-r--r--net/bridge/br_private.h57
-rw-r--r--net/bridge/br_stp.c11
-rw-r--r--net/bridge/br_stp_bpdu.c2
-rw-r--r--net/bridge/br_stp_if.c16
-rw-r--r--net/bridge/br_stp_timer.c24
-rw-r--r--net/bridge/br_sysfs_br.c2
-rw-r--r--net/bridge/br_sysfs_if.c32
-rw-r--r--net/bridge/netfilter/ebt_802_3.c8
-rw-r--r--net/bridge/netfilter/ebt_among.c27
-rw-r--r--net/bridge/netfilter/ebt_arp.c10
-rw-r--r--net/bridge/netfilter/ebt_arpreply.c10
-rw-r--r--net/bridge/netfilter/ebt_dnat.c12
-rw-r--r--net/bridge/netfilter/ebt_ip.c18
-rw-r--r--net/bridge/netfilter/ebt_ip6.c39
-rw-r--r--net/bridge/netfilter/ebt_limit.c11
-rw-r--r--net/bridge/netfilter/ebt_log.c10
-rw-r--r--net/bridge/netfilter/ebt_mark.c12
-rw-r--r--net/bridge/netfilter/ebt_mark_m.c12
-rw-r--r--net/bridge/netfilter/ebt_nflog.c8
-rw-r--r--net/bridge/netfilter/ebt_pkttype.c8
-rw-r--r--net/bridge/netfilter/ebt_redirect.c12
-rw-r--r--net/bridge/netfilter/ebt_snat.c12
-rw-r--r--net/bridge/netfilter/ebt_stp.c10
-rw-r--r--net/bridge/netfilter/ebt_ulog.c38
-rw-r--r--net/bridge/netfilter/ebt_vlan.c54
-rw-r--r--net/bridge/netfilter/ebtables.c56
-rw-r--r--net/caif/Kconfig48
-rw-r--r--net/caif/Makefile26
-rw-r--r--net/caif/caif_config_util.c87
-rw-r--r--net/caif/caif_dev.c417
-rw-r--r--net/caif/caif_socket.c1252
-rw-r--r--net/caif/cfcnfg.c470
-rw-r--r--net/caif/cfctrl.c692
-rw-r--r--net/caif/cfdbgl.c40
-rw-r--r--net/caif/cfdgml.c108
-rw-r--r--net/caif/cffrml.c151
-rw-r--r--net/caif/cfmuxl.c251
-rw-r--r--net/caif/cfpkt_skbuff.c571
-rw-r--r--net/caif/cfrfml.c108
-rw-r--r--net/caif/cfserl.c192
-rw-r--r--net/caif/cfsrvl.c192
-rw-r--r--net/caif/cfutill.c115
-rw-r--r--net/caif/cfveil.c107
-rw-r--r--net/caif/cfvidl.c65
-rw-r--r--net/caif/chnl_net.c467
-rw-r--r--net/can/bcm.c2
-rw-r--r--net/core/Makefile2
-rw-r--r--net/core/datagram.c21
-rw-r--r--net/core/dev.c1402
-rw-r--r--net/core/dev_addr_lists.c741
-rw-r--r--net/core/dev_mcast.c232
-rw-r--r--net/core/dst.c45
-rw-r--r--net/core/ethtool.c152
-rw-r--r--net/core/fib_rules.c31
-rw-r--r--net/core/filter.c7
-rw-r--r--net/core/flow.c405
-rw-r--r--net/core/net-sysfs.c377
-rw-r--r--net/core/net-sysfs.h1
-rw-r--r--net/core/net_namespace.c95
-rw-r--r--net/core/netpoll.c26
-rw-r--r--net/core/pktgen.c58
-rw-r--r--net/core/rtnetlink.c369
-rw-r--r--net/core/skbuff.c33
-rw-r--r--net/core/sock.c78
-rw-r--r--net/core/stream.c22
-rw-r--r--net/core/sysctl_net_core.c75
-rw-r--r--net/dccp/ccids/ccid3.c2
-rw-r--r--net/dccp/dccp.h4
-rw-r--r--net/dccp/input.c2
-rw-r--r--net/dccp/ipv4.c2
-rw-r--r--net/dccp/ipv6.c7
-rw-r--r--net/dccp/output.c18
-rw-r--r--net/dccp/proto.c2
-rw-r--r--net/dccp/timer.c4
-rw-r--r--net/decnet/af_decnet.c32
-rw-r--r--net/decnet/dn_dev.c15
-rw-r--r--net/decnet/dn_neigh.c9
-rw-r--r--net/decnet/dn_nsp_in.c3
-rw-r--r--net/decnet/dn_route.c29
-rw-r--r--net/decnet/dn_rules.c22
-rw-r--r--net/dsa/slave.c14
-rw-r--r--net/ethernet/eth.c4
-rw-r--r--net/ipv4/Kconfig22
-rw-r--r--net/ipv4/af_inet.c57
-rw-r--r--net/ipv4/arp.c8
-rw-r--r--net/ipv4/cipso_ipv4.c2
-rw-r--r--net/ipv4/devinet.c4
-rw-r--r--net/ipv4/fib_rules.c22
-rw-r--r--net/ipv4/fib_trie.c2
-rw-r--r--net/ipv4/icmp.c11
-rw-r--r--net/ipv4/igmp.c4
-rw-r--r--net/ipv4/inet_connection_sock.c10
-rw-r--r--net/ipv4/inet_hashtables.c2
-rw-r--r--net/ipv4/ip_forward.c4
-rw-r--r--net/ipv4/ip_gre.c10
-rw-r--r--net/ipv4/ip_input.c8
-rw-r--r--net/ipv4/ip_options.c10
-rw-r--r--net/ipv4/ip_output.c35
-rw-r--r--net/ipv4/ip_sockglue.c20
-rw-r--r--net/ipv4/ipconfig.c2
-rw-r--r--net/ipv4/ipip.c7
-rw-r--r--net/ipv4/ipmr.c926
-rw-r--r--net/ipv4/netfilter.c6
-rw-r--r--net/ipv4/netfilter/arp_tables.c102
-rw-r--r--net/ipv4/netfilter/arpt_mangle.c4
-rw-r--r--net/ipv4/netfilter/ip_queue.c4
-rw-r--r--net/ipv4/netfilter/ip_tables.c260
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c77
-rw-r--r--net/ipv4/netfilter/ipt_ECN.c23
-rw-r--r--net/ipv4/netfilter/ipt_LOG.c19
-rw-r--r--net/ipv4/netfilter/ipt_MASQUERADE.c18
-rw-r--r--net/ipv4/netfilter/ipt_NETMAP.c16
-rw-r--r--net/ipv4/netfilter/ipt_REDIRECT.c16
-rw-r--r--net/ipv4/netfilter/ipt_REJECT.c19
-rw-r--r--net/ipv4/netfilter/ipt_ULOG.c47
-rw-r--r--net/ipv4/netfilter/ipt_addrtype.c28
-rw-r--r--net/ipv4/netfilter/ipt_ah.c28
-rw-r--r--net/ipv4/netfilter/ipt_ecn.c19
-rw-r--r--net/ipv4/netfilter/iptable_filter.c2
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c10
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c7
-rw-r--r--net/ipv4/netfilter/nf_nat_h323.c17
-rw-r--r--net/ipv4/netfilter/nf_nat_rule.c21
-rw-r--r--net/ipv4/netfilter/nf_nat_snmp_basic.c16
-rw-r--r--net/ipv4/netfilter/nf_nat_standalone.c7
-rw-r--r--net/ipv4/netfilter/nf_nat_tftp.c1
-rw-r--r--net/ipv4/proc.c1
-rw-r--r--net/ipv4/raw.c6
-rw-r--r--net/ipv4/route.c154
-rw-r--r--net/ipv4/sysctl_net_ipv4.c17
-rw-r--r--net/ipv4/tcp.c55
-rw-r--r--net/ipv4/tcp_input.c12
-rw-r--r--net/ipv4/tcp_ipv4.c43
-rw-r--r--net/ipv4/tcp_minisocks.c1
-rw-r--r--net/ipv4/tcp_output.c25
-rw-r--r--net/ipv4/tcp_timer.c8
-rw-r--r--net/ipv4/udp.c40
-rw-r--r--net/ipv4/xfrm4_input.c6
-rw-r--r--net/ipv4/xfrm4_output.c2
-rw-r--r--net/ipv4/xfrm4_policy.c22
-rw-r--r--net/ipv6/Kconfig14
-rw-r--r--net/ipv6/addrconf.c886
-rw-r--r--net/ipv6/addrlabel.c8
-rw-r--r--net/ipv6/af_inet6.c5
-rw-r--r--net/ipv6/datagram.c116
-rw-r--r--net/ipv6/fib6_rules.c3
-rw-r--r--net/ipv6/icmp.c7
-rw-r--r--net/ipv6/inet6_connection_sock.c4
-rw-r--r--net/ipv6/ip6_fib.c16
-rw-r--r--net/ipv6/ip6_flowlabel.c3
-rw-r--r--net/ipv6/ip6_input.c4
-rw-r--r--net/ipv6/ip6_output.c103
-rw-r--r--net/ipv6/ip6_tunnel.c8
-rw-r--r--net/ipv6/ip6mr.c942
-rw-r--r--net/ipv6/ipv6_sockglue.c86
-rw-r--r--net/ipv6/mcast.c143
-rw-r--r--net/ipv6/ndisc.c6
-rw-r--r--net/ipv6/netfilter.c25
-rw-r--r--net/ipv6/netfilter/ip6_queue.c4
-rw-r--r--net/ipv6/netfilter/ip6_tables.c236
-rw-r--r--net/ipv6/netfilter/ip6t_LOG.c20
-rw-r--r--net/ipv6/netfilter/ip6t_REJECT.c32
-rw-r--r--net/ipv6/netfilter/ip6t_ah.c18
-rw-r--r--net/ipv6/netfilter/ip6t_eui64.c4
-rw-r--r--net/ipv6/netfilter/ip6t_frag.c18
-rw-r--r--net/ipv6/netfilter/ip6t_hbh.c33
-rw-r--r--net/ipv6/netfilter/ip6t_ipv6header.c8
-rw-r--r--net/ipv6/netfilter/ip6t_mh.c21
-rw-r--r--net/ipv6/netfilter/ip6t_rt.c20
-rw-r--r--net/ipv6/netfilter/ip6table_filter.c2
-rw-r--r--net/ipv6/netfilter/ip6table_mangle.c2
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c14
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c2
-rw-r--r--net/ipv6/proc.c2
-rw-r--r--net/ipv6/raw.c18
-rw-r--r--net/ipv6/route.c2
-rw-r--r--net/ipv6/sit.c8
-rw-r--r--net/ipv6/tcp_ipv6.c68
-rw-r--r--net/ipv6/udp.c41
-rw-r--r--net/ipv6/xfrm6_input.c2
-rw-r--r--net/ipv6/xfrm6_output.c4
-rw-r--r--net/ipv6/xfrm6_policy.c31
-rw-r--r--net/irda/af_irda.c14
-rw-r--r--net/irda/ircomm/ircomm_param.c2
-rw-r--r--net/irda/iriap.c2
-rw-r--r--net/irda/irnet/irnet_irda.c3
-rw-r--r--net/iucv/af_iucv.c22
-rw-r--r--net/key/af_key.c10
-rw-r--r--net/l2tp/Kconfig107
-rw-r--r--net/l2tp/Makefile12
-rw-r--r--net/l2tp/l2tp_core.c1666
-rw-r--r--net/l2tp/l2tp_core.h304
-rw-r--r--net/l2tp/l2tp_debugfs.c341
-rw-r--r--net/l2tp/l2tp_eth.c334
-rw-r--r--net/l2tp/l2tp_ip.c679
-rw-r--r--net/l2tp/l2tp_netlink.c840
-rw-r--r--net/l2tp/l2tp_ppp.c1837
-rw-r--r--net/llc/af_llc.c12
-rw-r--r--net/llc/llc_core.c6
-rw-r--r--net/llc/llc_sap.c2
-rw-r--r--net/mac80211/Kconfig17
-rw-r--r--net/mac80211/Makefile3
-rw-r--r--net/mac80211/agg-rx.c80
-rw-r--r--net/mac80211/agg-tx.c16
-rw-r--r--net/mac80211/cfg.c122
-rw-r--r--net/mac80211/chan.c127
-rw-r--r--net/mac80211/debugfs.h1
-rw-r--r--net/mac80211/debugfs_netdev.c12
-rw-r--r--net/mac80211/debugfs_sta.c79
-rw-r--r--net/mac80211/driver-ops.h33
-rw-r--r--net/mac80211/driver-trace.h333
-rw-r--r--net/mac80211/ht.c3
-rw-r--r--net/mac80211/ibss.c46
-rw-r--r--net/mac80211/ieee80211_i.h51
-rw-r--r--net/mac80211/iface.c124
-rw-r--r--net/mac80211/key.c1
-rw-r--r--net/mac80211/main.c28
-rw-r--r--net/mac80211/mesh.c6
-rw-r--r--net/mac80211/mesh.h2
-rw-r--r--net/mac80211/mesh_hwmp.c5
-rw-r--r--net/mac80211/mesh_plink.c2
-rw-r--r--net/mac80211/mlme.c315
-rw-r--r--net/mac80211/pm.c2
-rw-r--r--net/mac80211/rc80211_minstrel.c2
-rw-r--r--net/mac80211/rc80211_minstrel.h11
-rw-r--r--net/mac80211/rc80211_minstrel_debugfs.c41
-rw-r--r--net/mac80211/rx.c108
-rw-r--r--net/mac80211/scan.c126
-rw-r--r--net/mac80211/sta_info.c105
-rw-r--r--net/mac80211/sta_info.h12
-rw-r--r--net/mac80211/status.c21
-rw-r--r--net/mac80211/tx.c26
-rw-r--r--net/mac80211/util.c36
-rw-r--r--net/mac80211/work.c62
-rw-r--r--net/netfilter/Kconfig133
-rw-r--r--net/netfilter/Makefile9
-rw-r--r--net/netfilter/ipvs/ip_vs_ftp.c10
-rw-r--r--net/netfilter/ipvs/ip_vs_proto.c28
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_ah_esp.c14
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c16
-rw-r--r--net/netfilter/nf_conntrack_amanda.c2
-rw-r--r--net/netfilter/nf_conntrack_core.c6
-rw-r--r--net/netfilter/nf_conntrack_ecache.c12
-rw-r--r--net/netfilter/nf_conntrack_ftp.c4
-rw-r--r--net/netfilter/nf_conntrack_h323_main.c9
-rw-r--r--net/netfilter/nf_conntrack_irc.c4
-rw-r--r--net/netfilter/nf_conntrack_netlink.c30
-rw-r--r--net/netfilter/nf_conntrack_proto.c8
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c4
-rw-r--r--net/netfilter/nf_conntrack_sip.c4
-rw-r--r--net/netfilter/nf_conntrack_standalone.c9
-rw-r--r--net/netfilter/nf_conntrack_tftp.c4
-rw-r--r--net/netfilter/nf_internals.h2
-rw-r--r--net/netfilter/nf_log.c6
-rw-r--r--net/netfilter/nf_queue.c3
-rw-r--r--net/netfilter/nfnetlink.c7
-rw-r--r--net/netfilter/nfnetlink_log.c4
-rw-r--r--net/netfilter/nfnetlink_queue.c3
-rw-r--r--net/netfilter/x_tables.c128
-rw-r--r--net/netfilter/xt_CLASSIFY.c2
-rw-r--r--net/netfilter/xt_CONNMARK.c113
-rw-r--r--net/netfilter/xt_CONNSECMARK.c29
-rw-r--r--net/netfilter/xt_CT.c25
-rw-r--r--net/netfilter/xt_DSCP.c18
-rw-r--r--net/netfilter/xt_HL.c30
-rw-r--r--net/netfilter/xt_LED.c93
-rw-r--r--net/netfilter/xt_MARK.c56
-rw-r--r--net/netfilter/xt_NFLOG.c10
-rw-r--r--net/netfilter/xt_NFQUEUE.c50
-rw-r--r--net/netfilter/xt_NOTRACK.c2
-rw-r--r--net/netfilter/xt_RATEEST.c20
-rw-r--r--net/netfilter/xt_SECMARK.c48
-rw-r--r--net/netfilter/xt_TCPMSS.c41
-rw-r--r--net/netfilter/xt_TCPOPTSTRIP.c7
-rw-r--r--net/netfilter/xt_TEE.c309
-rw-r--r--net/netfilter/xt_TPROXY.c12
-rw-r--r--net/netfilter/xt_TRACE.c2
-rw-r--r--net/netfilter/xt_cluster.c21
-rw-r--r--net/netfilter/xt_comment.c2
-rw-r--r--net/netfilter/xt_connbytes.c22
-rw-r--r--net/netfilter/xt_connlimit.c24
-rw-r--r--net/netfilter/xt_connmark.c104
-rw-r--r--net/netfilter/xt_conntrack.c23
-rw-r--r--net/netfilter/xt_dccp.c18
-rw-r--r--net/netfilter/xt_dscp.c18
-rw-r--r--net/netfilter/xt_esp.c28
-rw-r--r--net/netfilter/xt_hashlimit.c346
-rw-r--r--net/netfilter/xt_helper.c18
-rw-r--r--net/netfilter/xt_hl.c16
-rw-r--r--net/netfilter/xt_iprange.c5
-rw-r--r--net/netfilter/xt_length.c4
-rw-r--r--net/netfilter/xt_limit.c15
-rw-r--r--net/netfilter/xt_mac.c23
-rw-r--r--net/netfilter/xt_mark.c37
-rw-r--r--net/netfilter/xt_multiport.c103
-rw-r--r--net/netfilter/xt_osf.c12
-rw-r--r--net/netfilter/xt_owner.c2
-rw-r--r--net/netfilter/xt_physdev.c18
-rw-r--r--net/netfilter/xt_pkttype.c2
-rw-r--r--net/netfilter/xt_policy.c31
-rw-r--r--net/netfilter/xt_quota.c10
-rw-r--r--net/netfilter/xt_rateest.c10
-rw-r--r--net/netfilter/xt_realm.c2
-rw-r--r--net/netfilter/xt_recent.c189
-rw-r--r--net/netfilter/xt_sctp.c57
-rw-r--r--net/netfilter/xt_socket.c11
-rw-r--r--net/netfilter/xt_state.c50
-rw-r--r--net/netfilter/xt_statistic.c14
-rw-r--r--net/netfilter/xt_string.c68
-rw-r--r--net/netfilter/xt_tcpmss.c4
-rw-r--r--net/netfilter/xt_tcpudp.c38
-rw-r--r--net/netfilter/xt_time.c16
-rw-r--r--net/netfilter/xt_u32.c5
-rw-r--r--net/netlabel/netlabel_addrlist.h2
-rw-r--r--net/netlabel/netlabel_unlabeled.c1
-rw-r--r--net/netlink/af_netlink.c23
-rw-r--r--net/netlink/genetlink.c6
-rw-r--r--net/netrom/af_netrom.c8
-rw-r--r--net/packet/af_packet.c69
-rw-r--r--net/phonet/pep.c8
-rw-r--r--net/phonet/pn_dev.c23
-rw-r--r--net/phonet/socket.c2
-rw-r--r--net/rds/af_rds.c11
-rw-r--r--net/rds/cong.c2
-rw-r--r--net/rds/ib_cm.c3
-rw-r--r--net/rds/ib_rdma.c5
-rw-r--r--net/rds/ib_recv.c4
-rw-r--r--net/rds/ib_send.c20
-rw-r--r--net/rds/iw_cm.c4
-rw-r--r--net/rds/iw_recv.c4
-rw-r--r--net/rds/iw_send.c3
-rw-r--r--net/rds/loop.c7
-rw-r--r--net/rds/rdma.c4
-rw-r--r--net/rds/rdma_transport.c5
-rw-r--r--net/rds/rds.h4
-rw-r--r--net/rds/recv.c2
-rw-r--r--net/rds/send.c40
-rw-r--r--net/rds/tcp_connect.c2
-rw-r--r--net/rds/tcp_recv.c1
-rw-r--r--net/rds/tcp_send.c4
-rw-r--r--net/rds/threads.c2
-rw-r--r--net/rfkill/core.c53
-rw-r--r--net/rose/af_rose.c8
-rw-r--r--net/rxrpc/af_rxrpc.c12
-rw-r--r--net/rxrpc/ar-recvmsg.c6
-rw-r--r--net/sched/act_api.c65
-rw-r--r--net/sched/act_gact.c4
-rw-r--r--net/sched/act_ipt.c9
-rw-r--r--net/sched/act_mirred.c6
-rw-r--r--net/sched/act_pedit.c11
-rw-r--r--net/sched/act_simple.c4
-rw-r--r--net/sched/cls_api.c30
-rw-r--r--net/sched/cls_flow.c1
-rw-r--r--net/sched/cls_u32.c10
-rw-r--r--net/sched/ematch.c3
-rw-r--r--net/sched/sch_api.c121
-rw-r--r--net/sched/sch_generic.c21
-rw-r--r--net/sched/sch_hfsc.c7
-rw-r--r--net/sched/sch_ingress.c1
-rw-r--r--net/sched/sch_mq.c1
-rw-r--r--net/sched/sch_multiq.c1
-rw-r--r--net/sched/sch_prio.c1
-rw-r--r--net/sched/sch_red.c1
-rw-r--r--net/sched/sch_sfq.c10
-rw-r--r--net/sched/sch_tbf.c6
-rw-r--r--net/sctp/Kconfig12
-rw-r--r--net/sctp/Makefile3
-rw-r--r--net/sctp/associola.c15
-rw-r--r--net/sctp/chunk.c4
-rw-r--r--net/sctp/endpointola.c2
-rw-r--r--net/sctp/input.c22
-rw-r--r--net/sctp/ipv6.c27
-rw-r--r--net/sctp/output.c27
-rw-r--r--net/sctp/outqueue.c96
-rw-r--r--net/sctp/probe.c214
-rw-r--r--net/sctp/proc.c3
-rw-r--r--net/sctp/protocol.c9
-rw-r--r--net/sctp/sm_make_chunk.c86
-rw-r--r--net/sctp/sm_sideeffect.c47
-rw-r--r--net/sctp/socket.c41
-rw-r--r--net/sctp/transport.c67
-rw-r--r--net/sctp/ulpqueue.c2
-rw-r--r--net/socket.c123
-rw-r--r--net/sunrpc/auth.c21
-rw-r--r--net/sunrpc/auth_gss/Makefile2
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c89
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_crypto.c697
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_keys.c336
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c584
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_seal.c155
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_seqnum.c83
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_unseal.c113
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_wrap.c404
-rw-r--r--net/sunrpc/auth_gss/gss_mech_switch.c21
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_mech.c5
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_token.c2
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c17
-rw-r--r--net/sunrpc/bc_svc.c2
-rw-r--r--net/sunrpc/cache.c60
-rw-r--r--net/sunrpc/clnt.c20
-rw-r--r--net/sunrpc/sched.c26
-rw-r--r--net/sunrpc/stats.c29
-rw-r--r--net/sunrpc/svc_xprt.c6
-rw-r--r--net/sunrpc/svcsock.c40
-rw-r--r--net/sunrpc/xdr.c1
-rw-r--r--net/sunrpc/xprt.c61
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_recvfrom.c3
-rw-r--r--net/sunrpc/xprtrdma/transport.c31
-rw-r--r--net/sunrpc/xprtsock.c44
-rw-r--r--net/sysctl_net.c1
-rw-r--r--net/tipc/addr.c32
-rw-r--r--net/tipc/addr.h37
-rw-r--r--net/tipc/bcast.c149
-rw-r--r--net/tipc/bcast.h117
-rw-r--r--net/tipc/bearer.c16
-rw-r--r--net/tipc/bearer.h16
-rw-r--r--net/tipc/cluster.c2
-rw-r--r--net/tipc/config.c68
-rw-r--r--net/tipc/core.c26
-rw-r--r--net/tipc/core.h27
-rw-r--r--net/tipc/discover.c8
-rw-r--r--net/tipc/link.c102
-rw-r--r--net/tipc/link.h35
-rw-r--r--net/tipc/msg.c94
-rw-r--r--net/tipc/msg.h99
-rw-r--r--net/tipc/name_distr.c2
-rw-r--r--net/tipc/name_table.c2
-rw-r--r--net/tipc/net.c8
-rw-r--r--net/tipc/node.c14
-rw-r--r--net/tipc/port.c27
-rw-r--r--net/tipc/port.h2
-rw-r--r--net/tipc/socket.c26
-rw-r--r--net/tipc/subscr.c15
-rw-r--r--net/unix/af_unix.c25
-rw-r--r--net/unix/garbage.c13
-rw-r--r--net/wimax/op-reset.c2
-rw-r--r--net/wimax/op-rfkill.c2
-rw-r--r--net/wimax/op-state-get.c2
-rw-r--r--net/wimax/stack.c4
-rw-r--r--net/wireless/chan.c56
-rw-r--r--net/wireless/core.c3
-rw-r--r--net/wireless/core.h27
-rw-r--r--net/wireless/ibss.c5
-rw-r--r--net/wireless/mlme.c52
-rw-r--r--net/wireless/nl80211.c328
-rw-r--r--net/wireless/nl80211.h6
-rw-r--r--net/wireless/reg.c6
-rw-r--r--net/wireless/sme.c36
-rw-r--r--net/wireless/util.c24
-rw-r--r--net/wireless/wext-compat.c15
-rw-r--r--net/wireless/wext-core.c134
-rw-r--r--net/wireless/wext-sme.c2
-rw-r--r--net/x25/af_x25.c42
-rw-r--r--net/x25/x25_dev.c36
-rw-r--r--net/x25/x25_in.c2
-rw-r--r--net/x25/x25_out.c5
-rw-r--r--net/xfrm/xfrm_hash.h9
-rw-r--r--net/xfrm/xfrm_policy.c848
-rw-r--r--net/xfrm/xfrm_state.c5
-rw-r--r--net/xfrm/xfrm_user.c22
504 files changed, 27406 insertions(+), 8614 deletions(-)
diff --git a/net/802/garp.c b/net/802/garp.c
index 9ed7c0e7dc17..941f2a324d3a 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -576,7 +576,7 @@ int garp_init_applicant(struct net_device *dev, struct garp_application *appl)
if (!app)
goto err2;
- err = dev_mc_add(dev, appl->proto.group_address, ETH_ALEN, 0);
+ err = dev_mc_add(dev, appl->proto.group_address);
if (err < 0)
goto err3;
@@ -616,7 +616,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl
garp_pdu_queue(app);
garp_queue_xmit(app);
- dev_mc_delete(dev, appl->proto.group_address, ETH_ALEN, 0);
+ dev_mc_del(dev, appl->proto.group_address);
kfree(app);
garp_release_port(dev);
}
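
The dev_mc_add()/dev_mc_delete() change above is the reworked multicast address list API from this series: the address-length and "global set" arguments are gone, and the length is taken from dev->addr_len instead. A minimal sketch of the new calls (the group address and function names here are illustrative, not from this tree):

    static const unsigned char example_group[ETH_ALEN] =
            { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x21 };

    static int example_join(struct net_device *dev)
    {
            /* was: dev_mc_add(dev, example_group, ETH_ALEN, 0) */
            return dev_mc_add(dev, example_group);
    }

    static void example_leave(struct net_device *dev)
    {
            /* was: dev_mc_delete(dev, example_group, ETH_ALEN, 0) */
            dev_mc_del(dev, example_group);
    }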
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 97da977c2a23..3c1c8c14e929 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -357,13 +357,13 @@ static void vlan_sync_address(struct net_device *dev,
* the new address */
if (compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) &&
!compare_ether_addr(vlandev->dev_addr, dev->dev_addr))
- dev_unicast_delete(dev, vlandev->dev_addr);
+ dev_uc_del(dev, vlandev->dev_addr);
/* vlan address was equal to the old address and is different from
* the new address */
if (!compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) &&
compare_ether_addr(vlandev->dev_addr, dev->dev_addr))
- dev_unicast_add(dev, vlandev->dev_addr);
+ dev_uc_add(dev, vlandev->dev_addr);
memcpy(vlan->real_dev_addr, dev->dev_addr, ETH_ALEN);
}
@@ -533,6 +533,10 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
}
unregister_netdevice_many(&list);
break;
+
+ case NETDEV_PRE_TYPE_CHANGE:
+ /* Forbid underlaying device to change its type. */
+ return NOTIFY_BAD;
}
out:
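
NETDEV_PRE_TYPE_CHANGE is one of the new notifier events in this series; returning NOTIFY_BAD from the handler vetoes the device type change before it takes effect. The shape of the pattern, assuming a hypothetical subsystem that keeps per-type state on the lower device:

    static int example_device_event(struct notifier_block *unused,
                                    unsigned long event, void *ptr)
    {
            /* notifiers of this era receive the net_device directly */
            struct net_device *dev = ptr;

            switch (event) {
            case NETDEV_PRE_TYPE_CHANGE:
                    if (example_has_state(dev))     /* hypothetical check */
                            return NOTIFY_BAD;      /* forbid the change */
                    break;
            }
            return NOTIFY_DONE;
    }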
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index c584a0af77d3..bd537fc10254 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -61,7 +61,7 @@ int vlan_hwaccel_do_receive(struct sk_buff *skb)
dev->dev_addr))
skb->pkt_type = PACKET_HOST;
break;
- };
+ }
return 0;
}
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 29b6348c8d4d..55be90826f5f 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -327,7 +327,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
len = skb->len;
ret = dev_queue_xmit(skb);
- if (likely(ret == NET_XMIT_SUCCESS)) {
+ if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
txq->tx_packets++;
txq->tx_bytes += len;
} else
@@ -353,7 +353,7 @@ static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
len = skb->len;
ret = dev_queue_xmit(skb);
- if (likely(ret == NET_XMIT_SUCCESS)) {
+ if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
txq->tx_packets++;
txq->tx_bytes += len;
} else
@@ -470,7 +470,7 @@ static int vlan_dev_open(struct net_device *dev)
return -ENETDOWN;
if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) {
- err = dev_unicast_add(real_dev, dev->dev_addr);
+ err = dev_uc_add(real_dev, dev->dev_addr);
if (err < 0)
goto out;
}
@@ -499,7 +499,7 @@ clear_allmulti:
dev_set_allmulti(real_dev, -1);
del_unicast:
if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
- dev_unicast_delete(real_dev, dev->dev_addr);
+ dev_uc_del(real_dev, dev->dev_addr);
out:
netif_carrier_off(dev);
return err;
@@ -514,14 +514,14 @@ static int vlan_dev_stop(struct net_device *dev)
vlan_gvrp_request_leave(dev);
dev_mc_unsync(real_dev, dev);
- dev_unicast_unsync(real_dev, dev);
+ dev_uc_unsync(real_dev, dev);
if (dev->flags & IFF_ALLMULTI)
dev_set_allmulti(real_dev, -1);
if (dev->flags & IFF_PROMISC)
dev_set_promiscuity(real_dev, -1);
if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
- dev_unicast_delete(real_dev, dev->dev_addr);
+ dev_uc_del(real_dev, dev->dev_addr);
netif_carrier_off(dev);
return 0;
@@ -540,13 +540,13 @@ static int vlan_dev_set_mac_address(struct net_device *dev, void *p)
goto out;
if (compare_ether_addr(addr->sa_data, real_dev->dev_addr)) {
- err = dev_unicast_add(real_dev, addr->sa_data);
+ err = dev_uc_add(real_dev, addr->sa_data);
if (err < 0)
return err;
}
if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
- dev_unicast_delete(real_dev, dev->dev_addr);
+ dev_uc_del(real_dev, dev->dev_addr);
out:
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
@@ -663,7 +663,7 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
{
dev_mc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
- dev_unicast_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
+ dev_uc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
}
/*
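
Earlier in this vlan_dev.c diff, the transmit hunks widen what counts as success: NET_XMIT_CN only means the qdisc signalled congestion, the frame itself may still have been queued and sent, so it must be accounted as transmitted. The test, written out as a hedged helper (name illustrative):

    static inline bool example_xmit_counts_as_sent(int ret)
    {
            return ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN;
    }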
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 041101ab4aa5..0ea20c30466c 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -308,7 +308,6 @@ handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
req, err, status);
rdma->state = P9_RDMA_FLUSHING;
client->status = Disconnected;
- return;
}
static void
diff --git a/net/Kconfig b/net/Kconfig
index 041c35edb763..0d68b40fc0e6 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -186,6 +186,7 @@ source "net/sctp/Kconfig"
source "net/rds/Kconfig"
source "net/tipc/Kconfig"
source "net/atm/Kconfig"
+source "net/l2tp/Kconfig"
source "net/802/Kconfig"
source "net/bridge/Kconfig"
source "net/dsa/Kconfig"
@@ -203,6 +204,11 @@ source "net/ieee802154/Kconfig"
source "net/sched/Kconfig"
source "net/dcb/Kconfig"
+config RPS
+ boolean
+ depends on SMP && SYSFS
+ default y
+
menu "Network testing"
config NET_PKTGEN
@@ -275,5 +281,7 @@ source "net/wimax/Kconfig"
source "net/rfkill/Kconfig"
source "net/9p/Kconfig"
+source "net/caif/Kconfig"
+
endif # if NET
diff --git a/net/Makefile b/net/Makefile
index 1542e7268a7b..cb7bdc1210cb 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_BT) += bluetooth/
obj-$(CONFIG_SUNRPC) += sunrpc/
obj-$(CONFIG_AF_RXRPC) += rxrpc/
obj-$(CONFIG_ATM) += atm/
+obj-$(CONFIG_L2TP) += l2tp/
obj-$(CONFIG_DECNET) += decnet/
obj-$(CONFIG_ECONET) += econet/
obj-$(CONFIG_PHONET) += phonet/
@@ -56,6 +57,7 @@ obj-$(CONFIG_NETLABEL) += netlabel/
obj-$(CONFIG_IUCV) += iucv/
obj-$(CONFIG_RFKILL) += rfkill/
obj-$(CONFIG_NET_9P) += 9p/
+obj-$(CONFIG_CAIF) += caif/
ifneq ($(CONFIG_DCB),)
obj-y += dcb/
endif
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 7b02967fbbe7..c410b93fda2e 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -782,7 +782,7 @@ static int atif_ioctl(int cmd, void __user *arg)
atrtr_create(&rtdef, dev);
}
}
- dev_mc_add(dev, aarp_mcast, 6, 1);
+ dev_mc_add_global(dev, aarp_mcast);
return 0;
case SIOCGIFADDR:
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index d6c7ceaf13e9..6719af6a59fa 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -446,7 +446,6 @@ error:
net_dev->stats.rx_errors++;
free_skb:
dev_kfree_skb(skb);
- return;
}
/*
diff --git a/net/atm/common.c b/net/atm/common.c
index 97ed94aa0cbc..b43feb1a3995 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -90,10 +90,13 @@ static void vcc_sock_destruct(struct sock *sk)
static void vcc_def_wakeup(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
- if (sk_has_sleeper(sk))
- wake_up(sk->sk_sleep);
- read_unlock(&sk->sk_callback_lock);
+ struct socket_wq *wq;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up(&wq->wait);
+ rcu_read_unlock();
}
static inline int vcc_writable(struct sock *sk)
@@ -106,16 +109,19 @@ static inline int vcc_writable(struct sock *sk)
static void vcc_write_space(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
+ struct socket_wq *wq;
+
+ rcu_read_lock();
if (vcc_writable(sk)) {
- if (sk_has_sleeper(sk))
- wake_up_interruptible(sk->sk_sleep);
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible(&wq->wait);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
}
- read_unlock(&sk->sk_callback_lock);
+ rcu_read_unlock();
}
static struct proto vcc_proto = {
@@ -549,7 +555,7 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
}
eff = (size+3) & ~3; /* align to word boundary */
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
error = 0;
while (!(skb = alloc_tx(vcc, eff))) {
if (m->msg_flags & MSG_DONTWAIT) {
@@ -568,9 +574,9 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
send_sig(SIGPIPE, current, 0);
break;
}
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
if (error)
goto out;
skb->dev = NULL; /* for paths shared with net_device interfaces */
@@ -595,7 +601,7 @@ unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
struct atm_vcc *vcc;
unsigned int mask;
- sock_poll_wait(file, sk->sk_sleep, wait);
+ sock_poll_wait(file, sk_sleep(sk), wait);
mask = 0;
vcc = ATM_SD(sock);
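
The vcc_def_wakeup()/vcc_write_space() rewrite above is the wakeup side of the new RCU-protected socket wait queue: sk->sk_sleep is gone, the wait queue lives in struct socket_wq reached through rcu_dereference(sk->sk_wq), and sleepers use the sk_sleep() accessor instead. Roughly, the accessor added by this series looks like this (sketch; check include/net/sock.h in the tree you build against):

    static inline wait_queue_head_t *sk_sleep(struct sock *sk)
    {
            return &sk->sk_wq->wait;
    }

On the wakeup path, callers hold rcu_read_lock() and test wq_has_sleeper(wq) before waking, exactly as vcc_write_space() now does.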
diff --git a/net/atm/lec.c b/net/atm/lec.c
index feeaf5718472..d98bde1a0ac8 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -161,8 +161,6 @@ static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev)
skb_queue_tail(&sk->sk_receive_queue, skb2);
sk->sk_data_ready(sk, skb2->len);
}
-
- return;
}
#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */
@@ -640,7 +638,6 @@ static void lec_set_multicast_list(struct net_device *dev)
* by default, all multicast frames arrive over the bus.
* eventually support selective multicast service
*/
- return;
}
static const struct net_device_ops lec_netdev_ops = {
@@ -1199,8 +1196,6 @@ static void __exit lane_module_cleanup(void)
dev_lec[i] = NULL;
}
}
-
- return;
}
module_init(lane_module_init);
@@ -1334,7 +1329,6 @@ static void lane2_associate_ind(struct net_device *dev, const u8 *mac_addr,
priv->lane2_ops->associate_indicator(dev, mac_addr,
tlvs, sizeoftlvs);
}
- return;
}
/*
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index 436f2e177657..622b471e14e0 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -455,7 +455,6 @@ static void lane2_assoc_ind(struct net_device *dev, const u8 *mac_addr,
if (end_of_tlvs - tlvs != 0)
pr_info("(%s) ignoring %Zd bytes of trailing TLV garbage\n",
dev->name, end_of_tlvs - tlvs);
- return;
}
/*
@@ -684,8 +683,6 @@ static void mpc_vcc_close(struct atm_vcc *vcc, struct net_device *dev)
if (in_entry == NULL && eg_entry == NULL)
dprintk("(%s) unused vcc closed\n", dev->name);
-
- return;
}
static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
@@ -783,8 +780,6 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data));
netif_rx(new_skb);
-
- return;
}
static struct atmdev_ops mpc_ops = { /* only send is required */
@@ -873,8 +868,6 @@ static void send_set_mps_ctrl_addr(const char *addr, struct mpoa_client *mpc)
mesg.type = SET_MPS_CTRL_ADDR;
memcpy(mesg.MPS_ctrl, addr, ATM_ESA_LEN);
msg_to_mpoad(&mesg, mpc);
-
- return;
}
static void mpoad_close(struct atm_vcc *vcc)
@@ -911,8 +904,6 @@ static void mpoad_close(struct atm_vcc *vcc)
pr_info("(%s) going down\n",
(mpc->dev) ? mpc->dev->name : "<unknown>");
module_put(THIS_MODULE);
-
- return;
}
/*
@@ -1122,7 +1113,6 @@ static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc)
pr_info("(%s) entry already in resolving state\n",
(mpc->dev) ? mpc->dev->name : "<unknown>");
mpc->in_ops->put(entry);
- return;
}
/*
@@ -1166,7 +1156,6 @@ static void check_qos_and_open_shortcut(struct k_message *msg,
} else
memset(&msg->qos, 0, sizeof(struct atm_qos));
msg_to_mpoad(msg, client);
- return;
}
static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc)
@@ -1240,8 +1229,6 @@ static void ingress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc)
mpc->in_ops->put(entry);
entry = mpc->in_ops->get_with_mask(dst_ip, mpc, mask);
} while (entry != NULL);
-
- return;
}
static void egress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc)
@@ -1260,8 +1247,6 @@ static void egress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc)
write_unlock_irq(&mpc->egress_lock);
mpc->eg_ops->put(entry);
-
- return;
}
static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry)
@@ -1295,8 +1280,6 @@ static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry)
skb_queue_tail(&sk->sk_receive_queue, skb);
sk->sk_data_ready(sk, skb->len);
dprintk("exiting\n");
-
- return;
}
/*
@@ -1325,8 +1308,6 @@ static void mps_death(struct k_message *msg, struct mpoa_client *mpc)
mpc->in_ops->destroy_cache(mpc);
mpc->eg_ops->destroy_cache(mpc);
-
- return;
}
static void MPOA_cache_impos_rcvd(struct k_message *msg,
@@ -1353,8 +1334,6 @@ static void MPOA_cache_impos_rcvd(struct k_message *msg,
write_unlock_irq(&mpc->egress_lock);
mpc->eg_ops->put(entry);
-
- return;
}
static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg,
@@ -1392,8 +1371,6 @@ static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg,
pr_info("(%s) targetless LE_ARP request failed\n",
mpc->dev->name);
}
-
- return;
}
static void set_mps_mac_addr_rcvd(struct k_message *msg,
@@ -1409,8 +1386,6 @@ static void set_mps_mac_addr_rcvd(struct k_message *msg,
return;
}
client->number_of_mps_macs = 1;
-
- return;
}
/*
@@ -1436,7 +1411,6 @@ static void clean_up(struct k_message *msg, struct mpoa_client *mpc, int action)
msg->type = action;
msg_to_mpoad(msg, mpc);
- return;
}
static void mpc_timer_refresh(void)
@@ -1445,8 +1419,6 @@ static void mpc_timer_refresh(void)
mpc_timer.data = mpc_timer.expires;
mpc_timer.function = mpc_cache_check;
add_timer(&mpc_timer);
-
- return;
}
static void mpc_cache_check(unsigned long checking_time)
@@ -1471,8 +1443,6 @@ static void mpc_cache_check(unsigned long checking_time)
mpc = mpc->next;
}
mpc_timer_refresh();
-
- return;
}
static int atm_mpoa_ioctl(struct socket *sock, unsigned int cmd,
@@ -1561,8 +1531,6 @@ static void __exit atm_mpoa_cleanup(void)
kfree(qos);
qos = nextqos;
}
-
- return;
}
module_init(atm_mpoa_init);
diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
index e773d8336918..d1b2d9a03144 100644
--- a/net/atm/mpoa_caches.c
+++ b/net/atm/mpoa_caches.c
@@ -182,8 +182,6 @@ static void in_cache_put(in_cache_entry *entry)
memset(entry, 0, sizeof(in_cache_entry));
kfree(entry);
}
-
- return;
}
/*
@@ -221,8 +219,6 @@ static void in_cache_remove_entry(in_cache_entry *entry,
}
vcc_release_async(vcc, -EPIPE);
}
-
- return;
}
/* Call this every MPC-p2 seconds... Not exactly correct solution,
@@ -248,8 +244,6 @@ static void clear_count_and_expired(struct mpoa_client *client)
entry = next_entry;
}
write_unlock_bh(&client->ingress_lock);
-
- return;
}
/* Call this every MPC-p4 seconds. */
@@ -334,8 +328,6 @@ static void in_destroy_cache(struct mpoa_client *mpc)
while (mpc->in_cache != NULL)
mpc->in_ops->remove_entry(mpc->in_cache, mpc);
write_unlock_irq(&mpc->ingress_lock);
-
- return;
}
static eg_cache_entry *eg_cache_get_by_cache_id(__be32 cache_id,
@@ -427,8 +419,6 @@ static void eg_cache_put(eg_cache_entry *entry)
memset(entry, 0, sizeof(eg_cache_entry));
kfree(entry);
}
-
- return;
}
/*
@@ -463,8 +453,6 @@ static void eg_cache_remove_entry(eg_cache_entry *entry,
}
vcc_release_async(vcc, -EPIPE);
}
-
- return;
}
static eg_cache_entry *eg_cache_add_entry(struct k_message *msg,
@@ -509,8 +497,6 @@ static void update_eg_cache_entry(eg_cache_entry *entry, uint16_t holding_time)
do_gettimeofday(&(entry->tv));
entry->entry_state = EGRESS_RESOLVED;
entry->ctrl_info.holding_time = holding_time;
-
- return;
}
static void clear_expired(struct mpoa_client *client)
@@ -537,8 +523,6 @@ static void clear_expired(struct mpoa_client *client)
entry = next_entry;
}
write_unlock_irq(&client->egress_lock);
-
- return;
}
static void eg_destroy_cache(struct mpoa_client *mpc)
@@ -547,8 +531,6 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
while (mpc->eg_cache != NULL)
mpc->eg_ops->remove_entry(mpc->eg_cache, mpc);
write_unlock_irq(&mpc->egress_lock);
-
- return;
}
@@ -584,6 +566,4 @@ void atm_mpoa_init_cache(struct mpoa_client *mpc)
{
mpc->in_ops = &ingress_ops;
mpc->eg_ops = &egress_ops;
-
- return;
}
diff --git a/net/atm/proc.c b/net/atm/proc.c
index 696e218436e5..6262aeae398e 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -407,7 +407,6 @@ EXPORT_SYMBOL(atm_proc_root);
int atm_proc_dev_register(struct atm_dev *dev)
{
- int digits, num;
int error;
/* No proc info */
@@ -415,16 +414,9 @@ int atm_proc_dev_register(struct atm_dev *dev)
return 0;
error = -ENOMEM;
- digits = 0;
- for (num = dev->number; num; num /= 10)
- digits++;
- if (!digits)
- digits++;
-
- dev->proc_name = kmalloc(strlen(dev->type) + digits + 2, GFP_KERNEL);
+ dev->proc_name = kasprintf(GFP_KERNEL, "%s:%d", dev->type, dev->number);
if (!dev->proc_name)
goto err_out;
- sprintf(dev->proc_name, "%s:%d", dev->type, dev->number);
dev->proc_entry = proc_create_data(dev->proc_name, 0, atm_proc_root,
&proc_atm_dev_ops, dev);
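
kasprintf() replaces the digit counting, kmalloc() and sprintf() sequence: it computes the size, allocates and formats in one call, returning NULL on allocation failure. The resulting idiom (names mirror the hunk):

    char *name = kasprintf(GFP_KERNEL, "%s:%d", dev->type, dev->number);
    if (!name)
            return -ENOMEM;
    /* caller owns the buffer and must kfree(name) when done */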
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index 6ba6e466ee54..509c8ac02b63 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -131,7 +131,7 @@ static int sigd_send(struct atm_vcc *vcc, struct sk_buff *skb)
}
sk->sk_ack_backlog++;
skb_queue_tail(&sk->sk_receive_queue, skb);
- pr_debug("waking sk->sk_sleep 0x%p\n", sk->sk_sleep);
+ pr_debug("waking sk_sleep(sk) 0x%p\n", sk_sleep(sk));
sk->sk_state_change(sk);
as_indicate_complete:
release_sock(sk);
diff --git a/net/atm/svc.c b/net/atm/svc.c
index 3ba9a45a51ac..754ee4791d96 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -49,14 +49,14 @@ static void svc_disconnect(struct atm_vcc *vcc)
pr_debug("%p\n", vcc);
if (test_bit(ATM_VF_REGIS, &vcc->flags)) {
- prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
sigd_enq(vcc, as_close, NULL, NULL, NULL);
while (!test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) {
schedule();
- prepare_to_wait(sk->sk_sleep, &wait,
+ prepare_to_wait(sk_sleep(sk), &wait,
TASK_UNINTERRUPTIBLE);
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
}
/* beware - socket is still in use by atmsigd until the last
as_indicate has been answered */
@@ -125,13 +125,13 @@ static int svc_bind(struct socket *sock, struct sockaddr *sockaddr,
}
vcc->local = *addr;
set_bit(ATM_VF_WAITING, &vcc->flags);
- prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
sigd_enq(vcc, as_bind, NULL, NULL, &vcc->local);
while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
schedule();
- prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
clear_bit(ATM_VF_REGIS, &vcc->flags); /* doesn't count */
if (!sigd) {
error = -EUNATCH;
@@ -201,10 +201,10 @@ static int svc_connect(struct socket *sock, struct sockaddr *sockaddr,
}
vcc->remote = *addr;
set_bit(ATM_VF_WAITING, &vcc->flags);
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sigd_enq(vcc, as_connect, NULL, NULL, &vcc->remote);
if (flags & O_NONBLOCK) {
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
sock->state = SS_CONNECTING;
error = -EINPROGRESS;
goto out;
@@ -213,7 +213,7 @@ static int svc_connect(struct socket *sock, struct sockaddr *sockaddr,
while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
schedule();
if (!signal_pending(current)) {
- prepare_to_wait(sk->sk_sleep, &wait,
+ prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
continue;
}
@@ -232,14 +232,14 @@ static int svc_connect(struct socket *sock, struct sockaddr *sockaddr,
*/
sigd_enq(vcc, as_close, NULL, NULL, NULL);
while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
- prepare_to_wait(sk->sk_sleep, &wait,
+ prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
schedule();
}
if (!sk->sk_err)
while (!test_bit(ATM_VF_RELEASED, &vcc->flags) &&
sigd) {
- prepare_to_wait(sk->sk_sleep, &wait,
+ prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
schedule();
}
@@ -250,7 +250,7 @@ static int svc_connect(struct socket *sock, struct sockaddr *sockaddr,
error = -EINTR;
break;
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
if (error)
goto out;
if (!sigd) {
@@ -302,13 +302,13 @@ static int svc_listen(struct socket *sock, int backlog)
goto out;
}
set_bit(ATM_VF_WAITING, &vcc->flags);
- prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
sigd_enq(vcc, as_listen, NULL, NULL, &vcc->local);
while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
schedule();
- prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
if (!sigd) {
error = -EUNATCH;
goto out;
@@ -343,7 +343,7 @@ static int svc_accept(struct socket *sock, struct socket *newsock, int flags)
while (1) {
DEFINE_WAIT(wait);
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
while (!(skb = skb_dequeue(&sk->sk_receive_queue)) &&
sigd) {
if (test_bit(ATM_VF_RELEASED, &old_vcc->flags))
@@ -363,10 +363,10 @@ static int svc_accept(struct socket *sock, struct socket *newsock, int flags)
error = -ERESTARTSYS;
break;
}
- prepare_to_wait(sk->sk_sleep, &wait,
+ prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
if (error)
goto out;
if (!skb) {
@@ -392,17 +392,17 @@ static int svc_accept(struct socket *sock, struct socket *newsock, int flags)
}
/* wait should be short, so we ignore the non-blocking flag */
set_bit(ATM_VF_WAITING, &new_vcc->flags);
- prepare_to_wait(sk_atm(new_vcc)->sk_sleep, &wait,
+ prepare_to_wait(sk_sleep(sk_atm(new_vcc)), &wait,
TASK_UNINTERRUPTIBLE);
sigd_enq(new_vcc, as_accept, old_vcc, NULL, NULL);
while (test_bit(ATM_VF_WAITING, &new_vcc->flags) && sigd) {
release_sock(sk);
schedule();
lock_sock(sk);
- prepare_to_wait(sk_atm(new_vcc)->sk_sleep, &wait,
+ prepare_to_wait(sk_sleep(sk_atm(new_vcc)), &wait,
TASK_UNINTERRUPTIBLE);
}
- finish_wait(sk_atm(new_vcc)->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk_atm(new_vcc)), &wait);
if (!sigd) {
error = -EUNATCH;
goto out;
@@ -438,14 +438,14 @@ int svc_change_qos(struct atm_vcc *vcc, struct atm_qos *qos)
DEFINE_WAIT(wait);
set_bit(ATM_VF_WAITING, &vcc->flags);
- prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
sigd_enq2(vcc, as_modify, NULL, NULL, &vcc->local, qos, 0);
while (test_bit(ATM_VF_WAITING, &vcc->flags) &&
!test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) {
schedule();
- prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
if (!sigd)
return -EUNATCH;
return -sk->sk_err;
@@ -534,20 +534,20 @@ static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr,
lock_sock(sk);
set_bit(ATM_VF_WAITING, &vcc->flags);
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sigd_enq(vcc, as_addparty, NULL, NULL,
(struct sockaddr_atmsvc *) sockaddr);
if (flags & O_NONBLOCK) {
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
error = -EINPROGRESS;
goto out;
}
pr_debug("added wait queue\n");
while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
schedule();
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
error = xchg(&sk->sk_err_soft, 0);
out:
release_sock(sk);
@@ -563,13 +563,13 @@ static int svc_dropparty(struct socket *sock, int ep_ref)
lock_sock(sk);
set_bit(ATM_VF_WAITING, &vcc->flags);
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sigd_enq2(vcc, as_dropparty, NULL, NULL, NULL, NULL, ep_ref);
while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
schedule();
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
if (!sigd) {
error = -EUNATCH;
goto out;
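
Every svc.c hunk is the same mechanical substitution inside the standard sleep loop; only the wait-queue lookup changes. Reduced to its skeleton, with condition() standing in for the ATM_VF_* flag tests:

    DEFINE_WAIT(wait);

    prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
    while (!condition(sk)) {        /* e.g. test_bit(ATM_VF_WAITING, &vcc->flags) */
            schedule();
            prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
    }
    finish_wait(sk_sleep(sk), &wait);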
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 65c5801261f9..cfdfd7e2a172 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1281,7 +1281,7 @@ static int __must_check ax25_connect(struct socket *sock,
DEFINE_WAIT(wait);
for (;;) {
- prepare_to_wait(sk->sk_sleep, &wait,
+ prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
if (sk->sk_state != TCP_SYN_SENT)
break;
@@ -1294,7 +1294,7 @@ static int __must_check ax25_connect(struct socket *sock,
err = -ERESTARTSYS;
break;
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
if (err)
goto out_release;
@@ -1346,7 +1346,7 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags)
* hooked into the SABM we saved
*/
for (;;) {
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
skb = skb_dequeue(&sk->sk_receive_queue);
if (skb)
break;
@@ -1364,7 +1364,7 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags)
err = -ERESTARTSYS;
break;
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
if (err)
goto out;
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index ed371684c133..ee3b3049d385 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -43,6 +43,19 @@ config BT_L2CAP
Say Y here to compile L2CAP support into the kernel or say M to
compile it as module (l2cap).
+config BT_L2CAP_EXT_FEATURES
+ bool "L2CAP Extended Features support (EXPERIMENTAL)"
+ depends on BT_L2CAP && EXPERIMENTAL
+ help
+ This option enables the L2CAP Extended Features support. These
+ new features include the Enhanced Retransmission and Streaming
+ Modes, the Frame Check Sequence (FCS), and Segmentation and
+ Reassembly (SAR) for L2CAP packets. They are required for the
+ new Alternate MAC/PHY and the Bluetooth Medical Profile.
+
+ You should say N unless you know what you are doing. Note that
+ this is still in an experimental state.
+
config BT_SCO
tristate "SCO links support"
depends on BT
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 404a8500fd03..421c45bd1b95 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -288,7 +288,7 @@ unsigned int bt_sock_poll(struct file * file, struct socket *sock, poll_table *w
BT_DBG("sock %p, sk %p", sock, sk);
- poll_wait(file, sk->sk_sleep, wait);
+ poll_wait(file, sk_sleep(sk), wait);
if (sk->sk_state == BT_LISTEN)
return bt_accept_poll(sk);
@@ -378,7 +378,7 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
BT_DBG("sk %p", sk);
- add_wait_queue(sk->sk_sleep, &wait);
+ add_wait_queue(sk_sleep(sk), &wait);
while (sk->sk_state != state) {
set_current_state(TASK_INTERRUPTIBLE);
@@ -401,7 +401,7 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
break;
}
set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sk_sleep, &wait);
+ remove_wait_queue(sk_sleep(sk), &wait);
return err;
}
EXPORT_SYMBOL(bt_sock_wait_state);
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 8062dad6d10d..f10b41fb05a0 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -474,7 +474,7 @@ static int bnep_session(void *arg)
set_user_nice(current, -15);
init_waitqueue_entry(&wait, current);
- add_wait_queue(sk->sk_sleep, &wait);
+ add_wait_queue(sk_sleep(sk), &wait);
while (!atomic_read(&s->killed)) {
set_current_state(TASK_INTERRUPTIBLE);
@@ -496,7 +496,7 @@ static int bnep_session(void *arg)
schedule();
}
set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sk_sleep, &wait);
+ remove_wait_queue(sk_sleep(sk), &wait);
/* Cleanup session */
down_write(&bnep_session_sem);
@@ -507,7 +507,7 @@ static int bnep_session(void *arg)
/* Wakeup user-space polling for socket errors */
s->sock->sk->sk_err = EUNATCH;
- wake_up_interruptible(s->sock->sk->sk_sleep);
+ wake_up_interruptible(sk_sleep(s->sock->sk));
/* Release the socket */
fput(s->sock->file);
@@ -638,7 +638,7 @@ int bnep_del_connection(struct bnep_conndel_req *req)
/* Kill session thread */
atomic_inc(&s->killed);
- wake_up_interruptible(s->sock->sk->sk_sleep);
+ wake_up_interruptible(sk_sleep(s->sock->sk));
} else
err = -ENOENT;
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index 5643a2391e76..0faad5ce6dc4 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -88,7 +88,7 @@ static void bnep_net_set_mc_list(struct net_device *dev)
memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN);
r->len = htons(ETH_ALEN * 2);
} else {
- struct dev_mc_list *dmi = dev->mc_list;
+ struct netdev_hw_addr *ha;
int i, len = skb->len;
if (dev->flags & IFF_BROADCAST) {
@@ -98,18 +98,18 @@ static void bnep_net_set_mc_list(struct net_device *dev)
/* FIXME: We should group addresses here. */
- for (i = 0;
- i < netdev_mc_count(dev) && i < BNEP_MAX_MULTICAST_FILTERS;
- i++) {
- memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN);
- memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN);
- dmi = dmi->next;
+ i = 0;
+ netdev_for_each_mc_addr(ha, dev) {
+ if (i == BNEP_MAX_MULTICAST_FILTERS)
+ break;
+ memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN);
+ memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN);
}
r->len = htons(skb->len - len);
}
skb_queue_tail(&sk->sk_write_queue, skb);
- wake_up_interruptible(sk->sk_sleep);
+ wake_up_interruptible(sk_sleep(sk));
#endif
}
@@ -193,11 +193,11 @@ static netdev_tx_t bnep_net_xmit(struct sk_buff *skb,
/*
* We cannot send L2CAP packets from here as we are potentially in a bh.
* So we have to queue them and wake up session thread which is sleeping
- * on the sk->sk_sleep.
+ * on the sk_sleep(sk).
*/
dev->trans_start = jiffies;
skb_queue_tail(&sk->sk_write_queue, skb);
- wake_up_interruptible(sk->sk_sleep);
+ wake_up_interruptible(sk_sleep(sk));
if (skb_queue_len(&sk->sk_write_queue) >= BNEP_TX_QUEUE_LEN) {
BT_DBG("tx queue is full");
diff --git a/net/bluetooth/cmtp/cmtp.h b/net/bluetooth/cmtp/cmtp.h
index e4663aa14d26..785e79e953c5 100644
--- a/net/bluetooth/cmtp/cmtp.h
+++ b/net/bluetooth/cmtp/cmtp.h
@@ -125,7 +125,7 @@ static inline void cmtp_schedule(struct cmtp_session *session)
{
struct sock *sk = session->sock->sk;
- wake_up_interruptible(sk->sk_sleep);
+ wake_up_interruptible(sk_sleep(sk));
}
/* CMTP init defines */
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 0073ec8495da..d4c6af082d48 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -284,7 +284,7 @@ static int cmtp_session(void *arg)
set_user_nice(current, -15);
init_waitqueue_entry(&wait, current);
- add_wait_queue(sk->sk_sleep, &wait);
+ add_wait_queue(sk_sleep(sk), &wait);
while (!atomic_read(&session->terminate)) {
set_current_state(TASK_INTERRUPTIBLE);
@@ -301,7 +301,7 @@ static int cmtp_session(void *arg)
schedule();
}
set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sk_sleep, &wait);
+ remove_wait_queue(sk_sleep(sk), &wait);
down_write(&cmtp_session_sem);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 4ad23192c7a5..2f768de87011 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -37,6 +37,7 @@
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
+#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
@@ -928,6 +929,10 @@ int hci_register_dev(struct hci_dev *hdev)
write_unlock_bh(&hci_dev_list_lock);
+ hdev->workqueue = create_singlethread_workqueue(hdev->name);
+ if (!hdev->workqueue)
+ goto nomem;
+
hci_register_sysfs(hdev);
hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
@@ -942,6 +947,13 @@ int hci_register_dev(struct hci_dev *hdev)
hci_notify(hdev, HCI_DEV_REG);
return id;
+
+nomem:
+ write_lock_bh(&hci_dev_list_lock);
+ list_del(&hdev->list);
+ write_unlock_bh(&hci_dev_list_lock);
+
+ return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
@@ -970,6 +982,8 @@ int hci_unregister_dev(struct hci_dev *hdev)
hci_unregister_sysfs(hdev);
+ destroy_workqueue(hdev->workqueue);
+
__hci_dev_put(hdev);
return 0;
@@ -1260,7 +1274,7 @@ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
hdr->dlen = cpu_to_le16(len);
}
-int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
+void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
struct hci_dev *hdev = conn->hdev;
struct sk_buff *list;
@@ -1302,24 +1316,17 @@ int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
}
tasklet_schedule(&hdev->tx_task);
-
- return 0;
}
EXPORT_SYMBOL(hci_send_acl);
/* Send SCO data */
-int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
+void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
struct hci_dev *hdev = conn->hdev;
struct hci_sco_hdr hdr;
BT_DBG("%s len %d", hdev->name, skb->len);
- if (skb->len > hdev->sco_mtu) {
- kfree_skb(skb);
- return -EINVAL;
- }
-
hdr.handle = cpu_to_le16(conn->handle);
hdr.dlen = skb->len;
@@ -1332,8 +1339,6 @@ int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
skb_queue_tail(&conn->data_q, skb);
tasklet_schedule(&hdev->tx_task);
-
- return 0;
}
EXPORT_SYMBOL(hci_send_sco);
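
hci_send_acl() and hci_send_sco() become fire-and-forget: they only queue the skb and schedule the TX tasklet, so the int return carried no information worth checking, and the SCO MTU check moves out to the caller. A hedged caller-side sketch (simplified; per the diffstat, the real check lands in net/bluetooth/sco.c):

    if (skb->len > conn->hdev->sco_mtu) {
            /* validate before handing the skb off */
            kfree_skb(skb);
            return -EINVAL;
    }
    hci_send_sco(conn, skb);    /* void now: queues and schedules, nothing to check */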
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 0e8e1a59856c..463ffa4fe042 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -14,8 +14,6 @@ static struct class *bt_class;
struct dentry *bt_debugfs = NULL;
EXPORT_SYMBOL_GPL(bt_debugfs);
-static struct workqueue_struct *bt_workq;
-
static inline char *link_typetostr(int type)
{
switch (type) {
@@ -161,14 +159,14 @@ void hci_conn_add_sysfs(struct hci_conn *conn)
{
BT_DBG("conn %p", conn);
- queue_work(bt_workq, &conn->work_add);
+ queue_work(conn->hdev->workqueue, &conn->work_add);
}
void hci_conn_del_sysfs(struct hci_conn *conn)
{
BT_DBG("conn %p", conn);
- queue_work(bt_workq, &conn->work_del);
+ queue_work(conn->hdev->workqueue, &conn->work_del);
}
static inline char *host_bustostr(int bus)
@@ -283,11 +281,9 @@ static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *at
static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct hci_dev *hdev = dev_get_drvdata(dev);
- char *ptr;
- __u32 val;
+ unsigned long val;
- val = simple_strtoul(buf, &ptr, 10);
- if (ptr == buf)
+ if (strict_strtoul(buf, 0, &val) < 0)
return -EINVAL;
if (val != 0 && (val < 500 || val > 3600000))
@@ -307,11 +303,9 @@ static ssize_t show_sniff_max_interval(struct device *dev, struct device_attribu
static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct hci_dev *hdev = dev_get_drvdata(dev);
- char *ptr;
- __u16 val;
+ unsigned long val;
- val = simple_strtoul(buf, &ptr, 10);
- if (ptr == buf)
+ if (strict_strtoul(buf, 0, &val) < 0)
return -EINVAL;
if (val < 0x0002 || val > 0xFFFE || val % 2)
@@ -334,11 +328,9 @@ static ssize_t show_sniff_min_interval(struct device *dev, struct device_attribu
static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct hci_dev *hdev = dev_get_drvdata(dev);
- char *ptr;
- __u16 val;
+ unsigned long val;
- val = simple_strtoul(buf, &ptr, 10);
- if (ptr == buf)
+ if (strict_strtoul(buf, 0, &val) < 0)
return -EINVAL;
if (val < 0x0002 || val > 0xFFFE || val % 2)
@@ -487,17 +479,11 @@ void hci_unregister_sysfs(struct hci_dev *hdev)
int __init bt_sysfs_init(void)
{
- bt_workq = create_singlethread_workqueue("bluetooth");
- if (!bt_workq)
- return -ENOMEM;
-
bt_debugfs = debugfs_create_dir("bluetooth", NULL);
bt_class = class_create(THIS_MODULE, "bluetooth");
- if (IS_ERR(bt_class)) {
- destroy_workqueue(bt_workq);
+ if (IS_ERR(bt_class))
return PTR_ERR(bt_class);
- }
return 0;
}
@@ -507,6 +493,4 @@ void bt_sysfs_cleanup(void)
class_destroy(bt_class);
debugfs_remove_recursive(bt_debugfs);
-
- destroy_workqueue(bt_workq);
}
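
The sysfs store handlers switch from simple_strtoul(), which silently stops at the first non-digit, to strict_strtoul(), which rejects trailing garbage and so gives a sysfs attribute the -EINVAL behaviour it should have. The parse-and-validate idiom that results:

    unsigned long val;

    if (strict_strtoul(buf, 0, &val) < 0)   /* base 0: decimal, octal or hex */
            return -EINVAL;

    if (val != 0 && (val < 500 || val > 3600000))
            return -EINVAL;                 /* range check from store_idle_timeout() */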
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 280529ad9274..bfe641b7dfaf 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -561,8 +561,8 @@ static int hidp_session(void *arg)
init_waitqueue_entry(&ctrl_wait, current);
init_waitqueue_entry(&intr_wait, current);
- add_wait_queue(ctrl_sk->sk_sleep, &ctrl_wait);
- add_wait_queue(intr_sk->sk_sleep, &intr_wait);
+ add_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait);
+ add_wait_queue(sk_sleep(intr_sk), &intr_wait);
while (!atomic_read(&session->terminate)) {
set_current_state(TASK_INTERRUPTIBLE);
@@ -584,8 +584,8 @@ static int hidp_session(void *arg)
schedule();
}
set_current_state(TASK_RUNNING);
- remove_wait_queue(intr_sk->sk_sleep, &intr_wait);
- remove_wait_queue(ctrl_sk->sk_sleep, &ctrl_wait);
+ remove_wait_queue(sk_sleep(intr_sk), &intr_wait);
+ remove_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait);
down_write(&hidp_session_sem);
@@ -609,7 +609,7 @@ static int hidp_session(void *arg)
fput(session->intr_sock->file);
- wait_event_timeout(*(ctrl_sk->sk_sleep),
+ wait_event_timeout(*(sk_sleep(ctrl_sk)),
(ctrl_sk->sk_state == BT_CLOSED), msecs_to_jiffies(500));
fput(session->ctrl_sock->file);
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index a4e215d50c10..8d934a19da0a 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -164,8 +164,8 @@ static inline void hidp_schedule(struct hidp_session *session)
struct sock *ctrl_sk = session->ctrl_sock->sk;
struct sock *intr_sk = session->intr_sock->sk;
- wake_up_interruptible(ctrl_sk->sk_sleep);
- wake_up_interruptible(intr_sk->sk_sleep);
+ wake_up_interruptible(sk_sleep(ctrl_sk));
+ wake_up_interruptible(sk_sleep(intr_sk));
}
/* HIDP init defines */
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 9753b690a8b3..1b682a5aa061 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -55,18 +55,27 @@
#define VERSION "2.14"
+#ifdef CONFIG_BT_L2CAP_EXT_FEATURES
+static int enable_ertm = 1;
+#else
static int enable_ertm = 0;
+#endif
static int max_transmit = L2CAP_DEFAULT_MAX_TX;
+static int tx_window = L2CAP_DEFAULT_TX_WINDOW;
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
static u8 l2cap_fixed_chan[8] = { 0x02, };
static const struct proto_ops l2cap_sock_ops;
+static struct workqueue_struct *_busy_wq;
+
static struct bt_sock_list l2cap_sk_list = {
.lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
};
+static void l2cap_busy_work(struct work_struct *work);
+
static void __l2cap_sock_close(struct sock *sk, int reason);
static void l2cap_sock_close(struct sock *sk);
static void l2cap_sock_kill(struct sock *sk);
@@ -219,7 +228,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct so
l2cap_pi(sk)->conn = conn;
- if (sk->sk_type == SOCK_SEQPACKET) {
+ if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
/* Alloc CID for connection-oriented socket */
l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
} else if (sk->sk_type == SOCK_DGRAM) {
@@ -325,19 +334,19 @@ static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
return id;
}
-static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
+static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
BT_DBG("code 0x%2.2x", code);
if (!skb)
- return -ENOMEM;
+ return;
- return hci_send_acl(conn->hcon, skb, 0);
+ hci_send_acl(conn->hcon, skb, 0);
}
-static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
+static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
{
struct sk_buff *skb;
struct l2cap_hdr *lh;
@@ -352,9 +361,19 @@ static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
count = min_t(unsigned int, conn->mtu, hlen);
control |= L2CAP_CTRL_FRAME_TYPE;
+ if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
+ control |= L2CAP_CTRL_FINAL;
+ pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
+ }
+
+ if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
+ control |= L2CAP_CTRL_POLL;
+ pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
+ }
+
skb = bt_skb_alloc(count, GFP_ATOMIC);
if (!skb)
- return -ENOMEM;
+ return;
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
@@ -366,19 +385,20 @@ static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
put_unaligned_le16(fcs, skb_put(skb, 2));
}
- return hci_send_acl(pi->conn->hcon, skb, 0);
+ hci_send_acl(pi->conn->hcon, skb, 0);
}
-static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
+static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
{
- if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
+ if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
control |= L2CAP_SUPER_RCV_NOT_READY;
- else
+ pi->conn_state |= L2CAP_CONN_RNR_SENT;
+ } else
control |= L2CAP_SUPER_RCV_READY;
control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
- return l2cap_send_sframe(pi, control);
+ l2cap_send_sframe(pi, control);
}
static void l2cap_do_start(struct sock *sk)
@@ -437,7 +457,8 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
bh_lock_sock(sk);
- if (sk->sk_type != SOCK_SEQPACKET) {
+ if (sk->sk_type != SOCK_SEQPACKET &&
+ sk->sk_type != SOCK_STREAM) {
bh_unlock_sock(sk);
continue;
}
@@ -497,7 +518,8 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
bh_lock_sock(sk);
- if (sk->sk_type != SOCK_SEQPACKET) {
+ if (sk->sk_type != SOCK_SEQPACKET &&
+ sk->sk_type != SOCK_STREAM) {
l2cap_sock_clear_timer(sk);
sk->sk_state = BT_CONNECTED;
sk->sk_state_change(sk);
@@ -706,7 +728,8 @@ static void __l2cap_sock_close(struct sock *sk, int reason)
case BT_CONNECTED:
case BT_CONFIG:
- if (sk->sk_type == SOCK_SEQPACKET) {
+ if (sk->sk_type == SOCK_SEQPACKET ||
+ sk->sk_type == SOCK_STREAM) {
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
sk->sk_state = BT_DISCONN;
@@ -717,7 +740,8 @@ static void __l2cap_sock_close(struct sock *sk, int reason)
break;
case BT_CONNECT2:
- if (sk->sk_type == SOCK_SEQPACKET) {
+ if (sk->sk_type == SOCK_SEQPACKET ||
+ sk->sk_type == SOCK_STREAM) {
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct l2cap_conn_rsp rsp;
__u16 result;
@@ -772,14 +796,21 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
pi->omtu = l2cap_pi(parent)->omtu;
pi->mode = l2cap_pi(parent)->mode;
pi->fcs = l2cap_pi(parent)->fcs;
+ pi->max_tx = l2cap_pi(parent)->max_tx;
+ pi->tx_win = l2cap_pi(parent)->tx_win;
pi->sec_level = l2cap_pi(parent)->sec_level;
pi->role_switch = l2cap_pi(parent)->role_switch;
pi->force_reliable = l2cap_pi(parent)->force_reliable;
} else {
pi->imtu = L2CAP_DEFAULT_MTU;
pi->omtu = 0;
- pi->mode = L2CAP_MODE_BASIC;
+ if (enable_ertm && sk->sk_type == SOCK_STREAM)
+ pi->mode = L2CAP_MODE_ERTM;
+ else
+ pi->mode = L2CAP_MODE_BASIC;
+ pi->max_tx = max_transmit;
pi->fcs = L2CAP_FCS_CRC16;
+ pi->tx_win = tx_window;
pi->sec_level = BT_SECURITY_LOW;
pi->role_switch = 0;
pi->force_reliable = 0;
@@ -790,6 +821,7 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
skb_queue_head_init(TX_QUEUE(sk));
skb_queue_head_init(SREJ_QUEUE(sk));
+ skb_queue_head_init(BUSY_QUEUE(sk));
INIT_LIST_HEAD(SREJ_LIST(sk));
}
@@ -833,7 +865,7 @@ static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
sock->state = SS_UNCONNECTED;
- if (sock->type != SOCK_SEQPACKET &&
+ if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
@@ -981,7 +1013,8 @@ static int l2cap_do_connect(struct sock *sk)
l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
if (hcon->state == BT_CONNECTED) {
- if (sk->sk_type != SOCK_SEQPACKET) {
+ if (sk->sk_type != SOCK_SEQPACKET &&
+ sk->sk_type != SOCK_STREAM) {
l2cap_sock_clear_timer(sk);
sk->sk_state = BT_CONNECTED;
} else
@@ -1015,7 +1048,8 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
lock_sock(sk);
- if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
+ if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
+ && !la.l2_psm) {
err = -EINVAL;
goto done;
}
@@ -1079,7 +1113,8 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
lock_sock(sk);
- if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
+ if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
+ || sk->sk_state != BT_BOUND) {
err = -EBADFD;
goto done;
}
@@ -1147,7 +1182,7 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int fl
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
- add_wait_queue_exclusive(sk->sk_sleep, &wait);
+ add_wait_queue_exclusive(sk_sleep(sk), &wait);
while (!(nsk = bt_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
if (!timeo) {
@@ -1170,7 +1205,7 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int fl
}
}
set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sk_sleep, &wait);
+ remove_wait_queue(sk_sleep(sk), &wait);
if (err)
goto done;
@@ -1207,10 +1242,40 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *l
return 0;
}
+static int __l2cap_wait_ack(struct sock *sk)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ int err = 0;
+ int timeo = HZ/5;
+
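+ /* Block until every outstanding I-frame has been acked by the
+ * remote side, or until a signal or socket error ends the wait. */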
+ add_wait_queue(sk_sleep(sk), &wait);
+ while (l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (!timeo)
+ timeo = HZ/5;
+
+ if (signal_pending(current)) {
+ err = sock_intr_errno(timeo);
+ break;
+ }
+
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock(sk);
+
+ err = sock_error(sk);
+ if (err)
+ break;
+ }
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(sk_sleep(sk), &wait);
+ return err;
+}
+
static void l2cap_monitor_timeout(unsigned long arg)
{
struct sock *sk = (void *) arg;
- u16 control;
bh_lock_sock(sk);
if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
@@ -1222,15 +1287,13 @@ static void l2cap_monitor_timeout(unsigned long arg)
l2cap_pi(sk)->retry_count++;
__mod_monitor_timer();
- control = L2CAP_CTRL_POLL;
- l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
+ l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
bh_unlock_sock(sk);
}
static void l2cap_retrans_timeout(unsigned long arg)
{
struct sock *sk = (void *) arg;
- u16 control;
bh_lock_sock(sk);
l2cap_pi(sk)->retry_count = 1;
@@ -1238,8 +1301,7 @@ static void l2cap_retrans_timeout(unsigned long arg)
l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
- control = L2CAP_CTRL_POLL;
- l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
+ l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
bh_unlock_sock(sk);
}
@@ -1247,7 +1309,8 @@ static void l2cap_drop_acked_frames(struct sock *sk)
{
struct sk_buff *skb;
- while ((skb = skb_peek(TX_QUEUE(sk)))) {
+ while ((skb = skb_peek(TX_QUEUE(sk))) &&
+ l2cap_pi(sk)->unacked_frames) {
if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
break;
@@ -1259,22 +1322,15 @@ static void l2cap_drop_acked_frames(struct sock *sk)
if (!l2cap_pi(sk)->unacked_frames)
del_timer(&l2cap_pi(sk)->retrans_timer);
-
- return;
}
-static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
+static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
{
struct l2cap_pinfo *pi = l2cap_pi(sk);
- int err;
BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
- err = hci_send_acl(pi->conn->hcon, skb, 0);
- if (err < 0)
- kfree_skb(skb);
-
- return err;
+ hci_send_acl(pi->conn->hcon, skb, 0);
}
static int l2cap_streaming_send(struct sock *sk)
@@ -1282,7 +1338,6 @@ static int l2cap_streaming_send(struct sock *sk)
struct sk_buff *skb, *tx_skb;
struct l2cap_pinfo *pi = l2cap_pi(sk);
u16 control, fcs;
- int err;
while ((skb = sk->sk_send_head)) {
tx_skb = skb_clone(skb, GFP_ATOMIC);
@@ -1291,16 +1346,12 @@ static int l2cap_streaming_send(struct sock *sk)
control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
- if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
+ if (pi->fcs == L2CAP_FCS_CRC16) {
fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
}
- err = l2cap_do_send(sk, tx_skb);
- if (err < 0) {
- l2cap_send_disconn_req(pi->conn, sk);
- return err;
- }
+ l2cap_do_send(sk, tx_skb);
pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
@@ -1315,48 +1366,44 @@ static int l2cap_streaming_send(struct sock *sk)
return 0;
}
-static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
+static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
{
struct l2cap_pinfo *pi = l2cap_pi(sk);
struct sk_buff *skb, *tx_skb;
u16 control, fcs;
- int err;
skb = skb_peek(TX_QUEUE(sk));
- do {
- if (bt_cb(skb)->tx_seq != tx_seq) {
- if (skb_queue_is_last(TX_QUEUE(sk), skb))
- break;
- skb = skb_queue_next(TX_QUEUE(sk), skb);
- continue;
- }
+ if (!skb)
+ return;
- if (pi->remote_max_tx &&
- bt_cb(skb)->retries == pi->remote_max_tx) {
- l2cap_send_disconn_req(pi->conn, sk);
+ do {
+ if (bt_cb(skb)->tx_seq == tx_seq)
break;
- }
- tx_skb = skb_clone(skb, GFP_ATOMIC);
- bt_cb(skb)->retries++;
- control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
- control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
- | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
- put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
+ if (skb_queue_is_last(TX_QUEUE(sk), skb))
+ return;
- if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
- fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
- put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
- }
+ } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
- err = l2cap_do_send(sk, tx_skb);
- if (err < 0) {
- l2cap_send_disconn_req(pi->conn, sk);
- return err;
- }
- break;
- } while(1);
- return 0;
+ if (pi->remote_max_tx &&
+ bt_cb(skb)->retries == pi->remote_max_tx) {
+ l2cap_send_disconn_req(pi->conn, sk);
+ return;
+ }
+
+ tx_skb = skb_clone(skb, GFP_ATOMIC);
+ bt_cb(skb)->retries++;
+ control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
+ control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
+ | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
+ put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
+
+ if (pi->fcs == L2CAP_FCS_CRC16) {
+ fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
+ put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
+ }
+
+ l2cap_do_send(sk, tx_skb);
}
static int l2cap_ertm_send(struct sock *sk)
@@ -1364,13 +1411,13 @@ static int l2cap_ertm_send(struct sock *sk)
struct sk_buff *skb, *tx_skb;
struct l2cap_pinfo *pi = l2cap_pi(sk);
u16 control, fcs;
- int err;
+ int nsent = 0;
if (pi->conn_state & L2CAP_CONN_WAIT_F)
return 0;
while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
- !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
+ !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
if (pi->remote_max_tx &&
bt_cb(skb)->retries == pi->remote_max_tx) {
@@ -1383,35 +1430,97 @@ static int l2cap_ertm_send(struct sock *sk)
bt_cb(skb)->retries++;
control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
+ if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
+ control |= L2CAP_CTRL_FINAL;
+ pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
+ }
control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
| (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
- if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
+ if (pi->fcs == L2CAP_FCS_CRC16) {
fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
}
- err = l2cap_do_send(sk, tx_skb);
- if (err < 0) {
- l2cap_send_disconn_req(pi->conn, sk);
- return err;
- }
+ l2cap_do_send(sk, tx_skb);
+
__mod_retrans_timer();
bt_cb(skb)->tx_seq = pi->next_tx_seq;
pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
pi->unacked_frames++;
+ pi->frames_sent++;
if (skb_queue_is_last(TX_QUEUE(sk), skb))
sk->sk_send_head = NULL;
else
sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
+
+ nsent++;
}
- return 0;
+ return nsent;
+}
+
+static int l2cap_retransmit_frames(struct sock *sk)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ int ret;
+
+ spin_lock_bh(&pi->send_lock);
+
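+ /* Rewind the send pointer to the oldest unacked frame and
+ * retransmit everything from there. */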
+ if (!skb_queue_empty(TX_QUEUE(sk)))
+ sk->sk_send_head = TX_QUEUE(sk)->next;
+
+ pi->next_tx_seq = pi->expected_ack_seq;
+ ret = l2cap_ertm_send(sk);
+
+ spin_unlock_bh(&pi->send_lock);
+
+ return ret;
+}
+
+static void l2cap_send_ack(struct l2cap_pinfo *pi)
+{
+ struct sock *sk = (struct sock *)pi;
+ u16 control = 0;
+ int nframes;
+
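+ /* Prefer piggybacking the acknowledgement on pending I-frames;
+ * send an explicit RR/RNR S-frame only if nothing went out. */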
+ control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+
+ if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+ control |= L2CAP_SUPER_RCV_NOT_READY;
+ pi->conn_state |= L2CAP_CONN_RNR_SENT;
+ l2cap_send_sframe(pi, control);
+ return;
+ }
+
+ spin_lock_bh(&pi->send_lock);
+ nframes = l2cap_ertm_send(sk);
+ spin_unlock_bh(&pi->send_lock);
+
+ if (nframes > 0)
+ return;
+
+ control |= L2CAP_SUPER_RCV_READY;
+ l2cap_send_sframe(pi, control);
+}
+
+static void l2cap_send_srejtail(struct sock *sk)
+{
+ struct srej_list *tail;
+ u16 control;
+
+ control = L2CAP_SUPER_SELECT_REJECT;
+ control |= L2CAP_CTRL_FINAL;
+
+ tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
+ control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+
+ l2cap_send_sframe(l2cap_pi(sk), control);
}
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
@@ -1420,9 +1529,8 @@ static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, in
struct sk_buff **frag;
int err, sent = 0;
- if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
+ if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
return -EFAULT;
- }
sent += count;
len -= count;
@@ -1513,6 +1621,9 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *m
BT_DBG("sk %p len %d", sk, (int)len);
+ if (!conn)
+ return ERR_PTR(-ENOTCONN);
+
if (sdulen)
hlen += 2;
@@ -1554,25 +1665,24 @@ static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, siz
u16 control;
size_t size = 0;
- __skb_queue_head_init(&sar_queue);
+ skb_queue_head_init(&sar_queue);
control = L2CAP_SDU_START;
- skb = l2cap_create_iframe_pdu(sk, msg, pi->max_pdu_size, control, len);
+ skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
if (IS_ERR(skb))
return PTR_ERR(skb);
__skb_queue_tail(&sar_queue, skb);
- len -= pi->max_pdu_size;
- size +=pi->max_pdu_size;
- control = 0;
+ len -= pi->remote_mps;
+ size += pi->remote_mps;
while (len > 0) {
size_t buflen;
- if (len > pi->max_pdu_size) {
- control |= L2CAP_SDU_CONTINUE;
- buflen = pi->max_pdu_size;
+ if (len > pi->remote_mps) {
+ control = L2CAP_SDU_CONTINUE;
+ buflen = pi->remote_mps;
} else {
- control |= L2CAP_SDU_END;
+ control = L2CAP_SDU_END;
buflen = len;
}
@@ -1585,11 +1695,12 @@ static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, siz
__skb_queue_tail(&sar_queue, skb);
len -= buflen;
size += buflen;
- control = 0;
}
skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
+ spin_lock_bh(&pi->send_lock);
if (sk->sk_send_head == NULL)
sk->sk_send_head = sar_queue.next;
+ spin_unlock_bh(&pi->send_lock);
return size;
}
@@ -1611,11 +1722,6 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
- /* Check outgoing MTU */
- if (sk->sk_type == SOCK_SEQPACKET && pi->mode == L2CAP_MODE_BASIC &&
- len > pi->omtu)
- return -EINVAL;
-
lock_sock(sk);
if (sk->sk_state != BT_CONNECTED) {
@@ -1626,15 +1732,23 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
/* Connectionless channel */
if (sk->sk_type == SOCK_DGRAM) {
skb = l2cap_create_connless_pdu(sk, msg, len);
- if (IS_ERR(skb))
+ if (IS_ERR(skb)) {
err = PTR_ERR(skb);
- else
- err = l2cap_do_send(sk, skb);
+ } else {
+ l2cap_do_send(sk, skb);
+ err = len;
+ }
goto done;
}
switch (pi->mode) {
case L2CAP_MODE_BASIC:
+ /* Check outgoing MTU */
+ if (len > pi->omtu) {
+ err = -EINVAL;
+ goto done;
+ }
+
/* Create a basic PDU */
skb = l2cap_create_basic_pdu(sk, msg, len);
if (IS_ERR(skb)) {
@@ -1642,15 +1756,14 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
goto done;
}
- err = l2cap_do_send(sk, skb);
- if (!err)
- err = len;
+ l2cap_do_send(sk, skb);
+ err = len;
break;
case L2CAP_MODE_ERTM:
case L2CAP_MODE_STREAMING:
/* Entire SDU fits into one PDU */
- if (len <= pi->max_pdu_size) {
+ if (len <= pi->remote_mps) {
control = L2CAP_SDU_UNSEGMENTED;
skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
if (IS_ERR(skb)) {
@@ -1658,8 +1771,15 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
goto done;
}
__skb_queue_tail(TX_QUEUE(sk), skb);
+
+ if (pi->mode == L2CAP_MODE_ERTM)
+ spin_lock_bh(&pi->send_lock);
+
if (sk->sk_send_head == NULL)
sk->sk_send_head = skb;
+
+ if (pi->mode == L2CAP_MODE_ERTM)
+ spin_unlock_bh(&pi->send_lock);
} else {
/* Segment SDU into multiples PDUs */
err = l2cap_sar_segment_sdu(sk, msg, len);
@@ -1667,12 +1787,15 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
goto done;
}
- if (pi->mode == L2CAP_MODE_STREAMING)
+ if (pi->mode == L2CAP_MODE_STREAMING) {
err = l2cap_streaming_send(sk);
- else
+ } else {
+ spin_lock_bh(&pi->send_lock);
err = l2cap_ertm_send(sk);
+ spin_unlock_bh(&pi->send_lock);
+ }
- if (!err)
+ if (err >= 0)
err = len;
break;
@@ -1731,6 +1854,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
opts.flush_to = l2cap_pi(sk)->flush_to;
opts.mode = l2cap_pi(sk)->mode;
opts.fcs = l2cap_pi(sk)->fcs;
+ opts.max_tx = l2cap_pi(sk)->max_tx;
+ opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
len = min_t(unsigned int, sizeof(opts), optlen);
if (copy_from_user((char *) &opts, optval, len)) {
@@ -1738,10 +1863,25 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
break;
}
+ l2cap_pi(sk)->mode = opts.mode;
+ switch (l2cap_pi(sk)->mode) {
+ case L2CAP_MODE_BASIC:
+ break;
+ case L2CAP_MODE_ERTM:
+ case L2CAP_MODE_STREAMING:
+ if (enable_ertm)
+ break;
+ /* fall through */
+ default:
+ err = -EINVAL;
+ break;
+ }
+
l2cap_pi(sk)->imtu = opts.imtu;
l2cap_pi(sk)->omtu = opts.omtu;
- l2cap_pi(sk)->mode = opts.mode;
l2cap_pi(sk)->fcs = opts.fcs;
+ l2cap_pi(sk)->max_tx = opts.max_tx;
+ l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
break;
case L2CAP_LM:
@@ -1789,7 +1929,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
switch (optname) {
case BT_SECURITY:
- if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
+ if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
+ && sk->sk_type != SOCK_RAW) {
err = -EINVAL;
break;
}
@@ -1856,6 +1997,8 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
opts.flush_to = l2cap_pi(sk)->flush_to;
opts.mode = l2cap_pi(sk)->mode;
opts.fcs = l2cap_pi(sk)->fcs;
+ opts.max_tx = l2cap_pi(sk)->max_tx;
+ opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
len = min_t(unsigned int, len, sizeof(opts));
if (copy_to_user(optval, (char *) &opts, len))
@@ -1937,7 +2080,8 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
switch (optname) {
case BT_SECURITY:
- if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
+ if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
+ && sk->sk_type != SOCK_RAW) {
err = -EINVAL;
break;
}
@@ -1982,6 +2126,9 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
lock_sock(sk);
if (!sk->sk_shutdown) {
+ if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
+ err = __l2cap_wait_ack(sk);
+
sk->sk_shutdown = SHUTDOWN_MASK;
l2cap_sock_clear_timer(sk);
__l2cap_sock_close(sk, 0);
@@ -2184,19 +2331,35 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
*ptr += L2CAP_CONF_OPT_SIZE + len;
}
+static void l2cap_ack_timeout(unsigned long arg)
+{
+ struct sock *sk = (void *) arg;
+
+ bh_lock_sock(sk);
+ l2cap_send_ack(l2cap_pi(sk));
+ bh_unlock_sock(sk);
+}
+
static inline void l2cap_ertm_init(struct sock *sk)
{
l2cap_pi(sk)->expected_ack_seq = 0;
l2cap_pi(sk)->unacked_frames = 0;
l2cap_pi(sk)->buffer_seq = 0;
- l2cap_pi(sk)->num_to_ack = 0;
+ l2cap_pi(sk)->num_acked = 0;
+ l2cap_pi(sk)->frames_sent = 0;
setup_timer(&l2cap_pi(sk)->retrans_timer,
l2cap_retrans_timeout, (unsigned long) sk);
setup_timer(&l2cap_pi(sk)->monitor_timer,
l2cap_monitor_timeout, (unsigned long) sk);
+ setup_timer(&l2cap_pi(sk)->ack_timer,
+ l2cap_ack_timeout, (unsigned long) sk);
__skb_queue_head_init(SREJ_QUEUE(sk));
+ __skb_queue_head_init(BUSY_QUEUE(sk));
+ spin_lock_init(&l2cap_pi(sk)->send_lock);
+
+ INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
}
static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
@@ -2232,7 +2395,7 @@ static int l2cap_build_conf_req(struct sock *sk, void *data)
{
struct l2cap_pinfo *pi = l2cap_pi(sk);
struct l2cap_conf_req *req = data;
- struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
+ struct l2cap_conf_rfc rfc = { .mode = pi->mode };
void *ptr = req->data;
BT_DBG("sk %p", sk);
@@ -2261,11 +2424,13 @@ done:
case L2CAP_MODE_ERTM:
rfc.mode = L2CAP_MODE_ERTM;
- rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
- rfc.max_transmit = max_transmit;
+ rfc.txwin_size = pi->tx_win;
+ rfc.max_transmit = pi->max_tx;
rfc.retrans_timeout = 0;
rfc.monitor_timeout = 0;
rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
+ if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
+ rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
sizeof(rfc), (unsigned long) &rfc);
@@ -2287,6 +2452,8 @@ done:
rfc.retrans_timeout = 0;
rfc.monitor_timeout = 0;
rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
+ if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
+ rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
sizeof(rfc), (unsigned long) &rfc);
@@ -2415,10 +2582,15 @@ done:
case L2CAP_MODE_ERTM:
pi->remote_tx_win = rfc.txwin_size;
pi->remote_max_tx = rfc.max_transmit;
- pi->max_pdu_size = rfc.max_pdu_size;
+ if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
+ rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
- rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
- rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
+ pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
+
+ rfc.retrans_timeout =
+ cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
+ rfc.monitor_timeout =
+ cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
pi->conf_state |= L2CAP_CONF_MODE_DONE;
@@ -2428,8 +2600,10 @@ done:
break;
case L2CAP_MODE_STREAMING:
- pi->remote_tx_win = rfc.txwin_size;
- pi->max_pdu_size = rfc.max_pdu_size;
+ if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
+ rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
+
+ pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
pi->conf_state |= L2CAP_CONF_MODE_DONE;
@@ -2506,13 +2680,12 @@ static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data,
switch (rfc.mode) {
case L2CAP_MODE_ERTM:
pi->remote_tx_win = rfc.txwin_size;
- pi->retrans_timeout = rfc.retrans_timeout;
- pi->monitor_timeout = rfc.monitor_timeout;
- pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
+ pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
+ pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
+ pi->mps = le16_to_cpu(rfc.max_pdu_size);
break;
case L2CAP_MODE_STREAMING:
- pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
- break;
+ pi->mps = le16_to_cpu(rfc.max_pdu_size);
}
}
@@ -2536,6 +2709,42 @@ static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 fla
return ptr - data;
}
+static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ int type, olen;
+ unsigned long val;
+ struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
+
+ BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
+
+ if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
+ return;
+
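+ /* Scan the accepted configuration options for the RFC block and
+ * cache the timeouts and MPS the remote side actually granted. */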
+ while (len >= L2CAP_CONF_OPT_SIZE) {
+ len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
+
+ switch (type) {
+ case L2CAP_CONF_RFC:
+ if (olen == sizeof(rfc))
+ memcpy(&rfc, (void *)val, olen);
+ goto done;
+ }
+ }
+
+done:
+ switch (rfc.mode) {
+ case L2CAP_MODE_ERTM:
+ pi->remote_tx_win = rfc.txwin_size;
+ pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
+ pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
+ pi->mps = le16_to_cpu(rfc.max_pdu_size);
+ break;
+ case L2CAP_MODE_STREAMING:
+ pi->mps = le16_to_cpu(rfc.max_pdu_size);
+ }
+}
+
static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
@@ -2815,6 +3024,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
u16 scid, flags, result;
struct sock *sk;
+ int len = cmd->len - sizeof(*rsp);
scid = __le16_to_cpu(rsp->scid);
flags = __le16_to_cpu(rsp->flags);
@@ -2829,11 +3039,11 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
switch (result) {
case L2CAP_CONF_SUCCESS:
+ l2cap_conf_rfc_get(sk, rsp->data, len);
break;
case L2CAP_CONF_UNACCEPT:
if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
- int len = cmd->len - sizeof(*rsp);
char req[64];
if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
@@ -2917,8 +3127,10 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
skb_queue_purge(SREJ_QUEUE(sk));
+ skb_queue_purge(BUSY_QUEUE(sk));
del_timer(&l2cap_pi(sk)->retrans_timer);
del_timer(&l2cap_pi(sk)->monitor_timer);
+ del_timer(&l2cap_pi(sk)->ack_timer);
}
l2cap_chan_del(sk, ECONNRESET);
@@ -2947,8 +3159,10 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
skb_queue_purge(SREJ_QUEUE(sk));
+ skb_queue_purge(BUSY_QUEUE(sk));
del_timer(&l2cap_pi(sk)->retrans_timer);
del_timer(&l2cap_pi(sk)->monitor_timer);
+ del_timer(&l2cap_pi(sk)->ack_timer);
}
l2cap_chan_del(sk, 0);
@@ -3143,7 +3357,40 @@ static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
return 0;
}
-static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
+static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ u16 control = 0;
+
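+ /* Answer a poll (P bit): report RNR while locally busy, otherwise
+ * flush pending I-frames with the F bit set, or fall back to RR. */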
+ pi->frames_sent = 0;
+ pi->conn_state |= L2CAP_CONN_SEND_FBIT;
+
+ control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+
+ if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+ control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL;
+ l2cap_send_sframe(pi, control);
+ pi->conn_state |= L2CAP_CONN_RNR_SENT;
+ pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
+ }
+
+ if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && pi->unacked_frames > 0)
+ __mod_retrans_timer();
+
+ pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+
+ spin_lock_bh(&pi->send_lock);
+ l2cap_ertm_send(sk);
+ spin_unlock_bh(&pi->send_lock);
+
+ if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
+ pi->frames_sent == 0) {
+ control |= L2CAP_SUPER_RCV_READY;
+ l2cap_send_sframe(pi, control);
+ }
+}
+
+static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
struct sk_buff *next_skb;
@@ -3153,29 +3400,256 @@ static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_
next_skb = skb_peek(SREJ_QUEUE(sk));
if (!next_skb) {
__skb_queue_tail(SREJ_QUEUE(sk), skb);
- return;
+ return 0;
}
do {
+ if (bt_cb(next_skb)->tx_seq == tx_seq)
+ return -EINVAL;
+
if (bt_cb(next_skb)->tx_seq > tx_seq) {
__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
- return;
+ return 0;
}
if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
break;
- } while((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
+ } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
__skb_queue_tail(SREJ_QUEUE(sk), skb);
+
+ return 0;
}
-static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
+static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ struct sk_buff *_skb;
+ int err;
+
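+ /* Reassemble the SDU from its SAR-tagged I-frames; any violation
+ * of the SAR state machine tears the channel down. */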
+ switch (control & L2CAP_CTRL_SAR) {
+ case L2CAP_SDU_UNSEGMENTED:
+ if (pi->conn_state & L2CAP_CONN_SAR_SDU)
+ goto drop;
+
+ err = sock_queue_rcv_skb(sk, skb);
+ if (!err)
+ return err;
+
+ break;
+
+ case L2CAP_SDU_START:
+ if (pi->conn_state & L2CAP_CONN_SAR_SDU)
+ goto drop;
+
+ pi->sdu_len = get_unaligned_le16(skb->data);
+
+ if (pi->sdu_len > pi->imtu)
+ goto disconnect;
+
+ pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
+ if (!pi->sdu)
+ return -ENOMEM;
+
+ /* Pull the 2-byte SDU length only after the alloc succeeds: a
+ * Local Busy condition can cause this frame to be reprocessed,
+ * so the pull must happen exactly once. */
+ skb_pull(skb, 2);
+
+ memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
+
+ pi->conn_state |= L2CAP_CONN_SAR_SDU;
+ pi->partial_sdu_len = skb->len;
+ break;
+
+ case L2CAP_SDU_CONTINUE:
+ if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
+ goto disconnect;
+
+ if (!pi->sdu)
+ goto disconnect;
+
+ pi->partial_sdu_len += skb->len;
+ if (pi->partial_sdu_len > pi->sdu_len)
+ goto drop;
+
+ memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
+
+ break;
+
+ case L2CAP_SDU_END:
+ if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
+ goto disconnect;
+
+ if (!pi->sdu)
+ goto disconnect;
+
+ if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
+ pi->partial_sdu_len += skb->len;
+
+ if (pi->partial_sdu_len > pi->imtu)
+ goto drop;
+
+ if (pi->partial_sdu_len != pi->sdu_len)
+ goto drop;
+
+ memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
+ }
+
+ _skb = skb_clone(pi->sdu, GFP_ATOMIC);
+ if (!_skb) {
+ pi->conn_state |= L2CAP_CONN_SAR_RETRY;
+ return -ENOMEM;
+ }
+
+ err = sock_queue_rcv_skb(sk, _skb);
+ if (err < 0) {
+ kfree_skb(_skb);
+ pi->conn_state |= L2CAP_CONN_SAR_RETRY;
+ return err;
+ }
+
+ pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
+ pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
+
+ kfree_skb(pi->sdu);
+ break;
+ }
+
+ kfree_skb(skb);
+ return 0;
+
+drop:
+ kfree_skb(pi->sdu);
+ pi->sdu = NULL;
+
+disconnect:
+ l2cap_send_disconn_req(pi->conn, sk);
+ kfree_skb(skb);
+ return 0;
+}
+
+static void l2cap_busy_work(struct work_struct *work)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ struct l2cap_pinfo *pi =
+ container_of(work, struct l2cap_pinfo, busy_work);
+ struct sock *sk = (struct sock *)pi;
+ int n_tries = 0, timeo = HZ/5, err;
+ struct sk_buff *skb;
+ u16 control;
+
+ lock_sock(sk);
+
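+ /* Periodically retry delivering the frames parked on BUSY_QUEUE
+ * until the receive buffer drains or we give up and disconnect. */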
+ add_wait_queue(sk_sleep(sk), &wait);
+ while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
+ err = -EBUSY;
+ l2cap_send_disconn_req(pi->conn, sk);
+ goto done;
+ }
+
+ if (!timeo)
+ timeo = HZ/5;
+
+ if (signal_pending(current)) {
+ err = sock_intr_errno(timeo);
+ goto done;
+ }
+
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock(sk);
+
+ err = sock_error(sk);
+ if (err)
+ goto done;
+
+ while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
+ control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
+ err = l2cap_ertm_reassembly_sdu(sk, skb, control);
+ if (err < 0) {
+ skb_queue_head(BUSY_QUEUE(sk), skb);
+ break;
+ }
+
+ pi->buffer_seq = (pi->buffer_seq + 1) % 64;
+ }
+
+ if (!skb)
+ break;
+ }
+
+ if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
+ goto done;
+
+ control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+ control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
+ l2cap_send_sframe(pi, control);
+ l2cap_pi(sk)->retry_count = 1;
+
+ del_timer(&pi->retrans_timer);
+ __mod_monitor_timer();
+
+ l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
+
+done:
+ pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
+ pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
+
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(sk_sleep(sk), &wait);
+
+ release_sock(sk);
+}
+
+static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ int sctrl, err;
+
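+ /* While the local receiver is busy, park incoming I-frames on
+ * BUSY_QUEUE; the busy work will deliver them once space frees. */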
+ if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+ bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
+ __skb_queue_tail(BUSY_QUEUE(sk), skb);
+ return -EBUSY;
+ }
+
+ err = l2cap_ertm_reassembly_sdu(sk, skb, control);
+ if (err >= 0) {
+ pi->buffer_seq = (pi->buffer_seq + 1) % 64;
+ return err;
+ }
+
+ /* Busy Condition */
+ pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
+ bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
+ __skb_queue_tail(BUSY_QUEUE(sk), skb);
+
+ sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+ sctrl |= L2CAP_SUPER_RCV_NOT_READY;
+ l2cap_send_sframe(pi, sctrl);
+
+ pi->conn_state |= L2CAP_CONN_RNR_SENT;
+
+ queue_work(_busy_wq, &pi->busy_work);
+
+ return err;
+}
+
+static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
struct l2cap_pinfo *pi = l2cap_pi(sk);
struct sk_buff *_skb;
int err = -EINVAL;
+ /*
+ * TODO: notify userspace when data is lost in Streaming Mode.
+ */
+
switch (control & L2CAP_CTRL_SAR) {
case L2CAP_SDU_UNSEGMENTED:
if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
@@ -3198,6 +3672,11 @@ static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 co
pi->sdu_len = get_unaligned_le16(skb->data);
skb_pull(skb, 2);
+ if (pi->sdu_len > pi->imtu) {
+ err = -EMSGSIZE;
+ break;
+ }
+
pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
if (!pi->sdu) {
err = -ENOMEM;
@@ -3234,15 +3713,19 @@ static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 co
pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
pi->partial_sdu_len += skb->len;
+ if (pi->partial_sdu_len > pi->imtu)
+ goto drop;
+
if (pi->partial_sdu_len == pi->sdu_len) {
_skb = skb_clone(pi->sdu, GFP_ATOMIC);
if (_skb) {
err = sock_queue_rcv_skb(sk, _skb);
if (err < 0)
kfree_skb(_skb);
}
}
- kfree_skb(pi->sdu);
err = 0;
+drop:
+ kfree_skb(pi->sdu);
break;
}
@@ -3253,15 +3736,15 @@ static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 co
static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
{
struct sk_buff *skb;
- u16 control = 0;
+ u16 control;
- while((skb = skb_peek(SREJ_QUEUE(sk)))) {
+ while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
if (bt_cb(skb)->tx_seq != tx_seq)
break;
skb = skb_dequeue(SREJ_QUEUE(sk));
- control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
- l2cap_sar_reassembly_sdu(sk, skb, control);
+ control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
+ l2cap_ertm_reassembly_sdu(sk, skb, control);
l2cap_pi(sk)->buffer_seq_srej =
(l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
tx_seq++;
@@ -3274,7 +3757,7 @@ static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
struct srej_list *l, *tmp;
u16 control;
- list_for_each_entry_safe(l,tmp, SREJ_LIST(sk), list) {
+ list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
if (l->tx_seq == tx_seq) {
list_del(&l->list);
kfree(l);
@@ -3297,10 +3780,6 @@ static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
while (tx_seq != pi->expected_tx_seq) {
control = L2CAP_SUPER_SELECT_REJECT;
control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
- if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
- control |= L2CAP_CTRL_POLL;
- pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
- }
l2cap_send_sframe(pi, control);
new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
@@ -3315,18 +3794,40 @@ static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, str
struct l2cap_pinfo *pi = l2cap_pi(sk);
u8 tx_seq = __get_txseq(rx_control);
u8 req_seq = __get_reqseq(rx_control);
- u16 tx_control = 0;
u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
+ int tx_seq_offset, expected_tx_seq_offset;
+ int num_to_ack = (pi->tx_win / 6) + 1;
int err = 0;
BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
+ if (L2CAP_CTRL_FINAL & rx_control &&
+ l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
+ del_timer(&pi->monitor_timer);
+ if (pi->unacked_frames > 0)
+ __mod_retrans_timer();
+ pi->conn_state &= ~L2CAP_CONN_WAIT_F;
+ }
+
pi->expected_ack_seq = req_seq;
l2cap_drop_acked_frames(sk);
if (tx_seq == pi->expected_tx_seq)
goto expected;
+ tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
+ if (tx_seq_offset < 0)
+ tx_seq_offset += 64;
+
+ /* invalid tx_seq */
+ if (tx_seq_offset >= pi->tx_win) {
+ l2cap_send_disconn_req(pi->conn, sk);
+ goto drop;
+ }
+
+ if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
+ goto drop;
+
if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
struct srej_list *first;
@@ -3342,10 +3843,14 @@ static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, str
if (list_empty(SREJ_LIST(sk))) {
pi->buffer_seq = pi->buffer_seq_srej;
pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
+ l2cap_send_ack(pi);
}
} else {
struct srej_list *l;
- l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
+
+ /* duplicated tx_seq */
+ if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
+ goto drop;
list_for_each_entry(l, SREJ_LIST(sk), list) {
if (l->tx_seq == tx_seq) {
@@ -3356,12 +3861,22 @@ static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, str
l2cap_send_srejframe(sk, tx_seq);
}
} else {
+ expected_tx_seq_offset =
+ (pi->expected_tx_seq - pi->buffer_seq) % 64;
+ if (expected_tx_seq_offset < 0)
+ expected_tx_seq_offset += 64;
+
+ /* duplicated tx_seq */
+ if (tx_seq_offset < expected_tx_seq_offset)
+ goto drop;
+
pi->conn_state |= L2CAP_CONN_SREJ_SENT;
INIT_LIST_HEAD(SREJ_LIST(sk));
pi->buffer_seq_srej = pi->buffer_seq;
__skb_queue_head_init(SREJ_QUEUE(sk));
+ __skb_queue_head_init(BUSY_QUEUE(sk));
l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
pi->conn_state |= L2CAP_CONN_SEND_PBIT;
@@ -3374,153 +3889,189 @@ expected:
pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
- l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
+ bt_cb(skb)->tx_seq = tx_seq;
+ bt_cb(skb)->sar = sar;
+ __skb_queue_tail(SREJ_QUEUE(sk), skb);
return 0;
}
if (rx_control & L2CAP_CTRL_FINAL) {
if (pi->conn_state & L2CAP_CONN_REJ_ACT)
pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
- else {
- sk->sk_send_head = TX_QUEUE(sk)->next;
- pi->next_tx_seq = pi->expected_ack_seq;
- l2cap_ertm_send(sk);
- }
+ else
+ l2cap_retransmit_frames(sk);
}
- pi->buffer_seq = (pi->buffer_seq + 1) % 64;
-
- err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
+ err = l2cap_push_rx_skb(sk, skb, rx_control);
if (err < 0)
- return err;
+ return 0;
+
+ __mod_ack_timer();
+
+ pi->num_acked = (pi->num_acked + 1) % num_to_ack;
+ if (pi->num_acked == num_to_ack - 1)
+ l2cap_send_ack(pi);
- pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK;
- if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1) {
- tx_control |= L2CAP_SUPER_RCV_READY;
- tx_control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
- l2cap_send_sframe(pi, tx_control);
- }
+ return 0;
+
+drop:
+ kfree_skb(skb);
return 0;
}
-static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
+static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
{
struct l2cap_pinfo *pi = l2cap_pi(sk);
- u8 tx_seq = __get_reqseq(rx_control);
- BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
+ pi->expected_ack_seq = __get_reqseq(rx_control);
+ l2cap_drop_acked_frames(sk);
- switch (rx_control & L2CAP_CTRL_SUPERVISE) {
- case L2CAP_SUPER_RCV_READY:
- if (rx_control & L2CAP_CTRL_POLL) {
- u16 control = L2CAP_CTRL_FINAL;
- control |= L2CAP_SUPER_RCV_READY |
- (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT);
- l2cap_send_sframe(l2cap_pi(sk), control);
- pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+ if (rx_control & L2CAP_CTRL_POLL) {
+ if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
+ if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
+ (pi->unacked_frames > 0))
+ __mod_retrans_timer();
- } else if (rx_control & L2CAP_CTRL_FINAL) {
pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
- pi->expected_ack_seq = tx_seq;
- l2cap_drop_acked_frames(sk);
-
- if (pi->conn_state & L2CAP_CONN_REJ_ACT)
- pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
- else {
- sk->sk_send_head = TX_QUEUE(sk)->next;
- pi->next_tx_seq = pi->expected_ack_seq;
- l2cap_ertm_send(sk);
- }
-
- if (!(pi->conn_state & L2CAP_CONN_WAIT_F))
- break;
+ l2cap_send_srejtail(sk);
+ } else {
+ l2cap_send_i_or_rr_or_rnr(sk);
+ }
- pi->conn_state &= ~L2CAP_CONN_WAIT_F;
- del_timer(&pi->monitor_timer);
+ } else if (rx_control & L2CAP_CTRL_FINAL) {
+ pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
- if (pi->unacked_frames > 0)
- __mod_retrans_timer();
- } else {
- pi->expected_ack_seq = tx_seq;
- l2cap_drop_acked_frames(sk);
+ if (pi->conn_state & L2CAP_CONN_REJ_ACT)
+ pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
+ else
+ l2cap_retransmit_frames(sk);
- if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
- (pi->unacked_frames > 0))
- __mod_retrans_timer();
+ } else {
+ if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
+ (pi->unacked_frames > 0))
+ __mod_retrans_timer();
- pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+ pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+ if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
+ l2cap_send_ack(pi);
+ } else {
+ spin_lock_bh(&pi->send_lock);
l2cap_ertm_send(sk);
+ spin_unlock_bh(&pi->send_lock);
}
- break;
+ }
+}
- case L2CAP_SUPER_REJECT:
- pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ u8 tx_seq = __get_reqseq(rx_control);
+
+ pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+
+ pi->expected_ack_seq = tx_seq;
+ l2cap_drop_acked_frames(sk);
- pi->expected_ack_seq = __get_reqseq(rx_control);
+ if (rx_control & L2CAP_CTRL_FINAL) {
+ if (pi->conn_state & L2CAP_CONN_REJ_ACT)
+ pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
+ else
+ l2cap_retransmit_frames(sk);
+ } else {
+ l2cap_retransmit_frames(sk);
+
+ if (pi->conn_state & L2CAP_CONN_WAIT_F)
+ pi->conn_state |= L2CAP_CONN_REJ_ACT;
+ }
+}
+static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ u8 tx_seq = __get_reqseq(rx_control);
+
+ pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+
+ if (rx_control & L2CAP_CTRL_POLL) {
+ pi->expected_ack_seq = tx_seq;
l2cap_drop_acked_frames(sk);
+ l2cap_retransmit_one_frame(sk, tx_seq);
- if (rx_control & L2CAP_CTRL_FINAL) {
- if (pi->conn_state & L2CAP_CONN_REJ_ACT)
- pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
- else {
- sk->sk_send_head = TX_QUEUE(sk)->next;
- pi->next_tx_seq = pi->expected_ack_seq;
- l2cap_ertm_send(sk);
- }
- } else {
- sk->sk_send_head = TX_QUEUE(sk)->next;
- pi->next_tx_seq = pi->expected_ack_seq;
- l2cap_ertm_send(sk);
+ spin_lock_bh(&pi->send_lock);
+ l2cap_ertm_send(sk);
+ spin_unlock_bh(&pi->send_lock);
- if (pi->conn_state & L2CAP_CONN_WAIT_F) {
- pi->srej_save_reqseq = tx_seq;
- pi->conn_state |= L2CAP_CONN_REJ_ACT;
- }
+ if (pi->conn_state & L2CAP_CONN_WAIT_F) {
+ pi->srej_save_reqseq = tx_seq;
+ pi->conn_state |= L2CAP_CONN_SREJ_ACT;
+ }
+ } else if (rx_control & L2CAP_CTRL_FINAL) {
+ if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
+ pi->srej_save_reqseq == tx_seq)
+ pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
+ else
+ l2cap_retransmit_one_frame(sk, tx_seq);
+ } else {
+ l2cap_retransmit_one_frame(sk, tx_seq);
+ if (pi->conn_state & L2CAP_CONN_WAIT_F) {
+ pi->srej_save_reqseq = tx_seq;
+ pi->conn_state |= L2CAP_CONN_SREJ_ACT;
}
+ }
+}
+static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ u8 tx_seq = __get_reqseq(rx_control);
+
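+ /* The remote receiver is busy: stop the retransmission timer and
+ * answer any poll; under SREJ recovery re-request the gap instead. */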
+ pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
+ pi->expected_ack_seq = tx_seq;
+ l2cap_drop_acked_frames(sk);
+
+ if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
+ del_timer(&pi->retrans_timer);
+ if (rx_control & L2CAP_CTRL_POLL)
+ l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
+ return;
+ }
+
+ if (rx_control & L2CAP_CTRL_POLL)
+ l2cap_send_srejtail(sk);
+ else
+ l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
+}
+
+static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
+{
+ BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
+
+ if (L2CAP_CTRL_FINAL & rx_control &&
+ l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
+ del_timer(&l2cap_pi(sk)->monitor_timer);
+ if (l2cap_pi(sk)->unacked_frames > 0)
+ __mod_retrans_timer();
+ l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
+ }
+
+ switch (rx_control & L2CAP_CTRL_SUPERVISE) {
+ case L2CAP_SUPER_RCV_READY:
+ l2cap_data_channel_rrframe(sk, rx_control);
break;
- case L2CAP_SUPER_SELECT_REJECT:
- pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+ case L2CAP_SUPER_REJECT:
+ l2cap_data_channel_rejframe(sk, rx_control);
+ break;
- if (rx_control & L2CAP_CTRL_POLL) {
- pi->expected_ack_seq = tx_seq;
- l2cap_drop_acked_frames(sk);
- l2cap_retransmit_frame(sk, tx_seq);
- l2cap_ertm_send(sk);
- if (pi->conn_state & L2CAP_CONN_WAIT_F) {
- pi->srej_save_reqseq = tx_seq;
- pi->conn_state |= L2CAP_CONN_SREJ_ACT;
- }
- } else if (rx_control & L2CAP_CTRL_FINAL) {
- if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
- pi->srej_save_reqseq == tx_seq)
- pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
- else
- l2cap_retransmit_frame(sk, tx_seq);
- }
- else {
- l2cap_retransmit_frame(sk, tx_seq);
- if (pi->conn_state & L2CAP_CONN_WAIT_F) {
- pi->srej_save_reqseq = tx_seq;
- pi->conn_state |= L2CAP_CONN_SREJ_ACT;
- }
- }
+ case L2CAP_SUPER_SELECT_REJECT:
+ l2cap_data_channel_srejframe(sk, rx_control);
break;
case L2CAP_SUPER_RCV_NOT_READY:
- pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
- pi->expected_ack_seq = tx_seq;
- l2cap_drop_acked_frames(sk);
-
- del_timer(&l2cap_pi(sk)->retrans_timer);
- if (rx_control & L2CAP_CTRL_POLL) {
- u16 control = L2CAP_CTRL_FINAL;
- l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
- }
+ l2cap_data_channel_rnrframe(sk, rx_control);
break;
}
+ kfree_skb(skb);
return 0;
}
@@ -3529,7 +4080,7 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
struct sock *sk;
struct l2cap_pinfo *pi;
u16 control, len;
- u8 tx_seq;
+ u8 tx_seq, req_seq;
+ int next_tx_seq_offset, req_seq_offset;
sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
if (!sk) {
@@ -3574,16 +4125,45 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
* Receiver will miss it and start proper recovery
* procedures and ask retransmission.
*/
- if (len > L2CAP_DEFAULT_MAX_PDU_SIZE)
+ if (len > pi->mps) {
+ l2cap_send_disconn_req(pi->conn, sk);
goto drop;
+ }
if (l2cap_check_fcs(pi, skb))
goto drop;
- if (__is_iframe(control))
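+ /* Sequence numbers are mod 64: normalize both offsets into the
+ * 0..63 range before checking them against the transmit window. */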
+ req_seq = __get_reqseq(control);
+ req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
+ if (req_seq_offset < 0)
+ req_seq_offset += 64;
+
+ next_tx_seq_offset =
+ (pi->next_tx_seq - pi->expected_ack_seq) % 64;
+ if (next_tx_seq_offset < 0)
+ next_tx_seq_offset += 64;
+
+ /* check for invalid req-seq */
+ if (req_seq_offset > next_tx_seq_offset) {
+ l2cap_send_disconn_req(pi->conn, sk);
+ goto drop;
+ }
+
+ if (__is_iframe(control)) {
+ if (len < 4) {
+ l2cap_send_disconn_req(pi->conn, sk);
+ goto drop;
+ }
+
l2cap_data_channel_iframe(sk, control, skb);
- else
+ } else {
+ if (len != 0) {
+ l2cap_send_disconn_req(pi->conn, sk);
+ goto drop;
+ }
+
l2cap_data_channel_sframe(sk, control, skb);
+ }
goto done;
@@ -3598,7 +4178,7 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
if (pi->fcs == L2CAP_FCS_CRC16)
len -= 2;
- if (len > L2CAP_DEFAULT_MAX_PDU_SIZE || __is_sframe(control))
+ if (len > pi->mps || len < 4 || __is_sframe(control))
goto drop;
if (l2cap_check_fcs(pi, skb))
@@ -3609,14 +4189,14 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
if (pi->expected_tx_seq == tx_seq)
pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
else
- pi->expected_tx_seq = tx_seq + 1;
+ pi->expected_tx_seq = (tx_seq + 1) % 64;
- l2cap_sar_reassembly_sdu(sk, skb, control);
+ l2cap_streaming_reassembly_sdu(sk, skb, control);
goto done;
default:
- BT_DBG("sk %p: bad mode 0x%2.2x", sk, l2cap_pi(sk)->mode);
+ BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
break;
}
@@ -3772,7 +4352,7 @@ static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
{
- if (sk->sk_type != SOCK_SEQPACKET)
+ if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
return;
if (encrypt == 0x00) {
@@ -4030,6 +4610,10 @@ static int __init l2cap_init(void)
if (err < 0)
return err;
+ _busy_wq = create_singlethread_workqueue("l2cap");
+ if (!_busy_wq) {
+ err = -ENOMEM;
+ goto error;
+ }
+
err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
if (err < 0) {
BT_ERR("L2CAP socket registration failed");
@@ -4064,6 +4648,9 @@ static void __exit l2cap_exit(void)
{
debugfs_remove(l2cap_debugfs);
+ flush_workqueue(_busy_wq);
+ destroy_workqueue(_busy_wq);
+
if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
BT_ERR("L2CAP socket unregistration failed");
@@ -4078,7 +4665,6 @@ void l2cap_load(void)
/* Dummy function to trigger automatic L2CAP module loading by
* other modules that use L2CAP sockets but don't use any other
* symbols from it. */
- return;
}
EXPORT_SYMBOL(l2cap_load);
@@ -4091,6 +4677,9 @@ MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
module_param(max_transmit, uint, 0644);
MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");
+module_param(tx_window, uint, 0644);
+MODULE_PARM_DESC(tx_window, "Transmission window size value (default = 63)");
+
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 8ed3c37684fa..43fbf6b4b4bf 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -503,7 +503,7 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
- add_wait_queue_exclusive(sk->sk_sleep, &wait);
+ add_wait_queue_exclusive(sk_sleep(sk), &wait);
while (!(nsk = bt_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
if (!timeo) {
@@ -526,7 +526,7 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
}
}
set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sk_sleep, &wait);
+ remove_wait_queue(sk_sleep(sk), &wait);
if (err)
goto done;
@@ -621,7 +621,7 @@ static long rfcomm_sock_data_wait(struct sock *sk, long timeo)
{
DECLARE_WAITQUEUE(wait, current);
- add_wait_queue(sk->sk_sleep, &wait);
+ add_wait_queue(sk_sleep(sk), &wait);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
@@ -640,7 +640,7 @@ static long rfcomm_sock_data_wait(struct sock *sk, long timeo)
}
__set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sk_sleep, &wait);
+ remove_wait_queue(sk_sleep(sk), &wait);
return timeo;
}
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index cab71ea2796d..309b6c261b25 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -1014,8 +1014,6 @@ static void rfcomm_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
rfcomm_send_rpn(dev->dlc->session, 1, dev->dlc->dlci, baud,
data_bits, stop_bits, parity,
RFCOMM_RPN_FLOW_NONE, x_on, x_off, changes);
-
- return;
}
static void rfcomm_tty_throttle(struct tty_struct *tty)
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index ca6b2ad1c3fc..d0927d1fdada 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -165,11 +165,11 @@ static inline int sco_chan_add(struct sco_conn *conn, struct sock *sk, struct so
int err = 0;
sco_conn_lock(conn);
- if (conn->sk) {
+ if (conn->sk)
err = -EBUSY;
- } else {
+ else
__sco_chan_add(conn, sk, parent);
- }
+
sco_conn_unlock(conn);
return err;
}
@@ -241,22 +241,19 @@ static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
BT_DBG("sk %p len %d", sk, len);
count = min_t(unsigned int, conn->mtu, len);
- if (!(skb = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err)))
+ skb = bt_skb_send_alloc(sk, count,
+ msg->msg_flags & MSG_DONTWAIT, &err);
+ if (!skb)
return err;
if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
- err = -EFAULT;
- goto fail;
+ kfree_skb(skb);
+ return -EFAULT;
}
- if ((err = hci_send_sco(conn->hcon, skb)) < 0)
- return err;
+ hci_send_sco(conn->hcon, skb);
return count;
-
-fail:
- kfree_skb(skb);
- return err;
}
static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
@@ -276,7 +273,6 @@ static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
drop:
kfree_skb(skb);
- return;
}
/* -------- Socket interface ---------- */
@@ -567,7 +563,7 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
- add_wait_queue_exclusive(sk->sk_sleep, &wait);
+ add_wait_queue_exclusive(sk_sleep(sk), &wait);
while (!(ch = bt_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
if (!timeo) {
@@ -590,7 +586,7 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag
}
}
set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sk_sleep, &wait);
+ remove_wait_queue(sk_sleep(sk), &wait);
if (err)
goto done;
@@ -626,7 +622,7 @@ static int sco_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
- int err = 0;
+ int err;
BT_DBG("sock %p, sk %p", sock, sk);
@@ -851,7 +847,8 @@ static void sco_conn_ready(struct sco_conn *conn)
bh_lock_sock(parent);
- sk = sco_sock_alloc(sock_net(parent), NULL, BTPROTO_SCO, GFP_ATOMIC);
+ sk = sco_sock_alloc(sock_net(parent), NULL,
+ BTPROTO_SCO, GFP_ATOMIC);
if (!sk) {
bh_unlock_sock(parent);
goto done;
diff --git a/net/bridge/Kconfig b/net/bridge/Kconfig
index d115d5cea5b6..9190ae462cb4 100644
--- a/net/bridge/Kconfig
+++ b/net/bridge/Kconfig
@@ -33,14 +33,14 @@ config BRIDGE
If unsure, say N.
config BRIDGE_IGMP_SNOOPING
- bool "IGMP snooping"
+ bool "IGMP/MLD snooping"
depends on BRIDGE
depends on INET
default y
---help---
If you say Y here, then the Ethernet bridge will be able to selectively
- forward multicast traffic based on IGMP traffic received from each
- port.
+ forward multicast traffic based on IGMP/MLD traffic received from
+ each port.
Say N to exclude this support and reduce the binary size.
diff --git a/net/bridge/br.c b/net/bridge/br.c
index e1241c76239a..76357b547752 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -38,7 +38,7 @@ static int __init br_init(void)
err = stp_proto_register(&br_stp_proto);
if (err < 0) {
- printk(KERN_ERR "bridge: can't register sap for STP\n");
+ pr_err("bridge: can't register sap for STP\n");
return err;
}
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 90a9024e5c1e..eedf2c94820e 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -13,8 +13,11 @@
#include <linux/kernel.h>
#include <linux/netdevice.h>
+#include <linux/netpoll.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/list.h>
+#include <linux/netfilter_bridge.h>
#include <asm/uaccess.h>
#include "br_private.h"
@@ -26,16 +29,24 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
const unsigned char *dest = skb->data;
struct net_bridge_fdb_entry *dst;
struct net_bridge_mdb_entry *mdst;
+ struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats);
- BR_INPUT_SKB_CB(skb)->brdev = dev;
+#ifdef CONFIG_BRIDGE_NETFILTER
+ if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) {
+ br_nf_pre_routing_finish_bridge_slow(skb);
+ return NETDEV_TX_OK;
+ }
+#endif
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
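+ /* Counters are kept per-CPU; br_get_stats() folds them for reporting. */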
+ brstats->tx_packets++;
+ brstats->tx_bytes += skb->len;
+
+ BR_INPUT_SKB_CB(skb)->brdev = dev;
skb_reset_mac_header(skb);
skb_pull(skb, ETH_HLEN);
- if (dest[0] & 1) {
+ if (is_multicast_ether_addr(dest)) {
if (br_multicast_rcv(br, NULL, skb))
goto out;
@@ -81,6 +92,31 @@ static int br_dev_stop(struct net_device *dev)
return 0;
}
+static struct net_device_stats *br_get_stats(struct net_device *dev)
+{
+ struct net_bridge *br = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct br_cpu_netstats sum = { 0 };
+ unsigned int cpu;
+
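+ /* Sum the per-CPU counters into a single snapshot for the device stats. */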
+ for_each_possible_cpu(cpu) {
+ const struct br_cpu_netstats *bstats
+ = per_cpu_ptr(br->stats, cpu);
+
+ sum.tx_bytes += bstats->tx_bytes;
+ sum.tx_packets += bstats->tx_packets;
+ sum.rx_bytes += bstats->rx_bytes;
+ sum.rx_packets += bstats->rx_packets;
+ }
+
+ stats->tx_bytes = sum.tx_bytes;
+ stats->tx_packets = sum.tx_packets;
+ stats->rx_bytes = sum.rx_bytes;
+ stats->rx_packets = sum.rx_packets;
+
+ return stats;
+}
+
static int br_change_mtu(struct net_device *dev, int new_mtu)
{
struct net_bridge *br = netdev_priv(dev);
@@ -162,6 +198,78 @@ static int br_set_tx_csum(struct net_device *dev, u32 data)
return 0;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static bool br_devices_support_netpoll(struct net_bridge *br)
+{
+ struct net_bridge_port *p;
+ bool ret = true;
+ int count = 0;
+ unsigned long flags;
+
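+ /* Netpoll on the bridge requires every port to provide ndo_poll_controller. */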
+ spin_lock_irqsave(&br->lock, flags);
+ list_for_each_entry(p, &br->port_list, list) {
+ count++;
+ if ((p->dev->priv_flags & IFF_DISABLE_NETPOLL) ||
+ !p->dev->netdev_ops->ndo_poll_controller)
+ ret = false;
+ }
+ spin_unlock_irqrestore(&br->lock, flags);
+ return count != 0 && ret;
+}
+
+static void br_poll_controller(struct net_device *br_dev)
+{
+ struct netpoll *np = br_dev->npinfo->netpoll;
+
+ if (np->real_dev != br_dev)
+ netpoll_poll_dev(np->real_dev);
+}
+
+void br_netpoll_cleanup(struct net_device *dev)
+{
+ struct net_bridge *br = netdev_priv(dev);
+ struct net_bridge_port *p, *n;
+ const struct net_device_ops *ops;
+
+ br->dev->npinfo = NULL;
+ list_for_each_entry_safe(p, n, &br->port_list, list) {
+ if (p->dev) {
+ ops = p->dev->netdev_ops;
+ if (ops->ndo_netpoll_cleanup)
+ ops->ndo_netpoll_cleanup(p->dev);
+ else
+ p->dev->npinfo = NULL;
+ }
+ }
+}
+
+void br_netpoll_disable(struct net_bridge *br,
+ struct net_device *dev)
+{
+ if (br_devices_support_netpoll(br))
+ br->dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
+ if (dev->netdev_ops->ndo_netpoll_cleanup)
+ dev->netdev_ops->ndo_netpoll_cleanup(dev);
+ else
+ dev->npinfo = NULL;
+}
+
+void br_netpoll_enable(struct net_bridge *br,
+ struct net_device *dev)
+{
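+ /* Share the bridge's netpoll state with the new port if all ports can poll. */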
+ if (br_devices_support_netpoll(br)) {
+ br->dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
+ if (br->dev->npinfo)
+ dev->npinfo = br->dev->npinfo;
+ } else if (!(br->dev->priv_flags & IFF_DISABLE_NETPOLL)) {
+ br->dev->priv_flags |= IFF_DISABLE_NETPOLL;
+ br_info(br,"new device %s does not support netpoll (disabling)",
+ dev->name);
+ }
+}
+
+#endif
+
static const struct ethtool_ops br_ethtool_ops = {
.get_drvinfo = br_getinfo,
.get_link = ethtool_op_get_link,
@@ -180,19 +288,32 @@ static const struct net_device_ops br_netdev_ops = {
.ndo_open = br_dev_open,
.ndo_stop = br_dev_stop,
.ndo_start_xmit = br_dev_xmit,
+ .ndo_get_stats = br_get_stats,
.ndo_set_mac_address = br_set_mac_address,
.ndo_set_multicast_list = br_dev_set_multicast_list,
.ndo_change_mtu = br_change_mtu,
.ndo_do_ioctl = br_dev_ioctl,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_netpoll_cleanup = br_netpoll_cleanup,
+ .ndo_poll_controller = br_poll_controller,
+#endif
};
+static void br_dev_free(struct net_device *dev)
+{
+ struct net_bridge *br = netdev_priv(dev);
+
+ free_percpu(br->stats);
+ free_netdev(dev);
+}
+
void br_dev_setup(struct net_device *dev)
{
random_ether_addr(dev->dev_addr);
ether_setup(dev);
dev->netdev_ops = &br_netdev_ops;
- dev->destructor = free_netdev;
+ dev->destructor = br_dev_free;
SET_ETHTOOL_OPS(dev, &br_ethtool_ops);
dev->tx_queue_len = 0;
dev->priv_flags = IFF_EBRIDGE;
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 9101a4e56201..26637439965b 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -353,8 +353,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
*/
if (fdb->is_local)
return 0;
-
- printk(KERN_WARNING "%s adding interface with same address "
+ br_warn(br, "adding interface %s with same address "
"as a received packet\n",
source->dev->name);
fdb_delete(fdb);
@@ -397,9 +396,9 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
/* attempt to update an entry for a local interface */
if (unlikely(fdb->is_local)) {
if (net_ratelimit())
- printk(KERN_WARNING "%s: received packet with "
- "own address as source address\n",
- source->dev->name);
+ br_warn(br, "received packet on %s with "
+ "own address as source address\n",
+ source->dev->name);
} else {
/* fastpath: update of existing entry */
fdb->dst = source;
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 7a241c396981..a98ef1393097 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
+#include <linux/netpoll.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/netfilter_bridge.h>
@@ -44,13 +45,19 @@ int br_dev_queue_push_xmit(struct sk_buff *skb)
if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
kfree_skb(skb);
else {
- /* ip_refrag calls ip_fragment, doesn't copy the MAC header. */
+ /* ip_fragment doesn't copy the MAC header */
if (nf_bridge_maybe_copy_header(skb))
kfree_skb(skb);
else {
skb_push(skb, ETH_HLEN);
- dev_queue_xmit(skb);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ if (unlikely(skb->dev->priv_flags & IFF_IN_NETPOLL)) {
+ netpoll_send_skb(skb->dev->npinfo->netpoll, skb);
+ skb->dev->priv_flags &= ~IFF_IN_NETPOLL;
+ } else
+#endif
+ dev_queue_xmit(skb);
}
}
@@ -59,16 +66,30 @@ int br_dev_queue_push_xmit(struct sk_buff *skb)
int br_forward_finish(struct sk_buff *skb)
{
- return NF_HOOK(PF_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
+ return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
br_dev_queue_push_xmit);
}
static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ struct net_bridge *br = to->br;
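+ /* If the bridge is being netpolled, retarget the netpoll at the egress port. */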
+ if (unlikely(br->dev->priv_flags & IFF_IN_NETPOLL)) {
+ struct netpoll *np;
+ to->dev->npinfo = skb->dev->npinfo;
+ np = skb->dev->npinfo->netpoll;
+ np->real_dev = np->dev = to->dev;
+ to->dev->priv_flags |= IFF_IN_NETPOLL;
+ }
+#endif
skb->dev = to->dev;
- NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
- br_forward_finish);
+ NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
+ br_forward_finish);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ if (skb->dev->npinfo)
+ skb->dev->npinfo->netpoll->dev = br->dev;
+#endif
}
static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
@@ -84,8 +105,8 @@ static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
skb->dev = to->dev;
skb_forward_csum(skb);
- NF_HOOK(PF_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
- br_forward_finish);
+ NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
+ br_forward_finish);
}
/* called with rcu_read_lock */
@@ -208,17 +229,15 @@ static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
{
struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
struct net_bridge *br = netdev_priv(dev);
- struct net_bridge_port *port;
- struct net_bridge_port *lport, *rport;
- struct net_bridge_port *prev;
+ struct net_bridge_port *prev = NULL;
struct net_bridge_port_group *p;
struct hlist_node *rp;
- prev = NULL;
-
- rp = br->router_list.first;
- p = mdst ? mdst->ports : NULL;
+ rp = rcu_dereference(br->router_list.first);
+ p = mdst ? rcu_dereference(mdst->ports) : NULL;
while (p || rp) {
+ struct net_bridge_port *port, *lport, *rport;
+
lport = p ? p->port : NULL;
rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) :
NULL;
@@ -231,9 +250,9 @@ static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
goto out;
if ((unsigned long)lport >= (unsigned long)port)
- p = p->next;
+ p = rcu_dereference(p->next);
if ((unsigned long)rport >= (unsigned long)port)
- rp = rp->next;
+ rp = rcu_dereference(rp->next);
}
if (!prev)
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 0b6b1f2ff7ac..18b245e2c00e 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/netdevice.h>
+#include <linux/netpoll.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/module.h>
@@ -132,7 +133,7 @@ static void del_nbp(struct net_bridge_port *p)
struct net_bridge *br = p->br;
struct net_device *dev = p->dev;
- sysfs_remove_link(br->ifobj, dev->name);
+ sysfs_remove_link(br->ifobj, p->dev->name);
dev_set_promiscuity(dev, -1);
@@ -153,6 +154,7 @@ static void del_nbp(struct net_bridge_port *p)
kobject_uevent(&p->kobj, KOBJ_REMOVE);
kobject_del(&p->kobj);
+ br_netpoll_disable(br, dev);
call_rcu(&p->rcu, destroy_nbp_rcu);
}
@@ -165,6 +167,8 @@ static void del_br(struct net_bridge *br, struct list_head *head)
del_nbp(p);
}
+ br_netpoll_cleanup(br->dev);
+
del_timer_sync(&br->gc_timer);
br_sysfs_delbr(br->dev);
@@ -186,6 +190,12 @@ static struct net_device *new_bridge_dev(struct net *net, const char *name)
br = netdev_priv(dev);
br->dev = dev;
+ br->stats = alloc_percpu(struct br_cpu_netstats);
+ if (!br->stats) {
+ free_netdev(dev);
+ return NULL;
+ }
+
spin_lock_init(&br->lock);
INIT_LIST_HEAD(&br->port_list);
spin_lock_init(&br->hash_lock);
@@ -438,6 +448,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
kobject_uevent(&p->kobj, KOBJ_ADD);
+ br_netpoll_enable(br, dev);
+
return 0;
err2:
br_fdb_delete_by_port(br, p, 1);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index a82dde2d2ead..d36e700f7a26 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -24,14 +24,16 @@ const u8 br_group_address[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static int br_pass_frame_up(struct sk_buff *skb)
{
struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
+ struct net_bridge *br = netdev_priv(brdev);
+ struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats);
- brdev->stats.rx_packets++;
- brdev->stats.rx_bytes += skb->len;
+ brstats->rx_packets++;
+ brstats->rx_bytes += skb->len;
indev = skb->dev;
skb->dev = brdev;
- return NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
+ return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
netif_receive_skb);
}
@@ -154,7 +156,7 @@ struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb)
if (p->br->stp_enabled == BR_NO_STP && dest[5] == 0)
goto forward;
- if (NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
+ if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
NULL, br_handle_local_finish))
return NULL; /* frame consumed by filter */
else
@@ -175,7 +177,7 @@ forward:
if (!compare_ether_addr(p->br->dev->dev_addr, dest))
skb->pkt_type = PACKET_HOST;
- NF_HOOK(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
+ NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
br_handle_frame_finish);
break;
default:
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index 995afc4b04dc..cb43312b846e 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -412,6 +412,6 @@ int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
- pr_debug("Bridge does not support ioctl 0x%x\n", cmd);
+ br_debug(br, "Bridge does not support ioctl 0x%x\n", cmd);
return -EOPNOTSUPP;
}
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index eaa0e1bae49b..9d21d98ae5fa 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -24,51 +24,139 @@
#include <linux/slab.h>
#include <linux/timer.h>
#include <net/ip.h>
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#include <net/ipv6.h>
+#include <net/mld.h>
+#include <net/addrconf.h>
+#include <net/ip6_checksum.h>
+#endif
#include "br_private.h"
-static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb, __be32 ip)
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static inline int ipv6_is_local_multicast(const struct in6_addr *addr)
{
- return jhash_1word(mdb->secret, (u32)ip) & (mdb->max - 1);
+ if (ipv6_addr_is_multicast(addr) &&
+ IPV6_ADDR_MC_SCOPE(addr) <= IPV6_ADDR_SCOPE_LINKLOCAL)
+ return 1;
+ return 0;
+}
+#endif
+
+static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
+{
+ if (a->proto != b->proto)
+ return 0;
+ switch (a->proto) {
+ case htons(ETH_P_IP):
+ return a->u.ip4 == b->u.ip4;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ case htons(ETH_P_IPV6):
+ return ipv6_addr_equal(&a->u.ip6, &b->u.ip6);
+#endif
+ }
+ return 0;
+}
+
+static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip)
+{
+ return jhash_1word(mdb->secret, (__force u32)ip) & (mdb->max - 1);
+}
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb,
+ const struct in6_addr *ip)
+{
+ return jhash2((__force u32 *)ip->s6_addr32, 4, mdb->secret) &
+ (mdb->max - 1);
+}
+#endif
+
+static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb,
+ struct br_ip *ip)
+{
+ switch (ip->proto) {
+ case htons(ETH_P_IP):
+ return __br_ip4_hash(mdb, ip->u.ip4);
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ case htons(ETH_P_IPV6):
+ return __br_ip6_hash(mdb, &ip->u.ip6);
+#endif
+ }
+ return 0;
}
static struct net_bridge_mdb_entry *__br_mdb_ip_get(
- struct net_bridge_mdb_htable *mdb, __be32 dst, int hash)
+ struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash)
{
struct net_bridge_mdb_entry *mp;
struct hlist_node *p;
hlist_for_each_entry_rcu(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
- if (dst == mp->addr)
+ if (br_ip_equal(&mp->addr, dst))
return mp;
}
return NULL;
}
-static struct net_bridge_mdb_entry *br_mdb_ip_get(
+static struct net_bridge_mdb_entry *br_mdb_ip4_get(
struct net_bridge_mdb_htable *mdb, __be32 dst)
{
- if (!mdb)
- return NULL;
+ struct br_ip br_dst;
+
+ br_dst.u.ip4 = dst;
+ br_dst.proto = htons(ETH_P_IP);
+
+ return __br_mdb_ip_get(mdb, &br_dst, __br_ip4_hash(mdb, dst));
+}
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static struct net_bridge_mdb_entry *br_mdb_ip6_get(
+ struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst)
+{
+ struct br_ip br_dst;
+
+ ipv6_addr_copy(&br_dst.u.ip6, dst);
+ br_dst.proto = htons(ETH_P_IPV6);
+ return __br_mdb_ip_get(mdb, &br_dst, __br_ip6_hash(mdb, dst));
+}
+#endif
+
+static struct net_bridge_mdb_entry *br_mdb_ip_get(
+ struct net_bridge_mdb_htable *mdb, struct br_ip *dst)
+{
return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
}
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
struct sk_buff *skb)
{
- if (br->multicast_disabled)
+ struct net_bridge_mdb_htable *mdb = br->mdb;
+ struct br_ip ip;
+
+ if (!mdb || br->multicast_disabled)
return NULL;
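+ /* IGMP/MLD control traffic is never forwarded from an MDB entry. */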
+ if (BR_INPUT_SKB_CB(skb)->igmp)
+ return NULL;
+
+ ip.proto = skb->protocol;
+
switch (skb->protocol) {
case htons(ETH_P_IP):
- if (BR_INPUT_SKB_CB(skb)->igmp)
- break;
- return br_mdb_ip_get(br->mdb, ip_hdr(skb)->daddr);
+ ip.u.ip4 = ip_hdr(skb)->daddr;
+ break;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ case htons(ETH_P_IPV6):
+ ipv6_addr_copy(&ip.u.ip6, &ipv6_hdr(skb)->daddr);
+ break;
+#endif
+ default:
+ return NULL;
}
- return NULL;
+ return br_mdb_ip_get(mdb, &ip);
}
static void br_mdb_free(struct rcu_head *head)
@@ -95,7 +183,7 @@ static int br_mdb_copy(struct net_bridge_mdb_htable *new,
for (i = 0; i < old->max; i++)
hlist_for_each_entry(mp, p, &old->mhash[i], hlist[old->ver])
hlist_add_head(&mp->hlist[new->ver],
- &new->mhash[br_ip_hash(new, mp->addr)]);
+ &new->mhash[br_ip_hash(new, &mp->addr)]);
if (!elasticity)
return 0;
@@ -163,7 +251,7 @@ static void br_multicast_del_pg(struct net_bridge *br,
struct net_bridge_port_group *p;
struct net_bridge_port_group **pp;
- mp = br_mdb_ip_get(mdb, pg->addr);
+ mp = br_mdb_ip_get(mdb, &pg->addr);
if (WARN_ON(!mp))
return;
@@ -171,7 +259,7 @@ static void br_multicast_del_pg(struct net_bridge *br,
if (p != pg)
continue;
- *pp = p->next;
+ rcu_assign_pointer(*pp, p->next);
hlist_del_init(&p->mglist);
del_timer(&p->timer);
del_timer(&p->query_timer);
@@ -249,8 +337,8 @@ out:
return 0;
}
-static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
- __be32 group)
+static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
+ __be32 group)
{
struct sk_buff *skb;
struct igmphdr *ih;
@@ -314,12 +402,104 @@ out:
return skb;
}
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
+ struct in6_addr *group)
+{
+ struct sk_buff *skb;
+ struct ipv6hdr *ip6h;
+ struct mld_msg *mldq;
+ struct ethhdr *eth;
+ u8 *hopopt;
+ unsigned long interval;
+
+ skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) +
+ 8 + sizeof(*mldq));
+ if (!skb)
+ goto out;
+
+ skb->protocol = htons(ETH_P_IPV6);
+
+ /* Ethernet header */
+ skb_reset_mac_header(skb);
+ eth = eth_hdr(skb);
+
+ memcpy(eth->h_source, br->dev->dev_addr, 6);
+ ipv6_eth_mc_map(group, eth->h_dest);
+ eth->h_proto = htons(ETH_P_IPV6);
+ skb_put(skb, sizeof(*eth));
+
+ /* IPv6 header + HbH option */
+ skb_set_network_header(skb, skb->len);
+ ip6h = ipv6_hdr(skb);
+
+ *(__force __be32 *)ip6h = htonl(0x60000000);
+ ip6h->payload_len = htons(8 + sizeof(*mldq));
+ ip6h->nexthdr = IPPROTO_HOPOPTS;
+ ip6h->hop_limit = 1;
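+ /* Source is unspecified; queries go to the all-nodes address (ff02::1). */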
+ ipv6_addr_set(&ip6h->saddr, 0, 0, 0, 0);
+ ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
+
+ hopopt = (u8 *)(ip6h + 1);
+ hopopt[0] = IPPROTO_ICMPV6; /* next hdr */
+ hopopt[1] = 0; /* length of HbH */
+ hopopt[2] = IPV6_TLV_ROUTERALERT; /* Router Alert */
+ hopopt[3] = 2; /* Length of RA Option */
+ hopopt[4] = 0; /* Type = 0x0000 (MLD) */
+ hopopt[5] = 0;
+ hopopt[6] = IPV6_TLV_PAD0; /* Pad0 */
+ hopopt[7] = IPV6_TLV_PAD0; /* Pad0 */
+
+ skb_put(skb, sizeof(*ip6h) + 8);
+
+ /* ICMPv6 */
+ skb_set_transport_header(skb, skb->len);
+ mldq = (struct mld_msg *) icmp6_hdr(skb);
+
+ interval = ipv6_addr_any(group) ? br->multicast_query_response_interval :
+ br->multicast_last_member_interval;
+
+ mldq->mld_type = ICMPV6_MGM_QUERY;
+ mldq->mld_code = 0;
+ mldq->mld_cksum = 0;
+ mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
+ mldq->mld_reserved = 0;
+ ipv6_addr_copy(&mldq->mld_mca, group);
+
+ /* checksum */
+ mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+ sizeof(*mldq), IPPROTO_ICMPV6,
+ csum_partial(mldq,
+ sizeof(*mldq), 0));
+ skb_put(skb, sizeof(*mldq));
+
+ __skb_pull(skb, sizeof(*eth));
+
+out:
+ return skb;
+}
+#endif
+
+static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
+ struct br_ip *addr)
+{
+ switch (addr->proto) {
+ case htons(ETH_P_IP):
+ return br_ip4_multicast_alloc_query(br, addr->u.ip4);
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ case htons(ETH_P_IPV6):
+ return br_ip6_multicast_alloc_query(br, &addr->u.ip6);
+#endif
+ }
+ return NULL;
+}
+
static void br_multicast_send_group_query(struct net_bridge_mdb_entry *mp)
{
struct net_bridge *br = mp->br;
struct sk_buff *skb;
- skb = br_multicast_alloc_query(br, mp->addr);
+ skb = br_multicast_alloc_query(br, &mp->addr);
if (!skb)
goto timer;
@@ -353,7 +533,7 @@ static void br_multicast_send_port_group_query(struct net_bridge_port_group *pg)
struct net_bridge *br = port->br;
struct sk_buff *skb;
- skb = br_multicast_alloc_query(br, pg->addr);
+ skb = br_multicast_alloc_query(br, &pg->addr);
if (!skb)
goto timer;
@@ -383,8 +563,8 @@ out:
}
static struct net_bridge_mdb_entry *br_multicast_get_group(
- struct net_bridge *br, struct net_bridge_port *port, __be32 group,
- int hash)
+ struct net_bridge *br, struct net_bridge_port *port,
+ struct br_ip *group, int hash)
{
struct net_bridge_mdb_htable *mdb = br->mdb;
struct net_bridge_mdb_entry *mp;
@@ -396,9 +576,8 @@ static struct net_bridge_mdb_entry *br_multicast_get_group(
hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
count++;
- if (unlikely(group == mp->addr)) {
+ if (unlikely(br_ip_equal(group, &mp->addr)))
return mp;
- }
}
elasticity = 0;
@@ -406,10 +585,9 @@ static struct net_bridge_mdb_entry *br_multicast_get_group(
if (unlikely(count > br->hash_elasticity && count)) {
if (net_ratelimit())
- printk(KERN_INFO "%s: Multicast hash table "
- "chain limit reached: %s\n",
- br->dev->name, port ? port->dev->name :
- br->dev->name);
+ br_info(br, "Multicast hash table "
+ "chain limit reached: %s\n",
+ port ? port->dev->name : br->dev->name);
elasticity = br->hash_elasticity;
}
@@ -417,11 +595,9 @@ static struct net_bridge_mdb_entry *br_multicast_get_group(
if (mdb->size >= max) {
max *= 2;
if (unlikely(max >= br->hash_max)) {
- printk(KERN_WARNING "%s: Multicast hash table maximum "
- "reached, disabling snooping: %s, %d\n",
- br->dev->name, port ? port->dev->name :
- br->dev->name,
- max);
+ br_warn(br, "Multicast hash table maximum "
+ "reached, disabling snooping: %s, %d\n",
+ port ? port->dev->name : br->dev->name, max);
err = -E2BIG;
disable:
br->multicast_disabled = 1;
@@ -432,22 +608,19 @@ disable:
if (max > mdb->max || elasticity) {
if (mdb->old) {
if (net_ratelimit())
- printk(KERN_INFO "%s: Multicast hash table "
- "on fire: %s\n",
- br->dev->name, port ? port->dev->name :
- br->dev->name);
+ br_info(br, "Multicast hash table "
+ "on fire: %s\n",
+ port ? port->dev->name : br->dev->name);
err = -EEXIST;
goto err;
}
err = br_mdb_rehash(&br->mdb, max, elasticity);
if (err) {
- printk(KERN_WARNING "%s: Cannot rehash multicast "
- "hash table, disabling snooping: "
- "%s, %d, %d\n",
- br->dev->name, port ? port->dev->name :
- br->dev->name,
- mdb->size, err);
+ br_warn(br, "Cannot rehash multicast "
+ "hash table, disabling snooping: %s, %d, %d\n",
+ port ? port->dev->name : br->dev->name,
+ mdb->size, err);
goto disable;
}
@@ -463,7 +636,8 @@ err:
}
static struct net_bridge_mdb_entry *br_multicast_new_group(
- struct net_bridge *br, struct net_bridge_port *port, __be32 group)
+ struct net_bridge *br, struct net_bridge_port *port,
+ struct br_ip *group)
{
struct net_bridge_mdb_htable *mdb = br->mdb;
struct net_bridge_mdb_entry *mp;
@@ -496,7 +670,7 @@ rehash:
goto out;
mp->br = br;
- mp->addr = group;
+ mp->addr = *group;
setup_timer(&mp->timer, br_multicast_group_expired,
(unsigned long)mp);
setup_timer(&mp->query_timer, br_multicast_group_query_expired,
@@ -510,7 +684,8 @@ out:
}
static int br_multicast_add_group(struct net_bridge *br,
- struct net_bridge_port *port, __be32 group)
+ struct net_bridge_port *port,
+ struct br_ip *group)
{
struct net_bridge_mdb_entry *mp;
struct net_bridge_port_group *p;
@@ -518,9 +693,6 @@ static int br_multicast_add_group(struct net_bridge *br,
unsigned long now = jiffies;
int err;
- if (ipv4_is_local_multicast(group))
- return 0;
-
spin_lock(&br->multicast_lock);
if (!netif_running(br->dev) ||
(port && port->state == BR_STATE_DISABLED))
@@ -549,7 +721,7 @@ static int br_multicast_add_group(struct net_bridge *br,
if (unlikely(!p))
goto err;
- p->addr = group;
+ p->addr = *group;
p->port = port;
p->next = *pp;
hlist_add_head(&p->mglist, &port->mglist);
@@ -570,6 +742,38 @@ err:
return err;
}
+static int br_ip4_multicast_add_group(struct net_bridge *br,
+ struct net_bridge_port *port,
+ __be32 group)
+{
+ struct br_ip br_group;
+
+ if (ipv4_is_local_multicast(group))
+ return 0;
+
+ br_group.u.ip4 = group;
+ br_group.proto = htons(ETH_P_IP);
+
+ return br_multicast_add_group(br, port, &br_group);
+}
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static int br_ip6_multicast_add_group(struct net_bridge *br,
+ struct net_bridge_port *port,
+ const struct in6_addr *group)
+{
+ struct br_ip br_group;
+
+ if (ipv6_is_local_multicast(group))
+ return 0;
+
+ ipv6_addr_copy(&br_group.u.ip6, group);
+ br_group.proto = htons(ETH_P_IPV6);
+
+ return br_multicast_add_group(br, port, &br_group);
+}
+#endif
+
static void br_multicast_router_expired(unsigned long data)
{
struct net_bridge_port *port = (void *)data;
@@ -591,29 +795,45 @@ static void br_multicast_local_router_expired(unsigned long data)
{
}
-static void br_multicast_send_query(struct net_bridge *br,
- struct net_bridge_port *port, u32 sent)
+static void __br_multicast_send_query(struct net_bridge *br,
+ struct net_bridge_port *port,
+ struct br_ip *ip)
{
- unsigned long time;
struct sk_buff *skb;
- if (!netif_running(br->dev) || br->multicast_disabled ||
- timer_pending(&br->multicast_querier_timer))
- return;
-
- skb = br_multicast_alloc_query(br, 0);
+ skb = br_multicast_alloc_query(br, ip);
if (!skb)
- goto timer;
+ return;
if (port) {
__skb_push(skb, sizeof(struct ethhdr));
skb->dev = port->dev;
- NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
+ NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
dev_queue_xmit);
} else
netif_rx(skb);
+}
+
+static void br_multicast_send_query(struct net_bridge *br,
+ struct net_bridge_port *port, u32 sent)
+{
+ unsigned long time;
+ struct br_ip br_group;
+
+ if (!netif_running(br->dev) || br->multicast_disabled ||
+ timer_pending(&br->multicast_querier_timer))
+ return;
+
+ memset(&br_group.u, 0, sizeof(br_group.u));
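+ /* An all-zero group address produces a general query for each family. */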
+
+ br_group.proto = htons(ETH_P_IP);
+ __br_multicast_send_query(br, port, &br_group);
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ br_group.proto = htons(ETH_P_IPV6);
+ __br_multicast_send_query(br, port, &br_group);
+#endif
-timer:
time = jiffies;
time += sent < br->multicast_startup_query_count ?
br->multicast_startup_query_interval :
@@ -698,9 +918,9 @@ void br_multicast_disable_port(struct net_bridge_port *port)
spin_unlock(&br->multicast_lock);
}
-static int br_multicast_igmp3_report(struct net_bridge *br,
- struct net_bridge_port *port,
- struct sk_buff *skb)
+static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
+ struct net_bridge_port *port,
+ struct sk_buff *skb)
{
struct igmpv3_report *ih;
struct igmpv3_grec *grec;
@@ -745,7 +965,7 @@ static int br_multicast_igmp3_report(struct net_bridge *br,
continue;
}
- err = br_multicast_add_group(br, port, group);
+ err = br_ip4_multicast_add_group(br, port, group);
if (err)
break;
}
@@ -753,24 +973,87 @@ static int br_multicast_igmp3_report(struct net_bridge *br,
return err;
}
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static int br_ip6_multicast_mld2_report(struct net_bridge *br,
+ struct net_bridge_port *port,
+ struct sk_buff *skb)
+{
+ struct icmp6hdr *icmp6h;
+ struct mld2_grec *grec;
+ int i;
+ int len;
+ int num;
+ int err = 0;
+
+ if (!pskb_may_pull(skb, sizeof(*icmp6h)))
+ return -EINVAL;
+
+ icmp6h = icmp6_hdr(skb);
+ num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
+ len = sizeof(*icmp6h);
+
+ for (i = 0; i < num; i++) {
+ __be16 *nsrcs, _nsrcs;
+
+ nsrcs = skb_header_pointer(skb,
+ len + offsetof(struct mld2_grec,
+ grec_nsrcs),
+ sizeof(_nsrcs), &_nsrcs);
+ if (!nsrcs)
+ return -EINVAL;
+
+ if (!pskb_may_pull(skb,
+ len + sizeof(*grec) +
+ sizeof(struct in6_addr) * ntohs(*nsrcs)))
+ return -EINVAL;
+
+ grec = (struct mld2_grec *)(skb->data + len);
+ len += sizeof(*grec) + sizeof(struct in6_addr) * ntohs(*nsrcs);
+
+ /* We treat these as MLDv1 reports for now. */
+ switch (grec->grec_type) {
+ case MLD2_MODE_IS_INCLUDE:
+ case MLD2_MODE_IS_EXCLUDE:
+ case MLD2_CHANGE_TO_INCLUDE:
+ case MLD2_CHANGE_TO_EXCLUDE:
+ case MLD2_ALLOW_NEW_SOURCES:
+ case MLD2_BLOCK_OLD_SOURCES:
+ break;
+
+ default:
+ continue;
+ }
+
+ err = br_ip6_multicast_add_group(br, port, &grec->grec_mca);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+#endif
+
+/*
+ * Add port to router_list
+ * list is maintained ordered by pointer value
+ * and locked by br->multicast_lock and RCU
+ */
static void br_multicast_add_router(struct net_bridge *br,
struct net_bridge_port *port)
{
- struct hlist_node *p;
- struct hlist_node **h;
-
- for (h = &br->router_list.first;
- (p = *h) &&
- (unsigned long)container_of(p, struct net_bridge_port, rlist) >
- (unsigned long)port;
- h = &p->next)
- ;
-
- port->rlist.pprev = h;
- port->rlist.next = p;
- rcu_assign_pointer(*h, &port->rlist);
- if (p)
- p->pprev = &port->rlist.next;
+ struct net_bridge_port *p;
+ struct hlist_node *n, *slot = NULL;
+
+ hlist_for_each_entry(p, n, &br->router_list, rlist) {
+ if ((unsigned long) port >= (unsigned long) p)
+ break;
+ slot = n;
+ }
+
+ if (slot)
+ hlist_add_after_rcu(slot, &port->rlist);
+ else
+ hlist_add_head_rcu(&port->rlist, &br->router_list);
}
static void br_multicast_mark_router(struct net_bridge *br,
@@ -800,7 +1083,7 @@ timer:
static void br_multicast_query_received(struct net_bridge *br,
struct net_bridge_port *port,
- __be32 saddr)
+ int saddr)
{
if (saddr)
mod_timer(&br->multicast_querier_timer,
@@ -811,9 +1094,9 @@ static void br_multicast_query_received(struct net_bridge *br,
br_multicast_mark_router(br, port);
}
-static int br_multicast_query(struct net_bridge *br,
- struct net_bridge_port *port,
- struct sk_buff *skb)
+static int br_ip4_multicast_query(struct net_bridge *br,
+ struct net_bridge_port *port,
+ struct sk_buff *skb)
{
struct iphdr *iph = ip_hdr(skb);
struct igmphdr *ih = igmp_hdr(skb);
@@ -831,7 +1114,7 @@ static int br_multicast_query(struct net_bridge *br,
(port && port->state == BR_STATE_DISABLED))
goto out;
- br_multicast_query_received(br, port, iph->saddr);
+ br_multicast_query_received(br, port, !!iph->saddr);
group = ih->group;
@@ -859,7 +1142,7 @@ static int br_multicast_query(struct net_bridge *br,
if (!group)
goto out;
- mp = br_mdb_ip_get(br->mdb, group);
+ mp = br_mdb_ip4_get(br->mdb, group);
if (!mp)
goto out;
@@ -883,9 +1166,78 @@ out:
return err;
}
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static int br_ip6_multicast_query(struct net_bridge *br,
+ struct net_bridge_port *port,
+ struct sk_buff *skb)
+{
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ struct mld_msg *mld = (struct mld_msg *) icmp6_hdr(skb);
+ struct net_bridge_mdb_entry *mp;
+ struct mld2_query *mld2q;
+ struct net_bridge_port_group *p, **pp;
+ unsigned long max_delay;
+ unsigned long now = jiffies;
+ struct in6_addr *group = NULL;
+ int err = 0;
+
+ spin_lock(&br->multicast_lock);
+ if (!netif_running(br->dev) ||
+ (port && port->state == BR_STATE_DISABLED))
+ goto out;
+
+ br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr));
+
+ if (skb->len == sizeof(*mld)) {
+ if (!pskb_may_pull(skb, sizeof(*mld))) {
+ err = -EINVAL;
+ goto out;
+ }
+ mld = (struct mld_msg *) icmp6_hdr(skb);
+ max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
+ if (max_delay)
+ group = &mld->mld_mca;
+ } else if (skb->len >= sizeof(*mld2q)) {
+ if (!pskb_may_pull(skb, sizeof(*mld2q))) {
+ err = -EINVAL;
+ goto out;
+ }
+ mld2q = (struct mld2_query *)icmp6_hdr(skb);
+ if (!mld2q->mld2q_nsrcs)
+ group = &mld2q->mld2q_mca;
+ max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(mld2q->mld2q_mrc) : 1;
+ }
+
+ if (!group)
+ goto out;
+
+ mp = br_mdb_ip6_get(br->mdb, group);
+ if (!mp)
+ goto out;
+
+ max_delay *= br->multicast_last_member_count;
+ if (!hlist_unhashed(&mp->mglist) &&
+ (timer_pending(&mp->timer) ?
+ time_after(mp->timer.expires, now + max_delay) :
+ try_to_del_timer_sync(&mp->timer) >= 0))
+ mod_timer(&mp->timer, now + max_delay);
+
+ for (pp = &mp->ports; (p = *pp); pp = &p->next) {
+ if (timer_pending(&p->timer) ?
+ time_after(p->timer.expires, now + max_delay) :
+ try_to_del_timer_sync(&p->timer) >= 0)
+ mod_timer(&p->timer, now + max_delay);
+ }
+
+out:
+ spin_unlock(&br->multicast_lock);
+ return err;
+}
+#endif
+
static void br_multicast_leave_group(struct net_bridge *br,
struct net_bridge_port *port,
- __be32 group)
+ struct br_ip *group)
{
struct net_bridge_mdb_htable *mdb;
struct net_bridge_mdb_entry *mp;
@@ -893,9 +1245,6 @@ static void br_multicast_leave_group(struct net_bridge *br,
unsigned long now;
unsigned long time;
- if (ipv4_is_local_multicast(group))
- return;
-
spin_lock(&br->multicast_lock);
if (!netif_running(br->dev) ||
(port && port->state == BR_STATE_DISABLED) ||
@@ -946,6 +1295,38 @@ out:
spin_unlock(&br->multicast_lock);
}
+static void br_ip4_multicast_leave_group(struct net_bridge *br,
+ struct net_bridge_port *port,
+ __be32 group)
+{
+ struct br_ip br_group;
+
+ if (ipv4_is_local_multicast(group))
+ return;
+
+ br_group.u.ip4 = group;
+ br_group.proto = htons(ETH_P_IP);
+
+ br_multicast_leave_group(br, port, &br_group);
+}
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static void br_ip6_multicast_leave_group(struct net_bridge *br,
+ struct net_bridge_port *port,
+ const struct in6_addr *group)
+{
+ struct br_ip br_group;
+
+ if (ipv6_is_local_multicast(group))
+ return;
+
+ ipv6_addr_copy(&br_group.u.ip6, group);
+ br_group.proto = htons(ETH_P_IPV6);
+
+ br_multicast_leave_group(br, port, &br_group);
+}
+#endif
+
static int br_multicast_ipv4_rcv(struct net_bridge *br,
struct net_bridge_port *port,
struct sk_buff *skb)
@@ -1000,8 +1381,6 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
if (!pskb_may_pull(skb2, sizeof(*ih)))
goto out;
- iph = ip_hdr(skb2);
-
switch (skb2->ip_summed) {
case CHECKSUM_COMPLETE:
if (!csum_fold(skb2->csum))
@@ -1022,16 +1401,16 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
case IGMP_HOST_MEMBERSHIP_REPORT:
case IGMPV2_HOST_MEMBERSHIP_REPORT:
BR_INPUT_SKB_CB(skb2)->mrouters_only = 1;
- err = br_multicast_add_group(br, port, ih->group);
+ err = br_ip4_multicast_add_group(br, port, ih->group);
break;
case IGMPV3_HOST_MEMBERSHIP_REPORT:
- err = br_multicast_igmp3_report(br, port, skb2);
+ err = br_ip4_multicast_igmp3_report(br, port, skb2);
break;
case IGMP_HOST_MEMBERSHIP_QUERY:
- err = br_multicast_query(br, port, skb2);
+ err = br_ip4_multicast_query(br, port, skb2);
break;
case IGMP_HOST_LEAVE_MESSAGE:
- br_multicast_leave_group(br, port, ih->group);
+ br_ip4_multicast_leave_group(br, port, ih->group);
break;
}
@@ -1043,6 +1422,123 @@ err_out:
return err;
}
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static int br_multicast_ipv6_rcv(struct net_bridge *br,
+ struct net_bridge_port *port,
+ struct sk_buff *skb)
+{
+ struct sk_buff *skb2 = skb;
+ struct ipv6hdr *ip6h;
+ struct icmp6hdr *icmp6h;
+ u8 nexthdr;
+ unsigned len;
+ int offset;
+ int err;
+
+ if (!pskb_may_pull(skb, sizeof(*ip6h)))
+ return -EINVAL;
+
+ ip6h = ipv6_hdr(skb);
+
+ /*
+ * We're interested in MLD messages only.
+ * - Version is 6
+ * - MLD always carries a Router Alert hop-by-hop option
+ * - But we do not support jumbograms.
+ */
+ if (ip6h->version != 6 ||
+ ip6h->nexthdr != IPPROTO_HOPOPTS ||
+ ip6h->payload_len == 0)
+ return 0;
+
+ len = ntohs(ip6h->payload_len);
+ if (skb->len < len)
+ return -EINVAL;
+
+ nexthdr = ip6h->nexthdr;
+ offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
+
+ if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
+ return 0;
+
+ /* Okay, we found ICMPv6 header */
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (!skb2)
+ return -ENOMEM;
+
+ len -= offset - skb_network_offset(skb2);
+
+ __skb_pull(skb2, offset);
+ skb_reset_transport_header(skb2);
+
+ err = -EINVAL;
+ if (!pskb_may_pull(skb2, sizeof(*icmp6h)))
+ goto out;
+
+ icmp6h = icmp6_hdr(skb2);
+
+ switch (icmp6h->icmp6_type) {
+ case ICMPV6_MGM_QUERY:
+ case ICMPV6_MGM_REPORT:
+ case ICMPV6_MGM_REDUCTION:
+ case ICMPV6_MLD2_REPORT:
+ break;
+ default:
+ err = 0;
+ goto out;
+ }
+
+ /* Okay, we found MLD message. Check further. */
+ if (skb2->len > len) {
+ err = pskb_trim_rcsum(skb2, len);
+ if (err)
+ goto out;
+ }
+
+ switch (skb2->ip_summed) {
+ case CHECKSUM_COMPLETE:
+ if (!csum_fold(skb2->csum))
+ break;
+ /*FALLTHROUGH*/
+ case CHECKSUM_NONE:
+ skb2->csum = 0;
+ if (skb_checksum_complete(skb2))
+ goto out;
+ }
+
+ err = 0;
+
+ BR_INPUT_SKB_CB(skb)->igmp = 1;
+
+ switch (icmp6h->icmp6_type) {
+ case ICMPV6_MGM_REPORT:
+ {
+ struct mld_msg *mld = (struct mld_msg *)icmp6h;
+ BR_INPUT_SKB_CB(skb2)->mrouters_only = 1;
+ err = br_ip6_multicast_add_group(br, port, &mld->mld_mca);
+ break;
+ }
+ case ICMPV6_MLD2_REPORT:
+ err = br_ip6_multicast_mld2_report(br, port, skb2);
+ break;
+ case ICMPV6_MGM_QUERY:
+ err = br_ip6_multicast_query(br, port, skb2);
+ break;
+ case ICMPV6_MGM_REDUCTION:
+ {
+ struct mld_msg *mld = (struct mld_msg *)icmp6h;
+ br_ip6_multicast_leave_group(br, port, &mld->mld_mca);
+ }
+ }
+
+out:
+ __skb_push(skb2, offset);
+ if (skb2 != skb)
+ kfree_skb(skb2);
+ return err;
+}
+#endif
+
int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
struct sk_buff *skb)
{
@@ -1055,6 +1551,10 @@ int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
switch (skb->protocol) {
case htons(ETH_P_IP):
return br_multicast_ipv4_rcv(br, port, skb);
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ case htons(ETH_P_IPV6):
+ return br_multicast_ipv6_rcv(br, port, skb);
+#endif
}
return 0;
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 4c4977d12fd6..44420992f72f 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -3,15 +3,8 @@
* Linux ethernet bridge
*
* Authors:
- * Lennert Buytenhek <buytenh@gnu.org>
- * Bart De Schuymer (maintainer) <bdschuym@pandora.be>
- *
- * Changes:
- * Apr 29 2003: physdev module support (bdschuym)
- * Jun 19 2003: let arptables see bridged ARP traffic (bdschuym)
- * Oct 06 2003: filter encapsulated IP/ARP VLAN traffic on untagged bridge
- * (bdschuym)
- * Sep 01 2004: add IPv6 filtering (bdschuym)
+ * Lennert Buytenhek <buytenh@gnu.org>
+ * Bart De Schuymer <bdschuym@pandora.be>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -204,15 +197,24 @@ static inline void nf_bridge_save_header(struct sk_buff *skb)
skb->nf_bridge->data, header_size);
}
-/*
- * When forwarding bridge frames, we save a copy of the original
- * header before processing.
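+/* Restore the encapsulation protocol that was stripped before the IP hooks ran. */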
+static inline void nf_bridge_update_protocol(struct sk_buff *skb)
+{
+ if (skb->nf_bridge->mask & BRNF_8021Q)
+ skb->protocol = htons(ETH_P_8021Q);
+ else if (skb->nf_bridge->mask & BRNF_PPPoE)
+ skb->protocol = htons(ETH_P_PPP_SES);
+}
+
+/* Fill in the header for fragmented IP packets handled by
+ * the IPv4 connection tracking code.
*/
int nf_bridge_copy_header(struct sk_buff *skb)
{
int err;
- int header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
+ unsigned int header_size;
+ nf_bridge_update_protocol(skb);
+ header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
err = skb_cow_head(skb, header_size);
if (err)
return err;
@@ -246,27 +248,48 @@ static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
skb_dst_set(skb, &rt->u.dst);
skb->dev = nf_bridge->physindev;
+ nf_bridge_update_protocol(skb);
nf_bridge_push_encap_header(skb);
- NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
+ NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
br_handle_frame_finish, 1);
return 0;
}
-static void __br_dnat_complain(void)
+/* Obtain the correct destination MAC address, while preserving the original
+ * source MAC address. If we already know this address, we just copy it. If we
+ * don't, we use the neighbour framework to find out. In both cases, we make
+ * sure that br_handle_frame_finish() is called afterwards.
+ */
+static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
{
- static unsigned long last_complaint;
+ struct nf_bridge_info *nf_bridge = skb->nf_bridge;
+ struct dst_entry *dst;
- if (jiffies - last_complaint >= 5 * HZ) {
- printk(KERN_WARNING "Performing cross-bridge DNAT requires IP "
- "forwarding to be enabled\n");
- last_complaint = jiffies;
+ skb->dev = bridge_parent(skb->dev);
+ if (!skb->dev)
+ goto free_skb;
+ dst = skb_dst(skb);
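+ /* A cached hardware header fills in the DMAC without a neighbour lookup. */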
+ if (dst->hh) {
+ neigh_hh_bridge(dst->hh, skb);
+ skb->dev = nf_bridge->physindev;
+ return br_handle_frame_finish(skb);
+ } else if (dst->neighbour) {
+ /* the neighbour function below overwrites the complete
+ * MAC header, so we save the Ethernet source address and
+ * protocol number. */
+ skb_copy_from_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN),
+ skb->nf_bridge->data,
+ ETH_HLEN-ETH_ALEN);
+ /* tell br_dev_xmit to continue with forwarding */
+ nf_bridge->mask |= BRNF_BRIDGED_DNAT;
+ return dst->neighbour->output(skb);
}
+free_skb:
+ kfree_skb(skb);
+ return 0;
}
/* This requires some explaining. If DNAT has taken place,
- * we will need to fix up the destination Ethernet address,
- * and this is a tricky process.
+ * we will need to fix up the destination Ethernet address.
*
* There are two cases to consider:
* 1. The packet was DNAT'ed to a device in the same bridge
@@ -280,62 +303,29 @@ static void __br_dnat_complain(void)
* call ip_route_input() and to look at skb->dst->dev, which is
* changed to the destination device if ip_route_input() succeeds.
*
- * Let us first consider the case that ip_route_input() succeeds:
- *
- * If skb->dst->dev equals the logical bridge device the packet
- * came in on, we can consider this bridging. The packet is passed
- * through the neighbour output function to build a new destination
- * MAC address, which will make the packet enter br_nf_local_out()
- * not much later. In that function it is assured that the iptables
- * FORWARD chain is traversed for the packet.
+ * Let's first consider the case that ip_route_input() succeeds:
*
+ * If the output device equals the logical bridge device the packet
+ * came in on, we can consider this bridging. The corresponding MAC
+ * address will be obtained in br_nf_pre_routing_finish_bridge.
* Otherwise, the packet is considered to be routed and we just
* change the destination MAC address so that the packet will
* later be passed up to the IP stack to be routed. For a redirected
* packet, ip_route_input() will give back the localhost as output device,
* which differs from the bridge device.
*
- * Let us now consider the case that ip_route_input() fails:
+ * Let's now consider the case that ip_route_input() fails:
*
* This can be because the destination address is martian, in which case
* the packet will be dropped.
- * After a "echo '0' > /proc/sys/net/ipv4/ip_forward" ip_route_input()
- * will fail, while __ip_route_output_key() will return success. The source
- * address for __ip_route_output_key() is set to zero, so __ip_route_output_key
+ * If IP forwarding is disabled, ip_route_input() will fail, while
+ * ip_route_output_key() can return success. The source
+ * address for ip_route_output_key() is set to zero, so ip_route_output_key()
* thinks we're handling a locally generated packet and won't care
- * if IP forwarding is allowed. We send a warning message to the users's
- * log telling her to put IP forwarding on.
- *
- * ip_route_input() will also fail if there is no route available.
- * In that case we just drop the packet.
- *
- * --Lennert, 20020411
- * --Bart, 20020416 (updated)
- * --Bart, 20021007 (updated)
- * --Bart, 20062711 (updated) */
-static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
-{
- if (skb->pkt_type == PACKET_OTHERHOST) {
- skb->pkt_type = PACKET_HOST;
- skb->nf_bridge->mask |= BRNF_PKT_TYPE;
- }
- skb->nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
-
- skb->dev = bridge_parent(skb->dev);
- if (skb->dev) {
- struct dst_entry *dst = skb_dst(skb);
-
- nf_bridge_pull_encap_header(skb);
-
- if (dst->hh)
- return neigh_hh_output(dst->hh, skb);
- else if (dst->neighbour)
- return dst->neighbour->output(skb);
- }
- kfree_skb(skb);
- return 0;
-}
-
+ * if IP forwarding is enabled. If the output device equals the logical bridge
+ * device, we proceed as if ip_route_input() succeeded. If it differs from the
+ * logical bridge port or if ip_route_output_key() fails we drop the packet.
+ */
static int br_nf_pre_routing_finish(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
@@ -379,11 +369,6 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb)
skb_dst_set(skb, (struct dst_entry *)rt);
goto bridged_dnat;
}
- /* we are sure that forwarding is disabled, so printing
- * this message is no problem. Note that the packet could
- * still have a martian destination address, in which case
- * the packet could be dropped even if forwarding were enabled */
- __br_dnat_complain();
dst_release((struct dst_entry *)rt);
}
free_skb:
@@ -392,12 +377,11 @@ free_skb:
} else {
if (skb_dst(skb)->dev == dev) {
bridged_dnat:
- /* Tell br_nf_local_out this is a
- * bridged frame */
- nf_bridge->mask |= BRNF_BRIDGED_DNAT;
skb->dev = nf_bridge->physindev;
+ nf_bridge_update_protocol(skb);
nf_bridge_push_encap_header(skb);
- NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING,
+ NF_HOOK_THRESH(NFPROTO_BRIDGE,
+ NF_BR_PRE_ROUTING,
skb, skb->dev, NULL,
br_nf_pre_routing_finish_bridge,
1);
@@ -417,8 +401,9 @@ bridged_dnat:
}
skb->dev = nf_bridge->physindev;
+ nf_bridge_update_protocol(skb);
nf_bridge_push_encap_header(skb);
- NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
+ NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
br_handle_frame_finish, 1);
return 0;
@@ -437,6 +422,10 @@ static struct net_device *setup_pre_routing(struct sk_buff *skb)
nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING;
nf_bridge->physindev = skb->dev;
skb->dev = bridge_parent(skb->dev);
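+ /* Record the encapsulation so skb->protocol can be restored later. */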
+ if (skb->protocol == htons(ETH_P_8021Q))
+ nf_bridge->mask |= BRNF_8021Q;
+ else if (skb->protocol == htons(ETH_P_PPP_SES))
+ nf_bridge->mask |= BRNF_PPPoE;
return skb->dev;
}
@@ -535,7 +524,8 @@ static unsigned int br_nf_pre_routing_ipv6(unsigned int hook,
if (!setup_pre_routing(skb))
return NF_DROP;
- NF_HOOK(PF_INET6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
+ skb->protocol = htons(ETH_P_IPV6);
+ NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
br_nf_pre_routing_finish_ipv6);
return NF_STOLEN;
@@ -607,8 +597,9 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
if (!setup_pre_routing(skb))
return NF_DROP;
store_orig_dstaddr(skb);
+ skb->protocol = htons(ETH_P_IP);
- NF_HOOK(PF_INET, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
+ NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
br_nf_pre_routing_finish);
return NF_STOLEN;
@@ -652,11 +643,13 @@ static int br_nf_forward_finish(struct sk_buff *skb)
skb->pkt_type = PACKET_OTHERHOST;
nf_bridge->mask ^= BRNF_PKT_TYPE;
}
+ nf_bridge_update_protocol(skb);
} else {
in = *((struct net_device **)(skb->cb));
}
nf_bridge_push_encap_header(skb);
- NF_HOOK_THRESH(PF_BRIDGE, NF_BR_FORWARD, skb, in,
+
+ NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, in,
skb->dev, br_forward_finish, 1);
return 0;
}
@@ -707,6 +700,10 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
/* The physdev module checks on this */
nf_bridge->mask |= BRNF_BRIDGED;
nf_bridge->physoutdev = skb->dev;
+ if (pf == PF_INET)
+ skb->protocol = htons(ETH_P_IP);
+ else
+ skb->protocol = htons(ETH_P_IPV6);
NF_HOOK(pf, NF_INET_FORWARD, skb, bridge_parent(in), parent,
br_nf_forward_finish);
@@ -744,60 +741,11 @@ static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb,
return NF_STOLEN;
}
-/* PF_BRIDGE/LOCAL_OUT ***********************************************
- *
- * This function sees both locally originated IP packets and forwarded
- * IP packets (in both cases the destination device is a bridge
- * device). It also sees bridged-and-DNAT'ed packets.
- *
- * If (nf_bridge->mask & BRNF_BRIDGED_DNAT) then the packet is bridged
- * and we fake the PF_BRIDGE/FORWARD hook. The function br_nf_forward()
- * will then fake the PF_INET/FORWARD hook. br_nf_local_out() has priority
- * NF_BR_PRI_FIRST, so no relevant PF_BRIDGE/INPUT functions have been nor
- * will be executed.
- */
-static unsigned int br_nf_local_out(unsigned int hook, struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
-{
- struct net_device *realindev;
- struct nf_bridge_info *nf_bridge;
-
- if (!skb->nf_bridge)
- return NF_ACCEPT;
-
- /* Need exclusive nf_bridge_info since we might have multiple
- * different physoutdevs. */
- if (!nf_bridge_unshare(skb))
- return NF_DROP;
-
- nf_bridge = skb->nf_bridge;
- if (!(nf_bridge->mask & BRNF_BRIDGED_DNAT))
- return NF_ACCEPT;
-
- /* Bridged, take PF_BRIDGE/FORWARD.
- * (see big note in front of br_nf_pre_routing_finish) */
- nf_bridge->physoutdev = skb->dev;
- realindev = nf_bridge->physindev;
-
- if (nf_bridge->mask & BRNF_PKT_TYPE) {
- skb->pkt_type = PACKET_OTHERHOST;
- nf_bridge->mask ^= BRNF_PKT_TYPE;
- }
- nf_bridge_push_encap_header(skb);
-
- NF_HOOK(PF_BRIDGE, NF_BR_FORWARD, skb, realindev, skb->dev,
- br_forward_finish);
- return NF_STOLEN;
-}
-
#if defined(CONFIG_NF_CONNTRACK_IPV4) || defined(CONFIG_NF_CONNTRACK_IPV4_MODULE)
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
- if (skb->nfct != NULL &&
- (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb)) &&
- skb->len > skb->dev->mtu &&
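+ /* Check the MTU with the stripped encap header bytes added back. */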
+ if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) &&
+ skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
!skb_is_gso(skb))
return ip_fragment(skb, br_dev_queue_push_xmit);
else
@@ -820,21 +768,7 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
struct net_device *realoutdev = bridge_parent(skb->dev);
u_int8_t pf;
-#ifdef CONFIG_NETFILTER_DEBUG
- /* Be very paranoid. This probably won't happen anymore, but let's
- * keep the check just to be sure... */
- if (skb_mac_header(skb) < skb->head ||
- skb_mac_header(skb) + ETH_HLEN > skb->data) {
- printk(KERN_CRIT "br_netfilter: Argh!! br_nf_post_routing: "
- "bad mac.raw pointer.\n");
- goto print_error;
- }
-#endif
-
- if (!nf_bridge)
- return NF_ACCEPT;
-
- if (!(nf_bridge->mask & (BRNF_BRIDGED | BRNF_BRIDGED_DNAT)))
+ if (!nf_bridge || !(nf_bridge->mask & BRNF_BRIDGED))
return NF_ACCEPT;
if (!realoutdev)
@@ -849,13 +783,6 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
else
return NF_ACCEPT;
-#ifdef CONFIG_NETFILTER_DEBUG
- if (skb_dst(skb) == NULL) {
- printk(KERN_INFO "br_netfilter post_routing: skb->dst == NULL\n");
- goto print_error;
- }
-#endif
-
/* We assume any code from br_dev_queue_push_xmit onwards doesn't care
* about the value of skb->pkt_type. */
if (skb->pkt_type == PACKET_OTHERHOST) {
@@ -865,24 +792,15 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
nf_bridge_pull_encap_header(skb);
nf_bridge_save_header(skb);
+ if (pf == PF_INET)
+ skb->protocol = htons(ETH_P_IP);
+ else
+ skb->protocol = htons(ETH_P_IPV6);
NF_HOOK(pf, NF_INET_POST_ROUTING, skb, NULL, realoutdev,
br_nf_dev_queue_xmit);
return NF_STOLEN;
-
-#ifdef CONFIG_NETFILTER_DEBUG
-print_error:
- if (skb->dev != NULL) {
- printk("[%s]", skb->dev->name);
- if (realoutdev)
- printk("[%s]", realoutdev->name);
- }
- printk(" head:%p, raw:%p, data:%p\n", skb->head, skb_mac_header(skb),
- skb->data);
- dump_stack();
- return NF_ACCEPT;
-#endif
}
/* IP/SABOTAGE *****************************************************/
@@ -901,10 +819,8 @@ static unsigned int ip_sabotage_in(unsigned int hook, struct sk_buff *skb,
return NF_ACCEPT;
}
-/* For br_nf_local_out we need (prio = NF_BR_PRI_FIRST), to insure that innocent
- * PF_BRIDGE/NF_BR_LOCAL_OUT functions don't get bridged traffic as input.
- * For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
- * ip_refrag() can return NF_STOLEN. */
+/* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
+ * br_dev_queue_push_xmit is called afterwards */
static struct nf_hook_ops br_nf_ops[] __read_mostly = {
{
.hook = br_nf_pre_routing,
@@ -935,13 +851,6 @@ static struct nf_hook_ops br_nf_ops[] __read_mostly = {
.priority = NF_BR_PRI_BRNF,
},
{
- .hook = br_nf_local_out,
- .owner = THIS_MODULE,
- .pf = PF_BRIDGE,
- .hooknum = NF_BR_LOCAL_OUT,
- .priority = NF_BR_PRI_FIRST,
- },
- {
.hook = br_nf_post_routing,
.owner = THIS_MODULE,
.pf = PF_BRIDGE,
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index aa56ac2c8829..fe0a79018ab2 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -42,8 +42,8 @@ static int br_fill_ifinfo(struct sk_buff *skb, const struct net_bridge_port *por
struct nlmsghdr *nlh;
u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
- pr_debug("br_fill_info event %d port %s master %s\n",
- event, dev->name, br->dev->name);
+ br_debug(br, "br_fill_info event %d port %s master %s\n",
+ event, dev->name, br->dev->name);
nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
if (nlh == NULL)
@@ -87,7 +87,9 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port)
struct sk_buff *skb;
int err = -ENOBUFS;
- pr_debug("bridge notify event=%d\n", event);
+ br_debug(port->br, "port %u(%s) event %d\n",
+ (unsigned)port->port_no, port->dev->name, event);
+
skb = nlmsg_new(br_nlmsg_size(), GFP_ATOMIC);
if (skb == NULL)
goto errout;
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index 763a3ec292e5..717e1fd6133c 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -34,6 +34,7 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
struct net_device *dev = ptr;
struct net_bridge_port *p = dev->br_port;
struct net_bridge *br;
+ int err;
/* not a port of a bridge */
if (p == NULL)
@@ -82,6 +83,16 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
case NETDEV_UNREGISTER:
br_del_if(br, dev);
break;
+
+ case NETDEV_CHANGENAME:
+ err = br_sysfs_renameif(p);
+ if (err)
+ return notifier_from_errno(err);
+ break;
+
+ case NETDEV_PRE_TYPE_CHANGE:
+ /* Forbid the underlying device to change its type. */
+ return NOTIFY_BAD;
}
/* Events that may cause spanning tree to refresh */
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 846d7d1e2075..0f4a74bc6a9b 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -45,6 +45,17 @@ struct mac_addr
unsigned char addr[6];
};
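+/* Protocol-tagged multicast group address used as the MDB lookup key. */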
+struct br_ip
+{
+ union {
+ __be32 ip4;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ struct in6_addr ip6;
+#endif
+ } u;
+ __be16 proto;
+};
+
struct net_bridge_fdb_entry
{
struct hlist_node hlist;
@@ -64,7 +75,7 @@ struct net_bridge_port_group {
struct rcu_head rcu;
struct timer_list timer;
struct timer_list query_timer;
- __be32 addr;
+ struct br_ip addr;
u32 queries_sent;
};
@@ -77,7 +88,7 @@ struct net_bridge_mdb_entry
struct rcu_head rcu;
struct timer_list timer;
struct timer_list query_timer;
- __be32 addr;
+ struct br_ip addr;
u32 queries_sent;
};
@@ -128,6 +139,17 @@ struct net_bridge_port
struct hlist_head mglist;
struct hlist_node rlist;
#endif
+
+#ifdef CONFIG_SYSFS
+ char sysfs_name[IFNAMSIZ];
+#endif
+};
+
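+/* Per-CPU packet/byte counters for the bridge device, folded in br_get_stats(). */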
+struct br_cpu_netstats {
+ unsigned long rx_packets;
+ unsigned long rx_bytes;
+ unsigned long tx_packets;
+ unsigned long tx_bytes;
};
struct net_bridge
@@ -135,6 +157,8 @@ struct net_bridge
spinlock_t lock;
struct list_head port_list;
struct net_device *dev;
+
+ struct br_cpu_netstats __percpu *stats;
spinlock_t hash_lock;
struct hlist_head hash[BR_HASH_SIZE];
unsigned long feature_mask;
@@ -220,6 +244,21 @@ struct br_input_skb_cb {
# define BR_INPUT_SKB_CB_MROUTERS_ONLY(__skb) (0)
#endif
+#define br_printk(level, br, format, args...) \
+ printk(level "%s: " format, (br)->dev->name, ##args)
+
+#define br_err(__br, format, args...) \
+ br_printk(KERN_ERR, __br, format, ##args)
+#define br_warn(__br, format, args...) \
+ br_printk(KERN_WARNING, __br, format, ##args)
+#define br_notice(__br, format, args...) \
+ br_printk(KERN_NOTICE, __br, format, ##args)
+#define br_info(__br, format, args...) \
+ br_printk(KERN_INFO, __br, format, ##args)
+
+#define br_debug(br, format, args...) \
+ pr_debug("%s: " format, (br)->dev->name, ##args)
+
extern struct notifier_block br_device_notifier;
extern const u8 br_group_address[ETH_ALEN];
@@ -233,6 +272,18 @@ static inline int br_is_root_bridge(const struct net_bridge *br)
extern void br_dev_setup(struct net_device *dev);
extern netdev_tx_t br_dev_xmit(struct sk_buff *skb,
struct net_device *dev);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+extern void br_netpoll_cleanup(struct net_device *dev);
+extern void br_netpoll_enable(struct net_bridge *br,
+ struct net_device *dev);
+extern void br_netpoll_disable(struct net_bridge *br,
+ struct net_device *dev);
+#else
+#define br_netpoll_cleanup(br)
+#define br_netpoll_enable(br, dev)
+#define br_netpoll_disable(br, dev)
+
+#endif
/* br_fdb.c */
extern int br_fdb_init(void);
@@ -433,6 +484,7 @@ extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
/* br_sysfs_if.c */
extern const struct sysfs_ops brport_sysfs_ops;
extern int br_sysfs_addif(struct net_bridge_port *p);
+extern int br_sysfs_renameif(struct net_bridge_port *p);
/* br_sysfs_br.c */
extern int br_sysfs_addbr(struct net_device *dev);
@@ -441,6 +493,7 @@ extern void br_sysfs_delbr(struct net_device *dev);
#else
#define br_sysfs_addif(p) (0)
+#define br_sysfs_renameif(p) (0)
#define br_sysfs_addbr(dev) (0)
#define br_sysfs_delbr(dev) do { } while(0)
#endif /* CONFIG_SYSFS */
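The new struct br_cpu_netstats above is a per-cpu counter block; a minimal sketch, assuming only the usual alloc_percpu()/per_cpu_ptr() idiom and nothing bridge-specific, of how such counters are allocated and folded into a total:

	#include <linux/percpu.h>

	/* allocate at bridge setup; pair with free_percpu() at teardown */
	static struct br_cpu_netstats __percpu *example_alloc_stats(void)
	{
		return alloc_percpu(struct br_cpu_netstats);
	}

	/* fold the per-cpu slots into one number, e.g. for device stats */
	static unsigned long
	example_sum_rx_packets(struct br_cpu_netstats __percpu *stats)
	{
		unsigned long sum = 0;
		int cpu;

		for_each_possible_cpu(cpu)
			sum += per_cpu_ptr(stats, cpu)->rx_packets;
		return sum;
	}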
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index edcf14b560f6..57186d84d2bd 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -31,10 +31,9 @@ static const char *const br_port_state_names[] = {
void br_log_state(const struct net_bridge_port *p)
{
- pr_info("%s: port %d(%s) entering %s state\n",
- p->br->dev->name, p->port_no, p->dev->name,
+ br_info(p->br, "port %u(%s) entering %s state\n",
+ (unsigned) p->port_no, p->dev->name,
br_port_state_names[p->state]);
-
}
/* called under bridge lock */
@@ -300,7 +299,7 @@ void br_topology_change_detection(struct net_bridge *br)
if (br->stp_enabled != BR_KERNEL_STP)
return;
- pr_info("%s: topology change detected, %s\n", br->dev->name,
+ br_info(br, "topology change detected, %s\n",
isroot ? "propagating" : "sending tcn bpdu");
if (isroot) {
@@ -469,8 +468,8 @@ void br_received_config_bpdu(struct net_bridge_port *p, struct br_config_bpdu *b
void br_received_tcn_bpdu(struct net_bridge_port *p)
{
if (br_is_designated_port(p)) {
- pr_info("%s: received tcn bpdu on port %i(%s)\n",
- p->br->dev->name, p->port_no, p->dev->name);
+ br_info(p->br, "port %u(%s) received tcn bpdu\n",
+ (unsigned) p->port_no, p->dev->name);
br_topology_change_detection(p->br);
br_topology_change_acknowledge(p);
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index d66cce11f3bf..217bd225a42f 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -50,7 +50,7 @@ static void br_send_bpdu(struct net_bridge_port *p,
llc_mac_hdr_init(skb, p->dev->dev_addr, p->br->group_addr);
- NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
+ NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
dev_queue_xmit);
}
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index d527119e9f54..1d8826914cbf 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -85,17 +85,16 @@ void br_stp_enable_port(struct net_bridge_port *p)
{
br_init_port(p);
br_port_state_selection(p->br);
+ br_log_state(p);
}
/* called under bridge lock */
void br_stp_disable_port(struct net_bridge_port *p)
{
- struct net_bridge *br;
+ struct net_bridge *br = p->br;
int wasroot;
- br = p->br;
- printk(KERN_INFO "%s: port %i(%s) entering %s state\n",
- br->dev->name, p->port_no, p->dev->name, "disabled");
+ br_log_state(p);
wasroot = br_is_root_bridge(br);
br_become_designated_port(p);
@@ -127,11 +126,10 @@ static void br_stp_start(struct net_bridge *br)
r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
if (r == 0) {
br->stp_enabled = BR_USER_STP;
- printk(KERN_INFO "%s: userspace STP started\n", br->dev->name);
+ br_debug(br, "userspace STP started\n");
} else {
br->stp_enabled = BR_KERNEL_STP;
- printk(KERN_INFO "%s: starting userspace STP failed, "
- "starting kernel STP\n", br->dev->name);
+ br_debug(br, "using kernel STP\n");
/* To start timers on any ports left in blocking */
spin_lock_bh(&br->lock);
@@ -148,9 +146,7 @@ static void br_stp_stop(struct net_bridge *br)
if (br->stp_enabled == BR_USER_STP) {
r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
- printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
- br->dev->name, r);
-
+ br_info(br, "userspace STP stopped, return code %d\n", r);
/* To start timers on any ports left in blocking */
spin_lock_bh(&br->lock);
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index 772a140bfdf0..7b22456023c5 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -35,7 +35,7 @@ static void br_hello_timer_expired(unsigned long arg)
{
struct net_bridge *br = (struct net_bridge *)arg;
- pr_debug("%s: hello timer expired\n", br->dev->name);
+ br_debug(br, "hello timer expired\n");
spin_lock(&br->lock);
if (br->dev->flags & IFF_UP) {
br_config_bpdu_generation(br);
@@ -55,13 +55,9 @@ static void br_message_age_timer_expired(unsigned long arg)
if (p->state == BR_STATE_DISABLED)
return;
-
- pr_info("%s: neighbor %.2x%.2x.%.2x:%.2x:%.2x:%.2x:%.2x:%.2x lost on port %d(%s)\n",
- br->dev->name,
- id->prio[0], id->prio[1],
- id->addr[0], id->addr[1], id->addr[2],
- id->addr[3], id->addr[4], id->addr[5],
- p->port_no, p->dev->name);
+ br_info(br, "port %u(%s) neighbor %.2x%.2x.%pM lost\n",
+ (unsigned) p->port_no, p->dev->name,
+ id->prio[0], id->prio[1], &id->addr);
/*
* According to the spec, the message age timer cannot be
@@ -87,8 +83,8 @@ static void br_forward_delay_timer_expired(unsigned long arg)
struct net_bridge_port *p = (struct net_bridge_port *) arg;
struct net_bridge *br = p->br;
- pr_debug("%s: %d(%s) forward delay timer\n",
- br->dev->name, p->port_no, p->dev->name);
+ br_debug(br, "port %u(%s) forward delay timer\n",
+ (unsigned) p->port_no, p->dev->name);
spin_lock(&br->lock);
if (p->state == BR_STATE_LISTENING) {
p->state = BR_STATE_LEARNING;
@@ -107,7 +103,7 @@ static void br_tcn_timer_expired(unsigned long arg)
{
struct net_bridge *br = (struct net_bridge *) arg;
- pr_debug("%s: tcn timer expired\n", br->dev->name);
+ br_debug(br, "tcn timer expired\n");
spin_lock(&br->lock);
if (br->dev->flags & IFF_UP) {
br_transmit_tcn(br);
@@ -121,7 +117,7 @@ static void br_topology_change_timer_expired(unsigned long arg)
{
struct net_bridge *br = (struct net_bridge *) arg;
- pr_debug("%s: topo change timer expired\n", br->dev->name);
+ br_debug(br, "topo change timer expired\n");
spin_lock(&br->lock);
br->topology_change_detected = 0;
br->topology_change = 0;
@@ -132,8 +128,8 @@ static void br_hold_timer_expired(unsigned long arg)
{
struct net_bridge_port *p = (struct net_bridge_port *) arg;
- pr_debug("%s: %d(%s) hold timer expired\n",
- p->br->dev->name, p->port_no, p->dev->name);
+ br_debug(p->br, "port %u(%s) hold timer expired\n",
+ (unsigned) p->port_no, p->dev->name);
spin_lock(&p->br->lock);
if (p->config_pending)
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index dd321e39e621..486b8f3861d2 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -659,7 +659,7 @@ static struct attribute_group bridge_group = {
*
* Returns the number of bytes read.
*/
-static ssize_t brforward_read(struct kobject *kobj,
+static ssize_t brforward_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index 0b9916489d6b..fd5799c9bc8d 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -246,7 +246,7 @@ const struct sysfs_ops brport_sysfs_ops = {
/*
* Add sysfs entries to ethernet device added to a bridge.
* Creates a brport subdirectory with bridge attributes.
- * Puts symlink in bridge's brport subdirectory
+ * Puts symlink in bridge's brif subdirectory
*/
int br_sysfs_addif(struct net_bridge_port *p)
{
@@ -257,15 +257,37 @@ int br_sysfs_addif(struct net_bridge_port *p)
err = sysfs_create_link(&p->kobj, &br->dev->dev.kobj,
SYSFS_BRIDGE_PORT_LINK);
if (err)
- goto out2;
+ return err;
for (a = brport_attrs; *a; ++a) {
err = sysfs_create_file(&p->kobj, &((*a)->attr));
if (err)
- goto out2;
+ return err;
}
- err = sysfs_create_link(br->ifobj, &p->kobj, p->dev->name);
-out2:
+ strlcpy(p->sysfs_name, p->dev->name, IFNAMSIZ);
+ return sysfs_create_link(br->ifobj, &p->kobj, p->sysfs_name);
+}
+
+/* Rename bridge's brif symlink */
+int br_sysfs_renameif(struct net_bridge_port *p)
+{
+ struct net_bridge *br = p->br;
+ int err;
+
+ /* If a rename fails, the rollback will cause another
+ * rename call with the existing name.
+ */
+ if (!strncmp(p->sysfs_name, p->dev->name, IFNAMSIZ))
+ return 0;
+
+ err = sysfs_rename_link(br->ifobj, &p->kobj,
+ p->sysfs_name, p->dev->name);
+ if (err)
+ netdev_notice(br->dev, "unable to rename link %s to %s",
+ p->sysfs_name, p->dev->name);
+ else
+ strlcpy(p->sysfs_name, p->dev->name, IFNAMSIZ);
+
return err;
}
diff --git a/net/bridge/netfilter/ebt_802_3.c b/net/bridge/netfilter/ebt_802_3.c
index 5d1176758ca5..2a449b7ab8fa 100644
--- a/net/bridge/netfilter/ebt_802_3.c
+++ b/net/bridge/netfilter/ebt_802_3.c
@@ -13,7 +13,7 @@
#include <linux/netfilter_bridge/ebt_802_3.h>
static bool
-ebt_802_3_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+ebt_802_3_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ebt_802_3_info *info = par->matchinfo;
const struct ebt_802_3_hdr *hdr = ebt_802_3_hdr(skb);
@@ -36,14 +36,14 @@ ebt_802_3_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return true;
}
-static bool ebt_802_3_mt_check(const struct xt_mtchk_param *par)
+static int ebt_802_3_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_802_3_info *info = par->matchinfo;
if (info->bitmask & ~EBT_802_3_MASK || info->invflags & ~EBT_802_3_MASK)
- return false;
+ return -EINVAL;
- return true;
+ return 0;
}
static struct xt_match ebt_802_3_mt_reg __read_mostly = {
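Every ebtables extension below repeats the same mechanical conversion: ->checkentry now returns 0 on success or a negative errno instead of true/false, so the xtables core can relay a precise error to user space. The shape of a converted check, reduced to its essentials:

	static int example_mt_check(const struct xt_mtchk_param *par)
	{
		const struct ebt_802_3_info *info = par->matchinfo;

		/* reject unknown option or inversion bits */
		if (info->bitmask & ~EBT_802_3_MASK ||
		    info->invflags & ~EBT_802_3_MASK)
			return -EINVAL;
		return 0;	/* accept: the rule may be loaded */
	}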
diff --git a/net/bridge/netfilter/ebt_among.c b/net/bridge/netfilter/ebt_among.c
index b595f091f35b..8b84c581be30 100644
--- a/net/bridge/netfilter/ebt_among.c
+++ b/net/bridge/netfilter/ebt_among.c
@@ -7,6 +7,7 @@
* August, 2003
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/ip.h>
#include <linux/if_arp.h>
#include <linux/module.h>
@@ -128,7 +129,7 @@ static int get_ip_src(const struct sk_buff *skb, __be32 *addr)
}
static bool
-ebt_among_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+ebt_among_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ebt_among_info *info = par->matchinfo;
const char *dmac, *smac;
@@ -171,7 +172,7 @@ ebt_among_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return true;
}
-static bool ebt_among_mt_check(const struct xt_mtchk_param *par)
+static int ebt_among_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_among_info *info = par->matchinfo;
const struct ebt_entry_match *em =
@@ -186,24 +187,20 @@ static bool ebt_among_mt_check(const struct xt_mtchk_param *par)
expected_length += ebt_mac_wormhash_size(wh_src);
if (em->match_size != EBT_ALIGN(expected_length)) {
- printk(KERN_WARNING
- "ebtables: among: wrong size: %d "
- "against expected %d, rounded to %Zd\n",
- em->match_size, expected_length,
- EBT_ALIGN(expected_length));
- return false;
+ pr_info("wrong size: %d against expected %d, rounded to %Zd\n",
+ em->match_size, expected_length,
+ EBT_ALIGN(expected_length));
+ return -EINVAL;
}
if (wh_dst && (err = ebt_mac_wormhash_check_integrity(wh_dst))) {
- printk(KERN_WARNING
- "ebtables: among: dst integrity fail: %x\n", -err);
- return false;
+ pr_info("dst integrity fail: %x\n", -err);
+ return -EINVAL;
}
if (wh_src && (err = ebt_mac_wormhash_check_integrity(wh_src))) {
- printk(KERN_WARNING
- "ebtables: among: src integrity fail: %x\n", -err);
- return false;
+ pr_info("src integrity fail: %x\n", -err);
+ return -EINVAL;
}
- return true;
+ return 0;
}
static struct xt_match ebt_among_mt_reg __read_mostly = {
diff --git a/net/bridge/netfilter/ebt_arp.c b/net/bridge/netfilter/ebt_arp.c
index e727697c5847..cd457b891b27 100644
--- a/net/bridge/netfilter/ebt_arp.c
+++ b/net/bridge/netfilter/ebt_arp.c
@@ -16,7 +16,7 @@
#include <linux/netfilter_bridge/ebt_arp.h>
static bool
-ebt_arp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+ebt_arp_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ebt_arp_info *info = par->matchinfo;
const struct arphdr *ah;
@@ -100,7 +100,7 @@ ebt_arp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return true;
}
-static bool ebt_arp_mt_check(const struct xt_mtchk_param *par)
+static int ebt_arp_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_arp_info *info = par->matchinfo;
const struct ebt_entry *e = par->entryinfo;
@@ -108,10 +108,10 @@ static bool ebt_arp_mt_check(const struct xt_mtchk_param *par)
if ((e->ethproto != htons(ETH_P_ARP) &&
e->ethproto != htons(ETH_P_RARP)) ||
e->invflags & EBT_IPROTO)
- return false;
+ return -EINVAL;
if (info->bitmask & ~EBT_ARP_MASK || info->invflags & ~EBT_ARP_MASK)
- return false;
- return true;
+ return -EINVAL;
+ return 0;
}
static struct xt_match ebt_arp_mt_reg __read_mostly = {
diff --git a/net/bridge/netfilter/ebt_arpreply.c b/net/bridge/netfilter/ebt_arpreply.c
index f392e9d93f53..070cf134a22f 100644
--- a/net/bridge/netfilter/ebt_arpreply.c
+++ b/net/bridge/netfilter/ebt_arpreply.c
@@ -16,7 +16,7 @@
#include <linux/netfilter_bridge/ebt_arpreply.h>
static unsigned int
-ebt_arpreply_tg(struct sk_buff *skb, const struct xt_target_param *par)
+ebt_arpreply_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ebt_arpreply_info *info = par->targinfo;
const __be32 *siptr, *diptr;
@@ -57,17 +57,17 @@ ebt_arpreply_tg(struct sk_buff *skb, const struct xt_target_param *par)
return info->target;
}
-static bool ebt_arpreply_tg_check(const struct xt_tgchk_param *par)
+static int ebt_arpreply_tg_check(const struct xt_tgchk_param *par)
{
const struct ebt_arpreply_info *info = par->targinfo;
const struct ebt_entry *e = par->entryinfo;
if (BASE_CHAIN && info->target == EBT_RETURN)
- return false;
+ return -EINVAL;
if (e->ethproto != htons(ETH_P_ARP) ||
e->invflags & EBT_IPROTO)
- return false;
- return true;
+ return -EINVAL;
+ return 0;
}
static struct xt_target ebt_arpreply_tg_reg __read_mostly = {
diff --git a/net/bridge/netfilter/ebt_dnat.c b/net/bridge/netfilter/ebt_dnat.c
index 2bb40d728a35..c59f7bfae6e2 100644
--- a/net/bridge/netfilter/ebt_dnat.c
+++ b/net/bridge/netfilter/ebt_dnat.c
@@ -15,7 +15,7 @@
#include <linux/netfilter_bridge/ebt_nat.h>
static unsigned int
-ebt_dnat_tg(struct sk_buff *skb, const struct xt_target_param *par)
+ebt_dnat_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ebt_nat_info *info = par->targinfo;
@@ -26,13 +26,13 @@ ebt_dnat_tg(struct sk_buff *skb, const struct xt_target_param *par)
return info->target;
}
-static bool ebt_dnat_tg_check(const struct xt_tgchk_param *par)
+static int ebt_dnat_tg_check(const struct xt_tgchk_param *par)
{
const struct ebt_nat_info *info = par->targinfo;
unsigned int hook_mask;
if (BASE_CHAIN && info->target == EBT_RETURN)
- return false;
+ return -EINVAL;
hook_mask = par->hook_mask & ~(1 << NF_BR_NUMHOOKS);
if ((strcmp(par->table, "nat") != 0 ||
@@ -40,10 +40,10 @@ static bool ebt_dnat_tg_check(const struct xt_tgchk_param *par)
(1 << NF_BR_LOCAL_OUT)))) &&
(strcmp(par->table, "broute") != 0 ||
hook_mask & ~(1 << NF_BR_BROUTING)))
- return false;
+ return -EINVAL;
if (INVALID_TARGET)
- return false;
- return true;
+ return -EINVAL;
+ return 0;
}
static struct xt_target ebt_dnat_tg_reg __read_mostly = {
diff --git a/net/bridge/netfilter/ebt_ip.c b/net/bridge/netfilter/ebt_ip.c
index 5de6df6f86b8..23bca62d58d2 100644
--- a/net/bridge/netfilter/ebt_ip.c
+++ b/net/bridge/netfilter/ebt_ip.c
@@ -25,7 +25,7 @@ struct tcpudphdr {
};
static bool
-ebt_ip_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+ebt_ip_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ebt_ip_info *info = par->matchinfo;
const struct iphdr *ih;
@@ -77,31 +77,31 @@ ebt_ip_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return true;
}
-static bool ebt_ip_mt_check(const struct xt_mtchk_param *par)
+static int ebt_ip_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_ip_info *info = par->matchinfo;
const struct ebt_entry *e = par->entryinfo;
if (e->ethproto != htons(ETH_P_IP) ||
e->invflags & EBT_IPROTO)
- return false;
+ return -EINVAL;
if (info->bitmask & ~EBT_IP_MASK || info->invflags & ~EBT_IP_MASK)
- return false;
+ return -EINVAL;
if (info->bitmask & (EBT_IP_DPORT | EBT_IP_SPORT)) {
if (info->invflags & EBT_IP_PROTO)
- return false;
+ return -EINVAL;
if (info->protocol != IPPROTO_TCP &&
info->protocol != IPPROTO_UDP &&
info->protocol != IPPROTO_UDPLITE &&
info->protocol != IPPROTO_SCTP &&
info->protocol != IPPROTO_DCCP)
- return false;
+ return -EINVAL;
}
if (info->bitmask & EBT_IP_DPORT && info->dport[0] > info->dport[1])
- return false;
+ return -EINVAL;
if (info->bitmask & EBT_IP_SPORT && info->sport[0] > info->sport[1])
- return false;
- return true;
+ return -EINVAL;
+ return 0;
}
static struct xt_match ebt_ip_mt_reg __read_mostly = {
diff --git a/net/bridge/netfilter/ebt_ip6.c b/net/bridge/netfilter/ebt_ip6.c
index bbf2534ef026..50a46afc2bcc 100644
--- a/net/bridge/netfilter/ebt_ip6.c
+++ b/net/bridge/netfilter/ebt_ip6.c
@@ -4,7 +4,7 @@
* Authors:
* Manohar Castelino <manohar.r.castelino@intel.com>
* Kuo-Lang Tseng <kuo-lang.tseng@intel.com>
- * Jan Engelhardt <jengelh@computergmbh.de>
+ * Jan Engelhardt <jengelh@medozas.de>
*
* Summary:
* This is just a modification of the IPv4 code written by
@@ -28,15 +28,13 @@ struct tcpudphdr {
};
static bool
-ebt_ip6_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+ebt_ip6_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ebt_ip6_info *info = par->matchinfo;
const struct ipv6hdr *ih6;
struct ipv6hdr _ip6h;
const struct tcpudphdr *pptr;
struct tcpudphdr _ports;
- struct in6_addr tmp_addr;
- int i;
ih6 = skb_header_pointer(skb, 0, sizeof(_ip6h), &_ip6h);
if (ih6 == NULL)
@@ -44,18 +42,10 @@ ebt_ip6_mt(const struct sk_buff *skb, const struct xt_match_param *par)
if (info->bitmask & EBT_IP6_TCLASS &&
FWINV(info->tclass != ipv6_get_dsfield(ih6), EBT_IP6_TCLASS))
return false;
- for (i = 0; i < 4; i++)
- tmp_addr.in6_u.u6_addr32[i] = ih6->saddr.in6_u.u6_addr32[i] &
- info->smsk.in6_u.u6_addr32[i];
- if (info->bitmask & EBT_IP6_SOURCE &&
- FWINV((ipv6_addr_cmp(&tmp_addr, &info->saddr) != 0),
- EBT_IP6_SOURCE))
- return false;
- for (i = 0; i < 4; i++)
- tmp_addr.in6_u.u6_addr32[i] = ih6->daddr.in6_u.u6_addr32[i] &
- info->dmsk.in6_u.u6_addr32[i];
- if (info->bitmask & EBT_IP6_DEST &&
- FWINV((ipv6_addr_cmp(&tmp_addr, &info->daddr) != 0), EBT_IP6_DEST))
+ if (FWINV(ipv6_masked_addr_cmp(&ih6->saddr, &info->smsk,
+ &info->saddr), EBT_IP6_SOURCE) ||
+ FWINV(ipv6_masked_addr_cmp(&ih6->daddr, &info->dmsk,
+ &info->daddr), EBT_IP6_DEST))
return false;
if (info->bitmask & EBT_IP6_PROTO) {
uint8_t nexthdr = ih6->nexthdr;
@@ -90,30 +80,30 @@ ebt_ip6_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return true;
}
-static bool ebt_ip6_mt_check(const struct xt_mtchk_param *par)
+static int ebt_ip6_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_entry *e = par->entryinfo;
struct ebt_ip6_info *info = par->matchinfo;
if (e->ethproto != htons(ETH_P_IPV6) || e->invflags & EBT_IPROTO)
- return false;
+ return -EINVAL;
if (info->bitmask & ~EBT_IP6_MASK || info->invflags & ~EBT_IP6_MASK)
- return false;
+ return -EINVAL;
if (info->bitmask & (EBT_IP6_DPORT | EBT_IP6_SPORT)) {
if (info->invflags & EBT_IP6_PROTO)
- return false;
+ return -EINVAL;
if (info->protocol != IPPROTO_TCP &&
info->protocol != IPPROTO_UDP &&
info->protocol != IPPROTO_UDPLITE &&
info->protocol != IPPROTO_SCTP &&
info->protocol != IPPROTO_DCCP)
- return false;
+ return -EINVAL;
}
if (info->bitmask & EBT_IP6_DPORT && info->dport[0] > info->dport[1])
- return false;
+ return -EINVAL;
if (info->bitmask & EBT_IP6_SPORT && info->sport[0] > info->sport[1])
- return false;
- return true;
+ return -EINVAL;
+ return 0;
}
static struct xt_match ebt_ip6_mt_reg __read_mostly = {
@@ -139,4 +129,5 @@ static void __exit ebt_ip6_fini(void)
module_init(ebt_ip6_init);
module_exit(ebt_ip6_fini);
MODULE_DESCRIPTION("Ebtables: IPv6 protocol packet match");
+MODULE_AUTHOR("Kuo-Lang Tseng <kuo-lang.tseng@intel.com>");
MODULE_LICENSE("GPL");
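ipv6_masked_addr_cmp(), which replaces the open-coded loops above, compares two addresses under a mask and returns nonzero when they differ; its semantics are equivalent to this sketch (the real helper lives in include/net/ipv6.h):

	static int example_masked_addr_cmp(const struct in6_addr *a1,
					   const struct in6_addr *m,
					   const struct in6_addr *a2)
	{
		int i;

		for (i = 0; i < 4; i++)
			if ((a1->s6_addr32[i] ^ a2->s6_addr32[i]) &
			    m->s6_addr32[i])
				return 1;	/* differ in a masked bit */
		return 0;			/* equal under the mask */
	}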
diff --git a/net/bridge/netfilter/ebt_limit.c b/net/bridge/netfilter/ebt_limit.c
index 7a8182710eb3..517e78befcb2 100644
--- a/net/bridge/netfilter/ebt_limit.c
+++ b/net/bridge/netfilter/ebt_limit.c
@@ -10,6 +10,7 @@
* September, 2003
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
@@ -31,7 +32,7 @@ static DEFINE_SPINLOCK(limit_lock);
#define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ)
static bool
-ebt_limit_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+ebt_limit_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
struct ebt_limit_info *info = (void *)par->matchinfo;
unsigned long now = jiffies;
@@ -64,16 +65,16 @@ user2credits(u_int32_t user)
return (user * HZ * CREDITS_PER_JIFFY) / EBT_LIMIT_SCALE;
}
-static bool ebt_limit_mt_check(const struct xt_mtchk_param *par)
+static int ebt_limit_mt_check(const struct xt_mtchk_param *par)
{
struct ebt_limit_info *info = par->matchinfo;
/* Check for overflow. */
if (info->burst == 0 ||
user2credits(info->avg * info->burst) < user2credits(info->avg)) {
- printk("Overflow in ebt_limit, try lower: %u/%u\n",
+ pr_info("overflow, try lower: %u/%u\n",
info->avg, info->burst);
- return false;
+ return -EINVAL;
}
/* User avg in seconds * EBT_LIMIT_SCALE: convert to jiffies * 128. */
@@ -81,7 +82,7 @@ static bool ebt_limit_mt_check(const struct xt_mtchk_param *par)
info->credit = user2credits(info->avg * info->burst);
info->credit_cap = user2credits(info->avg * info->burst);
info->cost = user2credits(info->avg);
- return true;
+ return 0;
}
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index e873924ddb5d..6e5a8bb9b940 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -24,16 +24,16 @@
static DEFINE_SPINLOCK(ebt_log_lock);
-static bool ebt_log_tg_check(const struct xt_tgchk_param *par)
+static int ebt_log_tg_check(const struct xt_tgchk_param *par)
{
struct ebt_log_info *info = par->targinfo;
if (info->bitmask & ~EBT_LOG_MASK)
- return false;
+ return -EINVAL;
if (info->loglevel >= 8)
- return false;
+ return -EINVAL;
info->prefix[EBT_LOG_PREFIX_SIZE - 1] = '\0';
- return true;
+ return 0;
}
struct tcpudphdr
@@ -171,7 +171,7 @@ out:
}
static unsigned int
-ebt_log_tg(struct sk_buff *skb, const struct xt_target_param *par)
+ebt_log_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ebt_log_info *info = par->targinfo;
struct nf_loginfo li;
diff --git a/net/bridge/netfilter/ebt_mark.c b/net/bridge/netfilter/ebt_mark.c
index 2b5ce533d6b9..66697cbd0a8b 100644
--- a/net/bridge/netfilter/ebt_mark.c
+++ b/net/bridge/netfilter/ebt_mark.c
@@ -19,7 +19,7 @@
#include <linux/netfilter_bridge/ebt_mark_t.h>
static unsigned int
-ebt_mark_tg(struct sk_buff *skb, const struct xt_target_param *par)
+ebt_mark_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ebt_mark_t_info *info = par->targinfo;
int action = info->target & -16;
@@ -36,21 +36,21 @@ ebt_mark_tg(struct sk_buff *skb, const struct xt_target_param *par)
return info->target | ~EBT_VERDICT_BITS;
}
-static bool ebt_mark_tg_check(const struct xt_tgchk_param *par)
+static int ebt_mark_tg_check(const struct xt_tgchk_param *par)
{
const struct ebt_mark_t_info *info = par->targinfo;
int tmp;
tmp = info->target | ~EBT_VERDICT_BITS;
if (BASE_CHAIN && tmp == EBT_RETURN)
- return false;
+ return -EINVAL;
if (tmp < -NUM_STANDARD_TARGETS || tmp >= 0)
- return false;
+ return -EINVAL;
tmp = info->target & ~EBT_VERDICT_BITS;
if (tmp != MARK_SET_VALUE && tmp != MARK_OR_VALUE &&
tmp != MARK_AND_VALUE && tmp != MARK_XOR_VALUE)
- return false;
- return true;
+ return -EINVAL;
+ return 0;
}
#ifdef CONFIG_COMPAT
struct compat_ebt_mark_t_info {
diff --git a/net/bridge/netfilter/ebt_mark_m.c b/net/bridge/netfilter/ebt_mark_m.c
index 8de8c396d913..d98baefc4c7e 100644
--- a/net/bridge/netfilter/ebt_mark_m.c
+++ b/net/bridge/netfilter/ebt_mark_m.c
@@ -13,7 +13,7 @@
#include <linux/netfilter_bridge/ebt_mark_m.h>
static bool
-ebt_mark_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+ebt_mark_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ebt_mark_m_info *info = par->matchinfo;
@@ -22,17 +22,17 @@ ebt_mark_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return ((skb->mark & info->mask) == info->mark) ^ info->invert;
}
-static bool ebt_mark_mt_check(const struct xt_mtchk_param *par)
+static int ebt_mark_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_mark_m_info *info = par->matchinfo;
if (info->bitmask & ~EBT_MARK_MASK)
- return false;
+ return -EINVAL;
if ((info->bitmask & EBT_MARK_OR) && (info->bitmask & EBT_MARK_AND))
- return false;
+ return -EINVAL;
if (!info->bitmask)
- return false;
- return true;
+ return -EINVAL;
+ return 0;
}
diff --git a/net/bridge/netfilter/ebt_nflog.c b/net/bridge/netfilter/ebt_nflog.c
index 40dbd248b9ae..5be68bbcc341 100644
--- a/net/bridge/netfilter/ebt_nflog.c
+++ b/net/bridge/netfilter/ebt_nflog.c
@@ -20,7 +20,7 @@
#include <net/netfilter/nf_log.h>
static unsigned int
-ebt_nflog_tg(struct sk_buff *skb, const struct xt_target_param *par)
+ebt_nflog_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ebt_nflog_info *info = par->targinfo;
struct nf_loginfo li;
@@ -35,14 +35,14 @@ ebt_nflog_tg(struct sk_buff *skb, const struct xt_target_param *par)
return EBT_CONTINUE;
}
-static bool ebt_nflog_tg_check(const struct xt_tgchk_param *par)
+static int ebt_nflog_tg_check(const struct xt_tgchk_param *par)
{
struct ebt_nflog_info *info = par->targinfo;
if (info->flags & ~EBT_NFLOG_MASK)
- return false;
+ return -EINVAL;
info->prefix[EBT_NFLOG_PREFIX_SIZE - 1] = '\0';
- return true;
+ return 0;
}
static struct xt_target ebt_nflog_tg_reg __read_mostly = {
diff --git a/net/bridge/netfilter/ebt_pkttype.c b/net/bridge/netfilter/ebt_pkttype.c
index e2a07e6cbef3..496a56515307 100644
--- a/net/bridge/netfilter/ebt_pkttype.c
+++ b/net/bridge/netfilter/ebt_pkttype.c
@@ -13,21 +13,21 @@
#include <linux/netfilter_bridge/ebt_pkttype.h>
static bool
-ebt_pkttype_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+ebt_pkttype_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ebt_pkttype_info *info = par->matchinfo;
return (skb->pkt_type == info->pkt_type) ^ info->invert;
}
-static bool ebt_pkttype_mt_check(const struct xt_mtchk_param *par)
+static int ebt_pkttype_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_pkttype_info *info = par->matchinfo;
if (info->invert != 0 && info->invert != 1)
- return false;
+ return -EINVAL;
/* Allow any pkt_type value */
- return true;
+ return 0;
}
static struct xt_match ebt_pkttype_mt_reg __read_mostly = {
diff --git a/net/bridge/netfilter/ebt_redirect.c b/net/bridge/netfilter/ebt_redirect.c
index 9be8fbcd370b..9e19166ba453 100644
--- a/net/bridge/netfilter/ebt_redirect.c
+++ b/net/bridge/netfilter/ebt_redirect.c
@@ -16,7 +16,7 @@
#include <linux/netfilter_bridge/ebt_redirect.h>
static unsigned int
-ebt_redirect_tg(struct sk_buff *skb, const struct xt_target_param *par)
+ebt_redirect_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ebt_redirect_info *info = par->targinfo;
@@ -32,23 +32,23 @@ ebt_redirect_tg(struct sk_buff *skb, const struct xt_target_param *par)
return info->target;
}
-static bool ebt_redirect_tg_check(const struct xt_tgchk_param *par)
+static int ebt_redirect_tg_check(const struct xt_tgchk_param *par)
{
const struct ebt_redirect_info *info = par->targinfo;
unsigned int hook_mask;
if (BASE_CHAIN && info->target == EBT_RETURN)
- return false;
+ return -EINVAL;
hook_mask = par->hook_mask & ~(1 << NF_BR_NUMHOOKS);
if ((strcmp(par->table, "nat") != 0 ||
hook_mask & ~(1 << NF_BR_PRE_ROUTING)) &&
(strcmp(par->table, "broute") != 0 ||
hook_mask & ~(1 << NF_BR_BROUTING)))
- return false;
+ return -EINVAL;
if (INVALID_TARGET)
- return false;
- return true;
+ return -EINVAL;
+ return 0;
}
static struct xt_target ebt_redirect_tg_reg __read_mostly = {
diff --git a/net/bridge/netfilter/ebt_snat.c b/net/bridge/netfilter/ebt_snat.c
index 9c7b520765a2..f8f0bd1a1d51 100644
--- a/net/bridge/netfilter/ebt_snat.c
+++ b/net/bridge/netfilter/ebt_snat.c
@@ -17,7 +17,7 @@
#include <linux/netfilter_bridge/ebt_nat.h>
static unsigned int
-ebt_snat_tg(struct sk_buff *skb, const struct xt_target_param *par)
+ebt_snat_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ebt_nat_info *info = par->targinfo;
@@ -42,21 +42,21 @@ out:
return info->target | ~EBT_VERDICT_BITS;
}
-static bool ebt_snat_tg_check(const struct xt_tgchk_param *par)
+static int ebt_snat_tg_check(const struct xt_tgchk_param *par)
{
const struct ebt_nat_info *info = par->targinfo;
int tmp;
tmp = info->target | ~EBT_VERDICT_BITS;
if (BASE_CHAIN && tmp == EBT_RETURN)
- return false;
+ return -EINVAL;
if (tmp < -NUM_STANDARD_TARGETS || tmp >= 0)
- return false;
+ return -EINVAL;
tmp = info->target | EBT_VERDICT_BITS;
if ((tmp & ~NAT_ARP_BIT) != ~NAT_ARP_BIT)
- return false;
- return true;
+ return -EINVAL;
+ return 0;
}
static struct xt_target ebt_snat_tg_reg __read_mostly = {
diff --git a/net/bridge/netfilter/ebt_stp.c b/net/bridge/netfilter/ebt_stp.c
index 92a93d363765..5b33a2e634a6 100644
--- a/net/bridge/netfilter/ebt_stp.c
+++ b/net/bridge/netfilter/ebt_stp.c
@@ -120,7 +120,7 @@ static bool ebt_filter_config(const struct ebt_stp_info *info,
}
static bool
-ebt_stp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+ebt_stp_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ebt_stp_info *info = par->matchinfo;
const struct stp_header *sp;
@@ -153,7 +153,7 @@ ebt_stp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return true;
}
-static bool ebt_stp_mt_check(const struct xt_mtchk_param *par)
+static int ebt_stp_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_stp_info *info = par->matchinfo;
const uint8_t bridge_ula[6] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00};
@@ -162,13 +162,13 @@ static bool ebt_stp_mt_check(const struct xt_mtchk_param *par)
if (info->bitmask & ~EBT_STP_MASK || info->invflags & ~EBT_STP_MASK ||
!(info->bitmask & EBT_STP_MASK))
- return false;
+ return -EINVAL;
/* Make sure the match only receives stp frames */
if (compare_ether_addr(e->destmac, bridge_ula) ||
compare_ether_addr(e->destmsk, msk) || !(e->bitmask & EBT_DESTMAC))
- return false;
+ return -EINVAL;
- return true;
+ return 0;
}
static struct xt_match ebt_stp_mt_reg __read_mostly = {
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index f9560f3dbdc7..ae3c7cef1484 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -27,7 +27,7 @@
* flushed even if it is not full yet.
*
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -44,9 +44,6 @@
#include <net/sock.h>
#include "../br_private.h"
-#define PRINTR(format, args...) do { if (net_ratelimit()) \
- printk(format , ## args); } while (0)
-
static unsigned int nlbufsiz = NLMSG_GOODSIZE;
module_param(nlbufsiz, uint, 0600);
MODULE_PARM_DESC(nlbufsiz, "netlink buffer size (number of bytes) "
@@ -107,15 +104,14 @@ static struct sk_buff *ulog_alloc_skb(unsigned int size)
n = max(size, nlbufsiz);
skb = alloc_skb(n, GFP_ATOMIC);
if (!skb) {
- PRINTR(KERN_ERR "ebt_ulog: can't alloc whole buffer "
- "of size %ub!\n", n);
+ pr_debug("cannot alloc whole buffer of size %ub!\n", n);
if (n > size) {
/* try to allocate only as much as we need for
* current packet */
skb = alloc_skb(size, GFP_ATOMIC);
if (!skb)
- PRINTR(KERN_ERR "ebt_ulog: can't even allocate "
- "buffer of size %ub\n", size);
+ pr_debug("cannot even allocate "
+ "buffer of size %ub\n", size);
}
}
@@ -142,8 +138,7 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
size = NLMSG_SPACE(sizeof(*pm) + copy_len);
if (size > nlbufsiz) {
- PRINTR("ebt_ulog: Size %Zd needed, but nlbufsiz=%d\n",
- size, nlbufsiz);
+ pr_debug("Size %Zd needed, but nlbufsiz=%d\n", size, nlbufsiz);
return;
}
@@ -217,8 +212,8 @@ unlock:
return;
nlmsg_failure:
- printk(KERN_CRIT "ebt_ulog: error during NLMSG_PUT. This should "
- "not happen, please report to author.\n");
+ pr_debug("error during NLMSG_PUT. This should "
+ "not happen, please report to author.\n");
goto unlock;
alloc_failure:
goto unlock;
@@ -248,26 +243,26 @@ static void ebt_log_packet(u_int8_t pf, unsigned int hooknum,
}
static unsigned int
-ebt_ulog_tg(struct sk_buff *skb, const struct xt_target_param *par)
+ebt_ulog_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
ebt_ulog_packet(par->hooknum, skb, par->in, par->out,
par->targinfo, NULL);
return EBT_CONTINUE;
}
-static bool ebt_ulog_tg_check(const struct xt_tgchk_param *par)
+static int ebt_ulog_tg_check(const struct xt_tgchk_param *par)
{
struct ebt_ulog_info *uloginfo = par->targinfo;
if (uloginfo->nlgroup > 31)
- return false;
+ return -EINVAL;
uloginfo->prefix[EBT_ULOG_PREFIX_LEN - 1] = '\0';
if (uloginfo->qthreshold > EBT_ULOG_MAX_QLEN)
uloginfo->qthreshold = EBT_ULOG_MAX_QLEN;
- return true;
+ return 0;
}
static struct xt_target ebt_ulog_tg_reg __read_mostly = {
@@ -292,8 +287,8 @@ static int __init ebt_ulog_init(void)
int i;
if (nlbufsiz >= 128*1024) {
- printk(KERN_NOTICE "ebt_ulog: Netlink buffer has to be <= 128kB,"
- " please try a smaller nlbufsiz parameter.\n");
+ pr_warning("Netlink buffer has to be <= 128kB,"
+ " please try a smaller nlbufsiz parameter.\n");
return -EINVAL;
}
@@ -306,13 +301,10 @@ static int __init ebt_ulog_init(void)
ebtulognl = netlink_kernel_create(&init_net, NETLINK_NFLOG,
EBT_ULOG_MAXNLGROUPS, NULL, NULL,
THIS_MODULE);
- if (!ebtulognl) {
- printk(KERN_WARNING KBUILD_MODNAME ": out of memory trying to "
- "call netlink_kernel_create\n");
+ if (!ebtulognl)
ret = -ENOMEM;
- } else if ((ret = xt_register_target(&ebt_ulog_tg_reg)) != 0) {
+ else if ((ret = xt_register_target(&ebt_ulog_tg_reg)) != 0)
netlink_kernel_release(ebtulognl);
- }
if (ret == 0)
nf_log_register(NFPROTO_BRIDGE, &ebt_ulog_logger);
diff --git a/net/bridge/netfilter/ebt_vlan.c b/net/bridge/netfilter/ebt_vlan.c
index be1dd2e1f615..87b53b3a921d 100644
--- a/net/bridge/netfilter/ebt_vlan.c
+++ b/net/bridge/netfilter/ebt_vlan.c
@@ -26,22 +26,17 @@
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_vlan.h>
-static int debug;
#define MODULE_VERS "0.6"
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "debug=1 is turn on debug messages");
MODULE_AUTHOR("Nick Fedchik <nick@fedchik.org.ua>");
MODULE_DESCRIPTION("Ebtables: 802.1Q VLAN tag match");
MODULE_LICENSE("GPL");
-
-#define DEBUG_MSG(args...) if (debug) printk (KERN_DEBUG "ebt_vlan: " args)
#define GET_BITMASK(_BIT_MASK_) info->bitmask & _BIT_MASK_
#define EXIT_ON_MISMATCH(_MATCH_,_MASK_) {if (!((info->_MATCH_ == _MATCH_)^!!(info->invflags & _MASK_))) return false; }
static bool
-ebt_vlan_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+ebt_vlan_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ebt_vlan_info *info = par->matchinfo;
const struct vlan_hdr *fp;
@@ -84,32 +79,31 @@ ebt_vlan_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return true;
}
-static bool ebt_vlan_mt_check(const struct xt_mtchk_param *par)
+static int ebt_vlan_mt_check(const struct xt_mtchk_param *par)
{
struct ebt_vlan_info *info = par->matchinfo;
const struct ebt_entry *e = par->entryinfo;
/* Is it 802.1Q frame checked? */
if (e->ethproto != htons(ETH_P_8021Q)) {
- DEBUG_MSG
- ("passed entry proto %2.4X is not 802.1Q (8100)\n",
- (unsigned short) ntohs(e->ethproto));
- return false;
+ pr_debug("passed entry proto %2.4X is not 802.1Q (8100)\n",
+ ntohs(e->ethproto));
+ return -EINVAL;
}
/* Check for bitmask range
* True if even one bit is out of mask */
if (info->bitmask & ~EBT_VLAN_MASK) {
- DEBUG_MSG("bitmask %2X is out of mask (%2X)\n",
- info->bitmask, EBT_VLAN_MASK);
- return false;
+ pr_debug("bitmask %2X is out of mask (%2X)\n",
+ info->bitmask, EBT_VLAN_MASK);
+ return -EINVAL;
}
/* Check for inversion flags range */
if (info->invflags & ~EBT_VLAN_MASK) {
- DEBUG_MSG("inversion flags %2X is out of mask (%2X)\n",
- info->invflags, EBT_VLAN_MASK);
- return false;
+ pr_debug("inversion flags %2X is out of mask (%2X)\n",
+ info->invflags, EBT_VLAN_MASK);
+ return -EINVAL;
}
/* Reserved VLAN ID (VID) values
@@ -121,10 +115,9 @@ static bool ebt_vlan_mt_check(const struct xt_mtchk_param *par)
if (GET_BITMASK(EBT_VLAN_ID)) {
if (!!info->id) { /* if id!=0 => check vid range */
if (info->id > VLAN_GROUP_ARRAY_LEN) {
- DEBUG_MSG
- ("id %d is out of range (1-4096)\n",
- info->id);
- return false;
+ pr_debug("id %d is out of range (1-4096)\n",
+ info->id);
+ return -EINVAL;
}
/* Note: This is valid VLAN-tagged frame point.
* Any value of user_priority are acceptable,
@@ -137,9 +130,9 @@ static bool ebt_vlan_mt_check(const struct xt_mtchk_param *par)
if (GET_BITMASK(EBT_VLAN_PRIO)) {
if ((unsigned char) info->prio > 7) {
- DEBUG_MSG("prio %d is out of range (0-7)\n",
- info->prio);
- return false;
+ pr_debug("prio %d is out of range (0-7)\n",
+ info->prio);
+ return -EINVAL;
}
}
/* Check for encapsulated proto range - it is possible to be
@@ -147,14 +140,13 @@ static bool ebt_vlan_mt_check(const struct xt_mtchk_param *par)
* if_ether.h: ETH_ZLEN 60 - Min. octets in frame sans FCS */
if (GET_BITMASK(EBT_VLAN_ENCAP)) {
if ((unsigned short) ntohs(info->encap) < ETH_ZLEN) {
- DEBUG_MSG
- ("encap frame length %d is less than minimal\n",
- ntohs(info->encap));
- return false;
+ pr_debug("encap frame length %d is less than "
+ "minimal\n", ntohs(info->encap));
+ return -EINVAL;
}
}
- return true;
+ return 0;
}
static struct xt_match ebt_vlan_mt_reg __read_mostly = {
@@ -169,9 +161,7 @@ static struct xt_match ebt_vlan_mt_reg __read_mostly = {
static int __init ebt_vlan_init(void)
{
- DEBUG_MSG("ebtables 802.1Q extension module v"
- MODULE_VERS "\n");
- DEBUG_MSG("module debug=%d\n", !!debug);
+ pr_debug("ebtables 802.1Q extension module v" MODULE_VERS "\n");
return xt_register_match(&ebt_vlan_mt_reg);
}
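Several of these conversions (ebt_among, ebt_limit, ebt_ulog, ebtables) drop hand-written "ebtables: ..." prefixes in favour of the pr_fmt() convention; the mechanism in isolation:

	/* must be defined before the first #include so printk.h picks it up */
	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
	#include <linux/module.h>

	static int __init example_init(void)
	{
		/* expands to printk(KERN_INFO KBUILD_MODNAME ": loaded\n") */
		pr_info("loaded\n");
		return 0;
	}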
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index f0865fd1e3ec..59ca00e40dec 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -14,8 +14,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
-
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
@@ -87,7 +86,7 @@ static struct xt_target ebt_standard_target = {
static inline int
ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb,
- struct xt_target_param *par)
+ struct xt_action_param *par)
{
par->target = w->u.watcher;
par->targinfo = w->data;
@@ -96,8 +95,9 @@ ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb,
return 0;
}
-static inline int ebt_do_match (struct ebt_entry_match *m,
- const struct sk_buff *skb, struct xt_match_param *par)
+static inline int
+ebt_do_match(struct ebt_entry_match *m, const struct sk_buff *skb,
+ struct xt_action_param *par)
{
par->match = m->u.match;
par->matchinfo = m->data;
@@ -186,15 +186,13 @@ unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb,
struct ebt_entries *chaininfo;
const char *base;
const struct ebt_table_info *private;
- bool hotdrop = false;
- struct xt_match_param mtpar;
- struct xt_target_param tgpar;
+ struct xt_action_param acpar;
- mtpar.family = tgpar.family = NFPROTO_BRIDGE;
- mtpar.in = tgpar.in = in;
- mtpar.out = tgpar.out = out;
- mtpar.hotdrop = &hotdrop;
- mtpar.hooknum = tgpar.hooknum = hook;
+ acpar.family = NFPROTO_BRIDGE;
+ acpar.in = in;
+ acpar.out = out;
+ acpar.hotdrop = false;
+ acpar.hooknum = hook;
read_lock_bh(&table->lock);
private = table->private;
@@ -215,9 +213,9 @@ unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb,
if (ebt_basic_match(point, eth_hdr(skb), in, out))
goto letscontinue;
- if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &mtpar) != 0)
+ if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &acpar) != 0)
goto letscontinue;
- if (hotdrop) {
+ if (acpar.hotdrop) {
read_unlock_bh(&table->lock);
return NF_DROP;
}
@@ -228,7 +226,7 @@ unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb,
/* these should only watch: not modify, nor tell us
what to do with the packet */
- EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &tgpar);
+ EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &acpar);
t = (struct ebt_entry_target *)
(((char *)point) + point->target_offset);
@@ -236,9 +234,9 @@ unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb,
if (!t->u.target->target)
verdict = ((struct ebt_standard_target *)t)->verdict;
else {
- tgpar.target = t->u.target;
- tgpar.targinfo = t->data;
- verdict = t->u.target->target(skb, &tgpar);
+ acpar.target = t->u.target;
+ acpar.targinfo = t->data;
+ verdict = t->u.target->target(skb, &acpar);
}
if (verdict == EBT_ACCEPT) {
read_unlock_bh(&table->lock);
@@ -363,12 +361,9 @@ ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
left - sizeof(struct ebt_entry_match) < m->match_size)
return -EINVAL;
- match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
- m->u.name, 0), "ebt_%s", m->u.name);
+ match = xt_request_find_match(NFPROTO_BRIDGE, m->u.name, 0);
if (IS_ERR(match))
return PTR_ERR(match);
- if (match == NULL)
- return -ENOENT;
m->u.match = match;
par->match = match;
@@ -397,13 +392,9 @@ ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
left - sizeof(struct ebt_entry_watcher) < w->watcher_size)
return -EINVAL;
- watcher = try_then_request_module(
- xt_find_target(NFPROTO_BRIDGE, w->u.name, 0),
- "ebt_%s", w->u.name);
+ watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
if (IS_ERR(watcher))
return PTR_ERR(watcher);
- if (watcher == NULL)
- return -ENOENT;
w->u.watcher = watcher;
par->target = watcher;
@@ -716,15 +707,10 @@ ebt_check_entry(struct ebt_entry *e, struct net *net,
t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
gap = e->next_offset - e->target_offset;
- target = try_then_request_module(
- xt_find_target(NFPROTO_BRIDGE, t->u.name, 0),
- "ebt_%s", t->u.name);
+ target = xt_request_find_target(NFPROTO_BRIDGE, t->u.name, 0);
if (IS_ERR(target)) {
ret = PTR_ERR(target);
goto cleanup_watchers;
- } else if (target == NULL) {
- ret = -ENOENT;
- goto cleanup_watchers;
}
t->u.target = target;
@@ -2128,7 +2114,7 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
return ret;
new_offset += ret;
if (offsets_update && new_offset) {
- pr_debug("ebtables: change offset %d to %d\n",
+ pr_debug("change offset %d to %d\n",
offsets_update[i], offsets[j] + new_offset);
offsets_update[i] = offsets[j] + new_offset;
}
diff --git a/net/caif/Kconfig b/net/caif/Kconfig
new file mode 100644
index 000000000000..cd1daf6008bd
--- /dev/null
+++ b/net/caif/Kconfig
@@ -0,0 +1,48 @@
+#
+# CAIF net configurations
+#
+
+#menu "CAIF Support"
+comment "CAIF Support"
+menuconfig CAIF
+ tristate "Enable CAIF support"
+ select CRC_CCITT
+ default n
+ ---help---
+ The "Communication CPU to Application CPU Interface" (CAIF) is a packet
+ based connection-oriented MUX protocol developed by ST-Ericsson for use
+ with its modems. It is accessed from user space as sockets (PF_CAIF).
+
+	  Say Y (or M) here if you build for a phone product (e.g. Android or
+	  MeeGo) that uses CAIF as transport. If unsure, say N.
+
+	  If you select to build it as a module, then CAIF_NETDEV also needs
+	  to be built as a module. You will also need to say yes to any CAIF
+	  physical devices that your platform requires.
+
+	  See Documentation/networking/caif for further explanation of how to
+	  use and configure CAIF.
+
+if CAIF
+
+config CAIF_DEBUG
+ bool "Enable Debug"
+ default n
+	---help---
+	  Enable the inclusion of debug code in the CAIF stack.
+	  Be aware that doing this will impact performance.
+	  If unsure, say N.
+
+
+config CAIF_NETDEV
+ tristate "CAIF GPRS Network device"
+ default CAIF
+ ---help---
+	  Say Y if you will be using a CAIF-based GPRS network device.
+	  This can be either built-in or a loadable module. If you select to
+	  build it as a built-in, then the main CAIF device must also be
+	  built-in. If unsure, say Y.
+
+endif
+#endmenu
diff --git a/net/caif/Makefile b/net/caif/Makefile
new file mode 100644
index 000000000000..34852af2595e
--- /dev/null
+++ b/net/caif/Makefile
@@ -0,0 +1,26 @@
+ifeq ($(CONFIG_CAIF_DEBUG),1)
+CAIF_DBG_FLAGS := -DDEBUG
+endif
+
+ccflags-y := $(CAIF_FLAGS) $(CAIF_DBG_FLAGS)
+
+caif-objs := caif_dev.o \
+ cfcnfg.o cfmuxl.o cfctrl.o \
+ cffrml.o cfveil.o cfdbgl.o\
+ cfserl.o cfdgml.o \
+ cfrfml.o cfvidl.o cfutill.o \
+ cfsrvl.o cfpkt_skbuff.o caif_config_util.o
+clean-dirs:= .tmp_versions
+
+clean-files:= \
+ Module.symvers \
+ modules.order \
+ *.cmd \
+ *.o \
+ *~
+
+obj-$(CONFIG_CAIF) += caif.o
+obj-$(CONFIG_CAIF_NETDEV) += chnl_net.o
+obj-$(CONFIG_CAIF) += caif_socket.o
+
+export-objs := caif.o
diff --git a/net/caif/caif_config_util.c b/net/caif/caif_config_util.c
new file mode 100644
index 000000000000..6f36580366f0
--- /dev/null
+++ b/net/caif/caif_config_util.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <net/caif/cfctrl.h>
+#include <net/caif/cfcnfg.h>
+#include <net/caif/caif_dev.h>
+
+int connect_req_to_link_param(struct cfcnfg *cnfg,
+ struct caif_connect_request *s,
+ struct cfctrl_link_param *l)
+{
+ struct dev_info *dev_info;
+ enum cfcnfg_phy_preference pref;
+ memset(l, 0, sizeof(*l));
+ l->priority = s->priority;
+
+ if (s->link_name[0] != '\0')
+ l->phyid = cfcnfg_get_named(cnfg, s->link_name);
+ else {
+ switch (s->link_selector) {
+ case CAIF_LINK_HIGH_BANDW:
+ pref = CFPHYPREF_HIGH_BW;
+ break;
+ case CAIF_LINK_LOW_LATENCY:
+ pref = CFPHYPREF_LOW_LAT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ dev_info = cfcnfg_get_phyid(cnfg, pref);
+ if (dev_info == NULL)
+ return -ENODEV;
+ l->phyid = dev_info->id;
+ }
+ switch (s->protocol) {
+ case CAIFPROTO_AT:
+ l->linktype = CFCTRL_SRV_VEI;
+ if (s->sockaddr.u.at.type == CAIF_ATTYPE_PLAIN)
+ l->chtype = 0x02;
+ else
+ l->chtype = s->sockaddr.u.at.type;
+ l->endpoint = 0x00;
+ break;
+ case CAIFPROTO_DATAGRAM:
+ l->linktype = CFCTRL_SRV_DATAGRAM;
+ l->chtype = 0x00;
+ l->u.datagram.connid = s->sockaddr.u.dgm.connection_id;
+ break;
+ case CAIFPROTO_DATAGRAM_LOOP:
+ l->linktype = CFCTRL_SRV_DATAGRAM;
+ l->chtype = 0x03;
+ l->endpoint = 0x00;
+ l->u.datagram.connid = s->sockaddr.u.dgm.connection_id;
+ break;
+ case CAIFPROTO_RFM:
+ l->linktype = CFCTRL_SRV_RFM;
+ l->u.datagram.connid = s->sockaddr.u.rfm.connection_id;
+ strncpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume,
+ sizeof(l->u.rfm.volume)-1);
+ l->u.rfm.volume[sizeof(l->u.rfm.volume)-1] = 0;
+ break;
+ case CAIFPROTO_UTIL:
+ l->linktype = CFCTRL_SRV_UTIL;
+ l->endpoint = 0x00;
+ l->chtype = 0x00;
+ strncpy(l->u.utility.name, s->sockaddr.u.util.service,
+ sizeof(l->u.utility.name)-1);
+ l->u.utility.name[sizeof(l->u.utility.name)-1] = 0;
+ caif_assert(sizeof(l->u.utility.name) > 10);
+ l->u.utility.paramlen = s->param.size;
+ if (l->u.utility.paramlen > sizeof(l->u.utility.params))
+ l->u.utility.paramlen = sizeof(l->u.utility.params);
+
+ memcpy(l->u.utility.params, s->param.data,
+ l->u.utility.paramlen);
+
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
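A minimal caller sketch, using only the request fields this function actually reads (the connection id value is a made-up example):

	static int example_fill_param(struct cfcnfg *cnfg,
				      struct cfctrl_link_param *param)
	{
		struct caif_connect_request req = {
			.protocol	= CAIFPROTO_DATAGRAM,
			.link_selector	= CAIF_LINK_LOW_LATENCY,
			.priority	= 0,
		};

		req.sockaddr.u.dgm.connection_id = 1;	/* example channel id */
		return connect_req_to_link_param(cnfg, &req, param);
	}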
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
new file mode 100644
index 000000000000..e2b86f1f5a47
--- /dev/null
+++ b/net/caif/caif_dev.c
@@ -0,0 +1,417 @@
+/*
+ * CAIF Interface registration.
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Borrowed heavily from file: pn_dev.c. Thanks to
+ * Remi Denis-Courmont <remi.denis-courmont@nokia.com>
+ * and Sakari Ailus <sakari.ailus@nokia.com>
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/if_arp.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <net/netns/generic.h>
+#include <net/net_namespace.h>
+#include <net/pkt_sched.h>
+#include <net/caif/caif_device.h>
+#include <net/caif/caif_dev.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfpkt.h>
+#include <net/caif/cfcnfg.h>
+
+MODULE_LICENSE("GPL");
+#define TIMEOUT (HZ*5)
+
+/* Used for local tracking of the CAIF net devices */
+struct caif_device_entry {
+ struct cflayer layer;
+ struct list_head list;
+ atomic_t in_use;
+ atomic_t state;
+ u16 phyid;
+ struct net_device *netdev;
+ wait_queue_head_t event;
+};
+
+struct caif_device_entry_list {
+ struct list_head list;
+ /* Protects simulanous deletes in list */
+ spinlock_t lock;
+};
+
+struct caif_net {
+ struct caif_device_entry_list caifdevs;
+};
+
+static int caif_net_id;
+static struct cfcnfg *cfg;
+
+static struct caif_device_entry_list *caif_device_list(struct net *net)
+{
+ struct caif_net *caifn;
+ BUG_ON(!net);
+ caifn = net_generic(net, caif_net_id);
+ BUG_ON(!caifn);
+ return &caifn->caifdevs;
+}
+
+/* Allocate new CAIF device. */
+static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
+{
+ struct caif_device_entry_list *caifdevs;
+ struct caif_device_entry *caifd;
+ caifdevs = caif_device_list(dev_net(dev));
+ BUG_ON(!caifdevs);
+ caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC);
+ if (!caifd)
+ return NULL;
+ caifd->netdev = dev;
+ list_add(&caifd->list, &caifdevs->list);
+ init_waitqueue_head(&caifd->event);
+ return caifd;
+}
+
+static struct caif_device_entry *caif_get(struct net_device *dev)
+{
+ struct caif_device_entry_list *caifdevs =
+ caif_device_list(dev_net(dev));
+ struct caif_device_entry *caifd;
+ BUG_ON(!caifdevs);
+ list_for_each_entry(caifd, &caifdevs->list, list) {
+ if (caifd->netdev == dev)
+ return caifd;
+ }
+ return NULL;
+}
+
+static void caif_device_destroy(struct net_device *dev)
+{
+ struct caif_device_entry_list *caifdevs =
+ caif_device_list(dev_net(dev));
+ struct caif_device_entry *caifd;
+ ASSERT_RTNL();
+ if (dev->type != ARPHRD_CAIF)
+ return;
+
+ spin_lock_bh(&caifdevs->lock);
+ caifd = caif_get(dev);
+ if (caifd == NULL) {
+ spin_unlock_bh(&caifdevs->lock);
+ return;
+ }
+
+ list_del(&caifd->list);
+ spin_unlock_bh(&caifdevs->lock);
+
+ kfree(caifd);
+}
+
+static int transmit(struct cflayer *layer, struct cfpkt *pkt)
+{
+ struct caif_device_entry *caifd =
+ container_of(layer, struct caif_device_entry, layer);
+ struct sk_buff *skb, *skb2;
+ int ret = -EINVAL;
+ skb = cfpkt_tonative(pkt);
+ skb->dev = caifd->netdev;
+ /*
+ * Don't allow SKB to be destroyed upon error, but signal resend
+ * notification to clients. We can't rely on the return value as
+	 * congestion (NET_XMIT_CN) sometimes drops the packet and sometimes doesn't.
+ */
+ if (netif_queue_stopped(caifd->netdev))
+ return -EAGAIN;
+ skb2 = skb_get(skb);
+
+ ret = dev_queue_xmit(skb2);
+
+ if (!ret)
+ kfree_skb(skb);
+ else
+ return -EAGAIN;
+
+ return 0;
+}
+
+static int modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
+{
+ struct caif_device_entry *caifd;
+ struct caif_dev_common *caifdev;
+ caifd = container_of(layr, struct caif_device_entry, layer);
+ caifdev = netdev_priv(caifd->netdev);
+ if (ctrl == _CAIF_MODEMCMD_PHYIF_USEFULL) {
+ atomic_set(&caifd->in_use, 1);
+ wake_up_interruptible(&caifd->event);
+
+ } else if (ctrl == _CAIF_MODEMCMD_PHYIF_USELESS) {
+ atomic_set(&caifd->in_use, 0);
+ wake_up_interruptible(&caifd->event);
+ }
+ return 0;
+}
+
+/*
+ * Stuff received packets to associated sockets.
+ * On error, returns non-zero and releases the skb.
+ */
+static int receive(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pkttype, struct net_device *orig_dev)
+{
+ struct net *net;
+ struct cfpkt *pkt;
+ struct caif_device_entry *caifd;
+ net = dev_net(dev);
+ pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);
+ caifd = caif_get(dev);
+ if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
+ return NET_RX_DROP;
+
+ if (caifd->layer.up->receive(caifd->layer.up, pkt))
+ return NET_RX_DROP;
+
+ return 0;
+}
+
+static struct packet_type caif_packet_type __read_mostly = {
+ .type = cpu_to_be16(ETH_P_CAIF),
+ .func = receive,
+};
+
+static void dev_flowctrl(struct net_device *dev, int on)
+{
+ struct caif_device_entry *caifd = caif_get(dev);
+ if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
+ return;
+
+ caifd->layer.up->ctrlcmd(caifd->layer.up,
+ on ?
+ _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
+ _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
+ caifd->layer.id);
+}
+
+/* notify Caif of device events */
+static int caif_device_notify(struct notifier_block *me, unsigned long what,
+ void *arg)
+{
+ struct net_device *dev = arg;
+ struct caif_device_entry *caifd = NULL;
+ struct caif_dev_common *caifdev;
+ enum cfcnfg_phy_preference pref;
+ int res = -EINVAL;
+ enum cfcnfg_phy_type phy_type;
+
+ if (dev->type != ARPHRD_CAIF)
+ return 0;
+
+ switch (what) {
+ case NETDEV_REGISTER:
+ pr_info("CAIF: %s():register %s\n", __func__, dev->name);
+ caifd = caif_device_alloc(dev);
+ if (caifd == NULL)
+ break;
+ caifdev = netdev_priv(dev);
+ caifdev->flowctrl = dev_flowctrl;
+ atomic_set(&caifd->state, what);
+ res = 0;
+ break;
+
+ case NETDEV_UP:
+ pr_info("CAIF: %s(): up %s\n", __func__, dev->name);
+ caifd = caif_get(dev);
+ if (caifd == NULL)
+ break;
+ caifdev = netdev_priv(dev);
+ if (atomic_read(&caifd->state) == NETDEV_UP) {
+ pr_info("CAIF: %s():%s already up\n",
+ __func__, dev->name);
+ break;
+ }
+ atomic_set(&caifd->state, what);
+ caifd->layer.transmit = transmit;
+ caifd->layer.modemcmd = modemcmd;
+
+ if (caifdev->use_frag)
+ phy_type = CFPHYTYPE_FRAG;
+ else
+ phy_type = CFPHYTYPE_CAIF;
+
+ switch (caifdev->link_select) {
+ case CAIF_LINK_HIGH_BANDW:
+ pref = CFPHYPREF_HIGH_BW;
+ break;
+ case CAIF_LINK_LOW_LATENCY:
+ pref = CFPHYPREF_LOW_LAT;
+ break;
+ default:
+ pref = CFPHYPREF_HIGH_BW;
+ break;
+ }
+
+ cfcnfg_add_phy_layer(get_caif_conf(),
+ phy_type,
+ dev,
+ &caifd->layer,
+ &caifd->phyid,
+ pref,
+ caifdev->use_fcs,
+ caifdev->use_stx);
+ strncpy(caifd->layer.name, dev->name,
+ sizeof(caifd->layer.name) - 1);
+ caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
+ break;
+
+ case NETDEV_GOING_DOWN:
+ caifd = caif_get(dev);
+ if (caifd == NULL)
+ break;
+ pr_info("CAIF: %s():going down %s\n", __func__, dev->name);
+
+ if (atomic_read(&caifd->state) == NETDEV_GOING_DOWN ||
+ atomic_read(&caifd->state) == NETDEV_DOWN)
+ break;
+
+ atomic_set(&caifd->state, what);
+ if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
+ return -EINVAL;
+ caifd->layer.up->ctrlcmd(caifd->layer.up,
+ _CAIF_CTRLCMD_PHYIF_DOWN_IND,
+ caifd->layer.id);
+ res = wait_event_interruptible_timeout(caifd->event,
+ atomic_read(&caifd->in_use) == 0,
+ TIMEOUT);
+ break;
+
+ case NETDEV_DOWN:
+ caifd = caif_get(dev);
+ if (caifd == NULL)
+ break;
+ pr_info("CAIF: %s(): down %s\n", __func__, dev->name);
+ if (atomic_read(&caifd->in_use))
+ pr_warning("CAIF: %s(): "
+ "Unregistering an active CAIF device: %s\n",
+ __func__, dev->name);
+ cfcnfg_del_phy_layer(get_caif_conf(), &caifd->layer);
+ atomic_set(&caifd->state, what);
+ break;
+
+ case NETDEV_UNREGISTER:
+ caifd = caif_get(dev);
+ pr_info("CAIF: %s(): unregister %s\n", __func__, dev->name);
+ atomic_set(&caifd->state, what);
+ caif_device_destroy(dev);
+ break;
+ }
+ return 0;
+}
+
+static struct notifier_block caif_device_notifier = {
+ .notifier_call = caif_device_notify,
+ .priority = 0,
+};
+
+
+struct cfcnfg *get_caif_conf(void)
+{
+ return cfg;
+}
+EXPORT_SYMBOL(get_caif_conf);
+
+int caif_connect_client(struct caif_connect_request *conn_req,
+ struct cflayer *client_layer)
+{
+ struct cfctrl_link_param param;
+ int ret;
+ ret = connect_req_to_link_param(get_caif_conf(), conn_req, &param);
+ if (ret)
+ return ret;
+ /* Hook up the adaptation layer. */
+ return cfcnfg_add_adaptation_layer(get_caif_conf(),
+ &param, client_layer);
+}
+EXPORT_SYMBOL(caif_connect_client);
+
+int caif_disconnect_client(struct cflayer *adap_layer)
+{
+ return cfcnfg_disconn_adapt_layer(get_caif_conf(), adap_layer);
+}
+EXPORT_SYMBOL(caif_disconnect_client);
+
+void caif_release_client(struct cflayer *adap_layer)
+{
+ cfcnfg_release_adap_layer(adap_layer);
+}
+EXPORT_SYMBOL(caif_release_client);
+
+/* Per-namespace Caif devices handling */
+static int caif_init_net(struct net *net)
+{
+ struct caif_net *caifn = net_generic(net, caif_net_id);
+ INIT_LIST_HEAD(&caifn->caifdevs.list);
+ spin_lock_init(&caifn->caifdevs.lock);
+ return 0;
+}
+
+static void caif_exit_net(struct net *net)
+{
+ struct net_device *dev;
+ rtnl_lock();
+ for_each_netdev(net, dev) {
+ if (dev->type != ARPHRD_CAIF)
+ continue;
+ dev_close(dev);
+ caif_device_destroy(dev);
+ }
+ rtnl_unlock();
+}
+
+static struct pernet_operations caif_net_ops = {
+ .init = caif_init_net,
+ .exit = caif_exit_net,
+ .id = &caif_net_id,
+ .size = sizeof(struct caif_net),
+};
+
+/* Initialize Caif devices list */
+static int __init caif_device_init(void)
+{
+ int result;
+ cfg = cfcnfg_create();
+ if (!cfg) {
+ pr_warning("CAIF: %s(): can't create cfcnfg.\n", __func__);
+ goto err_cfcnfg_create_failed;
+ }
+ result = register_pernet_device(&caif_net_ops);
+
+ if (result) {
+ kfree(cfg);
+ cfg = NULL;
+ return result;
+ }
+ dev_add_pack(&caif_packet_type);
+ register_netdevice_notifier(&caif_device_notifier);
+
+ return result;
+err_cfcnfg_create_failed:
+ return -ENODEV;
+}
+
+static void __exit caif_device_exit(void)
+{
+ /* Tear down in the reverse order of caif_device_init() */
+ unregister_netdevice_notifier(&caif_device_notifier);
+ dev_remove_pack(&caif_packet_type);
+ unregister_pernet_device(&caif_net_ops);
+ cfcnfg_remove(cfg);
+}
+
+module_init(caif_device_init);
+module_exit(caif_device_exit);
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
new file mode 100644
index 000000000000..c3a70c5c893a
--- /dev/null
+++ b/net/caif/caif_socket.c
@@ -0,0 +1,1252 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/poll.h>
+#include <linux/tcp.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/caif/caif_socket.h>
+#include <asm/atomic.h>
+#include <net/sock.h>
+#include <net/tcp_states.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/caif_dev.h>
+#include <net/caif/cfpkt.h>
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NETPROTO(AF_CAIF);
+
+#define CAIF_DEF_SNDBUF (CAIF_MAX_PAYLOAD_SIZE*10)
+#define CAIF_DEF_RCVBUF (CAIF_MAX_PAYLOAD_SIZE*100)
+
+/*
+ * CAIF state is re-using the TCP socket states.
+ * caif_states stored in sk_state reflect the state as reported by
+ * the CAIF stack, while sk_socket->state is the state of the socket.
+ */
+enum caif_states {
+ CAIF_CONNECTED = TCP_ESTABLISHED,
+ CAIF_CONNECTING = TCP_SYN_SENT,
+ CAIF_DISCONNECTED = TCP_CLOSE
+};
+
+#define TX_FLOW_ON_BIT 1
+#define RX_FLOW_ON_BIT 2
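+
+/*
+ * flow_state holds two independent on/off bits, manipulated atomically
+ * with the set_bit/clear_bit/test_bit helpers below: TX (may we send
+ * towards the modem?) and RX (have we told the modem it may send to us?).
+ */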
+
+static struct dentry *debugfsdir;
+
+#ifdef CONFIG_DEBUG_FS
+struct debug_fs_counter {
+ atomic_t caif_nr_socks;
+ atomic_t num_connect_req;
+ atomic_t num_connect_resp;
+ atomic_t num_connect_fail_resp;
+ atomic_t num_disconnect;
+ atomic_t num_remote_shutdown_ind;
+ atomic_t num_tx_flow_off_ind;
+ atomic_t num_tx_flow_on_ind;
+ atomic_t num_rx_flow_off;
+ atomic_t num_rx_flow_on;
+};
+struct debug_fs_counter cnt;
+#define dbfs_atomic_inc(v) atomic_inc(v)
+#define dbfs_atomic_dec(v) atomic_dec(v)
+#else
+#define dbfs_atomic_inc(v)
+#define dbfs_atomic_dec(v)
+#endif
+
+struct caifsock {
+ struct sock sk; /* must be first member */
+ struct cflayer layer;
+ char name[CAIF_LAYER_NAME_SZ]; /* Used for debugging */
+ u32 flow_state;
+ struct caif_connect_request conn_req;
+ struct mutex readlock;
+ struct dentry *debugfs_socket_dir;
+};
+
+static int rx_flow_is_on(struct caifsock *cf_sk)
+{
+ return test_bit(RX_FLOW_ON_BIT,
+ (void *) &cf_sk->flow_state);
+}
+
+static int tx_flow_is_on(struct caifsock *cf_sk)
+{
+ return test_bit(TX_FLOW_ON_BIT,
+ (void *) &cf_sk->flow_state);
+}
+
+static void set_rx_flow_off(struct caifsock *cf_sk)
+{
+ clear_bit(RX_FLOW_ON_BIT,
+ (void *) &cf_sk->flow_state);
+}
+
+static void set_rx_flow_on(struct caifsock *cf_sk)
+{
+ set_bit(RX_FLOW_ON_BIT,
+ (void *) &cf_sk->flow_state);
+}
+
+static void set_tx_flow_off(struct caifsock *cf_sk)
+{
+ clear_bit(TX_FLOW_ON_BIT,
+ (void *) &cf_sk->flow_state);
+}
+
+static void set_tx_flow_on(struct caifsock *cf_sk)
+{
+ set_bit(TX_FLOW_ON_BIT,
+ (void *) &cf_sk->flow_state);
+}
+
+static void caif_read_lock(struct sock *sk)
+{
+ struct caifsock *cf_sk;
+ cf_sk = container_of(sk, struct caifsock, sk);
+ mutex_lock(&cf_sk->readlock);
+}
+
+static void caif_read_unlock(struct sock *sk)
+{
+ struct caifsock *cf_sk;
+ cf_sk = container_of(sk, struct caifsock, sk);
+ mutex_unlock(&cf_sk->readlock);
+}
+
+int sk_rcvbuf_lowwater(struct caifsock *cf_sk)
+{
+ /* A quarter of the full buffer is used as the low water mark */
+ return cf_sk->sk.sk_rcvbuf / 4;
+}
+
+void caif_flow_ctrl(struct sock *sk, int mode)
+{
+ struct caifsock *cf_sk;
+ cf_sk = container_of(sk, struct caifsock, sk);
+ if (cf_sk->layer.dn)
+ cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode);
+}
+
+/*
+ * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are
+ * not dropped, but CAIF is sending flow off instead.
+ */
+int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+ int err;
+ int skb_len;
+ unsigned long flags;
+ struct sk_buff_head *list = &sk->sk_receive_queue;
+ struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+
+ if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+ (unsigned)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
+ trace_printk("CAIF: %s():"
+ " sending flow OFF (queue len = %d %d)\n",
+ __func__,
+ atomic_read(&cf_sk->sk.sk_rmem_alloc),
+ sk_rcvbuf_lowwater(cf_sk));
+ set_rx_flow_off(cf_sk);
+ if (cf_sk->layer.dn)
+ cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
+ CAIF_MODEMCMD_FLOW_OFF_REQ);
+ }
+
+ err = sk_filter(sk, skb);
+ if (err)
+ return err;
+ if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) {
+ set_rx_flow_off(cf_sk);
+ trace_printk("CAIF: %s():"
+ " sending flow OFF due to rmem_schedule\n",
+ __func__);
+ if (cf_sk->layer.dn)
+ cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
+ CAIF_MODEMCMD_FLOW_OFF_REQ);
+ }
+ skb->dev = NULL;
+ skb_set_owner_r(skb, sk);
+ /* Cache the SKB length before we tack it onto the receive
+ * queue. Once it is added it no longer belongs to us and
+ * may be freed by other threads of control pulling packets
+ * from the queue.
+ */
+ skb_len = skb->len;
+ spin_lock_irqsave(&list->lock, flags);
+ if (!sock_flag(sk, SOCK_DEAD))
+ __skb_queue_tail(list, skb);
+ spin_unlock_irqrestore(&list->lock, flags);
+
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_data_ready(sk, skb_len);
+ else
+ kfree_skb(skb);
+ return 0;
+}
+
+/* Packet Receive Callback function called from CAIF Stack */
+static int caif_sktrecv_cb(struct cflayer *layr, struct cfpkt *pkt)
+{
+ struct caifsock *cf_sk;
+ struct sk_buff *skb;
+
+ cf_sk = container_of(layr, struct caifsock, layer);
+ skb = cfpkt_tonative(pkt);
+
+ if (unlikely(cf_sk->sk.sk_state != CAIF_CONNECTED)) {
+ cfpkt_destroy(pkt);
+ return 0;
+ }
+ caif_queue_rcv_skb(&cf_sk->sk, skb);
+ return 0;
+}
+
+/* Packet Control Callback function called from CAIF */
+static void caif_ctrl_cb(struct cflayer *layr,
+ enum caif_ctrlcmd flow,
+ int phyid)
+{
+ struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
+ switch (flow) {
+ case CAIF_CTRLCMD_FLOW_ON_IND:
+ /* OK from modem to start sending again */
+ dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
+ set_tx_flow_on(cf_sk);
+ cf_sk->sk.sk_state_change(&cf_sk->sk);
+ break;
+
+ case CAIF_CTRLCMD_FLOW_OFF_IND:
+ /* Modem asks us to shut up */
+ dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
+ set_tx_flow_off(cf_sk);
+ cf_sk->sk.sk_state_change(&cf_sk->sk);
+ break;
+
+ case CAIF_CTRLCMD_INIT_RSP:
+ /* We're now connected */
+ dbfs_atomic_inc(&cnt.num_connect_resp);
+ cf_sk->sk.sk_state = CAIF_CONNECTED;
+ set_tx_flow_on(cf_sk);
+ cf_sk->sk.sk_state_change(&cf_sk->sk);
+ break;
+
+ case CAIF_CTRLCMD_DEINIT_RSP:
+ /* We're now disconnected */
+ cf_sk->sk.sk_state = CAIF_DISCONNECTED;
+ cf_sk->sk.sk_state_change(&cf_sk->sk);
+ cfcnfg_release_adap_layer(&cf_sk->layer);
+ break;
+
+ case CAIF_CTRLCMD_INIT_FAIL_RSP:
+ /* Connect request failed */
+ dbfs_atomic_inc(&cnt.num_connect_fail_resp);
+ cf_sk->sk.sk_err = ECONNREFUSED;
+ cf_sk->sk.sk_state = CAIF_DISCONNECTED;
+ cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
+ /*
+ * Socket "standards" seems to require POLLOUT to
+ * be set at connect failure.
+ */
+ set_tx_flow_on(cf_sk);
+ cf_sk->sk.sk_state_change(&cf_sk->sk);
+ break;
+
+ case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
+ /* Modem has closed this connection, or device is down. */
+ dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
+ cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
+ cf_sk->sk.sk_err = ECONNRESET;
+ set_rx_flow_on(cf_sk);
+ cf_sk->sk.sk_error_report(&cf_sk->sk);
+ break;
+
+ default:
+ pr_debug("CAIF: %s(): Unexpected flow command %d\n",
+ __func__, flow);
+ }
+}
+
+static void caif_check_flow_release(struct sock *sk)
+{
+ struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+
+ if (cf_sk->layer.dn == NULL || cf_sk->layer.dn->modemcmd == NULL)
+ return;
+ if (rx_flow_is_on(cf_sk))
+ return;
+
+ if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
+ dbfs_atomic_inc(&cnt.num_rx_flow_on);
+ set_rx_flow_on(cf_sk);
+ cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
+ CAIF_MODEMCMD_FLOW_ON_REQ);
+ }
+}
+/*
+ * Copied from sock.c:sock_queue_rcv_skb(), and added check that user buffer
+ * has sufficient size.
+ */
+
+static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t buf_len, int flags)
+
+{
+ struct sock *sk = sock->sk;
+ struct sk_buff *skb;
+ int ret = 0;
+ int len;
+
+ if (unlikely(!buf_len))
+ return -EINVAL;
+
+ skb = skb_recv_datagram(sk, flags, 0, &ret);
+ if (!skb)
+ goto read_error;
+
+ len = skb->len;
+
+ if (skb->len > buf_len && !(flags & MSG_PEEK)) {
+ len = buf_len;
+ /*
+ * Push skb back on receive queue if buffer too small.
+ * This has a built-in race where multi-threaded receive
+ * may get packet in wrong order, but multiple read does
+ * not really guarantee ordered delivery anyway.
+ * Let's optimize for speed without taking locks.
+ */
+
+ skb_queue_head(&sk->sk_receive_queue, skb);
+ ret = -EMSGSIZE;
+ goto read_error;
+ }
+
+ ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, len);
+ if (ret)
+ goto read_error;
+
+ skb_free_datagram(sk, skb);
+
+ caif_check_flow_release(sk);
+
+ return len;
+
+read_error:
+ return ret;
+}
+
+
+/* Copied from unix_stream_wait_data, identical except for lock call. */
+static long caif_stream_data_wait(struct sock *sk, long timeo)
+{
+ DEFINE_WAIT(wait);
+ lock_sock(sk);
+
+ for (;;) {
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+
+ if (!skb_queue_empty(&sk->sk_receive_queue) ||
+ sk->sk_err ||
+ sk->sk_state != CAIF_CONNECTED ||
+ sock_flag(sk, SOCK_DEAD) ||
+ (sk->sk_shutdown & RCV_SHUTDOWN) ||
+ signal_pending(current) ||
+ !timeo)
+ break;
+
+ set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock(sk);
+ clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ }
+
+ finish_wait(sk_sleep(sk), &wait);
+ release_sock(sk);
+ return timeo;
+}
+
+
+/*
+ * Copied from unix_stream_recvmsg, but removed credit checks,
+ * changed locking calls, changed address handling.
+ */
+static int caif_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t size,
+ int flags)
+{
+ struct sock *sk = sock->sk;
+ int copied = 0;
+ int target;
+ int err = 0;
+ long timeo;
+
+ err = -EOPNOTSUPP;
+ if (flags&MSG_OOB)
+ goto out;
+
+ msg->msg_namelen = 0;
+
+ /*
+ * Lock the socket to prevent queue disordering
+ * while sleeps in memcpy_tomsg
+ */
+ err = -EAGAIN;
+ if (sk->sk_state == CAIF_CONNECTING)
+ goto out;
+
+ caif_read_lock(sk);
+ target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
+ timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
+
+ do {
+ int chunk;
+ struct sk_buff *skb;
+
+ lock_sock(sk);
+ skb = skb_dequeue(&sk->sk_receive_queue);
+ caif_check_flow_release(sk);
+
+ if (skb == NULL) {
+ if (copied >= target)
+ goto unlock;
+ /*
+ * POSIX 1003.1g mandates this order.
+ */
+ err = sock_error(sk);
+ if (err)
+ goto unlock;
+ err = -ECONNRESET;
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ goto unlock;
+
+ err = -EPIPE;
+ if (sk->sk_state != CAIF_CONNECTED)
+ goto unlock;
+ if (sock_flag(sk, SOCK_DEAD))
+ goto unlock;
+
+ release_sock(sk);
+
+ err = -EAGAIN;
+ if (!timeo)
+ break;
+
+ caif_read_unlock(sk);
+
+ timeo = caif_stream_data_wait(sk, timeo);
+
+ if (signal_pending(current)) {
+ err = sock_intr_errno(timeo);
+ goto out;
+ }
+ caif_read_lock(sk);
+ continue;
+unlock:
+ release_sock(sk);
+ break;
+ }
+ release_sock(sk);
+ chunk = min_t(unsigned int, skb->len, size);
+ if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
+ skb_queue_head(&sk->sk_receive_queue, skb);
+ if (copied == 0)
+ copied = -EFAULT;
+ break;
+ }
+ copied += chunk;
+ size -= chunk;
+
+ /* Mark read part of skb as used */
+ if (!(flags & MSG_PEEK)) {
+ skb_pull(skb, chunk);
+
+ /* put the skb back if we didn't use it up. */
+ if (skb->len) {
+ skb_queue_head(&sk->sk_receive_queue, skb);
+ break;
+ }
+ kfree_skb(skb);
+
+ } else {
+ /*
+ * It is questionable, see note in unix_dgram_recvmsg.
+ */
+ /* put message back and return */
+ skb_queue_head(&sk->sk_receive_queue, skb);
+ break;
+ }
+ } while (size);
+ caif_read_unlock(sk);
+
+out:
+ return copied ? : err;
+}
+
+/*
+ * Copied from sock.c:sock_wait_for_wmem, but change to wait for
+ * CAIF flow-on and sock_writable.
+ */
+static long caif_wait_for_flow_on(struct caifsock *cf_sk,
+ int wait_writeable, long timeo, int *err)
+{
+ struct sock *sk = &cf_sk->sk;
+ DEFINE_WAIT(wait);
+ for (;;) {
+ *err = 0;
+ if (tx_flow_is_on(cf_sk) &&
+ (!wait_writeable || sock_writeable(&cf_sk->sk)))
+ break;
+ *err = -ETIMEDOUT;
+ if (!timeo)
+ break;
+ *err = -ERESTARTSYS;
+ if (signal_pending(current))
+ break;
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+ *err = -ECONNRESET;
+ if (sk->sk_shutdown & SHUTDOWN_MASK)
+ break;
+ *err = -sk->sk_err;
+ if (sk->sk_err)
+ break;
+ *err = -EPIPE;
+ if (cf_sk->sk.sk_state != CAIF_CONNECTED)
+ break;
+ timeo = schedule_timeout(timeo);
+ }
+ finish_wait(sk_sleep(sk), &wait);
+ return timeo;
+}
+
+/*
+ * Transmit an skb. The device may temporarily request re-transmission
+ * by returning -EAGAIN.
+ */
+static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
+ int noblock, long timeo)
+{
+ struct cfpkt *pkt;
+ int ret, loopcnt = 0;
+
+ pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);
+ memset(cfpkt_info(pkt), 0, sizeof(struct caif_payload_info));
+ do {
+
+ ret = -ETIMEDOUT;
+
+ /* Slight paranoia, probably not needed. */
+ if (unlikely(loopcnt++ > 1000)) {
+ pr_warning("CAIF: %s(): transmit retries failed,"
+ " error = %d\n", __func__, ret);
+ break;
+ }
+
+ if (cf_sk->layer.dn != NULL)
+ ret = cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
+ if (likely(ret >= 0))
+ break;
+ /* if transmit returns -EAGAIN, then retry */
+ if (noblock && ret == -EAGAIN)
+ break;
+ timeo = caif_wait_for_flow_on(cf_sk, 0, timeo, &ret);
+ if (signal_pending(current)) {
+ ret = sock_intr_errno(timeo);
+ break;
+ }
+ if (ret)
+ break;
+ if (cf_sk->sk.sk_state != CAIF_CONNECTED ||
+ sock_flag(&cf_sk->sk, SOCK_DEAD) ||
+ (cf_sk->sk.sk_shutdown & SEND_SHUTDOWN)) {
+ ret = -EPIPE;
+ cf_sk->sk.sk_err = EPIPE;
+ break;
+ }
+ } while (ret == -EAGAIN);
+ return ret;
+}
+
+/* Copied from af_unix:unix_dgram_sendmsg, and adapted to CAIF */
+static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock,
+ struct msghdr *msg, size_t len)
+{
+ struct sock *sk = sock->sk;
+ struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+ int buffer_size;
+ int ret = 0;
+ struct sk_buff *skb = NULL;
+ int noblock;
+ long timeo;
+ caif_assert(cf_sk);
+ ret = sock_error(sk);
+ if (ret)
+ goto err;
+
+ ret = -EOPNOTSUPP;
+ if (msg->msg_flags&MSG_OOB)
+ goto err;
+
+ ret = -EOPNOTSUPP;
+ if (msg->msg_namelen)
+ goto err;
+
+ ret = -EINVAL;
+ if (unlikely(msg->msg_iov->iov_base == NULL))
+ goto err;
+ noblock = msg->msg_flags & MSG_DONTWAIT;
+
+ buffer_size = len + CAIF_NEEDED_HEADROOM + CAIF_NEEDED_TAILROOM;
+
+ ret = -EMSGSIZE;
+ if (buffer_size > CAIF_MAX_PAYLOAD_SIZE)
+ goto err;
+
+ timeo = sock_sndtimeo(sk, noblock);
+ timeo = caif_wait_for_flow_on(container_of(sk, struct caifsock, sk),
+ 1, timeo, &ret);
+
+ ret = -EPIPE;
+ if (cf_sk->sk.sk_state != CAIF_CONNECTED ||
+ sock_flag(sk, SOCK_DEAD) ||
+ (sk->sk_shutdown & SEND_SHUTDOWN))
+ goto err;
+
+ ret = -ENOMEM;
+ skb = sock_alloc_send_skb(sk, buffer_size, noblock, &ret);
+ if (!skb)
+ goto err;
+ skb_reserve(skb, CAIF_NEEDED_HEADROOM);
+
+ ret = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
+
+ if (ret)
+ goto err;
+ ret = transmit_skb(skb, cf_sk, noblock, timeo);
+ if (ret < 0)
+ goto err;
+ return len;
+err:
+ kfree_skb(skb);
+ return ret;
+}
+
+/*
+ * Copied from unix_stream_sendmsg and adapted to CAIF:
+ * removed permission handling, added waiting for flow-on,
+ * and made other minor adaptations.
+ */
+static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
+ struct msghdr *msg, size_t len)
+{
+ struct sock *sk = sock->sk;
+ struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+ int err, size;
+ struct sk_buff *skb;
+ int sent = 0;
+ long timeo;
+
+ err = -EOPNOTSUPP;
+
+ if (unlikely(msg->msg_flags&MSG_OOB))
+ goto out_err;
+
+ if (unlikely(msg->msg_namelen))
+ goto out_err;
+
+ timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
+ timeo = caif_wait_for_flow_on(cf_sk, 1, timeo, &err);
+
+ if (unlikely(sk->sk_shutdown & SEND_SHUTDOWN))
+ goto pipe_err;
+
+ while (sent < len) {
+
+ size = len-sent;
+
+ if (size > CAIF_MAX_PAYLOAD_SIZE)
+ size = CAIF_MAX_PAYLOAD_SIZE;
+
+ /* If size is more than half of sndbuf, chop up message */
+ if (size > ((sk->sk_sndbuf >> 1) - 64))
+ size = (sk->sk_sndbuf >> 1) - 64;
+
+ if (size > SKB_MAX_ALLOC)
+ size = SKB_MAX_ALLOC;
+
+ skb = sock_alloc_send_skb(sk,
+ size + CAIF_NEEDED_HEADROOM
+ + CAIF_NEEDED_TAILROOM,
+ msg->msg_flags&MSG_DONTWAIT,
+ &err);
+ if (skb == NULL)
+ goto out_err;
+
+ skb_reserve(skb, CAIF_NEEDED_HEADROOM);
+ /*
+ * If you pass two values to the sock_alloc_send_skb
+ * it tries to grab the large buffer with GFP_NOFS
+ * (which can fail easily), and if it fails grab the
+ * fallback size buffer which is under a page and will
+ * succeed. [Alan]
+ */
+ size = min_t(int, size, skb_tailroom(skb));
+
+ err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
+ if (err) {
+ kfree_skb(skb);
+ goto out_err;
+ }
+ err = transmit_skb(skb, cf_sk,
+ msg->msg_flags&MSG_DONTWAIT, timeo);
+ if (err < 0) {
+ kfree_skb(skb);
+ goto pipe_err;
+ }
+ sent += size;
+ }
+
+ return sent;
+
+pipe_err:
+ if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
+ send_sig(SIGPIPE, current, 0);
+ err = -EPIPE;
+out_err:
+ return sent ? : err;
+}
+
+static int setsockopt(struct socket *sock,
+ int lvl, int opt, char __user *ov, unsigned int ol)
+{
+ struct sock *sk = sock->sk;
+ struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+ int prio, linksel;
+ struct ifreq ifreq;
+
+ if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED)
+ return -ENOPROTOOPT;
+
+ switch (opt) {
+ case CAIFSO_LINK_SELECT:
+ if (ol < sizeof(int))
+ return -EINVAL;
+ if (lvl != SOL_CAIF)
+ goto bad_sol;
+ if (copy_from_user(&linksel, ov, sizeof(int)))
+ return -EFAULT;
+ lock_sock(&(cf_sk->sk));
+ cf_sk->conn_req.link_selector = linksel;
+ release_sock(&cf_sk->sk);
+ return 0;
+
+ case SO_PRIORITY:
+ if (lvl != SOL_SOCKET)
+ goto bad_sol;
+ if (ol < sizeof(int))
+ return -EINVAL;
+ if (copy_from_user(&prio, ov, sizeof(int)))
+ return -EFAULT;
+ lock_sock(&(cf_sk->sk));
+ cf_sk->conn_req.priority = prio;
+ release_sock(&cf_sk->sk);
+ return 0;
+
+ case SO_BINDTODEVICE:
+ if (lvl != SOL_SOCKET)
+ goto bad_sol;
+ if (ol < sizeof(struct ifreq))
+ return -EINVAL;
+ if (copy_from_user(&ifreq, ov, sizeof(ifreq)))
+ return -EFAULT;
+ lock_sock(&(cf_sk->sk));
+ strncpy(cf_sk->conn_req.link_name, ifreq.ifr_name,
+ sizeof(cf_sk->conn_req.link_name));
+ cf_sk->conn_req.link_name
+ [sizeof(cf_sk->conn_req.link_name)-1] = 0;
+ release_sock(&cf_sk->sk);
+ return 0;
+
+ case CAIFSO_REQ_PARAM:
+ if (lvl != SOL_CAIF)
+ goto bad_sol;
+ if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL)
+ return -ENOPROTOOPT;
+ lock_sock(&(cf_sk->sk));
+ if (ol > sizeof(cf_sk->conn_req.param.data) ||
+ copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) {
+ release_sock(&cf_sk->sk);
+ return -EINVAL;
+ }
+ /* Record the size only after the data has been validated and copied */
+ cf_sk->conn_req.param.size = ol;
+ release_sock(&cf_sk->sk);
+ return 0;
+
+ default:
+ return -ENOPROTOOPT;
+ }
+
+ return 0;
+bad_sol:
+ return -ENOPROTOOPT;
+
+}
+
+/*
+ * caif_connect() - Connect a CAIF Socket
+ * Copied and modified af_irda.c:irda_connect().
+ *
+ * Note: by consulting "errno", the user space caller may learn the cause
+ * of the failure. Most of them are visible in the function, others may come
+ * from subroutines called and are listed here :
+ * o -EAFNOSUPPORT: bad socket family or type.
+ * o -ESOCKTNOSUPPORT: bad socket type or protocol
+ * o -EINVAL: bad socket address, or CAIF link type
+ * o -ECONNREFUSED: remote end refused the connection.
+ * o -EINPROGRESS: connect request sent but timed out (or non-blocking)
+ * o -EISCONN: already connected.
+ * o -ETIMEDOUT: Connection timed out (send timeout)
+ * o -ENODEV: No link layer to send request
+ * o -ECONNRESET: Received Shutdown indication or lost link layer
+ * o -ENOMEM: Out of memory
+ *
+ * State Strategy:
+ * o sk_state: holds the CAIF_* protocol state, it's updated by
+ * caif_ctrl_cb.
+ * o sock->state: holds the SS_* socket state and is updated by connect and
+ * disconnect.
+ */
+static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+ long timeo;
+ int err;
+ lock_sock(sk);
+
+ err = -EAFNOSUPPORT;
+ if (uaddr->sa_family != AF_CAIF)
+ goto out;
+
+ err = -ESOCKTNOSUPPORT;
+ if (unlikely(!(sk->sk_type == SOCK_STREAM &&
+ cf_sk->sk.sk_protocol == CAIFPROTO_AT) &&
+ sk->sk_type != SOCK_SEQPACKET))
+ goto out;
+ switch (sock->state) {
+ case SS_UNCONNECTED:
+ /* Normal case, a fresh connect */
+ caif_assert(sk->sk_state == CAIF_DISCONNECTED);
+ break;
+ case SS_CONNECTING:
+ switch (sk->sk_state) {
+ case CAIF_CONNECTED:
+ sock->state = SS_CONNECTED;
+ err = -EISCONN;
+ goto out;
+ case CAIF_DISCONNECTED:
+ /* Reconnect allowed */
+ break;
+ case CAIF_CONNECTING:
+ err = -EALREADY;
+ if (flags & O_NONBLOCK)
+ goto out;
+ goto wait_connect;
+ }
+ break;
+ case SS_CONNECTED:
+ caif_assert(sk->sk_state == CAIF_CONNECTED ||
+ sk->sk_state == CAIF_DISCONNECTED);
+ if (sk->sk_shutdown & SHUTDOWN_MASK) {
+ /* Allow re-connect after SHUTDOWN_IND */
+ caif_disconnect_client(&cf_sk->layer);
+ break;
+ }
+ /* No reconnect on a seqpacket socket */
+ err = -EISCONN;
+ goto out;
+ case SS_DISCONNECTING:
+ case SS_FREE:
+ caif_assert(0); /* Should never happen */
+ break;
+ }
+ sk->sk_state = CAIF_DISCONNECTED;
+ sock->state = SS_UNCONNECTED;
+ sk_stream_kill_queues(&cf_sk->sk);
+
+ err = -EINVAL;
+	/* uaddr was already dereferenced above, so only check the length */
+	if (addr_len != sizeof(struct sockaddr_caif))
+	goto out;
+
+ memcpy(&cf_sk->conn_req.sockaddr, uaddr,
+ sizeof(struct sockaddr_caif));
+
+ /* Move to connecting socket, start sending Connect Requests */
+ sock->state = SS_CONNECTING;
+ sk->sk_state = CAIF_CONNECTING;
+
+ dbfs_atomic_inc(&cnt.num_connect_req);
+ cf_sk->layer.receive = caif_sktrecv_cb;
+ err = caif_connect_client(&cf_sk->conn_req,
+ &cf_sk->layer);
+ if (err < 0) {
+ cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
+ cf_sk->sk.sk_state = CAIF_DISCONNECTED;
+ goto out;
+ }
+
+ err = -EINPROGRESS;
+wait_connect:
+
+ if (sk->sk_state != CAIF_CONNECTED && (flags & O_NONBLOCK))
+ goto out;
+
+ timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
+
+ release_sock(sk);
+ err = wait_event_interruptible_timeout(*sk_sleep(sk),
+ sk->sk_state != CAIF_CONNECTING,
+ timeo);
+ lock_sock(sk);
+ if (err < 0)
+ goto out; /* -ERESTARTSYS */
+ if (err == 0 && sk->sk_state != CAIF_CONNECTED) {
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+ if (sk->sk_state != CAIF_CONNECTED) {
+ sock->state = SS_UNCONNECTED;
+ err = sock_error(sk);
+ if (!err)
+ err = -ECONNREFUSED;
+ goto out;
+ }
+ sock->state = SS_CONNECTED;
+ err = 0;
+out:
+ release_sock(sk);
+ return err;
+}
+
+
+/*
+ * caif_release() - Disconnect a CAIF Socket
+ * Copied and modified af_irda.c:irda_release().
+ */
+static int caif_release(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+ struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+ int res = 0;
+
+ if (!sk)
+ return 0;
+
+ set_tx_flow_off(cf_sk);
+
+ /*
+ * Ensure that no packets are queued after this point.
+ * caif_queue_rcv_skb checks SOCK_DEAD while holding the queue lock,
+ * which guarantees no packets are queued once the socket is dead.
+ */
+ spin_lock(&sk->sk_receive_queue.lock);
+ sock_set_flag(sk, SOCK_DEAD);
+ spin_unlock(&sk->sk_receive_queue.lock);
+ sock->sk = NULL;
+
+ dbfs_atomic_inc(&cnt.num_disconnect);
+
+ if (cf_sk->debugfs_socket_dir != NULL)
+ debugfs_remove_recursive(cf_sk->debugfs_socket_dir);
+
+ lock_sock(&(cf_sk->sk));
+ sk->sk_state = CAIF_DISCONNECTED;
+ sk->sk_shutdown = SHUTDOWN_MASK;
+
+ if (cf_sk->sk.sk_socket->state == SS_CONNECTED ||
+ cf_sk->sk.sk_socket->state == SS_CONNECTING)
+ res = caif_disconnect_client(&cf_sk->layer);
+
+ cf_sk->sk.sk_socket->state = SS_DISCONNECTING;
+ wake_up_interruptible_poll(sk_sleep(sk), POLLERR|POLLHUP);
+
+ sock_orphan(sk);
+ cf_sk->layer.dn = NULL;
+ sk_stream_kill_queues(&cf_sk->sk);
+ release_sock(sk);
+ sock_put(sk);
+ return res;
+}
+
+/* Copied from af_unix.c:unix_poll(), added CAIF tx_flow handling */
+static unsigned int caif_poll(struct file *file,
+ struct socket *sock, poll_table *wait)
+{
+ struct sock *sk = sock->sk;
+ unsigned int mask;
+ struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+
+ sock_poll_wait(file, sk_sleep(sk), wait);
+ mask = 0;
+
+ /* exceptional events? */
+ if (sk->sk_err)
+ mask |= POLLERR;
+ if (sk->sk_shutdown == SHUTDOWN_MASK)
+ mask |= POLLHUP;
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ mask |= POLLRDHUP;
+
+ /* readable? */
+ if (!skb_queue_empty(&sk->sk_receive_queue) ||
+ (sk->sk_shutdown & RCV_SHUTDOWN))
+ mask |= POLLIN | POLLRDNORM;
+
+ /* Connection-based need to check for termination and startup */
+ if (sk->sk_state == CAIF_DISCONNECTED)
+ mask |= POLLHUP;
+
+ /*
+ * we set writable also when the other side has shut down the
+ * connection. This prevents stuck sockets.
+ */
+ if (sock_writeable(sk) && tx_flow_is_on(cf_sk))
+ mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+
+ return mask;
+}
+
+static const struct proto_ops caif_seqpacket_ops = {
+ .family = PF_CAIF,
+ .owner = THIS_MODULE,
+ .release = caif_release,
+ .bind = sock_no_bind,
+ .connect = caif_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .getname = sock_no_getname,
+ .poll = caif_poll,
+ .ioctl = sock_no_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .setsockopt = setsockopt,
+ .getsockopt = sock_no_getsockopt,
+ .sendmsg = caif_seqpkt_sendmsg,
+ .recvmsg = caif_seqpkt_recvmsg,
+ .mmap = sock_no_mmap,
+ .sendpage = sock_no_sendpage,
+};
+
+static const struct proto_ops caif_stream_ops = {
+ .family = PF_CAIF,
+ .owner = THIS_MODULE,
+ .release = caif_release,
+ .bind = sock_no_bind,
+ .connect = caif_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .getname = sock_no_getname,
+ .poll = caif_poll,
+ .ioctl = sock_no_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .setsockopt = setsockopt,
+ .getsockopt = sock_no_getsockopt,
+ .sendmsg = caif_stream_sendmsg,
+ .recvmsg = caif_stream_recvmsg,
+ .mmap = sock_no_mmap,
+ .sendpage = sock_no_sendpage,
+};
+
+/* This function is called when a socket is finally destroyed. */
+static void caif_sock_destructor(struct sock *sk)
+{
+ struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+ caif_assert(!atomic_read(&sk->sk_wmem_alloc));
+ caif_assert(sk_unhashed(sk));
+ caif_assert(!sk->sk_socket);
+ if (!sock_flag(sk, SOCK_DEAD)) {
+ pr_info("Attempt to release alive CAIF socket: %p\n", sk);
+ return;
+ }
+ sk_stream_kill_queues(&cf_sk->sk);
+ dbfs_atomic_dec(&cnt.caif_nr_socks);
+}
+
+static int caif_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
+{
+ struct sock *sk = NULL;
+ struct caifsock *cf_sk = NULL;
+ static struct proto prot = {.name = "PF_CAIF",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct caifsock),
+ };
+
+ if (!capable(CAP_SYS_ADMIN) && !capable(CAP_NET_ADMIN))
+ return -EPERM;
+ /*
+ * The sock->type specifies the socket type to use.
+ * The CAIF socket is a packet stream in the sense
+ * that it is packet based. CAIF trusts the reliability
+ * of the link; no resending is implemented.
+ */
+ if (sock->type == SOCK_SEQPACKET)
+ sock->ops = &caif_seqpacket_ops;
+ else if (sock->type == SOCK_STREAM)
+ sock->ops = &caif_stream_ops;
+ else
+ return -ESOCKTNOSUPPORT;
+
+ if (protocol < 0 || protocol >= CAIFPROTO_MAX)
+ return -EPROTONOSUPPORT;
+ /*
+ * Set the socket state to unconnected. The socket state
+ * is not really used by net/core or socket.c, but the
+ * initialization makes sure that sock->state is not left uninitialized.
+ */
+ sk = sk_alloc(net, PF_CAIF, GFP_KERNEL, &prot);
+ if (!sk)
+ return -ENOMEM;
+
+ cf_sk = container_of(sk, struct caifsock, sk);
+
+ /* Store the protocol */
+ sk->sk_protocol = (unsigned char) protocol;
+
+ /* Sendbuf limits the amount of outbound data not yet sent */
+ sk->sk_sndbuf = CAIF_DEF_SNDBUF;
+ sk->sk_rcvbuf = CAIF_DEF_RCVBUF;
+
+ /*
+ * Lock in order to try to stop someone from opening the socket
+ * too early.
+ */
+ lock_sock(&(cf_sk->sk));
+
+ /* Initialize the nonzero default sock structure data. */
+ sock_init_data(sock, sk);
+ sk->sk_destruct = caif_sock_destructor;
+
+ mutex_init(&cf_sk->readlock); /* single task reading lock */
+ cf_sk->layer.ctrlcmd = caif_ctrl_cb;
+ cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
+ cf_sk->sk.sk_state = CAIF_DISCONNECTED;
+
+ set_tx_flow_off(cf_sk);
+ set_rx_flow_on(cf_sk);
+
+ /* Set default options on configuration */
+ cf_sk->conn_req.priority = CAIF_PRIO_NORMAL;
+ cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
+ cf_sk->conn_req.protocol = protocol;
+ /* Increase the number of sockets created. */
+ dbfs_atomic_inc(&cnt.caif_nr_socks);
+#ifdef CONFIG_DEBUG_FS
+ if (!IS_ERR(debugfsdir)) {
+ /* Fill in some information concerning the misc socket. */
+ snprintf(cf_sk->name, sizeof(cf_sk->name), "cfsk%d",
+ atomic_read(&cnt.caif_nr_socks));
+
+ cf_sk->debugfs_socket_dir =
+ debugfs_create_dir(cf_sk->name, debugfsdir);
+ debugfs_create_u32("sk_state", S_IRUSR | S_IWUSR,
+ cf_sk->debugfs_socket_dir,
+ (u32 *) &cf_sk->sk.sk_state);
+ debugfs_create_u32("flow_state", S_IRUSR | S_IWUSR,
+ cf_sk->debugfs_socket_dir, &cf_sk->flow_state);
+ debugfs_create_u32("sk_rmem_alloc", S_IRUSR | S_IWUSR,
+ cf_sk->debugfs_socket_dir,
+ (u32 *) &cf_sk->sk.sk_rmem_alloc);
+ debugfs_create_u32("sk_wmem_alloc", S_IRUSR | S_IWUSR,
+ cf_sk->debugfs_socket_dir,
+ (u32 *) &cf_sk->sk.sk_wmem_alloc);
+ debugfs_create_u32("identity", S_IRUSR | S_IWUSR,
+ cf_sk->debugfs_socket_dir,
+ (u32 *) &cf_sk->layer.id);
+ }
+#endif
+ release_sock(&cf_sk->sk);
+ return 0;
+}
+
+
+static struct net_proto_family caif_family_ops = {
+ .family = PF_CAIF,
+ .create = caif_create,
+ .owner = THIS_MODULE,
+};
+
+int af_caif_init(void)
+{
+ int err = sock_register(&caif_family_ops);
+	/* Propagate a registration failure instead of swallowing it */
+	if (err)
+	return err;
+ return 0;
+}
+
+static int __init caif_sktinit_module(void)
+{
+#ifdef CONFIG_DEBUG_FS
+ debugfsdir = debugfs_create_dir("caif_sk", NULL);
+ if (!IS_ERR(debugfsdir)) {
+ debugfs_create_u32("num_sockets", S_IRUSR | S_IWUSR,
+ debugfsdir,
+ (u32 *) &cnt.caif_nr_socks);
+ debugfs_create_u32("num_connect_req", S_IRUSR | S_IWUSR,
+ debugfsdir,
+ (u32 *) &cnt.num_connect_req);
+ debugfs_create_u32("num_connect_resp", S_IRUSR | S_IWUSR,
+ debugfsdir,
+ (u32 *) &cnt.num_connect_resp);
+ debugfs_create_u32("num_connect_fail_resp", S_IRUSR | S_IWUSR,
+ debugfsdir,
+ (u32 *) &cnt.num_connect_fail_resp);
+ debugfs_create_u32("num_disconnect", S_IRUSR | S_IWUSR,
+ debugfsdir,
+ (u32 *) &cnt.num_disconnect);
+ debugfs_create_u32("num_remote_shutdown_ind",
+ S_IRUSR | S_IWUSR, debugfsdir,
+ (u32 *) &cnt.num_remote_shutdown_ind);
+ debugfs_create_u32("num_tx_flow_off_ind", S_IRUSR | S_IWUSR,
+ debugfsdir,
+ (u32 *) &cnt.num_tx_flow_off_ind);
+ debugfs_create_u32("num_tx_flow_on_ind", S_IRUSR | S_IWUSR,
+ debugfsdir,
+ (u32 *) &cnt.num_tx_flow_on_ind);
+ debugfs_create_u32("num_rx_flow_off", S_IRUSR | S_IWUSR,
+ debugfsdir,
+ (u32 *) &cnt.num_rx_flow_off);
+ debugfs_create_u32("num_rx_flow_on", S_IRUSR | S_IWUSR,
+ debugfsdir,
+ (u32 *) &cnt.num_rx_flow_on);
+ }
+#endif
+ return af_caif_init();
+}
+
+static void __exit caif_sktexit_module(void)
+{
+ sock_unregister(PF_CAIF);
+ if (debugfsdir != NULL)
+ debugfs_remove_recursive(debugfsdir);
+}
+module_init(caif_sktinit_module);
+module_exit(caif_sktexit_module);
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
new file mode 100644
index 000000000000..df43f264d9fb
--- /dev/null
+++ b/net/caif/cfcnfg.c
@@ -0,0 +1,470 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+#include <linux/slab.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfpkt.h>
+#include <net/caif/cfcnfg.h>
+#include <net/caif/cfctrl.h>
+#include <net/caif/cfmuxl.h>
+#include <net/caif/cffrml.h>
+#include <net/caif/cfserl.h>
+#include <net/caif/cfsrvl.h>
+
+#include <linux/module.h>
+#include <asm/atomic.h>
+
+#define MAX_PHY_LAYERS 7
+#define PHY_NAME_LEN 20
+
+#define container_obj(layr) container_of(layr, struct cfcnfg, layer)
+
+/* Information about CAIF physical interfaces, held by the config module
+ * in order to manage them.
+ */
+struct cfcnfg_phyinfo {
+ /* Pointer to the layer below the MUX (framing layer) */
+ struct cflayer *frm_layer;
+ /* Pointer to the lowest actual physical layer */
+ struct cflayer *phy_layer;
+ /* Unique identifier of the physical interface */
+ unsigned int id;
+ /* Preference of the physical interface */
+ enum cfcnfg_phy_preference pref;
+
+ /* Reference count, number of channels using the device */
+ int phy_ref_count;
+
+ /* Information about the physical device */
+ struct dev_info dev_info;
+};
+
+struct cfcnfg {
+ struct cflayer layer;
+ struct cflayer *ctrl;
+ struct cflayer *mux;
+ u8 last_phyid;
+ struct cfcnfg_phyinfo phy_layers[MAX_PHY_LAYERS];
+};
+
+static void cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id,
+ enum cfctrl_srv serv, u8 phyid,
+ struct cflayer *adapt_layer);
+static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id);
+static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id,
+ struct cflayer *adapt_layer);
+static void cfctrl_resp_func(void);
+static void cfctrl_enum_resp(void);
+
+struct cfcnfg *cfcnfg_create(void)
+{
+ struct cfcnfg *this;
+ struct cfctrl_rsp *resp;
+ /* Initiate this layer */
+ this = kzalloc(sizeof(struct cfcnfg), GFP_ATOMIC);
+ if (!this) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return NULL;
+ }
+ this->mux = cfmuxl_create();
+ if (!this->mux)
+ goto out_of_mem;
+ this->ctrl = cfctrl_create();
+ if (!this->ctrl)
+ goto out_of_mem;
+ /* Initiate response functions */
+ resp = cfctrl_get_respfuncs(this->ctrl);
+ resp->enum_rsp = cfctrl_enum_resp;
+ resp->linkerror_ind = cfctrl_resp_func;
+ resp->linkdestroy_rsp = cfcnfg_linkdestroy_rsp;
+ resp->sleep_rsp = cfctrl_resp_func;
+ resp->wake_rsp = cfctrl_resp_func;
+ resp->restart_rsp = cfctrl_resp_func;
+ resp->radioset_rsp = cfctrl_resp_func;
+ resp->linksetup_rsp = cfcnfg_linkup_rsp;
+ resp->reject_rsp = cfcnfg_reject_rsp;
+
+ this->last_phyid = 1;
+
+ cfmuxl_set_uplayer(this->mux, this->ctrl, 0);
+ layer_set_dn(this->ctrl, this->mux);
+ layer_set_up(this->ctrl, this);
+ return this;
+out_of_mem:
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ kfree(this->mux);
+ kfree(this->ctrl);
+ kfree(this);
+ return NULL;
+}
+EXPORT_SYMBOL(cfcnfg_create);
+
+void cfcnfg_remove(struct cfcnfg *cfg)
+{
+ if (cfg) {
+ kfree(cfg->mux);
+ kfree(cfg->ctrl);
+ kfree(cfg);
+ }
+}
+
+static void cfctrl_resp_func(void)
+{
+}
+
+static void cfctrl_enum_resp(void)
+{
+}
+
+struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg,
+ enum cfcnfg_phy_preference phy_pref)
+{
+ u16 i;
+
+ /* Try to match with specified preference */
+ for (i = 1; i < MAX_PHY_LAYERS; i++) {
+ if (cnfg->phy_layers[i].id == i &&
+ cnfg->phy_layers[i].pref == phy_pref &&
+ cnfg->phy_layers[i].frm_layer != NULL) {
+ caif_assert(cnfg->phy_layers != NULL);
+ caif_assert(cnfg->phy_layers[i].id == i);
+ return &cnfg->phy_layers[i].dev_info;
+ }
+ }
+ /* Otherwise just return something */
+ for (i = 1; i < MAX_PHY_LAYERS; i++) {
+ if (cnfg->phy_layers[i].id == i) {
+ caif_assert(cnfg->phy_layers != NULL);
+ caif_assert(cnfg->phy_layers[i].id == i);
+ return &cnfg->phy_layers[i].dev_info;
+ }
+ }
+
+ return NULL;
+}
+
+static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo(struct cfcnfg *cnfg,
+ u8 phyid)
+{
+ int i;
+ /* Try to match with specified preference */
+ for (i = 0; i < MAX_PHY_LAYERS; i++)
+ if (cnfg->phy_layers[i].frm_layer != NULL &&
+ cnfg->phy_layers[i].id == phyid)
+ return &cnfg->phy_layers[i];
+ return NULL;
+}
+
+int cfcnfg_get_named(struct cfcnfg *cnfg, char *name)
+{
+ int i;
+
+ /* Try to match with specified name */
+ for (i = 0; i < MAX_PHY_LAYERS; i++) {
+ if (cnfg->phy_layers[i].frm_layer != NULL
+ && strcmp(cnfg->phy_layers[i].phy_layer->name,
+ name) == 0)
+ return cnfg->phy_layers[i].frm_layer->id;
+ }
+ return 0;
+}
+
+int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
+{
+ u8 channel_id = 0;
+ int ret = 0;
+ struct cflayer *servl = NULL;
+ struct cfcnfg_phyinfo *phyinfo = NULL;
+ u8 phyid = 0;
+ caif_assert(adap_layer != NULL);
+ channel_id = adap_layer->id;
+ if (adap_layer->dn == NULL || channel_id == 0) {
+ pr_err("CAIF: %s():adap_layer->id is 0\n", __func__);
+ ret = -ENOTCONN;
+ goto end;
+ }
+ servl = cfmuxl_remove_uplayer(cnfg->mux, channel_id);
+ if (servl == NULL)
+ goto end;
+ layer_set_up(servl, NULL);
+ ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer);
+ caif_assert(channel_id == servl->id);
+ if (adap_layer->dn != NULL) {
+ phyid = cfsrvl_getphyid(adap_layer->dn);
+
+ phyinfo = cfcnfg_get_phyinfo(cnfg, phyid);
+ if (phyinfo == NULL) {
+ pr_warning("CAIF: %s(): "
+ "No interface to send disconnect to\n",
+ __func__);
+ ret = -ENODEV;
+ goto end;
+ }
+ if (phyinfo->id != phyid ||
+ phyinfo->phy_layer->id != phyid ||
+ phyinfo->frm_layer->id != phyid) {
+ pr_err("CAIF: %s(): "
+ "Inconsistency in phy registration\n",
+ __func__);
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+ if (phyinfo != NULL && --phyinfo->phy_ref_count == 0 &&
+ phyinfo->phy_layer != NULL &&
+ phyinfo->phy_layer->modemcmd != NULL) {
+ phyinfo->phy_layer->modemcmd(phyinfo->phy_layer,
+ _CAIF_MODEMCMD_PHYIF_USELESS);
+ }
+end:
+	if (servl != NULL)
+	cfsrvl_put(servl);
+ cfctrl_cancel_req(cnfg->ctrl, adap_layer);
+ if (adap_layer->ctrlcmd != NULL)
+ adap_layer->ctrlcmd(adap_layer, CAIF_CTRLCMD_DEINIT_RSP, 0);
+ return ret;
+
+}
+EXPORT_SYMBOL(cfcnfg_disconn_adapt_layer);
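+
+/*
+ * Teardown above proceeds top-down: the service layer is unhooked from
+ * the mux, a link-down request is sent, the physical layer's reference
+ * count is dropped (signalling _CAIF_MODEMCMD_PHYIF_USELESS when it
+ * reaches zero), and finally the client is told CAIF_CTRLCMD_DEINIT_RSP.
+ */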
+
+void cfcnfg_release_adap_layer(struct cflayer *adap_layer)
+{
+ if (adap_layer->dn)
+ cfsrvl_put(adap_layer->dn);
+}
+EXPORT_SYMBOL(cfcnfg_release_adap_layer);
+
+static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id)
+{
+}
+
+int cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg,
+ struct cfctrl_link_param *param,
+ struct cflayer *adap_layer)
+{
+ struct cflayer *frml;
+ if (adap_layer == NULL) {
+ pr_err("CAIF: %s(): adap_layer is zero", __func__);
+ return -EINVAL;
+ }
+ if (adap_layer->receive == NULL) {
+ pr_err("CAIF: %s(): adap_layer->receive is NULL", __func__);
+ return -EINVAL;
+ }
+ if (adap_layer->ctrlcmd == NULL) {
+ pr_err("CAIF: %s(): adap_layer->ctrlcmd == NULL", __func__);
+ return -EINVAL;
+ }
+ frml = cnfg->phy_layers[param->phyid].frm_layer;
+ if (frml == NULL) {
+ pr_err("CAIF: %s(): Specified PHY type does not exist!",
+ __func__);
+ return -ENODEV;
+ }
+ caif_assert(param->phyid == cnfg->phy_layers[param->phyid].id);
+ caif_assert(cnfg->phy_layers[param->phyid].frm_layer->id ==
+ param->phyid);
+ caif_assert(cnfg->phy_layers[param->phyid].phy_layer->id ==
+ param->phyid);
+ /* FIXME: ENUMERATE INITIALLY WHEN ACTIVATING PHYSICAL INTERFACE */
+ cfctrl_enum_req(cnfg->ctrl, param->phyid);
+ return cfctrl_linkup_request(cnfg->ctrl, param, adap_layer);
+}
+EXPORT_SYMBOL(cfcnfg_add_adaptation_layer);
+
+static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id,
+ struct cflayer *adapt_layer)
+{
+ if (adapt_layer != NULL && adapt_layer->ctrlcmd != NULL)
+ adapt_layer->ctrlcmd(adapt_layer,
+ CAIF_CTRLCMD_INIT_FAIL_RSP, 0);
+}
+
+static void
+cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
+ u8 phyid, struct cflayer *adapt_layer)
+{
+ struct cfcnfg *cnfg = container_obj(layer);
+ struct cflayer *servicel = NULL;
+ struct cfcnfg_phyinfo *phyinfo;
+ if (adapt_layer == NULL) {
+ pr_debug("CAIF: %s(): link setup response "
+ "but no client exists, sending linkdown back\n",
+ __func__);
+ cfctrl_linkdown_req(cnfg->ctrl, channel_id, NULL);
+ return;
+ }
+
+ caif_assert(cnfg != NULL);
+ caif_assert(phyid != 0);
+ phyinfo = &cnfg->phy_layers[phyid];
+ caif_assert(phyinfo != NULL);
+ caif_assert(phyinfo->id == phyid);
+ caif_assert(phyinfo->phy_layer != NULL);
+ caif_assert(phyinfo->phy_layer->id == phyid);
+
+ if (phyinfo != NULL &&
+ phyinfo->phy_ref_count++ == 0 &&
+ phyinfo->phy_layer != NULL &&
+ phyinfo->phy_layer->modemcmd != NULL) {
+ caif_assert(phyinfo->phy_layer->id == phyid);
+ phyinfo->phy_layer->modemcmd(phyinfo->phy_layer,
+ _CAIF_MODEMCMD_PHYIF_USEFULL);
+
+ }
+ adapt_layer->id = channel_id;
+
+ switch (serv) {
+ case CFCTRL_SRV_VEI:
+ servicel = cfvei_create(channel_id, &phyinfo->dev_info);
+ break;
+ case CFCTRL_SRV_DATAGRAM:
+ servicel = cfdgml_create(channel_id, &phyinfo->dev_info);
+ break;
+ case CFCTRL_SRV_RFM:
+ servicel = cfrfml_create(channel_id, &phyinfo->dev_info);
+ break;
+ case CFCTRL_SRV_UTIL:
+ servicel = cfutill_create(channel_id, &phyinfo->dev_info);
+ break;
+ case CFCTRL_SRV_VIDEO:
+ servicel = cfvidl_create(channel_id, &phyinfo->dev_info);
+ break;
+ case CFCTRL_SRV_DBG:
+ servicel = cfdbgl_create(channel_id, &phyinfo->dev_info);
+ break;
+ default:
+ pr_err("CAIF: %s(): Protocol error. "
+ "Link setup response - unknown channel type\n",
+ __func__);
+ return;
+ }
+ if (!servicel) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return;
+ }
+ layer_set_dn(servicel, cnfg->mux);
+ cfmuxl_set_uplayer(cnfg->mux, servicel, channel_id);
+ layer_set_up(servicel, adapt_layer);
+ layer_set_dn(adapt_layer, servicel);
+ cfsrvl_get(servicel);
+ servicel->ctrlcmd(servicel, CAIF_CTRLCMD_INIT_RSP, 0);
+}
+
+void
+cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
+ void *dev, struct cflayer *phy_layer, u16 *phyid,
+ enum cfcnfg_phy_preference pref,
+ bool fcs, bool stx)
+{
+ struct cflayer *frml;
+ struct cflayer *phy_driver = NULL;
+ int i;
+
+
+ if (cnfg->phy_layers[cnfg->last_phyid].frm_layer == NULL) {
+ *phyid = cnfg->last_phyid;
+
+ /* range: 1..(MAX_PHY_LAYERS-1) */
+ cnfg->last_phyid =
+ (cnfg->last_phyid % (MAX_PHY_LAYERS - 1)) + 1;
+ } else {
+ *phyid = 0;
+ for (i = 1; i < MAX_PHY_LAYERS; i++) {
+ if (cnfg->phy_layers[i].frm_layer == NULL) {
+ *phyid = i;
+ break;
+ }
+ }
+ }
+ if (*phyid == 0) {
+ pr_err("CAIF: %s(): No Available PHY ID\n", __func__);
+ return;
+ }
+
+ switch (phy_type) {
+ case CFPHYTYPE_FRAG:
+ phy_driver =
+ cfserl_create(CFPHYTYPE_FRAG, *phyid, stx);
+ if (!phy_driver) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return;
+ }
+
+ break;
+ case CFPHYTYPE_CAIF:
+ phy_driver = NULL;
+ break;
+ default:
+ pr_err("CAIF: %s(): unknown physical layer type %d\n",
+ __func__, phy_type);
+ return;
+ }
+
+ phy_layer->id = *phyid;
+ cnfg->phy_layers[*phyid].pref = pref;
+ cnfg->phy_layers[*phyid].id = *phyid;
+ cnfg->phy_layers[*phyid].dev_info.id = *phyid;
+ cnfg->phy_layers[*phyid].dev_info.dev = dev;
+ cnfg->phy_layers[*phyid].phy_layer = phy_layer;
+ cnfg->phy_layers[*phyid].phy_ref_count = 0;
+ phy_layer->type = phy_type;
+ frml = cffrml_create(*phyid, fcs);
+ if (!frml) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return;
+ }
+ cnfg->phy_layers[*phyid].frm_layer = frml;
+ cfmuxl_set_dnlayer(cnfg->mux, frml, *phyid);
+ layer_set_up(frml, cnfg->mux);
+
+ if (phy_driver != NULL) {
+ phy_driver->id = *phyid;
+ layer_set_dn(frml, phy_driver);
+ layer_set_up(phy_driver, frml);
+ layer_set_dn(phy_driver, phy_layer);
+ layer_set_up(phy_layer, phy_driver);
+ } else {
+ layer_set_dn(frml, phy_layer);
+ layer_set_up(phy_layer, frml);
+ }
+}
+EXPORT_SYMBOL(cfcnfg_add_phy_layer);
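+
+/*
+ * Illustrative registration (a sketch mirroring caif_dev.c above): a
+ * link layer driver fills in a struct cflayer with transmit/modemcmd
+ * callbacks and calls
+ *
+ *   cfcnfg_add_phy_layer(get_caif_conf(), CFPHYTYPE_FRAG, dev,
+ *                        &caifd->layer, &caifd->phyid,
+ *                        CFPHYPREF_LOW_LAT, use_fcs, use_stx);
+ *
+ * after which frames received on the interface can be delivered up
+ * through the framing layer.
+ */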
+
+int cfcnfg_del_phy_layer(struct cfcnfg *cnfg, struct cflayer *phy_layer)
+{
+ struct cflayer *frml, *frml_dn;
+ u16 phyid;
+ phyid = phy_layer->id;
+ caif_assert(phyid == cnfg->phy_layers[phyid].id);
+ caif_assert(phy_layer == cnfg->phy_layers[phyid].phy_layer);
+ caif_assert(phy_layer->id == phyid);
+ caif_assert(cnfg->phy_layers[phyid].frm_layer->id == phyid);
+
+ memset(&cnfg->phy_layers[phy_layer->id], 0,
+ sizeof(struct cfcnfg_phyinfo));
+ frml = cfmuxl_remove_dnlayer(cnfg->mux, phy_layer->id);
+ frml_dn = frml->dn;
+ cffrml_set_uplayer(frml, NULL);
+ cffrml_set_dnlayer(frml, NULL);
+ kfree(frml);
+
+ if (phy_layer != frml_dn) {
+ layer_set_up(frml_dn, NULL);
+ layer_set_dn(frml_dn, NULL);
+ kfree(frml_dn);
+ }
+ layer_set_up(phy_layer, NULL);
+ return 0;
+}
+EXPORT_SYMBOL(cfcnfg_del_phy_layer);
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
new file mode 100644
index 000000000000..0ffe1e1ce901
--- /dev/null
+++ b/net/caif/cfctrl.c
@@ -0,0 +1,692 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/stddef.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfpkt.h>
+#include <net/caif/cfctrl.h>
+
+#define container_obj(layr) container_of(layr, struct cfctrl, serv.layer)
+#define UTILITY_NAME_LENGTH 16
+#define CFPKT_CTRL_PKT_LEN 20
+
+
+#ifdef CAIF_NO_LOOP
+static int handle_loop(struct cfctrl *ctrl,
+ int cmd, struct cfpkt *pkt){
+ return CAIF_FAILURE;
+}
+#else
+static int handle_loop(struct cfctrl *ctrl,
+ int cmd, struct cfpkt *pkt);
+#endif
+static int cfctrl_recv(struct cflayer *layr, struct cfpkt *pkt);
+static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+ int phyid);
+
+
+struct cflayer *cfctrl_create(void)
+{
+ struct dev_info dev_info;
+ struct cfctrl *this =
+	kzalloc(sizeof(struct cfctrl), GFP_ATOMIC);
+ if (!this) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return NULL;
+ }
+ caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
+ memset(&dev_info, 0, sizeof(dev_info));
+ dev_info.id = 0xff;
+ cfsrvl_init(&this->serv, 0, &dev_info);
+ spin_lock_init(&this->info_list_lock);
+ atomic_set(&this->req_seq_no, 1);
+ atomic_set(&this->rsp_seq_no, 1);
+ this->serv.layer.receive = cfctrl_recv;
+ sprintf(this->serv.layer.name, "ctrl");
+ this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
+ spin_lock_init(&this->loop_linkid_lock);
+ this->loop_linkid = 1;
+ return &this->serv.layer;
+}
+
+static bool param_eq(struct cfctrl_link_param *p1, struct cfctrl_link_param *p2)
+{
+ bool eq =
+ p1->linktype == p2->linktype &&
+ p1->priority == p2->priority &&
+ p1->phyid == p2->phyid &&
+ p1->endpoint == p2->endpoint && p1->chtype == p2->chtype;
+
+ if (!eq)
+ return false;
+
+ switch (p1->linktype) {
+ case CFCTRL_SRV_VEI:
+ return true;
+ case CFCTRL_SRV_DATAGRAM:
+ return p1->u.datagram.connid == p2->u.datagram.connid;
+ case CFCTRL_SRV_RFM:
+ return
+ p1->u.rfm.connid == p2->u.rfm.connid &&
+ strcmp(p1->u.rfm.volume, p2->u.rfm.volume) == 0;
+ case CFCTRL_SRV_UTIL:
+ return
+ p1->u.utility.fifosize_kb == p2->u.utility.fifosize_kb
+ && p1->u.utility.fifosize_bufs ==
+ p2->u.utility.fifosize_bufs
+ && strcmp(p1->u.utility.name, p2->u.utility.name) == 0
+ && p1->u.utility.paramlen == p2->u.utility.paramlen
+ && memcmp(p1->u.utility.params, p2->u.utility.params,
+ p1->u.utility.paramlen) == 0;
+
+ case CFCTRL_SRV_VIDEO:
+ return p1->u.video.connid == p2->u.video.connid;
+ case CFCTRL_SRV_DBG:
+ return true;
+ case CFCTRL_SRV_DECM:
+ return false;
+ default:
+ return false;
+ }
+ return false;
+}
+
+bool cfctrl_req_eq(struct cfctrl_request_info *r1,
+ struct cfctrl_request_info *r2)
+{
+ if (r1->cmd != r2->cmd)
+ return false;
+ if (r1->cmd == CFCTRL_CMD_LINK_SETUP)
+ return param_eq(&r1->param, &r2->param);
+ else
+ return r1->channel_id == r2->channel_id;
+}
+
+/* Insert request at the end */
+void cfctrl_insert_req(struct cfctrl *ctrl,
+ struct cfctrl_request_info *req)
+{
+ struct cfctrl_request_info *p;
+ spin_lock(&ctrl->info_list_lock);
+ req->next = NULL;
+ atomic_inc(&ctrl->req_seq_no);
+ req->sequence_no = atomic_read(&ctrl->req_seq_no);
+ if (ctrl->first_req == NULL) {
+ ctrl->first_req = req;
+ spin_unlock(&ctrl->info_list_lock);
+ return;
+ }
+ p = ctrl->first_req;
+ while (p->next != NULL)
+ p = p->next;
+ p->next = req;
+ spin_unlock(&ctrl->info_list_lock);
+}
+
+/* Compare and remove request */
+struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
+ struct cfctrl_request_info *req)
+{
+ struct cfctrl_request_info *p;
+ struct cfctrl_request_info *ret;
+
+ spin_lock(&ctrl->info_list_lock);
+ if (ctrl->first_req == NULL) {
+ spin_unlock(&ctrl->info_list_lock);
+ return NULL;
+ }
+
+ if (cfctrl_req_eq(req, ctrl->first_req)) {
+ ret = ctrl->first_req;
+ caif_assert(ctrl->first_req);
+ atomic_set(&ctrl->rsp_seq_no,
+ ctrl->first_req->sequence_no);
+ ctrl->first_req = ctrl->first_req->next;
+ spin_unlock(&ctrl->info_list_lock);
+ return ret;
+ }
+
+ p = ctrl->first_req;
+
+ while (p->next != NULL) {
+ if (cfctrl_req_eq(req, p->next)) {
+ pr_warning("CAIF: %s(): Responses are not "
+ "received in order\n",
+ __func__);
+ ret = p->next;
+ atomic_set(&ctrl->rsp_seq_no,
+ p->next->sequence_no);
+ p->next = p->next->next;
+ spin_unlock(&ctrl->info_list_lock);
+ return ret;
+ }
+ p = p->next;
+ }
+ spin_unlock(&ctrl->info_list_lock);
+
+ pr_warning("CAIF: %s(): Request does not match\n",
+ __func__);
+ return NULL;
+}
+
+struct cfctrl_rsp *cfctrl_get_respfuncs(struct cflayer *layer)
+{
+ struct cfctrl *this = container_obj(layer);
+ return &this->res;
+}
+
+void cfctrl_set_dnlayer(struct cflayer *this, struct cflayer *dn)
+{
+ this->dn = dn;
+}
+
+void cfctrl_set_uplayer(struct cflayer *this, struct cflayer *up)
+{
+ this->up = up;
+}
+
+static void init_info(struct caif_payload_info *info, struct cfctrl *cfctrl)
+{
+ info->hdr_len = 0;
+ info->channel_id = cfctrl->serv.layer.id;
+ info->dev_info = &cfctrl->serv.dev_info;
+}
+
+void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid)
+{
+ struct cfctrl *cfctrl = container_obj(layer);
+ int ret;
+ struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
+ if (!pkt) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return;
+ }
+ caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
+ init_info(cfpkt_info(pkt), cfctrl);
+ cfpkt_info(pkt)->dev_info->id = physlinkid;
+ cfctrl->serv.dev_info.id = physlinkid;
+ cfpkt_addbdy(pkt, CFCTRL_CMD_ENUM);
+ cfpkt_addbdy(pkt, physlinkid);
+ ret =
+ cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
+ if (ret < 0) {
+ pr_err("CAIF: %s(): Could not transmit enum message\n",
+ __func__);
+ cfpkt_destroy(pkt);
+ }
+}
+
+int cfctrl_linkup_request(struct cflayer *layer,
+ struct cfctrl_link_param *param,
+ struct cflayer *user_layer)
+{
+ struct cfctrl *cfctrl = container_obj(layer);
+ u32 tmp32;
+ u16 tmp16;
+ u8 tmp8;
+ struct cfctrl_request_info *req;
+ int ret;
+ char utility_name[16];
+ struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
+ if (!pkt) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return -ENOMEM;
+ }
+ cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_SETUP);
+ cfpkt_addbdy(pkt, (param->chtype << 4) + param->linktype);
+ cfpkt_addbdy(pkt, (param->priority << 3) + param->phyid);
+ cfpkt_addbdy(pkt, param->endpoint & 0x03);
+
+ switch (param->linktype) {
+ case CFCTRL_SRV_VEI:
+ break;
+ case CFCTRL_SRV_VIDEO:
+ cfpkt_addbdy(pkt, (u8) param->u.video.connid);
+ break;
+ case CFCTRL_SRV_DBG:
+ break;
+ case CFCTRL_SRV_DATAGRAM:
+ tmp32 = cpu_to_le32(param->u.datagram.connid);
+ cfpkt_add_body(pkt, &tmp32, 4);
+ break;
+ case CFCTRL_SRV_RFM:
+		/* Convert DatagramConnectionID to little-endian
+		 * format (matching cpu_to_le32 below) and copy it out...
+		 */
+ tmp32 = cpu_to_le32(param->u.rfm.connid);
+ cfpkt_add_body(pkt, &tmp32, 4);
+ /* Add volume name, including zero termination... */
+ cfpkt_add_body(pkt, param->u.rfm.volume,
+ strlen(param->u.rfm.volume) + 1);
+ break;
+ case CFCTRL_SRV_UTIL:
+ tmp16 = cpu_to_le16(param->u.utility.fifosize_kb);
+ cfpkt_add_body(pkt, &tmp16, 2);
+ tmp16 = cpu_to_le16(param->u.utility.fifosize_bufs);
+ cfpkt_add_body(pkt, &tmp16, 2);
+ memset(utility_name, 0, sizeof(utility_name));
+ strncpy(utility_name, param->u.utility.name,
+ UTILITY_NAME_LENGTH - 1);
+ cfpkt_add_body(pkt, utility_name, UTILITY_NAME_LENGTH);
+ tmp8 = param->u.utility.paramlen;
+ cfpkt_add_body(pkt, &tmp8, 1);
+ cfpkt_add_body(pkt, param->u.utility.params,
+ param->u.utility.paramlen);
+ break;
+ default:
+ pr_warning("CAIF: %s(): Request setup of bad link type = %d\n",
+ __func__, param->linktype);
+ /* Avoid leaking the packet on the error path */
+ cfpkt_destroy(pkt);
+ return -EINVAL;
+ }
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ cfpkt_destroy(pkt);
+ return -ENOMEM;
+ }
+ req->client_layer = user_layer;
+ req->cmd = CFCTRL_CMD_LINK_SETUP;
+ req->param = *param;
+ cfctrl_insert_req(cfctrl, req);
+ init_info(cfpkt_info(pkt), cfctrl);
+ /*
+ * NOTE: Always send linkup and linkdown request on the same
+ * device as the payload. Otherwise old queued up payload
+ * might arrive with the newly allocated channel ID.
+ */
+ cfpkt_info(pkt)->dev_info->id = param->phyid;
+ ret =
+ cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
+ if (ret < 0) {
+ pr_err("CAIF: %s(): Could not transmit linksetup request\n",
+ __func__);
+ cfpkt_destroy(pkt);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
+ struct cflayer *client)
+{
+ int ret;
+ struct cfctrl *cfctrl = container_obj(layer);
+ struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
+ if (!pkt) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return -ENOMEM;
+ }
+ cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY);
+ cfpkt_addbdy(pkt, channelid);
+ init_info(cfpkt_info(pkt), cfctrl);
+ ret =
+ cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
+ if (ret < 0) {
+ pr_err("CAIF: %s(): Could not transmit link-down request\n",
+ __func__);
+ cfpkt_destroy(pkt);
+ }
+ return ret;
+}
+
+void cfctrl_sleep_req(struct cflayer *layer)
+{
+ int ret;
+ struct cfctrl *cfctrl = container_obj(layer);
+ struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
+ if (!pkt) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return;
+ }
+ cfpkt_addbdy(pkt, CFCTRL_CMD_SLEEP);
+ init_info(cfpkt_info(pkt), cfctrl);
+ ret =
+ cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
+ if (ret < 0)
+ cfpkt_destroy(pkt);
+}
+
+void cfctrl_wake_req(struct cflayer *layer)
+{
+ int ret;
+ struct cfctrl *cfctrl = container_obj(layer);
+ struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
+ if (!pkt) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return;
+ }
+ cfpkt_addbdy(pkt, CFCTRL_CMD_WAKE);
+ init_info(cfpkt_info(pkt), cfctrl);
+ ret =
+ cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
+ if (ret < 0)
+ cfpkt_destroy(pkt);
+}
+
+void cfctrl_getstartreason_req(struct cflayer *layer)
+{
+ int ret;
+ struct cfctrl *cfctrl = container_obj(layer);
+ struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
+ if (!pkt) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return;
+ }
+ cfpkt_addbdy(pkt, CFCTRL_CMD_START_REASON);
+ init_info(cfpkt_info(pkt), cfctrl);
+ ret =
+ cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
+ if (ret < 0)
+ cfpkt_destroy(pkt);
+}
+
+
+void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer)
+{
+ struct cfctrl_request_info *p, *req;
+ struct cfctrl *ctrl = container_obj(layr);
+ spin_lock(&ctrl->info_list_lock);
+
+ if (ctrl->first_req == NULL) {
+ spin_unlock(&ctrl->info_list_lock);
+ return;
+ }
+
+ if (ctrl->first_req->client_layer == adap_layer) {
+
+ req = ctrl->first_req;
+ ctrl->first_req = ctrl->first_req->next;
+ kfree(req);
+ }
+
+ p = ctrl->first_req;
+ while (p != NULL && p->next != NULL) {
+ if (p->next->client_layer == adap_layer) {
+
+ req = p->next;
+ p->next = p->next->next;
+ kfree(req);
+ }
+ p = p->next;
+ }
+
+ spin_unlock(&ctrl->info_list_lock);
+}
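
The cancellation walk above special-cases the list head and then scans node by node; the crucial detail (fixed here) is that each matching node is unlinked first and the saved pointer is what gets freed. A minimal userspace sketch of the same removal, using the pointer-to-pointer idiom instead of a head special case (names are illustrative, not CAIF symbols):

    #include <stdlib.h>

    struct req {
            struct req *next;
            void *client_layer;
    };

    /* Free every pending request owned by 'client'. Taking the head by
     * reference lets the first node be unlinked like any other node. */
    static void cancel_reqs(struct req **headp, void *client)
    {
            struct req **pp = headp;

            while (*pp != NULL) {
                    struct req *cur = *pp;

                    if (cur->client_layer == client) {
                            *pp = cur->next;  /* unlink before freeing */
                            free(cur);        /* free the saved pointer */
                    } else {
                            pp = &cur->next;
                    }
            }
    }
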
+
+static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
+{
+ u8 cmdrsp;
+ u8 cmd;
+ int ret = -1;
+ u16 tmp16;
+ u8 len;
+ u8 param[255];
+ u8 linkid;
+ struct cfctrl *cfctrl = container_obj(layer);
+ struct cfctrl_request_info rsp, *req;
+
+
+ cfpkt_extr_head(pkt, &cmdrsp, 1);
+ cmd = cmdrsp & CFCTRL_CMD_MASK;
+ if (cmd != CFCTRL_CMD_LINK_ERR
+ && CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp)) {
+ if (handle_loop(cfctrl, cmd, pkt) == CAIF_FAILURE)
+ cmdrsp |= CFCTRL_ERR_BIT;
+ }
+
+ switch (cmd) {
+ case CFCTRL_CMD_LINK_SETUP:
+ {
+ enum cfctrl_srv serv;
+ enum cfctrl_srv servtype;
+ u8 endpoint;
+ u8 physlinkid;
+ u8 prio;
+ u8 tmp;
+ u32 tmp32;
+ u8 *cp;
+ int i;
+ struct cfctrl_link_param linkparam;
+ memset(&linkparam, 0, sizeof(linkparam));
+
+ cfpkt_extr_head(pkt, &tmp, 1);
+
+ serv = tmp & CFCTRL_SRV_MASK;
+ linkparam.linktype = serv;
+
+ servtype = tmp >> 4;
+ linkparam.chtype = servtype;
+
+ cfpkt_extr_head(pkt, &tmp, 1);
+ physlinkid = tmp & 0x07;
+ prio = tmp >> 3;
+
+ linkparam.priority = prio;
+ linkparam.phyid = physlinkid;
+ cfpkt_extr_head(pkt, &endpoint, 1);
+ linkparam.endpoint = endpoint & 0x03;
+
+ switch (serv) {
+ case CFCTRL_SRV_VEI:
+ case CFCTRL_SRV_DBG:
+ if (CFCTRL_ERR_BIT & cmdrsp)
+ break;
+ /* Link ID */
+ cfpkt_extr_head(pkt, &linkid, 1);
+ break;
+ case CFCTRL_SRV_VIDEO:
+ cfpkt_extr_head(pkt, &tmp, 1);
+ linkparam.u.video.connid = tmp;
+ if (CFCTRL_ERR_BIT & cmdrsp)
+ break;
+ /* Link ID */
+ cfpkt_extr_head(pkt, &linkid, 1);
+ break;
+
+ case CFCTRL_SRV_DATAGRAM:
+ cfpkt_extr_head(pkt, &tmp32, 4);
+ linkparam.u.datagram.connid =
+ le32_to_cpu(tmp32);
+ if (CFCTRL_ERR_BIT & cmdrsp)
+ break;
+ /* Link ID */
+ cfpkt_extr_head(pkt, &linkid, 1);
+ break;
+ case CFCTRL_SRV_RFM:
+ /* Extract the DatagramConnectionID (little-endian,
+ * per le32_to_cpu below) and the volume name that
+ * follows it.
+ */
+ cfpkt_extr_head(pkt, &tmp32, 4);
+ linkparam.u.rfm.connid =
+ le32_to_cpu(tmp32);
+ cp = (u8 *) linkparam.u.rfm.volume;
+ for (cfpkt_extr_head(pkt, &tmp, 1);
+ cfpkt_more(pkt) && tmp != '\0';
+ cfpkt_extr_head(pkt, &tmp, 1))
+ *cp++ = tmp;
+ *cp = '\0';
+
+ if (CFCTRL_ERR_BIT & cmdrsp)
+ break;
+ /* Link ID */
+ cfpkt_extr_head(pkt, &linkid, 1);
+
+ break;
+ case CFCTRL_SRV_UTIL:
+ /* Extract the fifo sizes, the utility name and any
+ * initial parameter data for the utility link.
+ */
+ /* Fifosize KB */
+ cfpkt_extr_head(pkt, &tmp16, 2);
+ linkparam.u.utility.fifosize_kb =
+ le16_to_cpu(tmp16);
+ /* Fifosize bufs */
+ cfpkt_extr_head(pkt, &tmp16, 2);
+ linkparam.u.utility.fifosize_bufs =
+ le16_to_cpu(tmp16);
+ /* name */
+ cp = (u8 *) linkparam.u.utility.name;
+ caif_assert(sizeof(linkparam.u.utility.name)
+ >= UTILITY_NAME_LENGTH);
+ for (i = 0;
+ i < UTILITY_NAME_LENGTH
+ && cfpkt_more(pkt); i++) {
+ cfpkt_extr_head(pkt, &tmp, 1);
+ *cp++ = tmp;
+ }
+ /* Length */
+ cfpkt_extr_head(pkt, &len, 1);
+ linkparam.u.utility.paramlen = len;
+ /* Param Data */
+ cp = linkparam.u.utility.params;
+ while (cfpkt_more(pkt) && len--) {
+ cfpkt_extr_head(pkt, &tmp, 1);
+ *cp++ = tmp;
+ }
+ if (CFCTRL_ERR_BIT & cmdrsp)
+ break;
+ /* Link ID */
+ cfpkt_extr_head(pkt, &linkid, 1);
+ /* Length */
+ cfpkt_extr_head(pkt, &len, 1);
+ /* Param Data */
+ cfpkt_extr_head(pkt, &param, len);
+ break;
+ default:
+ pr_warning("CAIF: %s(): Request setup "
+ "- invalid link type (%d)",
+ __func__, serv);
+ goto error;
+ }
+
+ rsp.cmd = cmd;
+ rsp.param = linkparam;
+ req = cfctrl_remove_req(cfctrl, &rsp);
+
+ if (CFCTRL_ERR_BIT == (CFCTRL_ERR_BIT & cmdrsp) ||
+ cfpkt_erroneous(pkt)) {
+ pr_err("CAIF: %s(): Invalid O/E bit or parse "
+ "error on CAIF control channel",
+ __func__);
+ cfctrl->res.reject_rsp(cfctrl->serv.layer.up,
+ 0,
+ req ? req->client_layer
+ : NULL);
+ } else {
+ cfctrl->res.linksetup_rsp(cfctrl->serv.
+ layer.up, linkid,
+ serv, physlinkid,
+ req ? req->
+ client_layer : NULL);
+ }
+
+ kfree(req);
+ }
+ break;
+ case CFCTRL_CMD_LINK_DESTROY:
+ cfpkt_extr_head(pkt, &linkid, 1);
+ cfctrl->res.linkdestroy_rsp(cfctrl->serv.layer.up, linkid);
+ break;
+ case CFCTRL_CMD_LINK_ERR:
+ pr_err("CAIF: %s(): Frame Error Indication received\n",
+ __func__);
+ cfctrl->res.linkerror_ind();
+ break;
+ case CFCTRL_CMD_ENUM:
+ cfctrl->res.enum_rsp();
+ break;
+ case CFCTRL_CMD_SLEEP:
+ cfctrl->res.sleep_rsp();
+ break;
+ case CFCTRL_CMD_WAKE:
+ cfctrl->res.wake_rsp();
+ break;
+ case CFCTRL_CMD_LINK_RECONF:
+ cfctrl->res.restart_rsp();
+ break;
+ case CFCTRL_CMD_RADIO_SET:
+ cfctrl->res.radioset_rsp();
+ break;
+ default:
+ pr_err("CAIF: %s(): Unrecognized Control Frame\n", __func__);
+ goto error;
+ }
+ ret = 0;
+error:
+ cfpkt_destroy(pkt);
+ return ret;
+}
+
+static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+ int phyid)
+{
+ struct cfctrl *this = container_obj(layr);
+ switch (ctrl) {
+ case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND:
+ case CAIF_CTRLCMD_FLOW_OFF_IND:
+ spin_lock(&this->info_list_lock);
+ if (this->first_req != NULL) {
+ pr_debug("CAIF: %s(): Received flow off in "
+ "control layer", __func__);
+ }
+ spin_unlock(&this->info_list_lock);
+ break;
+ default:
+ break;
+ }
+}
+
+#ifndef CAIF_NO_LOOP
+static int handle_loop(struct cfctrl *ctrl, int cmd, struct cfpkt *pkt)
+{
+ static int last_linkid;
+ u8 linkid, linktype, tmp;
+ switch (cmd) {
+ case CFCTRL_CMD_LINK_SETUP:
+ spin_lock(&ctrl->loop_linkid_lock);
+ for (linkid = last_linkid + 1; linkid < 255; linkid++)
+ if (!ctrl->loop_linkused[linkid])
+ goto found;
+ for (linkid = last_linkid - 1; linkid > 0; linkid--)
+ if (!ctrl->loop_linkused[linkid])
+ goto found;
+ spin_unlock(&ctrl->loop_linkid_lock);
+ pr_err("CAIF: %s(): Out of link-ids\n", __func__);
+ return -EINVAL;
+found:
+ ctrl->loop_linkused[linkid] = 1;
+
+ last_linkid = linkid;
+
+ cfpkt_add_trail(pkt, &linkid, 1);
+ spin_unlock(&ctrl->loop_linkid_lock);
+ cfpkt_peek_head(pkt, &linktype, 1);
+ if (linktype == CFCTRL_SRV_UTIL) {
+ tmp = 0x01;
+ cfpkt_add_trail(pkt, &tmp, 1);
+ cfpkt_add_trail(pkt, &tmp, 1);
+ }
+ break;
+
+ case CFCTRL_CMD_LINK_DESTROY:
+ spin_lock(&ctrl->loop_linkid_lock);
+ cfpkt_peek_head(pkt, &linkid, 1);
+ ctrl->loop_linkused[linkid] = 0;
+ spin_unlock(&ctrl->loop_linkid_lock);
+ break;
+ default:
+ break;
+ }
+ return CAIF_SUCCESS;
+}
+#endif
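
For reference, the byte layout cfctrl_recv() peels apart can be followed in a standalone decoder. The mask values below are illustrative stand-ins; the authoritative definitions (CFCTRL_CMD_MASK, CFCTRL_RSP_BIT, CFCTRL_ERR_BIT, CFCTRL_SRV_MASK) live in the CAIF headers, which are not part of this hunk:

    #include <stddef.h>
    #include <stdint.h>

    #define CMD_MASK 0x0f   /* assumed stand-in for CFCTRL_CMD_MASK */
    #define ERR_BIT  0x40   /* assumed stand-in for CFCTRL_ERR_BIT  */

    struct linksetup_hdr {
            uint8_t cmd;       /* command bits of byte 0 */
            uint8_t service;   /* low nibble of byte 1   */
            uint8_t chtype;    /* high nibble of byte 1  */
            uint8_t phyid;     /* bits 0..2 of byte 2    */
            uint8_t priority;  /* bits 3..7 of byte 2    */
            uint8_t endpoint;  /* bits 0..1 of byte 3    */
    };

    /* Unpack the fixed prefix of a link-setup frame the same way
     * cfctrl_recv() does with successive cfpkt_extr_head() calls. */
    static int decode_linksetup(const uint8_t *buf, size_t len,
                                struct linksetup_hdr *out)
    {
            if (len < 4 || (buf[0] & ERR_BIT))
                    return -1;
            out->cmd      = buf[0] & CMD_MASK;
            out->service  = buf[1] & 0x0f;
            out->chtype   = buf[1] >> 4;
            out->phyid    = buf[2] & 0x07;
            out->priority = buf[2] >> 3;
            out->endpoint = buf[3] & 0x03;
            return 0;
    }

The service-specific fields that follow (connection IDs, fifo sizes, names) vary per link type, exactly as the switch on serv above shows.
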
diff --git a/net/caif/cfdbgl.c b/net/caif/cfdbgl.c
new file mode 100644
index 000000000000..ab6b6dc34cf8
--- /dev/null
+++ b/net/caif/cfdbgl.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/stddef.h>
+#include <linux/slab.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfsrvl.h>
+#include <net/caif/cfpkt.h>
+
+static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt);
+static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt);
+
+struct cflayer *cfdbgl_create(u8 channel_id, struct dev_info *dev_info)
+{
+ struct cfsrvl *dbg = kzalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
+ if (!dbg) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return NULL;
+ }
+ caif_assert(offsetof(struct cfsrvl, layer) == 0);
+ cfsrvl_init(dbg, channel_id, dev_info);
+ dbg->layer.receive = cfdbgl_receive;
+ dbg->layer.transmit = cfdbgl_transmit;
+ snprintf(dbg->layer.name, CAIF_LAYER_NAME_SZ - 1, "dbg%d", channel_id);
+ return &dbg->layer;
+}
+
+static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt)
+{
+ return layr->up->receive(layr->up, pkt);
+}
+
+static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt)
+{
+ return layr->dn->transmit(layr->dn, pkt);
+}
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c
new file mode 100644
index 000000000000..53194840ecb6
--- /dev/null
+++ b/net/caif/cfdgml.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/stddef.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfsrvl.h>
+#include <net/caif/cfpkt.h>
+
+#define container_obj(layr) ((struct cfsrvl *) layr)
+
+#define DGM_CMD_BIT 0x80
+#define DGM_FLOW_OFF 0x81
+#define DGM_FLOW_ON 0x80
+#define DGM_CTRL_PKT_SIZE 1
+
+static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt);
+static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt);
+
+struct cflayer *cfdgml_create(u8 channel_id, struct dev_info *dev_info)
+{
+ struct cfsrvl *dgm = kzalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
+ if (!dgm) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return NULL;
+ }
+ caif_assert(offsetof(struct cfsrvl, layer) == 0);
+ cfsrvl_init(dgm, channel_id, dev_info);
+ dgm->layer.receive = cfdgml_receive;
+ dgm->layer.transmit = cfdgml_transmit;
+ snprintf(dgm->layer.name, CAIF_LAYER_NAME_SZ - 1, "dgm%d", channel_id);
+ dgm->layer.name[CAIF_LAYER_NAME_SZ - 1] = '\0';
+ return &dgm->layer;
+}
+
+static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt)
+{
+ u8 cmd = -1;
+ u8 dgmhdr[3];
+ int ret;
+ caif_assert(layr->up != NULL);
+ caif_assert(layr->receive != NULL);
+ caif_assert(layr->ctrlcmd != NULL);
+
+ if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
+ pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
+
+ if ((cmd & DGM_CMD_BIT) == 0) {
+ if (cfpkt_extr_head(pkt, &dgmhdr, 3) < 0) {
+ pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
+ ret = layr->up->receive(layr->up, pkt);
+ return ret;
+ }
+
+ switch (cmd) {
+ case DGM_FLOW_OFF: /* FLOW OFF */
+ layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_OFF_IND, 0);
+ cfpkt_destroy(pkt);
+ return 0;
+ case DGM_FLOW_ON: /* FLOW ON */
+ layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_ON_IND, 0);
+ cfpkt_destroy(pkt);
+ return 0;
+ default:
+ cfpkt_destroy(pkt);
+ pr_info("CAIF: %s(): Unknown datagram control %d (0x%x)\n",
+ __func__, cmd, cmd);
+ return -EPROTO;
+ }
+}
+
+static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt)
+{
+ u32 zero = 0;
+ struct caif_payload_info *info;
+ struct cfsrvl *service = container_obj(layr);
+ int ret;
+ if (!cfsrvl_ready(service, &ret))
+ return ret;
+
+ cfpkt_add_head(pkt, &zero, 4);
+
+ /* Add info for MUX-layer to route the packet out. */
+ info = cfpkt_info(pkt);
+ info->channel_id = service->layer.id;
+ /* To optimize alignment, we add up the size of CAIF header
+ * before payload.
+ */
+ info->hdr_len = 4;
+ info->dev_info = &service->dev_info;
+ ret = layr->dn->transmit(layr->dn, pkt);
+ if (ret < 0) {
+ u32 tmp32;
+ cfpkt_extr_head(pkt, &tmp32, 4);
+ }
+ return ret;
+}
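
The one-byte service header decoded above is a pattern that repeats in cfutill.c and cfveil.c further down: the top bit separates payload from in-band flow-control commands. A compact sketch of the demux (constants mirror the DGM_* defines above):

    #include <stdint.h>

    #define CMD_BIT  0x80   /* mirrors DGM_CMD_BIT  */
    #define FLOW_ON  0x80   /* mirrors DGM_FLOW_ON  */
    #define FLOW_OFF 0x81   /* mirrors DGM_FLOW_OFF */

    enum verdict { DELIVER, FLOW_CTRL_ON, FLOW_CTRL_OFF, PROTO_ERR };

    /* Classify the leading command byte the way cfdgml_receive() does:
     * a clear top bit means payload, otherwise a control command. */
    static enum verdict classify(uint8_t cmd)
    {
            if (!(cmd & CMD_BIT))
                    return DELIVER;
            switch (cmd) {
            case FLOW_OFF:
                    return FLOW_CTRL_OFF;
            case FLOW_ON:
                    return FLOW_CTRL_ON;
            default:
                    return PROTO_ERR;
            }
    }
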
diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c
new file mode 100644
index 000000000000..e86a4ca3b217
--- /dev/null
+++ b/net/caif/cffrml.c
@@ -0,0 +1,151 @@
+/*
+ * CAIF Framing Layer.
+ *
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/stddef.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/crc-ccitt.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfpkt.h>
+#include <net/caif/cffrml.h>
+
+#define container_obj(layr) container_of(layr, struct cffrml, layer)
+
+struct cffrml {
+ struct cflayer layer;
+ bool dofcs; /* FCS active */
+};
+
+static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt);
+static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt);
+static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+ int phyid);
+
+static u32 cffrml_rcv_error;
+static u32 cffrml_rcv_checksum_error;
+
+struct cflayer *cffrml_create(u16 phyid, bool use_fcs)
+{
+ struct cffrml *this = kzalloc(sizeof(struct cffrml), GFP_ATOMIC);
+ if (!this) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return NULL;
+ }
+ caif_assert(offsetof(struct cffrml, layer) == 0);
+
+ this->layer.receive = cffrml_receive;
+ this->layer.transmit = cffrml_transmit;
+ this->layer.ctrlcmd = cffrml_ctrlcmd;
+ snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "frm%d", phyid);
+ this->dofcs = use_fcs;
+ this->layer.id = phyid;
+ return (struct cflayer *) this;
+}
+
+void cffrml_set_uplayer(struct cflayer *this, struct cflayer *up)
+{
+ this->up = up;
+}
+
+void cffrml_set_dnlayer(struct cflayer *this, struct cflayer *dn)
+{
+ this->dn = dn;
+}
+
+static u16 cffrml_checksum(u16 chks, void *buf, u16 len)
+{
+ /* FIXME: FCS should be moved to glue in order to use OS-Specific
+ * solutions
+ */
+ return crc_ccitt(chks, buf, len);
+}
+
+static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt)
+{
+ u16 tmp;
+ u16 len;
+ u16 hdrchks;
+ u16 pktchks;
+ struct cffrml *this;
+ this = container_obj(layr);
+
+ cfpkt_extr_head(pkt, &tmp, 2);
+ len = le16_to_cpu(tmp);
+
+ /* The length field counts the 2-byte trailer too; when FCS is
+ * not in use the trailer is just padding, so drop it here.
+ */
+ if (!this->dofcs)
+ len -= 2;
+
+ if (cfpkt_setlen(pkt, len) < 0) {
+ ++cffrml_rcv_error;
+ pr_err("CAIF: %s():Framing length error (%d)\n", __func__, len);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
+ /*
+ * Don't do extract if FCS is false, rather do setlen - then we don't
+ * get a cache-miss.
+ */
+ if (this->dofcs) {
+ cfpkt_extr_trail(pkt, &tmp, 2);
+ hdrchks = le16_to_cpu(tmp);
+ pktchks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff);
+ if (pktchks != hdrchks) {
+ cfpkt_add_trail(pkt, &tmp, 2);
+ ++cffrml_rcv_error;
+ ++cffrml_rcv_checksum_error;
+ pr_info("CAIF: %s(): Frame checksum error "
+ "(0x%x != 0x%x)\n", __func__, hdrchks, pktchks);
+ return -EILSEQ;
+ }
+ }
+ if (cfpkt_erroneous(pkt)) {
+ ++cffrml_rcv_error;
+ pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
+ return layr->up->receive(layr->up, pkt);
+}
+
+static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt)
+{
+ u16 tmp;
+ u16 chks;
+ u16 len;
+ int ret;
+ struct cffrml *this = container_obj(layr);
+ if (this->dofcs) {
+ chks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff);
+ tmp = cpu_to_le16(chks);
+ cfpkt_add_trail(pkt, &tmp, 2);
+ } else {
+ cfpkt_pad_trail(pkt, 2);
+ }
+ len = cfpkt_getlen(pkt);
+ tmp = cpu_to_le16(len);
+ cfpkt_add_head(pkt, &tmp, 2);
+ cfpkt_info(pkt)->hdr_len += 2;
+ if (cfpkt_erroneous(pkt)) {
+ pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ return -EPROTO;
+ }
+ ret = layr->dn->transmit(layr->dn, pkt);
+ if (ret < 0) {
+ /* Remove header on faulty packet. */
+ cfpkt_extr_head(pkt, &tmp, 2);
+ }
+ return ret;
+}
+
+static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+ int phyid)
+{
+ if (layr->up->ctrlcmd)
+ layr->up->ctrlcmd(layr->up, ctrl, layr->id);
+}
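
Putting cffrml_transmit() and cffrml_receive() together, the on-the-wire frame is a little-endian 16-bit length, the payload, and a 2-byte trailer that is either a CRC-CCITT FCS (seed 0xffff) or plain padding; the length field counts payload plus trailer. A userspace sketch of the sender side, with a bitwise CRC equivalent to the kernel's table-driven crc_ccitt() from <linux/crc-ccitt.h>:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Bitwise CRC-CCITT (reflected polynomial 0x8408). */
    static uint16_t crc_ccitt(uint16_t crc, const uint8_t *buf, size_t len)
    {
            while (len--) {
                    crc ^= *buf++;
                    for (int i = 0; i < 8; i++)
                            crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
            }
            return crc;
    }

    /* Build one frame: le16 length | payload | le16 FCS. 'out' must
     * hold len + 4 bytes; returns the total frame size. */
    static size_t frame_build(uint8_t *out, const uint8_t *payload,
                              uint16_t len)
    {
            uint16_t body = len + 2;  /* length counts payload + trailer */
            uint16_t fcs = crc_ccitt(0xffff, payload, len);

            out[0] = body & 0xff;     /* little-endian length */
            out[1] = body >> 8;
            memcpy(out + 2, payload, len);
            out[2 + len] = fcs & 0xff;
            out[3 + len] = fcs >> 8;
            return (size_t)len + 4;
    }

Note how the receive path mirrors this: when FCS is disabled it only trims the 2 padding bytes instead of extracting a trailer, avoiding a cache miss as the comment in cffrml_receive() explains.
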
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
new file mode 100644
index 000000000000..7372f27f1d32
--- /dev/null
+++ b/net/caif/cfmuxl.c
@@ -0,0 +1,251 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#include <linux/stddef.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <net/caif/cfpkt.h>
+#include <net/caif/cfmuxl.h>
+#include <net/caif/cfsrvl.h>
+#include <net/caif/cffrml.h>
+
+#define container_obj(layr) container_of(layr, struct cfmuxl, layer)
+
+#define CAIF_CTRL_CHANNEL 0
+#define UP_CACHE_SIZE 8
+#define DN_CACHE_SIZE 8
+
+struct cfmuxl {
+ struct cflayer layer;
+ struct list_head srvl_list;
+ struct list_head frml_list;
+ struct cflayer *up_cache[UP_CACHE_SIZE];
+ struct cflayer *dn_cache[DN_CACHE_SIZE];
+ /*
+ * Set when inserting or removing downwards layers.
+ */
+ spinlock_t transmit_lock;
+
+ /*
+ * Set when inserting or removing upwards layers.
+ */
+ spinlock_t receive_lock;
+
+};
+
+static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt);
+static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt);
+static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+ int phyid);
+static struct cflayer *get_up(struct cfmuxl *muxl, u16 id);
+
+struct cflayer *cfmuxl_create(void)
+{
+ struct cfmuxl *this = kzalloc(sizeof(struct cfmuxl), GFP_ATOMIC);
+ if (!this)
+ return NULL;
+ this->layer.receive = cfmuxl_receive;
+ this->layer.transmit = cfmuxl_transmit;
+ this->layer.ctrlcmd = cfmuxl_ctrlcmd;
+ INIT_LIST_HEAD(&this->srvl_list);
+ INIT_LIST_HEAD(&this->frml_list);
+ spin_lock_init(&this->transmit_lock);
+ spin_lock_init(&this->receive_lock);
+ snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "mux");
+ return &this->layer;
+}
+
+int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid)
+{
+ struct cfmuxl *muxl = container_obj(layr);
+ spin_lock(&muxl->receive_lock);
+ cfsrvl_get(up);
+ list_add(&up->node, &muxl->srvl_list);
+ spin_unlock(&muxl->receive_lock);
+ return 0;
+}
+
+bool cfmuxl_is_phy_inuse(struct cflayer *layr, u8 phyid)
+{
+ struct list_head *node;
+ struct cflayer *layer;
+ struct cfmuxl *muxl = container_obj(layr);
+ bool match = false;
+ spin_lock(&muxl->receive_lock);
+
+ list_for_each(node, &muxl->srvl_list) {
+ layer = list_entry(node, struct cflayer, node);
+ if (cfsrvl_phyid_match(layer, phyid)) {
+ match = true;
+ break;
+ }
+
+ }
+ spin_unlock(&muxl->receive_lock);
+ return match;
+}
+
+u8 cfmuxl_get_phyid(struct cflayer *layr, u8 channel_id)
+{
+ struct cflayer *up;
+ int phyid;
+ struct cfmuxl *muxl = container_obj(layr);
+ spin_lock(&muxl->receive_lock);
+ up = get_up(muxl, channel_id);
+ if (up != NULL)
+ phyid = cfsrvl_getphyid(up);
+ else
+ phyid = 0;
+ spin_unlock(&muxl->receive_lock);
+ return phyid;
+}
+
+int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *dn, u8 phyid)
+{
+ struct cfmuxl *muxl = container_obj(layr);
+ spin_lock(&muxl->transmit_lock);
+ list_add(&dn->node, &muxl->frml_list);
+ spin_unlock(&muxl->transmit_lock);
+ return 0;
+}
+
+static struct cflayer *get_from_id(struct list_head *list, u16 id)
+{
+ struct list_head *node;
+ struct cflayer *layer;
+ list_for_each(node, list) {
+ layer = list_entry(node, struct cflayer, node);
+ if (layer->id == id)
+ return layer;
+ }
+ return NULL;
+}
+
+struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid)
+{
+ struct cfmuxl *muxl = container_obj(layr);
+ struct cflayer *dn;
+ spin_lock(&muxl->transmit_lock);
+ memset(muxl->dn_cache, 0, sizeof(muxl->dn_cache));
+ dn = get_from_id(&muxl->frml_list, phyid);
+ if (dn == NULL) {
+ spin_unlock(&muxl->transmit_lock);
+ return NULL;
+ }
+ list_del(&dn->node);
+ spin_unlock(&muxl->transmit_lock);
+ return dn;
+}
+
+/* Invariant: lock is taken */
+static struct cflayer *get_up(struct cfmuxl *muxl, u16 id)
+{
+ struct cflayer *up;
+ int idx = id % UP_CACHE_SIZE;
+ up = muxl->up_cache[idx];
+ if (up == NULL || up->id != id) {
+ up = get_from_id(&muxl->srvl_list, id);
+ muxl->up_cache[idx] = up;
+ }
+ return up;
+}
+
+/* Invariant: lock is taken */
+static struct cflayer *get_dn(struct cfmuxl *muxl, struct dev_info *dev_info)
+{
+ struct cflayer *dn;
+ int idx = dev_info->id % DN_CACHE_SIZE;
+ dn = muxl->dn_cache[idx];
+ if (dn == NULL || dn->id != dev_info->id) {
+ dn = get_from_id(&muxl->frml_list, dev_info->id);
+ muxl->dn_cache[idx] = dn;
+ }
+ return dn;
+}
+
+struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id)
+{
+ struct cflayer *up;
+ struct cfmuxl *muxl = container_obj(layr);
+ spin_lock(&muxl->receive_lock);
+ up = get_up(muxl, id);
+ if (up == NULL) {
+ spin_unlock(&muxl->receive_lock);
+ return NULL;
+ }
+ memset(muxl->up_cache, 0, sizeof(muxl->up_cache));
+ list_del(&up->node);
+ cfsrvl_put(up);
+ spin_unlock(&muxl->receive_lock);
+ return up;
+}
+
+static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt)
+{
+ int ret;
+ struct cfmuxl *muxl = container_obj(layr);
+ u8 id;
+ struct cflayer *up;
+ if (cfpkt_extr_head(pkt, &id, 1) < 0) {
+ pr_err("CAIF: %s(): erroneous Caif Packet\n", __func__);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
+
+ spin_lock(&muxl->receive_lock);
+ up = get_up(muxl, id);
+ spin_unlock(&muxl->receive_lock);
+ if (up == NULL) {
+ pr_info("CAIF: %s():Received data on unknown link ID = %d "
+ "(0x%x) up == NULL", __func__, id, id);
+ cfpkt_destroy(pkt);
+ /*
+ * Don't return ERROR, since modem misbehaves and sends out
+ * flow on before linksetup response.
+ */
+ return 0;
+ }
+ cfsrvl_get(up);
+ ret = up->receive(up, pkt);
+ cfsrvl_put(up);
+ return ret;
+}
+
+static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt)
+{
+ int ret;
+ struct cfmuxl *muxl = container_obj(layr);
+ u8 linkid;
+ struct cflayer *dn;
+ struct caif_payload_info *info = cfpkt_info(pkt);
+ dn = get_dn(muxl, cfpkt_info(pkt)->dev_info);
+ if (dn == NULL) {
+ pr_warning("CAIF: %s(): Send data on unknown phy "
+ "ID = %d (0x%x)\n",
+ __func__, info->dev_info->id, info->dev_info->id);
+ return -ENOTCONN;
+ }
+ info->hdr_len += 1;
+ linkid = info->channel_id;
+ cfpkt_add_head(pkt, &linkid, 1);
+ ret = dn->transmit(dn, pkt);
+ /* Remove MUX protocol header upon error. */
+ if (ret < 0)
+ cfpkt_extr_head(pkt, &linkid, 1);
+ return ret;
+}
+
+static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+ int phyid)
+{
+ struct cfmuxl *muxl = container_obj(layr);
+ struct list_head *node;
+ struct cflayer *layer;
+ list_for_each(node, &muxl->srvl_list) {
+ layer = list_entry(node, struct cflayer, node);
+ if (cfsrvl_phyid_match(layer, phyid))
+ layer->ctrlcmd(layer, ctrl, phyid);
+ }
+}
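
The lookup helpers get_up() and get_dn() above put a small direct-mapped cache in front of the linear list search, and every insert or remove simply wipes the whole cache with memset(), which is cheap and safe at eight entries. The idiom in isolation (a simplified sketch, not kernel code):

    #include <stddef.h>

    #define CACHE_SIZE 8

    struct layer {
            int id;
            struct layer *next;
    };

    /* Direct-mapped cache in front of a linear search, as in get_up().
     * A stale or empty slot falls back to the list and refills the
     * slot; negative results (NULL) are cached too, exactly as above. */
    static struct layer *lookup(struct layer **cache, struct layer *list,
                                int id)
    {
            int idx = id % CACHE_SIZE;
            struct layer *l = cache[idx];

            if (l == NULL || l->id != id) {
                    for (l = list; l != NULL; l = l->next)
                            if (l->id == id)
                                    break;
                    cache[idx] = l;
            }
            return l;
    }
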
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
new file mode 100644
index 000000000000..83fff2ff6658
--- /dev/null
+++ b/net/caif/cfpkt_skbuff.c
@@ -0,0 +1,571 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <linux/hardirq.h>
+#include <net/caif/cfpkt.h>
+
+#define PKT_PREFIX CAIF_NEEDED_HEADROOM
+#define PKT_POSTFIX CAIF_NEEDED_TAILROOM
+#define PKT_LEN_WHEN_EXTENDING 128
+#define PKT_ERROR(pkt, errmsg) do { \
+ cfpkt_priv(pkt)->erronous = true; \
+ skb_reset_tail_pointer(&pkt->skb); \
+ pr_warning("CAIF: " errmsg);\
+ } while (0)
+
+struct cfpktq {
+ struct sk_buff_head head;
+ atomic_t count;
+ /* Lock protects count updates */
+ spinlock_t lock;
+};
+
+/*
+ * net/caif/ is generic and does not
+ * understand SKB, so we do this typecast
+ */
+struct cfpkt {
+ struct sk_buff skb;
+};
+
+/* Private data inside SKB */
+struct cfpkt_priv_data {
+ struct dev_info dev_info;
+ bool erronous;
+};
+
+static inline struct cfpkt_priv_data *cfpkt_priv(struct cfpkt *pkt)
+{
+ return (struct cfpkt_priv_data *) pkt->skb.cb;
+}
+
+static inline bool is_erronous(struct cfpkt *pkt)
+{
+ return cfpkt_priv(pkt)->erronous;
+}
+
+static inline struct sk_buff *pkt_to_skb(struct cfpkt *pkt)
+{
+ return &pkt->skb;
+}
+
+static inline struct cfpkt *skb_to_pkt(struct sk_buff *skb)
+{
+ return (struct cfpkt *) skb;
+}
+
+
+struct cfpkt *cfpkt_fromnative(enum caif_direction dir, void *nativepkt)
+{
+ struct cfpkt *pkt = skb_to_pkt(nativepkt);
+ cfpkt_priv(pkt)->erronous = false;
+ return pkt;
+}
+EXPORT_SYMBOL(cfpkt_fromnative);
+
+void *cfpkt_tonative(struct cfpkt *pkt)
+{
+ return (void *) pkt;
+}
+EXPORT_SYMBOL(cfpkt_tonative);
+
+static struct cfpkt *cfpkt_create_pfx(u16 len, u16 pfx)
+{
+ struct sk_buff *skb;
+
+ if (likely(in_interrupt()))
+ skb = alloc_skb(len + pfx, GFP_ATOMIC);
+ else
+ skb = alloc_skb(len + pfx, GFP_KERNEL);
+
+ if (unlikely(skb == NULL))
+ return NULL;
+
+ skb_reserve(skb, pfx);
+ return skb_to_pkt(skb);
+}
+
+inline struct cfpkt *cfpkt_create(u16 len)
+{
+ return cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX);
+}
+EXPORT_SYMBOL(cfpkt_create);
+
+void cfpkt_destroy(struct cfpkt *pkt)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ kfree_skb(skb);
+}
+EXPORT_SYMBOL(cfpkt_destroy);
+
+inline bool cfpkt_more(struct cfpkt *pkt)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ return skb->len > 0;
+}
+EXPORT_SYMBOL(cfpkt_more);
+
+int cfpkt_peek_head(struct cfpkt *pkt, void *data, u16 len)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ if (skb_headlen(skb) >= len) {
+ memcpy(data, skb->data, len);
+ return 0;
+ }
+ if (cfpkt_extr_head(pkt, data, len) < 0 ||
+ cfpkt_add_head(pkt, data, len) < 0)
+ return -EPROTO;
+ return 0;
+}
+EXPORT_SYMBOL(cfpkt_peek_head);
+
+int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ u8 *from;
+ if (unlikely(is_erronous(pkt)))
+ return -EPROTO;
+
+ if (unlikely(len > skb->len)) {
+ PKT_ERROR(pkt, "cfpkt_extr_head read beyond end of packet\n");
+ return -EPROTO;
+ }
+
+ if (unlikely(len > skb_headlen(skb))) {
+ if (unlikely(skb_linearize(skb) != 0)) {
+ PKT_ERROR(pkt, "cfpkt_extr_head linearize failed\n");
+ return -EPROTO;
+ }
+ }
+ from = skb_pull(skb, len);
+ from -= len;
+ memcpy(data, from, len);
+ return 0;
+}
+EXPORT_SYMBOL(cfpkt_extr_head);
+
+int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ u8 *data = dta;
+ u8 *from;
+ if (unlikely(is_erronous(pkt)))
+ return -EPROTO;
+
+ if (unlikely(skb_linearize(skb) != 0)) {
+ PKT_ERROR(pkt, "cfpkt_extr_trail linearize failed\n");
+ return -EPROTO;
+ }
+ if (unlikely(skb->data + len > skb_tail_pointer(skb))) {
+ PKT_ERROR(pkt, "cfpkt_extr_trail read beyond end of packet\n");
+ return -EPROTO;
+ }
+ from = skb_tail_pointer(skb) - len;
+ skb_trim(skb, skb->len - len);
+ memcpy(data, from, len);
+ return 0;
+}
+EXPORT_SYMBOL(cfpkt_extr_trail);
+
+int cfpkt_pad_trail(struct cfpkt *pkt, u16 len)
+{
+ return cfpkt_add_body(pkt, NULL, len);
+}
+EXPORT_SYMBOL(cfpkt_pad_trail);
+
+int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ struct sk_buff *lastskb;
+ u8 *to;
+ u16 addlen = 0;
+
+
+ if (unlikely(is_erronous(pkt)))
+ return -EPROTO;
+
+ lastskb = skb;
+
+ /* Check whether we need to add space at the tail */
+ if (unlikely(skb_tailroom(skb) < len)) {
+ if (likely(len < PKT_LEN_WHEN_EXTENDING))
+ addlen = PKT_LEN_WHEN_EXTENDING;
+ else
+ addlen = len;
+ }
+
+ /* Check whether we need to change the SKB before writing to the tail */
+ if (unlikely((addlen > 0) || skb_cloned(skb) || skb_shared(skb))) {
+
+ /* Make sure data is writable */
+ if (unlikely(skb_cow_data(skb, addlen, &lastskb) < 0)) {
+ PKT_ERROR(pkt, "cfpkt_add_body: cow failed\n");
+ return -EPROTO;
+ }
+ /*
+ * Is the SKB non-linear after skb_cow_data()? If so, we are
+ * going to add data to the last SKB, so we need to adjust
+ * lengths of the top SKB.
+ */
+ if (lastskb != skb) {
+ pr_warning("CAIF: %s(): Packet is non-linear\n",
+ __func__);
+ skb->len += len;
+ skb->data_len += len;
+ }
+ }
+
+ /* All set to put the last SKB and optionally write data there. */
+ to = skb_put(lastskb, len);
+ if (likely(data))
+ memcpy(to, data, len);
+ return 0;
+}
+EXPORT_SYMBOL(cfpkt_add_body);
+
+inline int cfpkt_addbdy(struct cfpkt *pkt, u8 data)
+{
+ return cfpkt_add_body(pkt, &data, 1);
+}
+EXPORT_SYMBOL(cfpkt_addbdy);
+
+int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ struct sk_buff *lastskb;
+ u8 *to;
+ const u8 *data = data2;
+ if (unlikely(is_erronous(pkt)))
+ return -EPROTO;
+ if (unlikely(skb_headroom(skb) < len)) {
+ PKT_ERROR(pkt, "cfpkt_add_head: no headroom\n");
+ return -EPROTO;
+ }
+
+ /* Make sure data is writable */
+ if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) {
+ PKT_ERROR(pkt, "cfpkt_add_head: cow failed\n");
+ return -EPROTO;
+ }
+
+ to = skb_push(skb, len);
+ memcpy(to, data, len);
+ return 0;
+}
+EXPORT_SYMBOL(cfpkt_add_head);
+
+inline int cfpkt_add_trail(struct cfpkt *pkt, const void *data, u16 len)
+{
+ return cfpkt_add_body(pkt, data, len);
+}
+EXPORT_SYMBOL(cfpkt_add_trail);
+
+inline u16 cfpkt_getlen(struct cfpkt *pkt)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ return skb->len;
+}
+EXPORT_SYMBOL(cfpkt_getlen);
+
+inline u16 cfpkt_iterate(struct cfpkt *pkt,
+ u16 (*iter_func)(u16, void *, u16),
+ u16 data)
+{
+ /*
+ * Don't care about the performance hit of linearizing;
+ * checksums should not be used on high-speed interfaces anyway.
+ */
+ if (unlikely(is_erronous(pkt)))
+ return -EPROTO;
+ if (unlikely(skb_linearize(&pkt->skb) != 0)) {
+ PKT_ERROR(pkt, "cfpkt_iterate: linearize failed\n");
+ return -EPROTO;
+ }
+ return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt));
+}
+EXPORT_SYMBOL(cfpkt_iterate);
+
+int cfpkt_setlen(struct cfpkt *pkt, u16 len)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+
+
+ if (unlikely(is_erronous(pkt)))
+ return -EPROTO;
+
+ if (likely(len <= skb->len)) {
+ if (unlikely(skb->data_len))
+ ___pskb_trim(skb, len);
+ else
+ skb_trim(skb, len);
+
+ return cfpkt_getlen(pkt);
+ }
+
+ /* Need to expand SKB */
+ if (unlikely(cfpkt_pad_trail(pkt, len - skb->len) != 0))
+ PKT_ERROR(pkt, "cfpkt_setlen: cfpkt_pad_trail failed\n");
+
+ return cfpkt_getlen(pkt);
+}
+EXPORT_SYMBOL(cfpkt_setlen);
+
+struct cfpkt *cfpkt_create_uplink(const unsigned char *data, unsigned int len)
+{
+ struct cfpkt *pkt = cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX);
+ if (unlikely(pkt == NULL))
+ return NULL;
+ if (data != NULL)
+ cfpkt_add_body(pkt, data, len);
+ return pkt;
+}
+EXPORT_SYMBOL(cfpkt_create_uplink);
+
+struct cfpkt *cfpkt_append(struct cfpkt *dstpkt,
+ struct cfpkt *addpkt,
+ u16 expectlen)
+{
+ struct sk_buff *dst = pkt_to_skb(dstpkt);
+ struct sk_buff *add = pkt_to_skb(addpkt);
+ u16 addlen = skb_headlen(add);
+ u16 neededtailspace;
+ struct sk_buff *tmp;
+ u16 dstlen;
+ u16 createlen;
+ if (unlikely(is_erronous(dstpkt) || is_erronous(addpkt))) {
+ cfpkt_destroy(addpkt);
+ return dstpkt;
+ }
+ if (expectlen > addlen)
+ neededtailspace = expectlen;
+ else
+ neededtailspace = addlen;
+
+ if (dst->tail + neededtailspace > dst->end) {
+ /* Create a duplicate of 'dst' with more tail space */
+ dstlen = skb_headlen(dst);
+ createlen = dstlen + neededtailspace;
+ tmp = pkt_to_skb(
+ cfpkt_create(createlen + PKT_PREFIX + PKT_POSTFIX));
+ if (!tmp)
+ return NULL;
+ skb_set_tail_pointer(tmp, dstlen);
+ tmp->len = dstlen;
+ memcpy(tmp->data, dst->data, dstlen);
+ cfpkt_destroy(dstpkt);
+ dst = tmp;
+ }
+ memcpy(skb_tail_pointer(dst), add->data, skb_headlen(add));
+ cfpkt_destroy(addpkt);
+ dst->tail += addlen;
+ dst->len += addlen;
+ return skb_to_pkt(dst);
+}
+EXPORT_SYMBOL(cfpkt_append);
+
+struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos)
+{
+ struct sk_buff *skb2;
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ u8 *split = skb->data + pos;
+ u16 len2nd = skb_tail_pointer(skb) - split;
+
+ if (unlikely(is_erronous(pkt)))
+ return NULL;
+
+ if (skb->data + pos > skb_tail_pointer(skb)) {
+ PKT_ERROR(pkt,
+ "cfpkt_split: trying to split beyond end of packet\n");
+ return NULL;
+ }
+
+ /* Create a new packet for the second part of the data */
+ skb2 = pkt_to_skb(
+ cfpkt_create_pfx(len2nd + PKT_PREFIX + PKT_POSTFIX,
+ PKT_PREFIX));
+
+ if (skb2 == NULL)
+ return NULL;
+
+ /* Reduce the length of the original packet */
+ skb_set_tail_pointer(skb, pos);
+ skb->len = pos;
+
+ memcpy(skb2->data, split, len2nd);
+ skb2->tail += len2nd;
+ skb2->len += len2nd;
+ return skb_to_pkt(skb2);
+}
+EXPORT_SYMBOL(cfpkt_split);
+
+char *cfpkt_log_pkt(struct cfpkt *pkt, char *buf, int buflen)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ char *p = buf;
+ int i;
+
+ /*
+ * Sanity-check the buffer length; it needs to be at least large
+ * enough for the header info (roughly 50 bytes).
+ */
+ if (buflen < 50)
+ return NULL;
+
+ snprintf(buf, buflen, "%s: pkt:%p len:%ld(%ld+%ld) {%ld,%ld} data: [",
+ is_erronous(pkt) ? "ERRONOUS-SKB" :
+ (skb->data_len != 0 ? "COMPLEX-SKB" : "SKB"),
+ skb,
+ (long) skb->len,
+ (long) (skb_tail_pointer(skb) - skb->data),
+ (long) skb->data_len,
+ (long) (skb->data - skb->head),
+ (long) (skb_tail_pointer(skb) - skb->head));
+ p = buf + strlen(buf);
+
+ for (i = 0; i < skb_tail_pointer(skb) - skb->data && i < 300; i++) {
+ if (p > buf + buflen - 10) {
+ sprintf(p, "...");
+ p = buf + strlen(buf);
+ break;
+ }
+ sprintf(p, "%02x,", skb->data[i]);
+ p = buf + strlen(buf);
+ }
+ sprintf(p, "]\n");
+ return buf;
+}
+EXPORT_SYMBOL(cfpkt_log_pkt);
+
+int cfpkt_raw_append(struct cfpkt *pkt, void **buf, unsigned int buflen)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ struct sk_buff *lastskb;
+
+ caif_assert(buf != NULL);
+ if (unlikely(is_erronous(pkt)))
+ return -EPROTO;
+ /* Make sure SKB is writable */
+ if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) {
+ PKT_ERROR(pkt, "cfpkt_raw_append: skb_cow_data failed\n");
+ return -EPROTO;
+ }
+
+ if (unlikely(skb_linearize(skb) != 0)) {
+ PKT_ERROR(pkt, "cfpkt_raw_append: linearize failed\n");
+ return -EPROTO;
+ }
+
+ if (unlikely(skb_tailroom(skb) < buflen)) {
+ PKT_ERROR(pkt, "cfpkt_raw_append: buffer too short - failed\n");
+ return -EPROTO;
+ }
+
+ *buf = skb_put(skb, buflen);
+ return 1;
+}
+EXPORT_SYMBOL(cfpkt_raw_append);
+
+int cfpkt_raw_extract(struct cfpkt *pkt, void **buf, unsigned int buflen)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+
+ caif_assert(buf != NULL);
+ if (unlikely(is_erronous(pkt)))
+ return -EPROTO;
+
+ if (unlikely(buflen > skb->len)) {
+ PKT_ERROR(pkt, "cfpkt_raw_extract: buflen too large "
+ "- failed\n");
+ return -EPROTO;
+ }
+
+ if (unlikely(buflen > skb_headlen(skb))) {
+ if (unlikely(skb_linearize(skb) != 0)) {
+ PKT_ERROR(pkt, "cfpkt_raw_extract: linearize failed\n");
+ return -EPROTO;
+ }
+ }
+
+ *buf = skb->data;
+ skb_pull(skb, buflen);
+
+ return 1;
+}
+EXPORT_SYMBOL(cfpkt_raw_extract);
+
+inline bool cfpkt_erroneous(struct cfpkt *pkt)
+{
+ return cfpkt_priv(pkt)->erronous;
+}
+EXPORT_SYMBOL(cfpkt_erroneous);
+
+struct cfpktq *cfpktq_create(void)
+{
+ struct cfpktq *q = kmalloc(sizeof(struct cfpktq), GFP_ATOMIC);
+ if (!q)
+ return NULL;
+ skb_queue_head_init(&q->head);
+ atomic_set(&q->count, 0);
+ spin_lock_init(&q->lock);
+ return q;
+}
+EXPORT_SYMBOL(cfpktq_create);
+
+void cfpkt_queue(struct cfpktq *pktq, struct cfpkt *pkt, unsigned short prio)
+{
+ atomic_inc(&pktq->count);
+ spin_lock(&pktq->lock);
+ skb_queue_tail(&pktq->head, pkt_to_skb(pkt));
+ spin_unlock(&pktq->lock);
+
+}
+EXPORT_SYMBOL(cfpkt_queue);
+
+struct cfpkt *cfpkt_qpeek(struct cfpktq *pktq)
+{
+ struct cfpkt *tmp;
+ spin_lock(&pktq->lock);
+ tmp = skb_to_pkt(skb_peek(&pktq->head));
+ spin_unlock(&pktq->lock);
+ return tmp;
+}
+EXPORT_SYMBOL(cfpkt_qpeek);
+
+struct cfpkt *cfpkt_dequeue(struct cfpktq *pktq)
+{
+ struct cfpkt *pkt;
+ spin_lock(&pktq->lock);
+ pkt = skb_to_pkt(skb_dequeue(&pktq->head));
+ if (pkt) {
+ atomic_dec(&pktq->count);
+ caif_assert(atomic_read(&pktq->count) >= 0);
+ }
+ spin_unlock(&pktq->lock);
+ return pkt;
+}
+EXPORT_SYMBOL(cfpkt_dequeue);
+
+int cfpkt_qcount(struct cfpktq *pktq)
+{
+ return atomic_read(&pktq->count);
+}
+EXPORT_SYMBOL(cfpkt_qcount);
+
+struct cfpkt *cfpkt_clone_release(struct cfpkt *pkt)
+{
+ struct cfpkt *clone;
+ clone = skb_to_pkt(skb_clone(pkt_to_skb(pkt), GFP_ATOMIC));
+ /* Free original packet. */
+ cfpkt_destroy(pkt);
+ return clone;
+}
+EXPORT_SYMBOL(cfpkt_clone_release);
+
+struct caif_payload_info *cfpkt_info(struct cfpkt *pkt)
+{
+ return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb;
+}
+EXPORT_SYMBOL(cfpkt_info);
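
As a usage sketch of the cfpkt API exported above: callers build a packet, append header bytes, and keep ownership when a lower layer's transmit fails, which is why every transmit path in this patch rolls back or destroys the packet on error. The helper below is illustrative only (cf_send_byte and 'dn' are not CAIF symbols; it assumes the CAIF headers are in scope):

    /* Illustrative sketch, not part of the patch: send one command byte
     * through a lower layer using the cfpkt primitives defined above. */
    static int cf_send_byte(struct cflayer *dn, u8 cmd)
    {
            struct cfpkt *pkt = cfpkt_create(1);
            int ret;

            if (!pkt)
                    return -ENOMEM;
            cfpkt_addbdy(pkt, cmd);      /* append one body byte */
            ret = dn->transmit(dn, pkt);
            if (ret < 0)
                    cfpkt_destroy(pkt);  /* on error the sender owns pkt */
            return ret;
    }

This is the same shape as cfctrl_sleep_req() and cfctrl_wake_req() earlier in the patch.
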
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
new file mode 100644
index 000000000000..cd2830fec935
--- /dev/null
+++ b/net/caif/cfrfml.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/stddef.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfsrvl.h>
+#include <net/caif/cfpkt.h>
+
+#define container_obj(layr) container_of(layr, struct cfsrvl, layer)
+
+#define RFM_SEGMENTATION_BIT 0x01
+#define RFM_PAYLOAD 0x00
+#define RFM_CMD_BIT 0x80
+#define RFM_FLOW_OFF 0x81
+#define RFM_FLOW_ON 0x80
+#define RFM_SET_PIN 0x82
+#define RFM_CTRL_PKT_SIZE 1
+
+static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt);
+static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt);
+static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl);
+
+struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info)
+{
+ struct cfsrvl *rfm = kzalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
+ if (!rfm) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return NULL;
+ }
+ caif_assert(offsetof(struct cfsrvl, layer) == 0);
+ cfsrvl_init(rfm, channel_id, dev_info);
+ rfm->layer.modemcmd = cfservl_modemcmd;
+ rfm->layer.receive = cfrfml_receive;
+ rfm->layer.transmit = cfrfml_transmit;
+ snprintf(rfm->layer.name, CAIF_LAYER_NAME_SZ, "rfm%d", channel_id);
+ return &rfm->layer;
+}
+
+static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
+{
+ return -EPROTO;
+}
+
+static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt)
+{
+ u8 tmp;
+ bool segmented;
+ int ret;
+ caif_assert(layr->up != NULL);
+ caif_assert(layr->receive != NULL);
+
+ /*
+ * RFM is taking care of segmentation and stripping of
+ * segmentation bit.
+ */
+ if (cfpkt_extr_head(pkt, &tmp, 1) < 0) {
+ pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
+ segmented = tmp & RFM_SEGMENTATION_BIT;
+ caif_assert(!segmented);
+
+ ret = layr->up->receive(layr->up, pkt);
+ return ret;
+}
+
+static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
+{
+ u8 tmp = 0;
+ int ret;
+ struct cfsrvl *service = container_obj(layr);
+
+ caif_assert(layr->dn != NULL);
+ caif_assert(layr->dn->transmit != NULL);
+
+ if (!cfsrvl_ready(service, &ret))
+ return ret;
+
+ if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+ pr_err("CAIF: %s():Packet too large - size=%d\n",
+ __func__, cfpkt_getlen(pkt));
+ return -EOVERFLOW;
+ }
+ if (cfpkt_add_head(pkt, &tmp, 1) < 0) {
+ pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ return -EPROTO;
+ }
+
+ /* Add info for MUX-layer to route the packet out. */
+ cfpkt_info(pkt)->channel_id = service->layer.id;
+ /*
+ * To optimize alignment, we add up the size of CAIF header before
+ * payload.
+ */
+ cfpkt_info(pkt)->hdr_len = 1;
+ cfpkt_info(pkt)->dev_info = &service->dev_info;
+ ret = layr->dn->transmit(layr->dn, pkt);
+ if (ret < 0)
+ cfpkt_extr_head(pkt, &tmp, 1);
+ return ret;
+}
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
new file mode 100644
index 000000000000..06029ea2da2f
--- /dev/null
+++ b/net/caif/cfserl.c
@@ -0,0 +1,192 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/stddef.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfpkt.h>
+#include <net/caif/cfserl.h>
+
+#define container_obj(layr) ((struct cfserl *) layr)
+
+#define CFSERL_STX 0x02
+#define CAIF_MINIMUM_PACKET_SIZE 4
+
+struct cfserl {
+ struct cflayer layer;
+ struct cfpkt *incomplete_frm;
+ /* Protects parallel processing of incoming packets */
+ spinlock_t sync;
+ bool usestx;
+};
+#define STXLEN(layr) (layr->usestx ? 1 : 0)
+
+static int cfserl_receive(struct cflayer *layr, struct cfpkt *pkt);
+static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
+static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+ int phyid);
+
+struct cflayer *cfserl_create(int type, int instance, bool use_stx)
+{
+ struct cfserl *this = kzalloc(sizeof(struct cfserl), GFP_ATOMIC);
+ if (!this) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return NULL;
+ }
+ caif_assert(offsetof(struct cfserl, layer) == 0);
+ this->layer.receive = cfserl_receive;
+ this->layer.transmit = cfserl_transmit;
+ this->layer.ctrlcmd = cfserl_ctrlcmd;
+ this->layer.type = type;
+ this->usestx = use_stx;
+ spin_lock_init(&this->sync);
+ snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "ser1");
+ return &this->layer;
+}
+
+static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
+{
+ struct cfserl *layr = container_obj(l);
+ u16 pkt_len;
+ struct cfpkt *pkt = NULL;
+ struct cfpkt *tail_pkt = NULL;
+ u8 tmp8;
+ u16 tmp;
+ u8 stx = CFSERL_STX;
+ int ret;
+ u16 expectlen = 0;
+ caif_assert(newpkt != NULL);
+ spin_lock(&layr->sync);
+
+ if (layr->incomplete_frm != NULL) {
+
+ layr->incomplete_frm =
+ cfpkt_append(layr->incomplete_frm, newpkt, expectlen);
+ pkt = layr->incomplete_frm;
+ } else {
+ pkt = newpkt;
+ }
+ layr->incomplete_frm = NULL;
+
+ do {
+ /* Search for STX at start of pkt if STX is used */
+ if (layr->usestx) {
+ cfpkt_extr_head(pkt, &tmp8, 1);
+ if (tmp8 != CFSERL_STX) {
+ while (cfpkt_more(pkt)
+ && tmp8 != CFSERL_STX) {
+ cfpkt_extr_head(pkt, &tmp8, 1);
+ }
+ if (!cfpkt_more(pkt)) {
+ cfpkt_destroy(pkt);
+ layr->incomplete_frm = NULL;
+ spin_unlock(&layr->sync);
+ return -EPROTO;
+ }
+ }
+ }
+
+ pkt_len = cfpkt_getlen(pkt);
+
+ /*
+ * pkt_len is the accumulated length of the packet data
+ * we have received so far.
+ * Exit if frame doesn't hold length.
+ */
+
+ if (pkt_len < 2) {
+ if (layr->usestx)
+ cfpkt_add_head(pkt, &stx, 1);
+ layr->incomplete_frm = pkt;
+ spin_unlock(&layr->sync);
+ return 0;
+ }
+
+ /*
+ * Find length of frame.
+ * expectlen is the length we need for a full frame.
+ */
+ cfpkt_peek_head(pkt, &tmp, 2);
+ expectlen = le16_to_cpu(tmp) + 2;
+ /*
+ * Frame error handling
+ */
+ if (expectlen < CAIF_MINIMUM_PACKET_SIZE
+ || expectlen > CAIF_MAX_FRAMESIZE) {
+ if (!layr->usestx) {
+ if (pkt != NULL)
+ cfpkt_destroy(pkt);
+ layr->incomplete_frm = NULL;
+ expectlen = 0;
+ spin_unlock(&layr->sync);
+ return -EPROTO;
+ }
+ continue;
+ }
+
+ if (pkt_len < expectlen) {
+ /* Too little received data */
+ if (layr->usestx)
+ cfpkt_add_head(pkt, &stx, 1);
+ layr->incomplete_frm = pkt;
+ spin_unlock(&layr->sync);
+ return 0;
+ }
+
+ /*
+ * Enough data for at least one frame.
+ * Split the frame, if too long
+ */
+ if (pkt_len > expectlen)
+ tail_pkt = cfpkt_split(pkt, expectlen);
+ else
+ tail_pkt = NULL;
+
+ /* Send the first part of packet upwards.*/
+ spin_unlock(&layr->sync);
+ ret = layr->layer.up->receive(layr->layer.up, pkt);
+ spin_lock(&layr->sync);
+ if (ret == -EILSEQ) {
+ if (layr->usestx) {
+ if (tail_pkt != NULL)
+ pkt = cfpkt_append(pkt, tail_pkt, 0);
+
+ /* Start search for next STX if frame failed */
+ continue;
+ } else {
+ cfpkt_destroy(pkt);
+ pkt = NULL;
+ }
+ }
+
+ pkt = tail_pkt;
+
+ } while (pkt != NULL);
+
+ spin_unlock(&layr->sync);
+ return 0;
+}
+
+static int cfserl_transmit(struct cflayer *layer, struct cfpkt *newpkt)
+{
+ struct cfserl *layr = container_obj(layer);
+ int ret;
+ u8 tmp8 = CFSERL_STX;
+ if (layr->usestx)
+ cfpkt_add_head(newpkt, &tmp8, 1);
+ ret = layer->dn->transmit(layer->dn, newpkt);
+ if (ret < 0)
+ cfpkt_extr_head(newpkt, &tmp8, 1);
+
+ return ret;
+}
+
+static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+ int phyid)
+{
+ layr->up->ctrlcmd(layr->up, ctrl, phyid);
+}
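
cfserl_receive() above implements a byte-stream deframer: optionally hunt for an STX byte, peek the little-endian length that follows, buffer a partial frame for the next call, and split off any surplus bytes. One step of that logic over a flat buffer, as a simplified sketch (the kernel code additionally re-buffers partials and, on -EILSEQ from the layer above, keeps the packet and re-syncs on the next STX):

    #include <stddef.h>
    #include <stdint.h>

    #define STX 0x02  /* mirrors CFSERL_STX */

    /* Locate one complete frame in buf[0..len). On success, *start is
     * the offset just past the STX and the frame length (le16 field
     * plus 2) is returned; 0 means more bytes are needed. */
    static size_t deframe(const uint8_t *buf, size_t len, size_t *start)
    {
            size_t i = 0;

            while (i < len && buf[i] != STX)    /* re-sync on STX */
                    i++;
            if (len - i < 3)                    /* STX + 2 length bytes */
                    return 0;

            /* Length field is little-endian and excludes itself. */
            size_t framelen =
                    (size_t)(buf[i + 1] | (buf[i + 2] << 8)) + 2;
            if (len - i - 1 < framelen)         /* partial frame: wait */
                    return 0;

            *start = i + 1;                     /* frame starts after STX */
            return framelen;
    }
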
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
new file mode 100644
index 000000000000..aff31f34528f
--- /dev/null
+++ b/net/caif/cfsrvl.c
@@ -0,0 +1,192 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfsrvl.h>
+#include <net/caif/cfpkt.h>
+
+#define SRVL_CTRL_PKT_SIZE 1
+#define SRVL_FLOW_OFF 0x81
+#define SRVL_FLOW_ON 0x80
+#define SRVL_SET_PIN 0x82
+
+#define container_obj(layr) container_of(layr, struct cfsrvl, layer)
+
+static void cfservl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+ int phyid)
+{
+ struct cfsrvl *service = container_obj(layr);
+ caif_assert(layr->up != NULL);
+ caif_assert(layr->up->ctrlcmd != NULL);
+ switch (ctrl) {
+ case CAIF_CTRLCMD_INIT_RSP:
+ service->open = true;
+ layr->up->ctrlcmd(layr->up, ctrl, phyid);
+ break;
+ case CAIF_CTRLCMD_DEINIT_RSP:
+ case CAIF_CTRLCMD_INIT_FAIL_RSP:
+ service->open = false;
+ layr->up->ctrlcmd(layr->up, ctrl, phyid);
+ break;
+ case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND:
+ if (phyid != service->dev_info.id)
+ break;
+ if (service->modem_flow_on)
+ layr->up->ctrlcmd(layr->up,
+ CAIF_CTRLCMD_FLOW_OFF_IND, phyid);
+ service->phy_flow_on = false;
+ break;
+ case _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND:
+ if (phyid != service->dev_info.id)
+ return;
+ if (service->modem_flow_on) {
+ layr->up->ctrlcmd(layr->up,
+ CAIF_CTRLCMD_FLOW_ON_IND,
+ phyid);
+ }
+ service->phy_flow_on = true;
+ break;
+ case CAIF_CTRLCMD_FLOW_OFF_IND:
+ if (service->phy_flow_on) {
+ layr->up->ctrlcmd(layr->up,
+ CAIF_CTRLCMD_FLOW_OFF_IND, phyid);
+ }
+ service->modem_flow_on = false;
+ break;
+ case CAIF_CTRLCMD_FLOW_ON_IND:
+ if (service->phy_flow_on) {
+ layr->up->ctrlcmd(layr->up,
+ CAIF_CTRLCMD_FLOW_ON_IND, phyid);
+ }
+ service->modem_flow_on = true;
+ break;
+ case _CAIF_CTRLCMD_PHYIF_DOWN_IND:
+ /* If the interface is down, fake a remote shutdown. */
+ layr->up->ctrlcmd(layr->up,
+ CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND, phyid);
+ break;
+ case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
+ layr->up->ctrlcmd(layr->up, ctrl, phyid);
+ break;
+ default:
+ pr_warning("CAIF: %s(): "
+ "Unexpected ctrl in cfsrvl (%d)\n", __func__, ctrl);
+ /* We have both modem and phy flow on, send flow on */
+ layr->up->ctrlcmd(layr->up, ctrl, phyid);
+ service->phy_flow_on = true;
+ break;
+ }
+}
+
+static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
+{
+ struct cfsrvl *service = container_obj(layr);
+ caif_assert(layr != NULL);
+ caif_assert(layr->dn != NULL);
+ caif_assert(layr->dn->transmit != NULL);
+ switch (ctrl) {
+ case CAIF_MODEMCMD_FLOW_ON_REQ:
+ {
+ struct cfpkt *pkt;
+ struct caif_payload_info *info;
+ u8 flow_on = SRVL_FLOW_ON;
+ pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
+ if (!pkt) {
+ pr_warning("CAIF: %s(): Out of memory\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ if (cfpkt_add_head(pkt, &flow_on, 1) < 0) {
+ pr_err("CAIF: %s(): Packet is erroneous!\n",
+ __func__);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
+ info = cfpkt_info(pkt);
+ info->channel_id = service->layer.id;
+ info->hdr_len = 1;
+ info->dev_info = &service->dev_info;
+ return layr->dn->transmit(layr->dn, pkt);
+ }
+ case CAIF_MODEMCMD_FLOW_OFF_REQ:
+ {
+ struct cfpkt *pkt;
+ struct caif_payload_info *info;
+ u8 flow_off = SRVL_FLOW_OFF;
+ pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
+ if (!pkt) {
+ pr_warning("CAIF: %s(): Out of memory\n",
+ __func__);
+ return -ENOMEM;
+ }
+ if (cfpkt_add_head(pkt, &flow_off, 1) < 0) {
+ pr_err("CAIF: %s(): Packet is erroneous!\n",
+ __func__);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
+ info = cfpkt_info(pkt);
+ info->channel_id = service->layer.id;
+ info->hdr_len = 1;
+ info->dev_info = &service->dev_info;
+ return layr->dn->transmit(layr->dn, pkt);
+ }
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+void cfservl_destroy(struct cflayer *layer)
+{
+ kfree(layer);
+}
+
+void cfsrvl_init(struct cfsrvl *service,
+ u8 channel_id,
+ struct dev_info *dev_info)
+{
+ caif_assert(offsetof(struct cfsrvl, layer) == 0);
+ service->open = false;
+ service->modem_flow_on = true;
+ service->phy_flow_on = true;
+ service->layer.id = channel_id;
+ service->layer.ctrlcmd = cfservl_ctrlcmd;
+ service->layer.modemcmd = cfservl_modemcmd;
+ service->dev_info = *dev_info;
+ kref_init(&service->ref);
+}
+
+void cfsrvl_release(struct kref *kref)
+{
+ struct cfsrvl *service = container_of(kref, struct cfsrvl, ref);
+ kfree(service);
+}
+
+bool cfsrvl_ready(struct cfsrvl *service, int *err)
+{
+ if (service->open && service->modem_flow_on && service->phy_flow_on)
+ return true;
+ if (!service->open) {
+ *err = -ENOTCONN;
+ return false;
+ }
+ caif_assert(!(service->modem_flow_on && service->phy_flow_on));
+ *err = -EAGAIN;
+ return false;
+}
+
+u8 cfsrvl_getphyid(struct cflayer *layer)
+{
+ struct cfsrvl *servl = container_obj(layer);
+ return servl->dev_info.id;
+}
+
+bool cfsrvl_phyid_match(struct cflayer *layer, int phyid)
+{
+ struct cfsrvl *servl = container_obj(layer);
+ return servl->dev_info.id == phyid;
+}
diff --git a/net/caif/cfutill.c b/net/caif/cfutill.c
new file mode 100644
index 000000000000..5fd2c9ea8b42
--- /dev/null
+++ b/net/caif/cfutill.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfsrvl.h>
+#include <net/caif/cfpkt.h>
+
+#define container_obj(layr) ((struct cfsrvl *) layr)
+#define UTIL_PAYLOAD 0x00
+#define UTIL_CMD_BIT 0x80
+#define UTIL_REMOTE_SHUTDOWN 0x82
+#define UTIL_FLOW_OFF 0x81
+#define UTIL_FLOW_ON 0x80
+#define UTIL_CTRL_PKT_SIZE 1
+static int cfutill_receive(struct cflayer *layr, struct cfpkt *pkt);
+static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt);
+
+struct cflayer *cfutill_create(u8 channel_id, struct dev_info *dev_info)
+{
+ struct cfsrvl *util = kzalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
+ if (!util) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return NULL;
+ }
+ caif_assert(offsetof(struct cfsrvl, layer) == 0);
+ cfsrvl_init(util, channel_id, dev_info);
+ util->layer.receive = cfutill_receive;
+ util->layer.transmit = cfutill_transmit;
+ snprintf(util->layer.name, CAIF_LAYER_NAME_SZ - 1, "util1");
+ return &util->layer;
+}
+
+static int cfutill_receive(struct cflayer *layr, struct cfpkt *pkt)
+{
+ u8 cmd = -1;
+ struct cfsrvl *service = container_obj(layr);
+ caif_assert(layr != NULL);
+ caif_assert(layr->up != NULL);
+ caif_assert(layr->up->receive != NULL);
+ caif_assert(layr->up->ctrlcmd != NULL);
+ if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
+ pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
+
+ switch (cmd) {
+ case UTIL_PAYLOAD:
+ return layr->up->receive(layr->up, pkt);
+ case UTIL_FLOW_OFF:
+ layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_OFF_IND, 0);
+ cfpkt_destroy(pkt);
+ return 0;
+ case UTIL_FLOW_ON:
+ layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_ON_IND, 0);
+ cfpkt_destroy(pkt);
+ return 0;
+ case UTIL_REMOTE_SHUTDOWN: /* Remote Shutdown Request */
+ pr_err("CAIF: %s(): REMOTE SHUTDOWN REQUEST RECEIVED\n",
+ __func__);
+ layr->ctrlcmd(layr, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND, 0);
+ service->open = false;
+ cfpkt_destroy(pkt);
+ return 0;
+ default:
+ cfpkt_destroy(pkt);
+ pr_warning("CAIF: %s(): Unknown service control %d (0x%x)\n",
+ __func__, cmd, cmd);
+ return -EPROTO;
+ }
+}
+
+static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt)
+{
+ u8 zero = 0;
+ struct caif_payload_info *info;
+ int ret;
+ struct cfsrvl *service = container_obj(layr);
+ caif_assert(layr != NULL);
+ caif_assert(layr->dn != NULL);
+ caif_assert(layr->dn->transmit != NULL);
+ if (!cfsrvl_ready(service, &ret))
+ return ret;
+
+ if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+ pr_err("CAIF: %s(): packet too large size=%d\n",
+ __func__, cfpkt_getlen(pkt));
+ return -EOVERFLOW;
+ }
+
+ cfpkt_add_head(pkt, &zero, 1);
+ /* Add info for MUX-layer to route the packet out. */
+ info = cfpkt_info(pkt);
+ info->channel_id = service->layer.id;
+ /*
+ * To optimize alignment, we add up the size of CAIF header before
+ * payload.
+ */
+ info->hdr_len = 1;
+ info->dev_info = &service->dev_info;
+ ret = layr->dn->transmit(layr->dn, pkt);
+ if (ret < 0) {
+ u8 tmp8;
+ cfpkt_extr_head(pkt, &tmp8, 1);
+ }
+ return ret;
+}
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c
new file mode 100644
index 000000000000..0fd827f49491
--- /dev/null
+++ b/net/caif/cfveil.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/stddef.h>
+#include <linux/slab.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfsrvl.h>
+#include <net/caif/cfpkt.h>
+
+#define VEI_PAYLOAD 0x00
+#define VEI_CMD_BIT 0x80
+#define VEI_FLOW_OFF 0x81
+#define VEI_FLOW_ON 0x80
+#define VEI_SET_PIN 0x82
+#define VEI_CTRL_PKT_SIZE 1
+#define container_obj(layr) container_of(layr, struct cfsrvl, layer)
+
+static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt);
+static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt);
+
+struct cflayer *cfvei_create(u8 channel_id, struct dev_info *dev_info)
+{
+ struct cfsrvl *vei = kzalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
+ if (!vei) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return NULL;
+ }
+ caif_assert(offsetof(struct cfsrvl, layer) == 0);
+ cfsrvl_init(vei, channel_id, dev_info);
+ vei->layer.receive = cfvei_receive;
+ vei->layer.transmit = cfvei_transmit;
+ snprintf(vei->layer.name, CAIF_LAYER_NAME_SZ - 1, "vei%d", channel_id);
+ return &vei->layer;
+}
+
+static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt)
+{
+ u8 cmd;
+ int ret;
+ caif_assert(layr->up != NULL);
+ caif_assert(layr->receive != NULL);
+ caif_assert(layr->ctrlcmd != NULL);
+
+
+ if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
+ pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
+ switch (cmd) {
+ case VEI_PAYLOAD:
+ ret = layr->up->receive(layr->up, pkt);
+ return ret;
+ case VEI_FLOW_OFF:
+ layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_OFF_IND, 0);
+ cfpkt_destroy(pkt);
+ return 0;
+ case VEI_FLOW_ON:
+ layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_ON_IND, 0);
+ cfpkt_destroy(pkt);
+ return 0;
+ case VEI_SET_PIN: /* SET RS232 PIN */
+ cfpkt_destroy(pkt);
+ return 0;
+ default:
+ pr_warning("CAIF: %s():Unknown VEI control packet %d (0x%x)!\n",
+ __func__, cmd, cmd);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
+}
+
+static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt)
+{
+ u8 tmp = 0;
+ struct caif_payload_info *info;
+ int ret;
+ struct cfsrvl *service = container_obj(layr);
+ if (!cfsrvl_ready(service, &ret))
+ return ret;
+ caif_assert(layr->dn != NULL);
+ caif_assert(layr->dn->transmit != NULL);
+ if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+ pr_warning("CAIF: %s(): Packet too large - size=%d\n",
+ __func__, cfpkt_getlen(pkt));
+ return -EOVERFLOW;
+ }
+
+ if (cfpkt_add_head(pkt, &tmp, 1) < 0) {
+ pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ return -EPROTO;
+ }
+
+ /* Add info for MUX-layer to route the packet out. */
+ info = cfpkt_info(pkt);
+ info->channel_id = service->layer.id;
+ info->hdr_len = 1;
+ info->dev_info = &service->dev_info;
+ ret = layr->dn->transmit(layr->dn, pkt);
+ if (ret < 0)
+ cfpkt_extr_head(pkt, &tmp, 1);
+ return ret;
+}
diff --git a/net/caif/cfvidl.c b/net/caif/cfvidl.c
new file mode 100644
index 000000000000..89ad4ea239f1
--- /dev/null
+++ b/net/caif/cfvidl.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfsrvl.h>
+#include <net/caif/cfpkt.h>
+
+#define container_obj(layr) ((struct cfsrvl *) layr)
+
+static int cfvidl_receive(struct cflayer *layr, struct cfpkt *pkt);
+static int cfvidl_transmit(struct cflayer *layr, struct cfpkt *pkt);
+
+struct cflayer *cfvidl_create(u8 channel_id, struct dev_info *dev_info)
+{
+ struct cfsrvl *vid = kzalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
+ if (!vid) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return NULL;
+ }
+ caif_assert(offsetof(struct cfsrvl, layer) == 0);
+
+ cfsrvl_init(vid, channel_id, dev_info);
+ vid->layer.receive = cfvidl_receive;
+ vid->layer.transmit = cfvidl_transmit;
+ snprintf(vid->layer.name, CAIF_LAYER_NAME_SZ - 1, "vid1");
+ return &vid->layer;
+}
+
+static int cfvidl_receive(struct cflayer *layr, struct cfpkt *pkt)
+{
+ u32 videoheader;
+ if (cfpkt_extr_head(pkt, &videoheader, 4) < 0) {
+ pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
+ return layr->up->receive(layr->up, pkt);
+}
+
+static int cfvidl_transmit(struct cflayer *layr, struct cfpkt *pkt)
+{
+ struct cfsrvl *service = container_obj(layr);
+ struct caif_payload_info *info;
+ u32 videoheader = 0;
+ int ret;
+ if (!cfsrvl_ready(service, &ret))
+ return ret;
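+ /* Prepend a zeroed 4-byte video header. */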
+ cfpkt_add_head(pkt, &videoheader, 4);
+ /* Add info for MUX-layer to route the packet out */
+ info = cfpkt_info(pkt);
+ info->channel_id = service->layer.id;
+ info->dev_info = &service->dev_info;
+ ret = layr->dn->transmit(layr->dn, pkt);
+ if (ret < 0)
+ cfpkt_extr_head(pkt, &videoheader, 4);
+ return ret;
+}
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
new file mode 100644
index 000000000000..610966abe2dc
--- /dev/null
+++ b/net/caif/chnl_net.c
@@ -0,0 +1,467 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Authors: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * Daniel Martensson / Daniel.Martensson@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/version.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/if_ether.h>
+#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/sched.h>
+#include <linux/sockios.h>
+#include <linux/caif/if_caif.h>
+#include <net/rtnetlink.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfcnfg.h>
+#include <net/caif/cfpkt.h>
+#include <net/caif/caif_dev.h>
+
+/* A GPRS PDP connection has an MTU of 1500 bytes */
+#define SIZE_MTU 1500
+/* 5 sec. connect timeout */
+#define CONNECT_TIMEOUT (5 * HZ)
+#define CAIF_NET_DEFAULT_QUEUE_LEN 500
+
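+/* Promote pr_debug to pr_warning so the debug traces below always log. */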
+#undef pr_debug
+#define pr_debug pr_warning
+
+/* This list is protected by the RTNL lock. */
+static LIST_HEAD(chnl_net_list);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("caif");
+
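+/* Connection states for the CAIF channel backing the net device. */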
+enum caif_states {
+ CAIF_CONNECTED = 1,
+ CAIF_CONNECTING,
+ CAIF_DISCONNECTED,
+ CAIF_SHUTDOWN
+};
+
+struct chnl_net {
+ struct cflayer chnl;
+ struct net_device_stats stats;
+ struct caif_connect_request conn_req;
+ struct list_head list_field;
+ struct net_device *netdev;
+ char name[256];
+ wait_queue_head_t netmgmt_wq;
+ /* Flow status to remember and control the transmission. */
+ bool flowenabled;
+ enum caif_states state;
+};
+
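+/* Unlink the node from chnl_net_list, but only if it is still on the
+ * list; warn if it has already been removed.
+ */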
+static void robust_list_del(struct list_head *delete_node)
+{
+ struct list_head *list_node;
+ struct list_head *n;
+ ASSERT_RTNL();
+ list_for_each_safe(list_node, n, &chnl_net_list) {
+ if (list_node == delete_node) {
+ list_del(list_node);
+ return;
+ }
+ }
+ WARN_ON(1);
+}
+
+static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
+{
+ struct sk_buff *skb;
+ struct chnl_net *priv = container_of(layr, struct chnl_net, chnl);
+ int pktlen;
+ int err = 0;
+
+ if (!priv)
+ return -EINVAL;
+
+ /* Get length of CAIF packet. */
+ pktlen = cfpkt_getlen(pkt);
+
+ skb = (struct sk_buff *) cfpkt_tonative(pkt);
+ /* Pass some minimum information and
+ * send the packet to the net stack.
+ */
+ skb->dev = priv->netdev;
+ skb->protocol = htons(ETH_P_IP);
+
+ /* If we change the header in loop mode, the checksum is corrupted. */
+ if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+
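+ /* netif_rx() may only be called from interrupt context; use the
+ * process-context variant otherwise.
+ */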
+ if (in_interrupt())
+ netif_rx(skb);
+ else
+ netif_rx_ni(skb);
+
+ /* Update statistics. */
+ priv->netdev->stats.rx_packets++;
+ priv->netdev->stats.rx_bytes += pktlen;
+
+ return err;
+}
+
+static int delete_device(struct chnl_net *dev)
+{
+ ASSERT_RTNL();
+ if (dev->netdev)
+ unregister_netdevice(dev->netdev);
+ return 0;
+}
+
+static void close_work(struct work_struct *work)
+{
+ struct chnl_net *dev = NULL;
+ struct list_head *list_node;
+ struct list_head *_tmp;
+ /* May be called with or without RTNL lock held */
+ int islocked = rtnl_is_locked();
+ if (!islocked)
+ rtnl_lock();
+ list_for_each_safe(list_node, _tmp, &chnl_net_list) {
+ dev = list_entry(list_node, struct chnl_net, list_field);
+ if (dev->state == CAIF_SHUTDOWN)
+ dev_close(dev->netdev);
+ }
+ if (!islocked)
+ rtnl_unlock();
+}
+static DECLARE_WORK(close_worker, close_work);
+
+static void chnl_flowctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow,
+ int phyid)
+{
+ struct chnl_net *priv = container_of(layr, struct chnl_net, chnl);
+ pr_debug("CAIF: %s(): NET flowctrl func called flow: %s\n",
+ __func__,
+ flow == CAIF_CTRLCMD_FLOW_ON_IND ? "ON" :
+ flow == CAIF_CTRLCMD_INIT_RSP ? "INIT" :
+ flow == CAIF_CTRLCMD_FLOW_OFF_IND ? "OFF" :
+ flow == CAIF_CTRLCMD_DEINIT_RSP ? "CLOSE/DEINIT" :
+ flow == CAIF_CTRLCMD_INIT_FAIL_RSP ? "OPEN_FAIL" :
+ flow == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND ?
+ "REMOTE_SHUTDOWN" : "UKNOWN CTRL COMMAND");
+
+ switch (flow) {
+ case CAIF_CTRLCMD_FLOW_OFF_IND:
+ priv->flowenabled = false;
+ netif_stop_queue(priv->netdev);
+ break;
+ case CAIF_CTRLCMD_DEINIT_RSP:
+ priv->state = CAIF_DISCONNECTED;
+ break;
+ case CAIF_CTRLCMD_INIT_FAIL_RSP:
+ priv->state = CAIF_DISCONNECTED;
+ wake_up_interruptible(&priv->netmgmt_wq);
+ break;
+ case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
+ priv->state = CAIF_SHUTDOWN;
+ netif_tx_disable(priv->netdev);
+ schedule_work(&close_worker);
+ break;
+ case CAIF_CTRLCMD_FLOW_ON_IND:
+ priv->flowenabled = true;
+ netif_wake_queue(priv->netdev);
+ break;
+ case CAIF_CTRLCMD_INIT_RSP:
+ priv->state = CAIF_CONNECTED;
+ priv->flowenabled = true;
+ netif_wake_queue(priv->netdev);
+ wake_up_interruptible(&priv->netmgmt_wq);
+ break;
+ default:
+ break;
+ }
+}
+
+static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct chnl_net *priv;
+ struct cfpkt *pkt = NULL;
+ int len;
+ int result = -1;
+ /* Get our private data. */
+ priv = netdev_priv(dev);
+
+ if (skb->len > priv->netdev->mtu) {
+ pr_warning("CAIF: %s(): Size of skb exceeded MTU\n", __func__);
+ return -ENOSPC;
+ }
+
+ if (!priv->flowenabled) {
+ pr_debug("CAIF: %s(): dropping packets flow off\n", __func__);
+ return NETDEV_TX_BUSY;
+ }
+
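+ /* In loopback mode the packet is reflected back to us, so swap the
+ * IP source and destination addresses before transmission.
+ */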
+ if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP)
+ swap(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
+
+ /* Store original SKB length. */
+ len = skb->len;
+
+ pkt = cfpkt_fromnative(CAIF_DIR_OUT, (void *) skb);
+
+ /* Send the packet down the stack. */
+ result = priv->chnl.dn->transmit(priv->chnl.dn, pkt);
+ if (result) {
+ if (result == -EAGAIN)
+ result = NETDEV_TX_BUSY;
+ return result;
+ }
+
+ /* Update statistics. */
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += len;
+
+ return NETDEV_TX_OK;
+}
+
+static int chnl_net_open(struct net_device *dev)
+{
+ struct chnl_net *priv = NULL;
+ int result = -1;
+ ASSERT_RTNL();
+ priv = netdev_priv(dev);
+ if (!priv) {
+ pr_debug("CAIF: %s(): chnl_net_open: no priv\n", __func__);
+ return -ENODEV;
+ }
+
+ if (priv->state != CAIF_CONNECTING) {
+ priv->state = CAIF_CONNECTING;
+ result = caif_connect_client(&priv->conn_req, &priv->chnl);
+ if (result != 0) {
+ priv->state = CAIF_DISCONNECTED;
+ pr_debug("CAIF: %s(): err: "
+ "Unable to register and open device,"
+ " Err:%d\n",
+ __func__,
+ result);
+ return result;
+ }
+ }
+
+ result = wait_event_interruptible_timeout(priv->netmgmt_wq,
+ priv->state != CAIF_CONNECTING,
+ CONNECT_TIMEOUT);
+
+ if (result == -ERESTARTSYS) {
+ pr_debug("CAIF: %s(): wait_event_interruptible"
+ " woken by a signal\n", __func__);
+ return -ERESTARTSYS;
+ }
+ if (result == 0) {
+ pr_debug("CAIF: %s(): connect timeout\n", __func__);
+ caif_disconnect_client(&priv->chnl);
+ priv->state = CAIF_DISCONNECTED;
+ pr_debug("CAIF: %s(): state disconnected\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ if (priv->state != CAIF_CONNECTED) {
+ pr_debug("CAIF: %s(): connect failed\n", __func__);
+ return -ECONNREFUSED;
+ }
+ pr_debug("CAIF: %s(): CAIF Netdevice connected\n", __func__);
+ return 0;
+}
+
+static int chnl_net_stop(struct net_device *dev)
+{
+ struct chnl_net *priv;
+
+ ASSERT_RTNL();
+ priv = netdev_priv(dev);
+ priv->state = CAIF_DISCONNECTED;
+ caif_disconnect_client(&priv->chnl);
+ return 0;
+}
+
+static int chnl_net_init(struct net_device *dev)
+{
+ struct chnl_net *priv;
+ ASSERT_RTNL();
+ priv = netdev_priv(dev);
+ strncpy(priv->name, dev->name, sizeof(priv->name));
+ return 0;
+}
+
+static void chnl_net_uninit(struct net_device *dev)
+{
+ struct chnl_net *priv;
+ ASSERT_RTNL();
+ priv = netdev_priv(dev);
+ robust_list_del(&priv->list_field);
+}
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = chnl_net_open,
+ .ndo_stop = chnl_net_stop,
+ .ndo_init = chnl_net_init,
+ .ndo_uninit = chnl_net_uninit,
+ .ndo_start_xmit = chnl_net_start_xmit,
+};
+
+static void ipcaif_net_setup(struct net_device *dev)
+{
+ struct chnl_net *priv;
+ dev->netdev_ops = &netdev_ops;
+ dev->destructor = free_netdev;
+ dev->flags |= IFF_NOARP;
+ dev->flags |= IFF_POINTOPOINT;
+ dev->needed_headroom = CAIF_NEEDED_HEADROOM;
+ dev->needed_tailroom = CAIF_NEEDED_TAILROOM;
+ dev->mtu = SIZE_MTU;
+ dev->tx_queue_len = CAIF_NET_DEFAULT_QUEUE_LEN;
+
+ priv = netdev_priv(dev);
+ priv->chnl.receive = chnl_recv_cb;
+ priv->chnl.ctrlcmd = chnl_flowctrl_cb;
+ priv->netdev = dev;
+ priv->conn_req.protocol = CAIFPROTO_DATAGRAM;
+ priv->conn_req.link_selector = CAIF_LINK_HIGH_BANDW;
+ priv->conn_req.priority = CAIF_PRIO_LOW;
+ /* Insert illegal value */
+ priv->conn_req.sockaddr.u.dgm.connection_id = -1;
+ priv->flowenabled = false;
+
+ ASSERT_RTNL();
+ init_waitqueue_head(&priv->netmgmt_wq);
+ list_add(&priv->list_field, &chnl_net_list);
+}
+
+
+static int ipcaif_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct chnl_net *priv;
+ u8 loop;
+ priv = netdev_priv(dev);
+ NLA_PUT_U32(skb, IFLA_CAIF_IPV4_CONNID,
+ priv->conn_req.sockaddr.u.dgm.connection_id);
+ NLA_PUT_U32(skb, IFLA_CAIF_IPV6_CONNID,
+ priv->conn_req.sockaddr.u.dgm.connection_id);
+ loop = priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP;
+ NLA_PUT_U8(skb, IFLA_CAIF_LOOPBACK, loop);
+
+ return 0;
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static void caif_netlink_parms(struct nlattr *data[],
+ struct caif_connect_request *conn_req)
+{
+ if (!data) {
+ pr_warning("CAIF: %s: no params data found\n", __func__);
+ return;
+ }
+ if (data[IFLA_CAIF_IPV4_CONNID])
+ conn_req->sockaddr.u.dgm.connection_id =
+ nla_get_u32(data[IFLA_CAIF_IPV4_CONNID]);
+ if (data[IFLA_CAIF_IPV6_CONNID])
+ conn_req->sockaddr.u.dgm.connection_id =
+ nla_get_u32(data[IFLA_CAIF_IPV6_CONNID]);
+ if (data[IFLA_CAIF_LOOPBACK]) {
+ if (nla_get_u8(data[IFLA_CAIF_LOOPBACK]))
+ conn_req->protocol = CAIFPROTO_DATAGRAM_LOOP;
+ else
+ conn_req->protocol = CAIFPROTO_DATAGRAM;
+ }
+}
+
+static int ipcaif_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ int ret;
+ struct chnl_net *caifdev;
+ ASSERT_RTNL();
+ caifdev = netdev_priv(dev);
+ caif_netlink_parms(data, &caifdev->conn_req);
+ dev_net_set(caifdev->netdev, src_net);
+
+ ret = register_netdevice(dev);
+ if (ret)
+ pr_warning("CAIF: %s(): device rtml registration failed\n",
+ __func__);
+ return ret;
+}
+
+static int ipcaif_changelink(struct net_device *dev, struct nlattr *tb[],
+ struct nlattr *data[])
+{
+ struct chnl_net *caifdev;
+ ASSERT_RTNL();
+ caifdev = netdev_priv(dev);
+ caif_netlink_parms(data, &caifdev->conn_req);
+ netdev_state_change(dev);
+ return 0;
+}
+
+static size_t ipcaif_get_size(const struct net_device *dev)
+{
+ return
+ /* IFLA_CAIF_IPV4_CONNID */
+ nla_total_size(4) +
+ /* IFLA_CAIF_IPV6_CONNID */
+ nla_total_size(4) +
+ /* IFLA_CAIF_LOOPBACK */
+ nla_total_size(2) +
+ 0;
+}
+
+static const struct nla_policy ipcaif_policy[IFLA_CAIF_MAX + 1] = {
+ [IFLA_CAIF_IPV4_CONNID] = { .type = NLA_U32 },
+ [IFLA_CAIF_IPV6_CONNID] = { .type = NLA_U32 },
+ [IFLA_CAIF_LOOPBACK] = { .type = NLA_U8 }
+};
+
+
+static struct rtnl_link_ops ipcaif_link_ops __read_mostly = {
+ .kind = "caif",
+ .priv_size = sizeof(struct chnl_net),
+ .setup = ipcaif_net_setup,
+ .maxtype = IFLA_CAIF_MAX,
+ .policy = ipcaif_policy,
+ .newlink = ipcaif_newlink,
+ .changelink = ipcaif_changelink,
+ .get_size = ipcaif_get_size,
+ .fill_info = ipcaif_fill_info,
+
+};
+
+static int __init chnl_init_module(void)
+{
+ return rtnl_link_register(&ipcaif_link_ops);
+}
+
+static void __exit chnl_exit_module(void)
+{
+ struct chnl_net *dev = NULL;
+ struct list_head *list_node;
+ struct list_head *_tmp;
+ rtnl_link_unregister(&ipcaif_link_ops);
+ rtnl_lock();
+ list_for_each_safe(list_node, _tmp, &chnl_net_list) {
+ dev = list_entry(list_node, struct chnl_net, list_field);
+ list_del(list_node);
+ delete_device(dev);
+ }
+ rtnl_unlock();
+}
+
+module_init(chnl_init_module);
+module_exit(chnl_exit_module);
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 907dc871fac8..9c65e9deb9c3 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -713,8 +713,6 @@ static void bcm_remove_op(struct bcm_op *op)
kfree(op->last_frames);
kfree(op);
-
- return;
}
static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
diff --git a/net/core/Makefile b/net/core/Makefile
index 08791ac3e05a..51c3eec850ef 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -7,7 +7,7 @@ obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \
obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
-obj-y += dev.o ethtool.o dev_mcast.o dst.o netevent.o \
+obj-y += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \
neighbour.o rtnetlink.o utils.o link_watch.o filter.o
obj-$(CONFIG_XFRM) += flow.o
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 2dccd4ee591b..e0097531417a 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -86,7 +86,7 @@ static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
int error;
DEFINE_WAIT_FUNC(wait, receiver_wake_function);
- prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
/* Socket errors? */
error = sock_error(sk);
@@ -115,7 +115,7 @@ static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
error = 0;
*timeo_p = schedule_timeout(*timeo_p);
out:
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
return error;
interrupted:
error = sock_intr_errno(*timeo_p);
@@ -229,9 +229,18 @@ EXPORT_SYMBOL(skb_free_datagram);
void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
{
- lock_sock(sk);
- skb_free_datagram(sk, skb);
- release_sock(sk);
+ if (likely(atomic_read(&skb->users) == 1))
+ smp_rmb();
+ else if (likely(!atomic_dec_and_test(&skb->users)))
+ return;
+
+ lock_sock_bh(sk);
+ skb_orphan(skb);
+ sk_mem_reclaim_partial(sk);
+ unlock_sock_bh(sk);
+
+ /* skb is now orphaned, can be freed outside of locked section */
+ __kfree_skb(skb);
}
EXPORT_SYMBOL(skb_free_datagram_locked);
@@ -726,7 +735,7 @@ unsigned int datagram_poll(struct file *file, struct socket *sock,
struct sock *sk = sock->sk;
unsigned int mask;
- sock_poll_wait(file, sk->sk_sleep, wait);
+ sock_poll_wait(file, sk_sleep(sk), wait);
mask = 0;
/* exceptional events? */
diff --git a/net/core/dev.c b/net/core/dev.c
index f769098774b7..d273e4e3ecdc 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -130,6 +130,7 @@
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
+#include <linux/pci.h>
#include "net-sysfs.h"
@@ -207,6 +208,20 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}
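+/*
+ * input_pkt_queue can be appended to by remote CPUs when RPS is
+ * enabled, so it must be guarded by the queue spinlock; without RPS
+ * only the local CPU touches it and the lock compiles away.
+ */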
+static inline void rps_lock(struct softnet_data *sd)
+{
+#ifdef CONFIG_RPS
+ spin_lock(&sd->input_pkt_queue.lock);
+#endif
+}
+
+static inline void rps_unlock(struct softnet_data *sd)
+{
+#ifdef CONFIG_RPS
+ spin_unlock(&sd->input_pkt_queue.lock);
+#endif
+}
+
/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
@@ -249,7 +264,7 @@ static RAW_NOTIFIER_HEAD(netdev_chain);
* queue in the local softnet handler.
*/
-DEFINE_PER_CPU(struct softnet_data, softnet_data);
+DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);
#ifdef CONFIG_LOCKDEP
@@ -773,14 +788,17 @@ EXPORT_SYMBOL(__dev_getfirstbyhwtype);
struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
- struct net_device *dev;
+ struct net_device *dev, *ret = NULL;
- rtnl_lock();
- dev = __dev_getfirstbyhwtype(net, type);
- if (dev)
- dev_hold(dev);
- rtnl_unlock();
- return dev;
+ rcu_read_lock();
+ for_each_netdev_rcu(net, dev)
+ if (dev->type == type) {
+ dev_hold(dev);
+ ret = dev;
+ break;
+ }
+ rcu_read_unlock();
+ return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);
@@ -984,15 +1002,10 @@ int dev_change_name(struct net_device *dev, const char *newname)
return err;
rollback:
- /* For now only devices in the initial network namespace
- * are in sysfs.
- */
- if (net_eq(net, &init_net)) {
- ret = device_rename(&dev->dev, dev->name);
- if (ret) {
- memcpy(dev->name, oldname, IFNAMSIZ);
- return ret;
- }
+ ret = device_rename(&dev->dev, dev->name);
+ if (ret) {
+ memcpy(dev->name, oldname, IFNAMSIZ);
+ return ret;
}
write_lock_bh(&dev_base_lock);
@@ -1085,9 +1098,9 @@ void netdev_state_change(struct net_device *dev)
}
EXPORT_SYMBOL(netdev_state_change);
-void netdev_bonding_change(struct net_device *dev, unsigned long event)
+int netdev_bonding_change(struct net_device *dev, unsigned long event)
{
- call_netdevice_notifiers(event, dev);
+ return call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);
@@ -1417,6 +1430,7 @@ EXPORT_SYMBOL(unregister_netdevice_notifier);
int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
+ ASSERT_RTNL();
return raw_notifier_call_chain(&netdev_chain, val, dev);
}
@@ -1435,7 +1449,7 @@ void net_disable_timestamp(void)
}
EXPORT_SYMBOL(net_disable_timestamp);
-static inline void net_timestamp(struct sk_buff *skb)
+static inline void net_timestamp_set(struct sk_buff *skb)
{
if (atomic_read(&netstamp_needed))
__net_timestamp(skb);
@@ -1443,6 +1457,12 @@ static inline void net_timestamp(struct sk_buff *skb)
skb->tstamp.tv64 = 0;
}
+static inline void net_timestamp_check(struct sk_buff *skb)
+{
+ if (!skb->tstamp.tv64 && atomic_read(&netstamp_needed))
+ __net_timestamp(skb);
+}
+
/**
* dev_forward_skb - loopback an skb to another netif
*
@@ -1451,7 +1471,7 @@ static inline void net_timestamp(struct sk_buff *skb)
*
* return values:
* NET_RX_SUCCESS (no congestion)
- * NET_RX_DROP (packet was dropped)
+ * NET_RX_DROP (packet was dropped, but freed)
*
* dev_forward_skb can be used for injecting an skb from the
* start_xmit function of one device into the receive queue
@@ -1465,12 +1485,11 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
skb_orphan(skb);
- if (!(dev->flags & IFF_UP))
- return NET_RX_DROP;
-
- if (skb->len > (dev->mtu + dev->hard_header_len))
+ if (!(dev->flags & IFF_UP) ||
+ (skb->len > (dev->mtu + dev->hard_header_len))) {
+ kfree_skb(skb);
return NET_RX_DROP;
-
+ }
skb_set_dev(skb, dev);
skb->tstamp.tv64 = 0;
skb->pkt_type = PACKET_HOST;
@@ -1490,9 +1509,9 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
#ifdef CONFIG_NET_CLS_ACT
if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
- net_timestamp(skb);
+ net_timestamp_set(skb);
#else
- net_timestamp(skb);
+ net_timestamp_set(skb);
#endif
rcu_read_lock();
@@ -1538,8 +1557,9 @@ static inline void __netif_reschedule(struct Qdisc *q)
local_irq_save(flags);
sd = &__get_cpu_var(softnet_data);
- q->next_sched = sd->output_queue;
- sd->output_queue = q;
+ q->next_sched = NULL;
+ *sd->output_queue_tailp = q;
+ sd->output_queue_tailp = &q->next_sched;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
}
@@ -1784,18 +1804,27 @@ EXPORT_SYMBOL(netdev_rx_csum_fault);
* 2. No high memory really exists on this machine.
*/
-static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
+static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
int i;
+ if (!(dev->features & NETIF_F_HIGHDMA)) {
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+ if (PageHighMem(skb_shinfo(skb)->frags[i].page))
+ return 1;
+ }
- if (dev->features & NETIF_F_HIGHDMA)
- return 0;
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
- if (PageHighMem(skb_shinfo(skb)->frags[i].page))
- return 1;
+ if (PCI_DMA_BUS_IS_PHYS) {
+ struct device *pdev = dev->dev.parent;
+ if (!pdev)
+ return 0;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ dma_addr_t addr = page_to_phys(skb_shinfo(skb)->frags[i].page);
+ if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
+ return 1;
+ }
+ }
#endif
return 0;
}
@@ -1853,6 +1882,17 @@ static int dev_gso_segment(struct sk_buff *skb)
return 0;
}
+/*
+ * Try to orphan skb early, right before transmission by the device.
+ * We cannot orphan skb if tx timestamp is requested, since
+ * drivers need to call skb_tstamp_tx() to send the timestamp.
+ */
+static inline void skb_orphan_try(struct sk_buff *skb)
+{
+ if (!skb_tx(skb)->flags)
+ skb_orphan(skb);
+}
+
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq)
{
@@ -1863,13 +1903,6 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
if (!list_empty(&ptype_all))
dev_queue_xmit_nit(skb, dev);
- if (netif_needs_gso(dev, skb)) {
- if (unlikely(dev_gso_segment(skb)))
- goto out_kfree_skb;
- if (skb->next)
- goto gso;
- }
-
/*
 * If device doesn't need skb->dst, release it right now while
 * it's hot in this cpu cache
@@ -1877,23 +1910,18 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
skb_dst_drop(skb);
+ skb_orphan_try(skb);
+
+ if (netif_needs_gso(dev, skb)) {
+ if (unlikely(dev_gso_segment(skb)))
+ goto out_kfree_skb;
+ if (skb->next)
+ goto gso;
+ }
+
rc = ops->ndo_start_xmit(skb, dev);
if (rc == NETDEV_TX_OK)
txq_trans_update(txq);
- /*
- * TODO: if skb_orphan() was called by
- * dev->hard_start_xmit() (for example, the unmodified
- * igb driver does that; bnx2 doesn't), then
- * skb_tx_software_timestamp() will be unable to send
- * back the time stamp.
- *
- * How can this be prevented? Always create another
- * reference to the socket before calling
- * dev->hard_start_xmit()? Prevent that skb_orphan()
- * does anything in dev->hard_start_xmit() by clearing
- * the skb destructor before the call and restoring it
- * afterwards, then doing the skb_orphan() ourselves?
- */
return rc;
}
@@ -1932,7 +1960,7 @@ out_kfree_skb:
return rc;
}
-static u32 skb_tx_hashrnd;
+static u32 hashrnd __read_mostly;
u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
{
@@ -1948,9 +1976,9 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
if (skb->sk && skb->sk->sk_hash)
hash = skb->sk->sk_hash;
else
- hash = skb->protocol;
+ hash = (__force u16) skb->protocol;
- hash = jhash_1word(hash, skb_tx_hashrnd);
+ hash = jhash_1word(hash, hashrnd);
return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
}
@@ -1960,10 +1988,9 @@ static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
if (unlikely(queue_index >= dev->real_num_tx_queues)) {
if (net_ratelimit()) {
- WARN(1, "%s selects TX queue %d, but "
- "real number of TX queues is %d\n",
- dev->name, queue_index,
- dev->real_num_tx_queues);
+ pr_warning("%s selects TX queue %d, but "
+ "real number of TX queues is %d\n",
+ dev->name, queue_index, dev->real_num_tx_queues);
}
return 0;
}
@@ -1990,7 +2017,7 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
queue_index = skb_tx_hash(dev, skb);
if (sk) {
- struct dst_entry *dst = rcu_dereference_bh(sk->sk_dst_cache);
+ struct dst_entry *dst = rcu_dereference_check(sk->sk_dst_cache, 1);
if (dst && skb_dst(skb) == dst)
sk_tx_queue_set(sk, queue_index);
@@ -2020,6 +2047,8 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
* waiting to be sent out; and the qdisc is not running -
* xmit the skb directly.
*/
+ if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
+ skb_dst_force(skb);
__qdisc_update_bstats(q, skb->len);
if (sch_direct_xmit(skb, q, dev, txq, root_lock))
__qdisc_run(q);
@@ -2028,6 +2057,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
rc = NET_XMIT_SUCCESS;
} else {
+ skb_dst_force(skb);
rc = qdisc_enqueue_root(skb, q);
qdisc_run(q);
}
@@ -2175,11 +2205,249 @@ EXPORT_SYMBOL(dev_queue_xmit);
=======================================================================*/
int netdev_max_backlog __read_mostly = 1000;
+int netdev_tstamp_prequeue __read_mostly = 1;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64; /* old backlog weight */
-DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
+/* Called with irq disabled */
+static inline void ____napi_schedule(struct softnet_data *sd,
+ struct napi_struct *napi)
+{
+ list_add_tail(&napi->poll_list, &sd->poll_list);
+ __raise_softirq_irqoff(NET_RX_SOFTIRQ);
+}
+#ifdef CONFIG_RPS
+
+/* One global table that all flow-based protocols share. */
+struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
+EXPORT_SYMBOL(rps_sock_flow_table);
+
+/*
+ * get_rps_cpu is called from netif_receive_skb and returns the target
+ * CPU from the RPS map of the receiving queue for a given skb.
+ * rcu_read_lock must be held on entry.
+ */
+static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
+ struct rps_dev_flow **rflowp)
+{
+ struct ipv6hdr *ip6;
+ struct iphdr *ip;
+ struct netdev_rx_queue *rxqueue;
+ struct rps_map *map;
+ struct rps_dev_flow_table *flow_table;
+ struct rps_sock_flow_table *sock_flow_table;
+ int cpu = -1;
+ u8 ip_proto;
+ u16 tcpu;
+ u32 addr1, addr2, ihl;
+ union {
+ u32 v32;
+ u16 v16[2];
+ } ports;
+
+ if (skb_rx_queue_recorded(skb)) {
+ u16 index = skb_get_rx_queue(skb);
+ if (unlikely(index >= dev->num_rx_queues)) {
+ if (net_ratelimit()) {
+ pr_warning("%s received packet on queue "
+ "%u, but number of RX queues is %u\n",
+ dev->name, index, dev->num_rx_queues);
+ }
+ goto done;
+ }
+ rxqueue = dev->_rx + index;
+ } else
+ rxqueue = dev->_rx;
+
+ if (!rxqueue->rps_map && !rxqueue->rps_flow_table)
+ goto done;
+
+ if (skb->rxhash)
+ goto got_hash; /* Skip hash computation on packet header */
+
+ switch (skb->protocol) {
+ case __constant_htons(ETH_P_IP):
+ if (!pskb_may_pull(skb, sizeof(*ip)))
+ goto done;
+
+ ip = (struct iphdr *) skb->data;
+ ip_proto = ip->protocol;
+ addr1 = (__force u32) ip->saddr;
+ addr2 = (__force u32) ip->daddr;
+ ihl = ip->ihl;
+ break;
+ case __constant_htons(ETH_P_IPV6):
+ if (!pskb_may_pull(skb, sizeof(*ip6)))
+ goto done;
+
+ ip6 = (struct ipv6hdr *) skb->data;
+ ip_proto = ip6->nexthdr;
+ addr1 = (__force u32) ip6->saddr.s6_addr32[3];
+ addr2 = (__force u32) ip6->daddr.s6_addr32[3];
+ ihl = (40 >> 2);
+ break;
+ default:
+ goto done;
+ }
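+ /*
+ * For the transport protocols below, the first four bytes after the
+ * IP header hold the source/destination port pair; fold them into
+ * the flow hash, ordering the two 16-bit halves so that both
+ * directions of a flow hash to the same value.
+ */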
+ switch (ip_proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ case IPPROTO_DCCP:
+ case IPPROTO_ESP:
+ case IPPROTO_AH:
+ case IPPROTO_SCTP:
+ case IPPROTO_UDPLITE:
+ if (pskb_may_pull(skb, (ihl * 4) + 4)) {
+ ports.v32 = * (__force u32 *) (skb->data + (ihl * 4));
+ if (ports.v16[1] < ports.v16[0])
+ swap(ports.v16[0], ports.v16[1]);
+ break;
+ }
+ default:
+ ports.v32 = 0;
+ break;
+ }
+
+ /* get a consistent hash (same value on both flow directions) */
+ if (addr2 < addr1)
+ swap(addr1, addr2);
+ skb->rxhash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
+ if (!skb->rxhash)
+ skb->rxhash = 1;
+
+got_hash:
+ flow_table = rcu_dereference(rxqueue->rps_flow_table);
+ sock_flow_table = rcu_dereference(rps_sock_flow_table);
+ if (flow_table && sock_flow_table) {
+ u16 next_cpu;
+ struct rps_dev_flow *rflow;
+
+ rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
+ tcpu = rflow->cpu;
+
+ next_cpu = sock_flow_table->ents[skb->rxhash &
+ sock_flow_table->mask];
+
+ /*
+ * If the desired CPU (where last recvmsg was done) is
+ * different from current CPU (one in the rx-queue flow
+ * table entry), switch if one of the following holds:
+ * - Current CPU is unset (equal to RPS_NO_CPU).
+ * - Current CPU is offline.
+ * - The current CPU's queue tail has advanced beyond the
+ * last packet that was enqueued using this table entry.
+ * This guarantees that all previous packets for the flow
+ * have been dequeued, thus preserving in order delivery.
+ */
+ if (unlikely(tcpu != next_cpu) &&
+ (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
+ ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
+ rflow->last_qtail)) >= 0)) {
+ tcpu = rflow->cpu = next_cpu;
+ if (tcpu != RPS_NO_CPU)
+ rflow->last_qtail = per_cpu(softnet_data,
+ tcpu).input_queue_head;
+ }
+ if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
+ *rflowp = rflow;
+ cpu = tcpu;
+ goto done;
+ }
+ }
+
+ map = rcu_dereference(rxqueue->rps_map);
+ if (map) {
+ tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
+
+ if (cpu_online(tcpu)) {
+ cpu = tcpu;
+ goto done;
+ }
+ }
+
+done:
+ return cpu;
+}
+
+/* Called from hardirq (IPI) context */
+static void rps_trigger_softirq(void *data)
+{
+ struct softnet_data *sd = data;
+
+ ____napi_schedule(sd, &sd->backlog);
+ sd->received_rps++;
+}
+
+#endif /* CONFIG_RPS */
+
+/*
+ * Check whether this softnet_data structure belongs to another CPU.
+ * If yes, queue it to our IPI list and return 1;
+ * if no, return 0.
+ */
+static int rps_ipi_queued(struct softnet_data *sd)
+{
+#ifdef CONFIG_RPS
+ struct softnet_data *mysd = &__get_cpu_var(softnet_data);
+
+ if (sd != mysd) {
+ sd->rps_ipi_next = mysd->rps_ipi_list;
+ mysd->rps_ipi_list = sd;
+
+ __raise_softirq_irqoff(NET_RX_SOFTIRQ);
+ return 1;
+ }
+#endif /* CONFIG_RPS */
+ return 0;
+}
+
+/*
+ * enqueue_to_backlog is called to queue an skb to a per-CPU backlog
+ * queue (may be a remote CPU queue).
+ */
+static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
+ unsigned int *qtail)
+{
+ struct softnet_data *sd;
+ unsigned long flags;
+
+ sd = &per_cpu(softnet_data, cpu);
+
+ local_irq_save(flags);
+
+ rps_lock(sd);
+ if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
+ if (skb_queue_len(&sd->input_pkt_queue)) {
+enqueue:
+ __skb_queue_tail(&sd->input_pkt_queue, skb);
+#ifdef CONFIG_RPS
+ *qtail = sd->input_queue_head +
+ skb_queue_len(&sd->input_pkt_queue);
+#endif
+ rps_unlock(sd);
+ local_irq_restore(flags);
+ return NET_RX_SUCCESS;
+ }
+
+ /* Schedule NAPI for backlog device
+ * We can use a non-atomic operation since we own the queue lock.
+ */
+ if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
+ if (!rps_ipi_queued(sd))
+ ____napi_schedule(sd, &sd->backlog);
+ }
+ goto enqueue;
+ }
+
+ sd->dropped++;
+ rps_unlock(sd);
+
+ local_irq_restore(flags);
+
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
/**
* netif_rx - post buffer to the network code
@@ -2198,41 +2466,38 @@ DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
int netif_rx(struct sk_buff *skb)
{
- struct softnet_data *queue;
- unsigned long flags;
+ int ret;
/* if netpoll wants it, pretend we never saw it */
if (netpoll_rx(skb))
return NET_RX_DROP;
- if (!skb->tstamp.tv64)
- net_timestamp(skb);
+ if (netdev_tstamp_prequeue)
+ net_timestamp_check(skb);
- /*
- * The code is rearranged so that the path is the most
- * short when CPU is congested, but is still operating.
- */
- local_irq_save(flags);
- queue = &__get_cpu_var(softnet_data);
+#ifdef CONFIG_RPS
+ {
+ struct rps_dev_flow voidflow, *rflow = &voidflow;
+ int cpu;
- __get_cpu_var(netdev_rx_stat).total++;
- if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
- if (queue->input_pkt_queue.qlen) {
-enqueue:
- __skb_queue_tail(&queue->input_pkt_queue, skb);
- local_irq_restore(flags);
- return NET_RX_SUCCESS;
- }
+ rcu_read_lock();
- napi_schedule(&queue->backlog);
- goto enqueue;
- }
+ cpu = get_rps_cpu(skb->dev, skb, &rflow);
+ if (cpu < 0)
+ cpu = smp_processor_id();
- __get_cpu_var(netdev_rx_stat).dropped++;
- local_irq_restore(flags);
+ ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
- kfree_skb(skb);
- return NET_RX_DROP;
+ rcu_read_unlock();
+ }
+#else
+ {
+ unsigned int qtail;
+ ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
+ put_cpu();
+ }
+#endif
+ return ret;
}
EXPORT_SYMBOL(netif_rx);
@@ -2277,6 +2542,7 @@ static void net_tx_action(struct softirq_action *h)
local_irq_disable();
head = sd->output_queue;
sd->output_queue = NULL;
+ sd->output_queue_tailp = &sd->output_queue;
local_irq_enable();
while (head) {
@@ -2353,7 +2619,8 @@ static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
#endif
#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
-struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
+struct sk_buff *(*macvlan_handle_frame_hook)(struct macvlan_port *p,
+ struct sk_buff *skb) __read_mostly;
EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
@@ -2361,14 +2628,17 @@ static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
int *ret,
struct net_device *orig_dev)
{
- if (skb->dev->macvlan_port == NULL)
+ struct macvlan_port *port;
+
+ port = rcu_dereference(skb->dev->macvlan_port);
+ if (!port)
return skb;
if (*pt_prev) {
*ret = deliver_skb(skb, *pt_prev, orig_dev);
*pt_prev = NULL;
}
- return macvlan_handle_frame_hook(skb);
+ return macvlan_handle_frame_hook(port, skb);
}
#else
#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
@@ -2469,22 +2739,56 @@ void netif_nit_deliver(struct sk_buff *skb)
rcu_read_unlock();
}
-/**
- * netif_receive_skb - process receive buffer from network
- * @skb: buffer to process
- *
- * netif_receive_skb() is the main receive data processing function.
- * It always succeeds. The buffer may be dropped during processing
- * for congestion control or by the protocol layers.
- *
- * This function may only be called from softirq context and interrupts
- * should be enabled.
- *
- * Return values (usually ignored):
- * NET_RX_SUCCESS: no congestion
- * NET_RX_DROP: packet was dropped
+static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
+ struct net_device *master)
+{
+ if (skb->pkt_type == PACKET_HOST) {
+ u16 *dest = (u16 *) eth_hdr(skb)->h_dest;
+
+ memcpy(dest, master->dev_addr, ETH_ALEN);
+ }
+}
+
+/* On bonding slaves other than the currently active slave, suppress
+ * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
+ * ARP on active-backup slaves with arp_validate enabled.
*/
-int netif_receive_skb(struct sk_buff *skb)
+int __skb_bond_should_drop(struct sk_buff *skb, struct net_device *master)
+{
+ struct net_device *dev = skb->dev;
+
+ if (master->priv_flags & IFF_MASTER_ARPMON)
+ dev->last_rx = jiffies;
+
+ if ((master->priv_flags & IFF_MASTER_ALB) && master->br_port) {
+ /* Do address unmangle. The local destination address
+ * will be always the one master has. Provides the right
+ * functionality in a bridge.
+ */
+ skb_bond_set_mac_by_master(skb, master);
+ }
+
+ if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
+ if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
+ skb->protocol == __cpu_to_be16(ETH_P_ARP))
+ return 0;
+
+ if (master->priv_flags & IFF_MASTER_ALB) {
+ if (skb->pkt_type != PACKET_BROADCAST &&
+ skb->pkt_type != PACKET_MULTICAST)
+ return 0;
+ }
+ if (master->priv_flags & IFF_MASTER_8023AD &&
+ skb->protocol == __cpu_to_be16(ETH_P_SLOW))
+ return 0;
+
+ return 1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(__skb_bond_should_drop);
+
+static int __netif_receive_skb(struct sk_buff *skb)
{
struct packet_type *ptype, *pt_prev;
struct net_device *orig_dev;
@@ -2494,8 +2798,8 @@ int netif_receive_skb(struct sk_buff *skb)
int ret = NET_RX_DROP;
__be16 type;
- if (!skb->tstamp.tv64)
- net_timestamp(skb);
+ if (!netdev_tstamp_prequeue)
+ net_timestamp_check(skb);
if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
return NET_RX_SUCCESS;
@@ -2517,7 +2821,7 @@ int netif_receive_skb(struct sk_buff *skb)
skb->dev = master;
}
- __get_cpu_var(netdev_rx_stat).total++;
+ __get_cpu_var(softnet_data).processed++;
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
@@ -2595,20 +2899,77 @@ out:
rcu_read_unlock();
return ret;
}
+
+/**
+ * netif_receive_skb - process receive buffer from network
+ * @skb: buffer to process
+ *
+ * netif_receive_skb() is the main receive data processing function.
+ * It always succeeds. The buffer may be dropped during processing
+ * for congestion control or by the protocol layers.
+ *
+ * This function may only be called from softirq context and interrupts
+ * should be enabled.
+ *
+ * Return values (usually ignored):
+ * NET_RX_SUCCESS: no congestion
+ * NET_RX_DROP: packet was dropped
+ */
+int netif_receive_skb(struct sk_buff *skb)
+{
+ if (netdev_tstamp_prequeue)
+ net_timestamp_check(skb);
+
+#ifdef CONFIG_RPS
+ {
+ struct rps_dev_flow voidflow, *rflow = &voidflow;
+ int cpu, ret;
+
+ rcu_read_lock();
+
+ cpu = get_rps_cpu(skb->dev, skb, &rflow);
+
+ if (cpu >= 0) {
+ ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
+ rcu_read_unlock();
+ } else {
+ rcu_read_unlock();
+ ret = __netif_receive_skb(skb);
+ }
+
+ return ret;
+ }
+#else
+ return __netif_receive_skb(skb);
+#endif
+}
EXPORT_SYMBOL(netif_receive_skb);
-/* Network device is going away, flush any packets still pending */
+/* Network device is going away, flush any packets still pending
+ * Called with irqs disabled.
+ */
static void flush_backlog(void *arg)
{
struct net_device *dev = arg;
- struct softnet_data *queue = &__get_cpu_var(softnet_data);
+ struct softnet_data *sd = &__get_cpu_var(softnet_data);
struct sk_buff *skb, *tmp;
- skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
+ rps_lock(sd);
+ skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (skb->dev == dev) {
- __skb_unlink(skb, &queue->input_pkt_queue);
+ __skb_unlink(skb, &sd->input_pkt_queue);
kfree_skb(skb);
+ input_queue_head_add(sd, 1);
}
+ }
+ rps_unlock(sd);
+
+ skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
+ if (skb->dev == dev) {
+ __skb_unlink(skb, &sd->process_queue);
+ kfree_skb(skb);
+ }
+ }
}
static int napi_gro_complete(struct sk_buff *skb)
@@ -2911,27 +3272,85 @@ gro_result_t napi_gro_frags(struct napi_struct *napi)
}
EXPORT_SYMBOL(napi_gro_frags);
+/*
+ * net_rps_action sends any pending IPIs for RPS.
+ * Note: called with local irq disabled, but exits with local irq enabled.
+ */
+static void net_rps_action_and_irq_enable(struct softnet_data *sd)
+{
+#ifdef CONFIG_RPS
+ struct softnet_data *remsd = sd->rps_ipi_list;
+
+ if (remsd) {
+ sd->rps_ipi_list = NULL;
+
+ local_irq_enable();
+
+ /* Send pending IPIs to kick RPS processing on remote CPUs. */
+ while (remsd) {
+ struct softnet_data *next = remsd->rps_ipi_next;
+
+ if (cpu_online(remsd->cpu))
+ __smp_call_function_single(remsd->cpu,
+ &remsd->csd, 0);
+ remsd = next;
+ }
+ } else
+#endif
+ local_irq_enable();
+}
+
static int process_backlog(struct napi_struct *napi, int quota)
{
int work = 0;
- struct softnet_data *queue = &__get_cpu_var(softnet_data);
- unsigned long start_time = jiffies;
+ struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
+#ifdef CONFIG_RPS
+ /* Check if we have pending IPIs; it's better to send them now
+ * rather than waiting for net_rx_action() to end.
+ */
+ if (sd->rps_ipi_list) {
+ local_irq_disable();
+ net_rps_action_and_irq_enable(sd);
+ }
+#endif
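+ /*
+ * Packets are spliced from the locked input_pkt_queue onto the
+ * private process_queue, which only this CPU drains, so most of the
+ * loop below runs without holding the rps lock.
+ */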
napi->weight = weight_p;
- do {
+ local_irq_disable();
+ while (work < quota) {
struct sk_buff *skb;
+ unsigned int qlen;
- local_irq_disable();
- skb = __skb_dequeue(&queue->input_pkt_queue);
- if (!skb) {
- __napi_complete(napi);
+ while ((skb = __skb_dequeue(&sd->process_queue))) {
local_irq_enable();
- break;
+ __netif_receive_skb(skb);
+ if (++work >= quota)
+ return work;
+ local_irq_disable();
}
- local_irq_enable();
- netif_receive_skb(skb);
- } while (++work < quota && jiffies == start_time);
+ rps_lock(sd);
+ qlen = skb_queue_len(&sd->input_pkt_queue);
+ if (qlen) {
+ input_queue_head_add(sd, qlen);
+ skb_queue_splice_tail_init(&sd->input_pkt_queue,
+ &sd->process_queue);
+ }
+ if (qlen < quota - work) {
+ /*
+ * Inline a custom version of __napi_complete().
+ * Only the current CPU owns and manipulates this napi,
+ * and NAPI_STATE_SCHED is the only possible flag set on backlog,
+ * so we can use a plain write instead of clear_bit(),
+ * and we don't need an smp_mb() memory barrier.
+ */
+ list_del(&napi->poll_list);
+ napi->state = 0;
+
+ quota = work + qlen;
+ }
+ rps_unlock(sd);
+ }
+ local_irq_enable();
return work;
}
@@ -2947,8 +3366,7 @@ void __napi_schedule(struct napi_struct *n)
unsigned long flags;
local_irq_save(flags);
- list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
- __raise_softirq_irqoff(NET_RX_SOFTIRQ);
+ ____napi_schedule(&__get_cpu_var(softnet_data), n);
local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);
@@ -3019,17 +3437,16 @@ void netif_napi_del(struct napi_struct *napi)
}
EXPORT_SYMBOL(netif_napi_del);
-
static void net_rx_action(struct softirq_action *h)
{
- struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
+ struct softnet_data *sd = &__get_cpu_var(softnet_data);
unsigned long time_limit = jiffies + 2;
int budget = netdev_budget;
void *have;
local_irq_disable();
- while (!list_empty(list)) {
+ while (!list_empty(&sd->poll_list)) {
struct napi_struct *n;
int work, weight;
@@ -3047,7 +3464,7 @@ static void net_rx_action(struct softirq_action *h)
* entries to the tail of this list, and only ->poll()
* calls can remove this head entry from the list.
*/
- n = list_first_entry(list, struct napi_struct, poll_list);
+ n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
have = netpoll_poll_lock(n);
@@ -3082,13 +3499,13 @@ static void net_rx_action(struct softirq_action *h)
napi_complete(n);
local_irq_disable();
} else
- list_move_tail(&n->poll_list, list);
+ list_move_tail(&n->poll_list, &sd->poll_list);
}
netpoll_poll_unlock(have);
}
out:
- local_irq_enable();
+ net_rps_action_and_irq_enable(sd);
#ifdef CONFIG_NET_DMA
/*
@@ -3101,7 +3518,7 @@ out:
return;
softnet_break:
- __get_cpu_var(netdev_rx_stat).time_squeeze++;
+ sd->time_squeeze++;
__raise_softirq_irqoff(NET_RX_SOFTIRQ);
goto out;
}
@@ -3302,17 +3719,17 @@ static int dev_seq_show(struct seq_file *seq, void *v)
return 0;
}
-static struct netif_rx_stats *softnet_get_online(loff_t *pos)
+static struct softnet_data *softnet_get_online(loff_t *pos)
{
- struct netif_rx_stats *rc = NULL;
+ struct softnet_data *sd = NULL;
while (*pos < nr_cpu_ids)
if (cpu_online(*pos)) {
- rc = &per_cpu(netdev_rx_stat, *pos);
+ sd = &per_cpu(softnet_data, *pos);
break;
} else
++*pos;
- return rc;
+ return sd;
}
static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
@@ -3332,12 +3749,12 @@ static void softnet_seq_stop(struct seq_file *seq, void *v)
static int softnet_seq_show(struct seq_file *seq, void *v)
{
- struct netif_rx_stats *s = v;
+ struct softnet_data *sd = v;
- seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
- s->total, s->dropped, s->time_squeeze, 0,
+ seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ sd->processed, sd->dropped, sd->time_squeeze, 0,
0, 0, 0, 0, /* was fastroute */
- s->cpu_collision);
+ sd->cpu_collision, sd->received_rps);
return 0;
}
@@ -3560,11 +3977,10 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
slave->master = master;
- synchronize_net();
-
- if (old)
+ if (old) {
+ synchronize_net();
dev_put(old);
-
+ }
if (master)
slave->flags |= IFF_SLAVE;
else
@@ -3741,562 +4157,6 @@ void dev_set_rx_mode(struct net_device *dev)
netif_addr_unlock_bh(dev);
}
-/* hw addresses list handling functions */
-
-static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
- int addr_len, unsigned char addr_type)
-{
- struct netdev_hw_addr *ha;
- int alloc_size;
-
- if (addr_len > MAX_ADDR_LEN)
- return -EINVAL;
-
- list_for_each_entry(ha, &list->list, list) {
- if (!memcmp(ha->addr, addr, addr_len) &&
- ha->type == addr_type) {
- ha->refcount++;
- return 0;
- }
- }
-
-
- alloc_size = sizeof(*ha);
- if (alloc_size < L1_CACHE_BYTES)
- alloc_size = L1_CACHE_BYTES;
- ha = kmalloc(alloc_size, GFP_ATOMIC);
- if (!ha)
- return -ENOMEM;
- memcpy(ha->addr, addr, addr_len);
- ha->type = addr_type;
- ha->refcount = 1;
- ha->synced = false;
- list_add_tail_rcu(&ha->list, &list->list);
- list->count++;
- return 0;
-}
-
-static void ha_rcu_free(struct rcu_head *head)
-{
- struct netdev_hw_addr *ha;
-
- ha = container_of(head, struct netdev_hw_addr, rcu_head);
- kfree(ha);
-}
-
-static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr,
- int addr_len, unsigned char addr_type)
-{
- struct netdev_hw_addr *ha;
-
- list_for_each_entry(ha, &list->list, list) {
- if (!memcmp(ha->addr, addr, addr_len) &&
- (ha->type == addr_type || !addr_type)) {
- if (--ha->refcount)
- return 0;
- list_del_rcu(&ha->list);
- call_rcu(&ha->rcu_head, ha_rcu_free);
- list->count--;
- return 0;
- }
- }
- return -ENOENT;
-}
-
-static int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
- struct netdev_hw_addr_list *from_list,
- int addr_len,
- unsigned char addr_type)
-{
- int err;
- struct netdev_hw_addr *ha, *ha2;
- unsigned char type;
-
- list_for_each_entry(ha, &from_list->list, list) {
- type = addr_type ? addr_type : ha->type;
- err = __hw_addr_add(to_list, ha->addr, addr_len, type);
- if (err)
- goto unroll;
- }
- return 0;
-
-unroll:
- list_for_each_entry(ha2, &from_list->list, list) {
- if (ha2 == ha)
- break;
- type = addr_type ? addr_type : ha2->type;
- __hw_addr_del(to_list, ha2->addr, addr_len, type);
- }
- return err;
-}
-
-static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
- struct netdev_hw_addr_list *from_list,
- int addr_len,
- unsigned char addr_type)
-{
- struct netdev_hw_addr *ha;
- unsigned char type;
-
- list_for_each_entry(ha, &from_list->list, list) {
- type = addr_type ? addr_type : ha->type;
- __hw_addr_del(to_list, ha->addr, addr_len, addr_type);
- }
-}
-
-static int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
- struct netdev_hw_addr_list *from_list,
- int addr_len)
-{
- int err = 0;
- struct netdev_hw_addr *ha, *tmp;
-
- list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
- if (!ha->synced) {
- err = __hw_addr_add(to_list, ha->addr,
- addr_len, ha->type);
- if (err)
- break;
- ha->synced = true;
- ha->refcount++;
- } else if (ha->refcount == 1) {
- __hw_addr_del(to_list, ha->addr, addr_len, ha->type);
- __hw_addr_del(from_list, ha->addr, addr_len, ha->type);
- }
- }
- return err;
-}
-
-static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
- struct netdev_hw_addr_list *from_list,
- int addr_len)
-{
- struct netdev_hw_addr *ha, *tmp;
-
- list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
- if (ha->synced) {
- __hw_addr_del(to_list, ha->addr,
- addr_len, ha->type);
- ha->synced = false;
- __hw_addr_del(from_list, ha->addr,
- addr_len, ha->type);
- }
- }
-}
-
-static void __hw_addr_flush(struct netdev_hw_addr_list *list)
-{
- struct netdev_hw_addr *ha, *tmp;
-
- list_for_each_entry_safe(ha, tmp, &list->list, list) {
- list_del_rcu(&ha->list);
- call_rcu(&ha->rcu_head, ha_rcu_free);
- }
- list->count = 0;
-}
-
-static void __hw_addr_init(struct netdev_hw_addr_list *list)
-{
- INIT_LIST_HEAD(&list->list);
- list->count = 0;
-}
-
-/* Device addresses handling functions */
-
-static void dev_addr_flush(struct net_device *dev)
-{
- /* rtnl_mutex must be held here */
-
- __hw_addr_flush(&dev->dev_addrs);
- dev->dev_addr = NULL;
-}
-
-static int dev_addr_init(struct net_device *dev)
-{
- unsigned char addr[MAX_ADDR_LEN];
- struct netdev_hw_addr *ha;
- int err;
-
- /* rtnl_mutex must be held here */
-
- __hw_addr_init(&dev->dev_addrs);
- memset(addr, 0, sizeof(addr));
- err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
- NETDEV_HW_ADDR_T_LAN);
- if (!err) {
- /*
- * Get the first (previously created) address from the list
- * and set dev_addr pointer to this location.
- */
- ha = list_first_entry(&dev->dev_addrs.list,
- struct netdev_hw_addr, list);
- dev->dev_addr = ha->addr;
- }
- return err;
-}
-
-/**
- * dev_addr_add - Add a device address
- * @dev: device
- * @addr: address to add
- * @addr_type: address type
- *
- * Add a device address to the device or increase the reference count if
- * it already exists.
- *
- * The caller must hold the rtnl_mutex.
- */
-int dev_addr_add(struct net_device *dev, unsigned char *addr,
- unsigned char addr_type)
-{
- int err;
-
- ASSERT_RTNL();
-
- err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
- if (!err)
- call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
- return err;
-}
-EXPORT_SYMBOL(dev_addr_add);
-
-/**
- * dev_addr_del - Release a device address.
- * @dev: device
- * @addr: address to delete
- * @addr_type: address type
- *
- * Release reference to a device address and remove it from the device
- * if the reference count drops to zero.
- *
- * The caller must hold the rtnl_mutex.
- */
-int dev_addr_del(struct net_device *dev, unsigned char *addr,
- unsigned char addr_type)
-{
- int err;
- struct netdev_hw_addr *ha;
-
- ASSERT_RTNL();
-
- /*
- * We can not remove the first address from the list because
- * dev->dev_addr points to that.
- */
- ha = list_first_entry(&dev->dev_addrs.list,
- struct netdev_hw_addr, list);
- if (ha->addr == dev->dev_addr && ha->refcount == 1)
- return -ENOENT;
-
- err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
- addr_type);
- if (!err)
- call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
- return err;
-}
-EXPORT_SYMBOL(dev_addr_del);
-
-/**
- * dev_addr_add_multiple - Add device addresses from another device
- * @to_dev: device to which addresses will be added
- * @from_dev: device from which addresses will be added
- * @addr_type: address type - 0 means type will be used from from_dev
- *
- * Add device addresses of the one device to another.
- **
- * The caller must hold the rtnl_mutex.
- */
-int dev_addr_add_multiple(struct net_device *to_dev,
- struct net_device *from_dev,
- unsigned char addr_type)
-{
- int err;
-
- ASSERT_RTNL();
-
- if (from_dev->addr_len != to_dev->addr_len)
- return -EINVAL;
- err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
- to_dev->addr_len, addr_type);
- if (!err)
- call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
- return err;
-}
-EXPORT_SYMBOL(dev_addr_add_multiple);
-
-/**
- * dev_addr_del_multiple - Delete device addresses by another device
- * @to_dev: device where the addresses will be deleted
- * @from_dev: device by which addresses the addresses will be deleted
- * @addr_type: address type - 0 means type will used from from_dev
- *
- * Deletes addresses in to device by the list of addresses in from device.
- *
- * The caller must hold the rtnl_mutex.
- */
-int dev_addr_del_multiple(struct net_device *to_dev,
- struct net_device *from_dev,
- unsigned char addr_type)
-{
- ASSERT_RTNL();
-
- if (from_dev->addr_len != to_dev->addr_len)
- return -EINVAL;
- __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
- to_dev->addr_len, addr_type);
- call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
- return 0;
-}
-EXPORT_SYMBOL(dev_addr_del_multiple);
-
-/* multicast addresses handling functions */
-
-int __dev_addr_delete(struct dev_addr_list **list, int *count,
- void *addr, int alen, int glbl)
-{
- struct dev_addr_list *da;
-
- for (; (da = *list) != NULL; list = &da->next) {
- if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
- alen == da->da_addrlen) {
- if (glbl) {
- int old_glbl = da->da_gusers;
- da->da_gusers = 0;
- if (old_glbl == 0)
- break;
- }
- if (--da->da_users)
- return 0;
-
- *list = da->next;
- kfree(da);
- (*count)--;
- return 0;
- }
- }
- return -ENOENT;
-}
-
-int __dev_addr_add(struct dev_addr_list **list, int *count,
- void *addr, int alen, int glbl)
-{
- struct dev_addr_list *da;
-
- for (da = *list; da != NULL; da = da->next) {
- if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
- da->da_addrlen == alen) {
- if (glbl) {
- int old_glbl = da->da_gusers;
- da->da_gusers = 1;
- if (old_glbl)
- return 0;
- }
- da->da_users++;
- return 0;
- }
- }
-
- da = kzalloc(sizeof(*da), GFP_ATOMIC);
- if (da == NULL)
- return -ENOMEM;
- memcpy(da->da_addr, addr, alen);
- da->da_addrlen = alen;
- da->da_users = 1;
- da->da_gusers = glbl ? 1 : 0;
- da->next = *list;
- *list = da;
- (*count)++;
- return 0;
-}
-
-/**
- * dev_unicast_delete - Release secondary unicast address.
- * @dev: device
- * @addr: address to delete
- *
- * Release reference to a secondary unicast address and remove it
- * from the device if the reference count drops to zero.
- *
- * The caller must hold the rtnl_mutex.
- */
-int dev_unicast_delete(struct net_device *dev, void *addr)
-{
- int err;
-
- ASSERT_RTNL();
-
- netif_addr_lock_bh(dev);
- err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
- NETDEV_HW_ADDR_T_UNICAST);
- if (!err)
- __dev_set_rx_mode(dev);
- netif_addr_unlock_bh(dev);
- return err;
-}
-EXPORT_SYMBOL(dev_unicast_delete);
-
-/**
- * dev_unicast_add - add a secondary unicast address
- * @dev: device
- * @addr: address to add
- *
- * Add a secondary unicast address to the device or increase
- * the reference count if it already exists.
- *
- * The caller must hold the rtnl_mutex.
- */
-int dev_unicast_add(struct net_device *dev, void *addr)
-{
- int err;
-
- ASSERT_RTNL();
-
- netif_addr_lock_bh(dev);
- err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
- NETDEV_HW_ADDR_T_UNICAST);
- if (!err)
- __dev_set_rx_mode(dev);
- netif_addr_unlock_bh(dev);
- return err;
-}
-EXPORT_SYMBOL(dev_unicast_add);
-
-int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
- struct dev_addr_list **from, int *from_count)
-{
- struct dev_addr_list *da, *next;
- int err = 0;
-
- da = *from;
- while (da != NULL) {
- next = da->next;
- if (!da->da_synced) {
- err = __dev_addr_add(to, to_count,
- da->da_addr, da->da_addrlen, 0);
- if (err < 0)
- break;
- da->da_synced = 1;
- da->da_users++;
- } else if (da->da_users == 1) {
- __dev_addr_delete(to, to_count,
- da->da_addr, da->da_addrlen, 0);
- __dev_addr_delete(from, from_count,
- da->da_addr, da->da_addrlen, 0);
- }
- da = next;
- }
- return err;
-}
-EXPORT_SYMBOL_GPL(__dev_addr_sync);
-
-void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
- struct dev_addr_list **from, int *from_count)
-{
- struct dev_addr_list *da, *next;
-
- da = *from;
- while (da != NULL) {
- next = da->next;
- if (da->da_synced) {
- __dev_addr_delete(to, to_count,
- da->da_addr, da->da_addrlen, 0);
- da->da_synced = 0;
- __dev_addr_delete(from, from_count,
- da->da_addr, da->da_addrlen, 0);
- }
- da = next;
- }
-}
-EXPORT_SYMBOL_GPL(__dev_addr_unsync);
-
-/**
- * dev_unicast_sync - Synchronize device's unicast list to another device
- * @to: destination device
- * @from: source device
- *
- * Add newly added addresses to the destination device and release
- * addresses that have no users left. The source device must be
- * locked by netif_tx_lock_bh.
- *
- * This function is intended to be called from the dev->set_rx_mode
- * function of layered software devices.
- */
-int dev_unicast_sync(struct net_device *to, struct net_device *from)
-{
- int err = 0;
-
- if (to->addr_len != from->addr_len)
- return -EINVAL;
-
- netif_addr_lock_bh(to);
- err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
- if (!err)
- __dev_set_rx_mode(to);
- netif_addr_unlock_bh(to);
- return err;
-}
-EXPORT_SYMBOL(dev_unicast_sync);
-
-/**
- * dev_unicast_unsync - Remove synchronized addresses from the destination device
- * @to: destination device
- * @from: source device
- *
- * Remove all addresses that were added to the destination device by
- * dev_unicast_sync(). This function is intended to be called from the
- * dev->stop function of layered software devices.
- */
-void dev_unicast_unsync(struct net_device *to, struct net_device *from)
-{
- if (to->addr_len != from->addr_len)
- return;
-
- netif_addr_lock_bh(from);
- netif_addr_lock(to);
- __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
- __dev_set_rx_mode(to);
- netif_addr_unlock(to);
- netif_addr_unlock_bh(from);
-}
-EXPORT_SYMBOL(dev_unicast_unsync);
-
-static void dev_unicast_flush(struct net_device *dev)
-{
- netif_addr_lock_bh(dev);
- __hw_addr_flush(&dev->uc);
- netif_addr_unlock_bh(dev);
-}
-
-static void dev_unicast_init(struct net_device *dev)
-{
- __hw_addr_init(&dev->uc);
-}
-
-
-static void __dev_addr_discard(struct dev_addr_list **list)
-{
- struct dev_addr_list *tmp;
-
- while (*list != NULL) {
- tmp = *list;
- *list = tmp->next;
- if (tmp->da_users > tmp->da_gusers)
- printk("__dev_addr_discard: address leakage! "
- "da_users=%d\n", tmp->da_users);
- kfree(tmp);
- }
-}
-
-static void dev_addr_discard(struct net_device *dev)
-{
- netif_addr_lock_bh(dev);
-
- __dev_addr_discard(&dev->mc_list);
- netdev_mc_count(dev) = 0;
-
- netif_addr_unlock_bh(dev);
-}
-
/**
* dev_get_flags - get flags reported to userspace
* @dev: device
@@ -4607,8 +4467,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
return -EINVAL;
if (!netif_device_present(dev))
return -ENODEV;
- return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
- dev->addr_len, 1);
+ return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
case SIOCDELMULTI:
if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
@@ -4616,8 +4475,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
return -EINVAL;
if (!netif_device_present(dev))
return -ENODEV;
- return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
- dev->addr_len, 1);
+ return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
case SIOCSIFTXQLEN:
if (ifr->ifr_qlen < 0)
@@ -4924,8 +4782,8 @@ static void rollback_registered_many(struct list_head *head)
/*
* Flush the unicast and multicast chains
*/
- dev_unicast_flush(dev);
- dev_addr_discard(dev);
+ dev_uc_flush(dev);
+ dev_mc_flush(dev);
if (dev->netdev_ops->ndo_uninit)
dev->netdev_ops->ndo_uninit(dev);
@@ -5074,6 +4932,24 @@ int register_netdevice(struct net_device *dev)
dev->iflink = -1;
+#ifdef CONFIG_RPS
+ if (!dev->num_rx_queues) {
+ /*
+ * Allocate a single RX queue if the driver never
+ * called alloc_netdev_mq
+ */
+
+ dev->_rx = kzalloc(sizeof(struct netdev_rx_queue), GFP_KERNEL);
+ if (!dev->_rx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ dev->_rx->first = dev->_rx;
+ atomic_set(&dev->_rx->count, 1);
+ dev->num_rx_queues = 1;
+ }
+#endif
/* Init, if this function is available */
if (dev->netdev_ops->ndo_init) {
ret = dev->netdev_ops->ndo_init(dev);
@@ -5113,8 +4989,6 @@ int register_netdevice(struct net_device *dev)
if (dev->features & NETIF_F_SG)
dev->features |= NETIF_F_GSO;
- netdev_initialize_kobject(dev);
-
ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
ret = notifier_to_errno(ret);
if (ret)
@@ -5434,6 +5308,10 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
struct net_device *dev;
size_t alloc_size;
struct net_device *p;
+#ifdef CONFIG_RPS
+ struct netdev_rx_queue *rx;
+ int i;
+#endif
BUG_ON(strlen(name) >= sizeof(dev->name));
@@ -5459,13 +5337,32 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
goto free_p;
}
+#ifdef CONFIG_RPS
+ rx = kcalloc(queue_count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
+ if (!rx) {
+ printk(KERN_ERR "alloc_netdev: Unable to allocate "
+ "rx queues.\n");
+ goto free_tx;
+ }
+
+ atomic_set(&rx->count, queue_count);
+
+ /*
+ * Set a pointer to first element in the array which holds the
+ * reference count.
+ */
+ for (i = 0; i < queue_count; i++)
+ rx[i].first = rx;
+#endif
+
dev = PTR_ALIGN(p, NETDEV_ALIGN);
dev->padded = (char *)dev - (char *)p;
if (dev_addr_init(dev))
- goto free_tx;
+ goto free_rx;
- dev_unicast_init(dev);
+ dev_mc_init(dev);
+ dev_uc_init(dev);
dev_net_set(dev, &init_net);
@@ -5473,6 +5370,11 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
dev->num_tx_queues = queue_count;
dev->real_num_tx_queues = queue_count;
+#ifdef CONFIG_RPS
+ dev->_rx = rx;
+ dev->num_rx_queues = queue_count;
+#endif
+
dev->gso_max_size = GSO_MAX_SIZE;
netdev_init_queues(dev);
@@ -5487,9 +5389,12 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
strcpy(dev->name, name);
return dev;
+free_rx:
+#ifdef CONFIG_RPS
+ kfree(rx);
free_tx:
+#endif
kfree(tx);
-
free_p:
kfree(p);
return NULL;
@@ -5635,15 +5540,6 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
if (dev->features & NETIF_F_NETNS_LOCAL)
goto out;
-#ifdef CONFIG_SYSFS
- /* Don't allow real devices to be moved when sysfs
- * is enabled.
- */
- err = -EINVAL;
- if (dev->dev.parent)
- goto out;
-#endif
-
/* Ensure the device has been registered */
err = -EINVAL;
if (dev->reg_state != NETREG_REGISTERED)
@@ -5691,10 +5587,8 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
/*
* Flush the unicast and multicast chains
*/
- dev_unicast_flush(dev);
- dev_addr_discard(dev);
-
- netdev_unregister_kobject(dev);
+ dev_uc_flush(dev);
+ dev_mc_flush(dev);
/* Actually switch the network namespace */
dev_net_set(dev, net);
@@ -5708,7 +5602,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
}
/* Fixup kobjects */
- err = netdev_register_kobject(dev);
+ err = device_rename(&dev->dev, dev->name);
WARN_ON(err);
/* Add the device back in the hashes */
@@ -5735,7 +5629,6 @@ static int dev_cpu_callback(struct notifier_block *nfb,
void *ocpu)
{
struct sk_buff **list_skb;
- struct Qdisc **list_net;
struct sk_buff *skb;
unsigned int cpu, oldcpu = (unsigned long)ocpu;
struct softnet_data *sd, *oldsd;
@@ -5756,19 +5649,23 @@ static int dev_cpu_callback(struct notifier_block *nfb,
*list_skb = oldsd->completion_queue;
oldsd->completion_queue = NULL;
- /* Find end of our output_queue. */
- list_net = &sd->output_queue;
- while (*list_net)
- list_net = &(*list_net)->next_sched;
/* Append output queue from offline CPU. */
- *list_net = oldsd->output_queue;
- oldsd->output_queue = NULL;
+ if (oldsd->output_queue) {
+ *sd->output_queue_tailp = oldsd->output_queue;
+ sd->output_queue_tailp = oldsd->output_queue_tailp;
+ oldsd->output_queue = NULL;
+ oldsd->output_queue_tailp = &oldsd->output_queue;
+ }
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
/* Process offline CPU's input_pkt_queue */
- while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
+ while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
+ netif_rx(skb);
+ input_queue_head_add(oldsd, 1);
+ }
+ while ((skb = __skb_dequeue(&oldsd->process_queue)))
netif_rx(skb);
return NOTIFY_OK;
@@ -5985,17 +5882,26 @@ static int __init net_dev_init(void)
*/
for_each_possible_cpu(i) {
- struct softnet_data *queue;
+ struct softnet_data *sd = &per_cpu(softnet_data, i);
- queue = &per_cpu(softnet_data, i);
- skb_queue_head_init(&queue->input_pkt_queue);
- queue->completion_queue = NULL;
- INIT_LIST_HEAD(&queue->poll_list);
+ memset(sd, 0, sizeof(*sd));
+ skb_queue_head_init(&sd->input_pkt_queue);
+ skb_queue_head_init(&sd->process_queue);
+ sd->completion_queue = NULL;
+ INIT_LIST_HEAD(&sd->poll_list);
+ sd->output_queue = NULL;
+ sd->output_queue_tailp = &sd->output_queue;
+#ifdef CONFIG_RPS
+ sd->csd.func = rps_trigger_softirq;
+ sd->csd.info = sd;
+ sd->csd.flags = 0;
+ sd->cpu = i;
+#endif
- queue->backlog.poll = process_backlog;
- queue->backlog.weight = weight_p;
- queue->backlog.gro_list = NULL;
- queue->backlog.gro_count = 0;
+ sd->backlog.poll = process_backlog;
+ sd->backlog.weight = weight_p;
+ sd->backlog.gro_list = NULL;
+ sd->backlog.gro_count = 0;
}
dev_boot_phase = 0;
@@ -6030,7 +5936,7 @@ subsys_initcall(net_dev_init);
static int __init initialize_hashrnd(void)
{
- get_random_bytes(&skb_tx_hashrnd, sizeof(skb_tx_hashrnd));
+ get_random_bytes(&hashrnd, sizeof(hashrnd));
return 0;
}
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
new file mode 100644
index 000000000000..508f9c18992f
--- /dev/null
+++ b/net/core/dev_addr_lists.c
@@ -0,0 +1,741 @@
+/*
+ * net/core/dev_addr_lists.c - Functions for handling net device lists
+ * Copyright (c) 2010 Jiri Pirko <jpirko@redhat.com>
+ *
+ * This file contains functions for working with unicast, multicast and device
+ * address lists.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/list.h>
+#include <linux/proc_fs.h>
+
+/*
+ * General list handling functions
+ */
+
+static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
+ unsigned char *addr, int addr_len,
+ unsigned char addr_type, bool global)
+{
+ struct netdev_hw_addr *ha;
+ int alloc_size;
+
+ if (addr_len > MAX_ADDR_LEN)
+ return -EINVAL;
+
+ list_for_each_entry(ha, &list->list, list) {
+ if (!memcmp(ha->addr, addr, addr_len) &&
+ ha->type == addr_type) {
+ if (global) {
+ /* check if addr is already used as global */
+ if (ha->global_use)
+ return 0;
+ else
+ ha->global_use = true;
+ }
+ ha->refcount++;
+ return 0;
+ }
+ }
+
+ alloc_size = sizeof(*ha);
+ if (alloc_size < L1_CACHE_BYTES)
+ alloc_size = L1_CACHE_BYTES;
+ ha = kmalloc(alloc_size, GFP_ATOMIC);
+ if (!ha)
+ return -ENOMEM;
+ memcpy(ha->addr, addr, addr_len);
+ ha->type = addr_type;
+ ha->refcount = 1;
+ ha->global_use = global;
+ ha->synced = false;
+ list_add_tail_rcu(&ha->list, &list->list);
+ list->count++;
+ return 0;
+}
+
+static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
+ int addr_len, unsigned char addr_type)
+{
+ return __hw_addr_add_ex(list, addr, addr_len, addr_type, false);
+}
+
+static void ha_rcu_free(struct rcu_head *head)
+{
+ struct netdev_hw_addr *ha;
+
+ ha = container_of(head, struct netdev_hw_addr, rcu_head);
+ kfree(ha);
+}
+
+static int __hw_addr_del_ex(struct netdev_hw_addr_list *list,
+ unsigned char *addr, int addr_len,
+ unsigned char addr_type, bool global)
+{
+ struct netdev_hw_addr *ha;
+
+ list_for_each_entry(ha, &list->list, list) {
+ if (!memcmp(ha->addr, addr, addr_len) &&
+ (ha->type == addr_type || !addr_type)) {
+ if (global) {
+ if (!ha->global_use)
+ break;
+ else
+ ha->global_use = false;
+ }
+ if (--ha->refcount)
+ return 0;
+ list_del_rcu(&ha->list);
+ call_rcu(&ha->rcu_head, ha_rcu_free);
+ list->count--;
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr,
+ int addr_len, unsigned char addr_type)
+{
+ return __hw_addr_del_ex(list, addr, addr_len, addr_type, false);
+}
+
+int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
+ struct netdev_hw_addr_list *from_list,
+ int addr_len, unsigned char addr_type)
+{
+ int err;
+ struct netdev_hw_addr *ha, *ha2;
+ unsigned char type;
+
+ list_for_each_entry(ha, &from_list->list, list) {
+ type = addr_type ? addr_type : ha->type;
+ err = __hw_addr_add(to_list, ha->addr, addr_len, type);
+ if (err)
+ goto unroll;
+ }
+ return 0;
+
+unroll:
+ list_for_each_entry(ha2, &from_list->list, list) {
+ if (ha2 == ha)
+ break;
+ type = addr_type ? addr_type : ha2->type;
+ __hw_addr_del(to_list, ha2->addr, addr_len, type);
+ }
+ return err;
+}
+EXPORT_SYMBOL(__hw_addr_add_multiple);
+
+void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
+ struct netdev_hw_addr_list *from_list,
+ int addr_len, unsigned char addr_type)
+{
+ struct netdev_hw_addr *ha;
+ unsigned char type;
+
+ list_for_each_entry(ha, &from_list->list, list) {
+ type = addr_type ? addr_type : ha->type;
+ __hw_addr_del(to_list, ha->addr, addr_len, type);
+ }
+}
+EXPORT_SYMBOL(__hw_addr_del_multiple);
+
+int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
+ struct netdev_hw_addr_list *from_list,
+ int addr_len)
+{
+ int err = 0;
+ struct netdev_hw_addr *ha, *tmp;
+
+ list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
+ if (!ha->synced) {
+ err = __hw_addr_add(to_list, ha->addr,
+ addr_len, ha->type);
+ if (err)
+ break;
+ ha->synced = true;
+ ha->refcount++;
+ } else if (ha->refcount == 1) {
+ __hw_addr_del(to_list, ha->addr, addr_len, ha->type);
+ __hw_addr_del(from_list, ha->addr, addr_len, ha->type);
+ }
+ }
+ return err;
+}
+EXPORT_SYMBOL(__hw_addr_sync);
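The sync helper above implements a simple lifecycle: the first sync copies an entry to the destination list, marks it synced and takes an extra reference; once the caller drops its own reference (leaving refcount 1, held only by the sync), the next pass removes the entry from both lists. A conceptual trace of that lifecycle follows; it calls the file's static helpers directly, so it is illustrative rather than driver-callable, and the lists and address are hypothetical:

/* Illustrative trace of the __hw_addr_sync() lifecycle; not driver-callable,
 * since __hw_addr_add()/__hw_addr_del() are static to this file. */
static struct netdev_hw_addr_list lower, upper;	/* hypothetical lists */
static unsigned char mac[ETH_ALEN] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

static void sync_lifecycle_sketch(void)
{
	/* caller adds an address: refcount == 1, synced == false */
	__hw_addr_add(&upper, mac, ETH_ALEN, NETDEV_HW_ADDR_T_UNICAST);

	/* first sync: copied to 'lower', synced = true, refcount = 2 */
	__hw_addr_sync(&lower, &upper, ETH_ALEN);

	/* caller drops its reference: refcount back to 1 */
	__hw_addr_del(&upper, mac, ETH_ALEN, NETDEV_HW_ADDR_T_UNICAST);

	/* next sync sees refcount == 1 and unlinks from both lists */
	__hw_addr_sync(&lower, &upper, ETH_ALEN);
}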
+
+void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
+ struct netdev_hw_addr_list *from_list,
+ int addr_len)
+{
+ struct netdev_hw_addr *ha, *tmp;
+
+ list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
+ if (ha->synced) {
+ __hw_addr_del(to_list, ha->addr,
+ addr_len, ha->type);
+ ha->synced = false;
+ __hw_addr_del(from_list, ha->addr,
+ addr_len, ha->type);
+ }
+ }
+}
+EXPORT_SYMBOL(__hw_addr_unsync);
+
+void __hw_addr_flush(struct netdev_hw_addr_list *list)
+{
+ struct netdev_hw_addr *ha, *tmp;
+
+ list_for_each_entry_safe(ha, tmp, &list->list, list) {
+ list_del_rcu(&ha->list);
+ call_rcu(&ha->rcu_head, ha_rcu_free);
+ }
+ list->count = 0;
+}
+EXPORT_SYMBOL(__hw_addr_flush);
+
+void __hw_addr_init(struct netdev_hw_addr_list *list)
+{
+ INIT_LIST_HEAD(&list->list);
+ list->count = 0;
+}
+EXPORT_SYMBOL(__hw_addr_init);
+
+/*
+ * Device addresses handling functions
+ */
+
+/**
+ * dev_addr_flush - Flush device address list
+ * @dev: device
+ *
+ * Flush device address list and reset ->dev_addr.
+ *
+ * The caller must hold the rtnl_mutex.
+ */
+void dev_addr_flush(struct net_device *dev)
+{
+ /* rtnl_mutex must be held here */
+
+ __hw_addr_flush(&dev->dev_addrs);
+ dev->dev_addr = NULL;
+}
+EXPORT_SYMBOL(dev_addr_flush);
+
+/**
+ * dev_addr_init - Init device address list
+ * @dev: device
+ *
+ * Init device address list and create the first element,
+ * used by ->dev_addr.
+ *
+ * The caller must hold the rtnl_mutex.
+ */
+int dev_addr_init(struct net_device *dev)
+{
+ unsigned char addr[MAX_ADDR_LEN];
+ struct netdev_hw_addr *ha;
+ int err;
+
+ /* rtnl_mutex must be held here */
+
+ __hw_addr_init(&dev->dev_addrs);
+ memset(addr, 0, sizeof(addr));
+ err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
+ NETDEV_HW_ADDR_T_LAN);
+ if (!err) {
+ /*
+ * Get the first (previously created) address from the list
+ * and set dev_addr pointer to this location.
+ */
+ ha = list_first_entry(&dev->dev_addrs.list,
+ struct netdev_hw_addr, list);
+ dev->dev_addr = ha->addr;
+ }
+ return err;
+}
+EXPORT_SYMBOL(dev_addr_init);
+
+/**
+ * dev_addr_add - Add a device address
+ * @dev: device
+ * @addr: address to add
+ * @addr_type: address type
+ *
+ * Add a device address to the device or increase the reference count if
+ * it already exists.
+ *
+ * The caller must hold the rtnl_mutex.
+ */
+int dev_addr_add(struct net_device *dev, unsigned char *addr,
+ unsigned char addr_type)
+{
+ int err;
+
+ ASSERT_RTNL();
+
+ err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
+ if (!err)
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+ return err;
+}
+EXPORT_SYMBOL(dev_addr_add);
+
+/**
+ * dev_addr_del - Release a device address.
+ * @dev: device
+ * @addr: address to delete
+ * @addr_type: address type
+ *
+ * Release reference to a device address and remove it from the device
+ * if the reference count drops to zero.
+ *
+ * The caller must hold the rtnl_mutex.
+ */
+int dev_addr_del(struct net_device *dev, unsigned char *addr,
+ unsigned char addr_type)
+{
+ int err;
+ struct netdev_hw_addr *ha;
+
+ ASSERT_RTNL();
+
+ /*
+ * We cannot remove the first address from the list because
+ * dev->dev_addr points to that.
+ */
+ ha = list_first_entry(&dev->dev_addrs.list,
+ struct netdev_hw_addr, list);
+ if (ha->addr == dev->dev_addr && ha->refcount == 1)
+ return -ENOENT;
+
+ err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
+ addr_type);
+ if (!err)
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+ return err;
+}
+EXPORT_SYMBOL(dev_addr_del);
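Taken together, dev_addr_add() and dev_addr_del() give refcounted secondary hardware addresses under RTNL. A minimal hedged sketch; the address value and calling context are hypothetical:

/* Hedged sketch: add a secondary hardware address under RTNL and release
 * it again. The address value is hypothetical. */
static unsigned char extra_mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };

static int example_toggle_addr(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_addr_add(dev, extra_mac, NETDEV_HW_ADDR_T_LAN);
	if (!err)
		err = dev_addr_del(dev, extra_mac, NETDEV_HW_ADDR_T_LAN);
	rtnl_unlock();
	return err;
}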
+
+/**
+ * dev_addr_add_multiple - Add device addresses from another device
+ * @to_dev: device to which addresses will be added
+ * @from_dev: device from which addresses will be added
+ * @addr_type: address type - 0 means type will be used from from_dev
+ *
+ * Add the device addresses of one device to another.
+ *
+ * The caller must hold the rtnl_mutex.
+ */
+int dev_addr_add_multiple(struct net_device *to_dev,
+ struct net_device *from_dev,
+ unsigned char addr_type)
+{
+ int err;
+
+ ASSERT_RTNL();
+
+ if (from_dev->addr_len != to_dev->addr_len)
+ return -EINVAL;
+ err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
+ to_dev->addr_len, addr_type);
+ if (!err)
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
+ return err;
+}
+EXPORT_SYMBOL(dev_addr_add_multiple);
+
+/**
+ * dev_addr_del_multiple - Delete device addresses by another device
+ * @to_dev: device where the addresses will be deleted
+ * @from_dev: device supplying the list of addresses to delete
+ * @addr_type: address type - 0 means type will be used from from_dev
+ *
+ * Delete addresses from @to_dev according to the list of addresses
+ * in @from_dev.
+ *
+ * The caller must hold the rtnl_mutex.
+ */
+int dev_addr_del_multiple(struct net_device *to_dev,
+ struct net_device *from_dev,
+ unsigned char addr_type)
+{
+ ASSERT_RTNL();
+
+ if (from_dev->addr_len != to_dev->addr_len)
+ return -EINVAL;
+ __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
+ to_dev->addr_len, addr_type);
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
+ return 0;
+}
+EXPORT_SYMBOL(dev_addr_del_multiple);
+
+/*
+ * Unicast list handling functions
+ */
+
+/**
+ * dev_uc_add - Add a secondary unicast address
+ * @dev: device
+ * @addr: address to add
+ *
+ * Add a secondary unicast address to the device or increase
+ * the reference count if it already exists.
+ */
+int dev_uc_add(struct net_device *dev, unsigned char *addr)
+{
+ int err;
+
+ netif_addr_lock_bh(dev);
+ err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
+ NETDEV_HW_ADDR_T_UNICAST);
+ if (!err)
+ __dev_set_rx_mode(dev);
+ netif_addr_unlock_bh(dev);
+ return err;
+}
+EXPORT_SYMBOL(dev_uc_add);
+
+/**
+ * dev_uc_del - Release secondary unicast address.
+ * @dev: device
+ * @addr: address to delete
+ *
+ * Release reference to a secondary unicast address and remove it
+ * from the device if the reference count drops to zero.
+ */
+int dev_uc_del(struct net_device *dev, unsigned char *addr)
+{
+ int err;
+
+ netif_addr_lock_bh(dev);
+ err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
+ NETDEV_HW_ADDR_T_UNICAST);
+ if (!err)
+ __dev_set_rx_mode(dev);
+ netif_addr_unlock_bh(dev);
+ return err;
+}
+EXPORT_SYMBOL(dev_uc_del);
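A typical consumer of this pair is a stacked device that registers its own MAC as a secondary unicast address on the lower device when the two addresses differ, the pattern the VLAN and macvlan drivers use. A hedged sketch; example_priv() and its lowerdev field are hypothetical:

/* Hedged sketch of the stacked-device pattern; example_priv() and
 * ->lowerdev are hypothetical. */
static int example_open(struct net_device *dev)
{
	struct net_device *lowerdev = example_priv(dev)->lowerdev;

	if (compare_ether_addr(dev->dev_addr, lowerdev->dev_addr))
		return dev_uc_add(lowerdev, dev->dev_addr);
	return 0;
}

static int example_stop(struct net_device *dev)
{
	struct net_device *lowerdev = example_priv(dev)->lowerdev;

	if (compare_ether_addr(dev->dev_addr, lowerdev->dev_addr))
		dev_uc_del(lowerdev, dev->dev_addr);
	return 0;
}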
+
+/**
+ * dev_uc_sync - Synchronize device's unicast list to another device
+ * @to: destination device
+ * @from: source device
+ *
+ * Add newly added addresses to the destination device and release
+ * addresses that have no users left. The source device must be
+ * locked by netif_addr_lock_bh.
+ *
+ * This function is intended to be called from the dev->set_rx_mode
+ * function of layered software devices.
+ */
+int dev_uc_sync(struct net_device *to, struct net_device *from)
+{
+ int err = 0;
+
+ if (to->addr_len != from->addr_len)
+ return -EINVAL;
+
+ netif_addr_lock_bh(to);
+ err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
+ if (!err)
+ __dev_set_rx_mode(to);
+ netif_addr_unlock_bh(to);
+ return err;
+}
+EXPORT_SYMBOL(dev_uc_sync);
+
+/**
+ * dev_uc_unsync - Remove synchronized addresses from the destination device
+ * @to: destination device
+ * @from: source device
+ *
+ * Remove all addresses that were added to the destination device by
+ * dev_uc_sync(). This function is intended to be called from the
+ * dev->stop function of layered software devices.
+ */
+void dev_uc_unsync(struct net_device *to, struct net_device *from)
+{
+ if (to->addr_len != from->addr_len)
+ return;
+
+ netif_addr_lock_bh(from);
+ netif_addr_lock(to);
+ __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
+ __dev_set_rx_mode(to);
+ netif_addr_unlock(to);
+ netif_addr_unlock_bh(from);
+}
+EXPORT_SYMBOL(dev_uc_unsync);
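Per the kernel-doc above, the sync/unsync pair belongs in the rx-mode and stop callbacks of a layered device, propagating the upper device's lists down to the real device. A hedged sketch reusing the same hypothetical example_priv() helper:

/* Hedged sketch: propagate the upper device's address lists to the lower
 * device from the rx-mode and stop paths. example_priv() is hypothetical. */
static void example_set_rx_mode(struct net_device *dev)
{
	struct net_device *lowerdev = example_priv(dev)->lowerdev;

	dev_uc_sync(lowerdev, dev);
	dev_mc_sync(lowerdev, dev);
}

static int example_ndo_stop(struct net_device *dev)
{
	struct net_device *lowerdev = example_priv(dev)->lowerdev;

	dev_uc_unsync(lowerdev, dev);
	dev_mc_unsync(lowerdev, dev);
	return 0;
}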
+
+/**
+ * dev_uc_flush - Flush unicast addresses
+ * @dev: device
+ *
+ * Flush unicast addresses.
+ */
+void dev_uc_flush(struct net_device *dev)
+{
+ netif_addr_lock_bh(dev);
+ __hw_addr_flush(&dev->uc);
+ netif_addr_unlock_bh(dev);
+}
+EXPORT_SYMBOL(dev_uc_flush);
+
+/**
+ * dev_uc_init - Init unicast address list
+ * @dev: device
+ *
+ * Init unicast address list.
+ */
+void dev_uc_init(struct net_device *dev)
+{
+ __hw_addr_init(&dev->uc);
+}
+EXPORT_SYMBOL(dev_uc_init);
+
+/*
+ * Multicast list handling functions
+ */
+
+static int __dev_mc_add(struct net_device *dev, unsigned char *addr,
+ bool global)
+{
+ int err;
+
+ netif_addr_lock_bh(dev);
+ err = __hw_addr_add_ex(&dev->mc, addr, dev->addr_len,
+ NETDEV_HW_ADDR_T_MULTICAST, global);
+ if (!err)
+ __dev_set_rx_mode(dev);
+ netif_addr_unlock_bh(dev);
+ return err;
+}
+/**
+ * dev_mc_add - Add a multicast address
+ * @dev: device
+ * @addr: address to add
+ *
+ * Add a multicast address to the device or increase
+ * the reference count if it already exists.
+ */
+int dev_mc_add(struct net_device *dev, unsigned char *addr)
+{
+ return __dev_mc_add(dev, addr, false);
+}
+EXPORT_SYMBOL(dev_mc_add);
+
+/**
+ * dev_mc_add_global - Add a global multicast address
+ * @dev: device
+ * @addr: address to add
+ *
+ * Add a global multicast address to the device.
+ */
+int dev_mc_add_global(struct net_device *dev, unsigned char *addr)
+{
+ return __dev_mc_add(dev, addr, true);
+}
+EXPORT_SYMBOL(dev_mc_add_global);
+
+static int __dev_mc_del(struct net_device *dev, unsigned char *addr,
+ bool global)
+{
+ int err;
+
+ netif_addr_lock_bh(dev);
+ err = __hw_addr_del_ex(&dev->mc, addr, dev->addr_len,
+ NETDEV_HW_ADDR_T_MULTICAST, global);
+ if (!err)
+ __dev_set_rx_mode(dev);
+ netif_addr_unlock_bh(dev);
+ return err;
+}
+
+/**
+ * dev_mc_del - Delete a multicast address.
+ * @dev: device
+ * @addr: address to delete
+ *
+ * Release reference to a multicast address and remove it
+ * from the device if the reference count drops to zero.
+ */
+int dev_mc_del(struct net_device *dev, unsigned char *addr)
+{
+ return __dev_mc_del(dev, addr, false);
+}
+EXPORT_SYMBOL(dev_mc_del);
+
+/**
+ * dev_mc_del_global - Delete a global multicast address.
+ * @dev: device
+ * @addr: address to delete
+ *
+ * Release reference to a multicast address and remove it
+ * from the device if the reference count drops to zero.
+ */
+int dev_mc_del_global(struct net_device *dev, unsigned char *addr)
+{
+ return __dev_mc_del(dev, addr, true);
+}
+EXPORT_SYMBOL(dev_mc_del_global);
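The global variants back the SIOCADDMULTI/SIOCDELMULTI ioctl path rewritten earlier in this series: an address holds at most one global reference no matter how many times userspace adds it, while in-kernel users stack ordinary references via dev_mc_add(). A hedged trace of the resulting refcounts; the group address is illustrative:

/* Hedged trace of global vs. per-caller references; the group address
 * is illustrative. */
static void global_refcount_sketch(struct net_device *dev)
{
	unsigned char grp[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	dev_mc_add_global(dev, grp);	/* refcount 1, global_use = true    */
	dev_mc_add_global(dev, grp);	/* already global: refcount stays 1 */
	dev_mc_add(dev, grp);		/* protocol reference: refcount 2   */

	dev_mc_del_global(dev, grp);	/* clears global_use, refcount 1    */
	dev_mc_del(dev, grp);		/* last reference: entry removed    */
}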
+
+/**
+ * dev_mc_sync - Synchronize device's multicast list to another device
+ * @to: destination device
+ * @from: source device
+ *
+ * Add newly added addresses to the destination device and release
+ * addresses that have no users left. The source device must be
+ * locked by netif_addr_lock_bh.
+ *
+ * This function is intended to be called from the dev->set_multicast_list
+ * or dev->set_rx_mode function of layered software devices.
+ */
+int dev_mc_sync(struct net_device *to, struct net_device *from)
+{
+ int err = 0;
+
+ if (to->addr_len != from->addr_len)
+ return -EINVAL;
+
+ netif_addr_lock_bh(to);
+ err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
+ if (!err)
+ __dev_set_rx_mode(to);
+ netif_addr_unlock_bh(to);
+ return err;
+}
+EXPORT_SYMBOL(dev_mc_sync);
+
+/**
+ * dev_mc_unsync - Remove synchronized addresses from the destination device
+ * @to: destination device
+ * @from: source device
+ *
+ * Remove all addresses that were added to the destination device by
+ * dev_mc_sync(). This function is intended to be called from the
+ * dev->stop function of layered software devices.
+ */
+void dev_mc_unsync(struct net_device *to, struct net_device *from)
+{
+ if (to->addr_len != from->addr_len)
+ return;
+
+ netif_addr_lock_bh(from);
+ netif_addr_lock(to);
+ __hw_addr_unsync(&to->mc, &from->mc, to->addr_len);
+ __dev_set_rx_mode(to);
+ netif_addr_unlock(to);
+ netif_addr_unlock_bh(from);
+}
+EXPORT_SYMBOL(dev_mc_unsync);
+
+/**
+ * dev_mc_flush - Flush multicast addresses
+ * @dev: device
+ *
+ * Flush multicast addresses.
+ */
+void dev_mc_flush(struct net_device *dev)
+{
+ netif_addr_lock_bh(dev);
+ __hw_addr_flush(&dev->mc);
+ netif_addr_unlock_bh(dev);
+}
+EXPORT_SYMBOL(dev_mc_flush);
+
+/**
+ * dev_mc_init - Init multicast address list
+ * @dev: device
+ *
+ * Init multicast address list.
+ */
+void dev_mc_init(struct net_device *dev)
+{
+ __hw_addr_init(&dev->mc);
+}
+EXPORT_SYMBOL(dev_mc_init);
+
+#ifdef CONFIG_PROC_FS
+#include <linux/seq_file.h>
+
+static int dev_mc_seq_show(struct seq_file *seq, void *v)
+{
+ struct netdev_hw_addr *ha;
+ struct net_device *dev = v;
+
+ if (v == SEQ_START_TOKEN)
+ return 0;
+
+ netif_addr_lock_bh(dev);
+ netdev_for_each_mc_addr(ha, dev) {
+ int i;
+
+ seq_printf(seq, "%-4d %-15s %-5d %-5d ", dev->ifindex,
+ dev->name, ha->refcount, ha->global_use);
+
+ for (i = 0; i < dev->addr_len; i++)
+ seq_printf(seq, "%02x", ha->addr[i]);
+
+ seq_putc(seq, '\n');
+ }
+ netif_addr_unlock_bh(dev);
+ return 0;
+}
+
+static const struct seq_operations dev_mc_seq_ops = {
+ .start = dev_seq_start,
+ .next = dev_seq_next,
+ .stop = dev_seq_stop,
+ .show = dev_mc_seq_show,
+};
+
+static int dev_mc_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open_net(inode, file, &dev_mc_seq_ops,
+ sizeof(struct seq_net_private));
+}
+
+static const struct file_operations dev_mc_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = dev_mc_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_net,
+};
+
+#endif
+
+static int __net_init dev_mc_net_init(struct net *net)
+{
+ if (!proc_net_fops_create(net, "dev_mcast", 0, &dev_mc_seq_fops))
+ return -ENOMEM;
+ return 0;
+}
+
+static void __net_exit dev_mc_net_exit(struct net *net)
+{
+ proc_net_remove(net, "dev_mcast");
+}
+
+static struct pernet_operations __net_initdata dev_mc_net_ops = {
+ .init = dev_mc_net_init,
+ .exit = dev_mc_net_exit,
+};
+
+void __init dev_mcast_init(void)
+{
+ register_pernet_subsys(&dev_mc_net_ops);
+}
+
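On the driver side the new lists are consumed with the netdev_for_each_mc_addr() iterator, exactly as dev_mc_seq_show() does above. A hedged sketch of a helper that programs each entry into a hypothetical hardware filter; it takes the address lock itself, so it is meant for contexts outside ndo_set_rx_mode, which is already invoked with that lock held:

/* Hedged sketch of driver-side consumption; example_hw_set_mc_filter()
 * is hypothetical. Meant for contexts outside ndo_set_rx_mode, which is
 * invoked with the address lock already held. */
static void example_program_mc_filter(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	int slot = 0;

	netif_addr_lock_bh(dev);
	netdev_for_each_mc_addr(ha, dev)
		example_hw_set_mc_filter(dev, slot++, ha->addr);
	netif_addr_unlock_bh(dev);
}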
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
deleted file mode 100644
index 3dc295beb483..000000000000
--- a/net/core/dev_mcast.c
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * Linux NET3: Multicast List maintenance.
- *
- * Authors:
- * Tim Kordas <tjk@nostromo.eeap.cwru.edu>
- * Richard Underwood <richard@wuzz.demon.co.uk>
- *
- * Stir fried together from the IP multicast and CAP patches above
- * Alan Cox <alan@lxorguk.ukuu.org.uk>
- *
- * Fixes:
- * Alan Cox : Update the device on a real delete
- * rather than any time but...
- * Alan Cox : IFF_ALLMULTI support.
- * Alan Cox : New format set_multicast_list() calls.
- * Gleb Natapov : Remove dev_mc_lock.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <linux/bitops.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/in.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/if_ether.h>
-#include <linux/inet.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/init.h>
-#include <net/net_namespace.h>
-#include <net/ip.h>
-#include <net/route.h>
-#include <linux/skbuff.h>
-#include <net/sock.h>
-#include <net/arp.h>
-
-
-/*
- * Device multicast list maintenance.
- *
- * This is used both by IP and by the user level maintenance functions.
- * Unlike BSD we maintain a usage count on a given multicast address so
- * that a casual user application can add/delete multicasts used by
- * protocols without doing damage to the protocols when it deletes the
- * entries. It also helps IP as it tracks overlapping maps.
- *
- * Device mc lists are changed by bh at least if IPv6 is enabled,
- * so that it must be bh protected.
- *
- * We block accesses to device mc filters with netif_tx_lock.
- */
-
-/*
- * Delete a device level multicast
- */
-
-int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl)
-{
- int err;
-
- netif_addr_lock_bh(dev);
- err = __dev_addr_delete(&dev->mc_list, &dev->mc_count,
- addr, alen, glbl);
- if (!err) {
- /*
- * We have altered the list, so the card
- * loaded filter is now wrong. Fix it
- */
-
- __dev_set_rx_mode(dev);
- }
- netif_addr_unlock_bh(dev);
- return err;
-}
-
-/*
- * Add a device level multicast
- */
-
-int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
-{
- int err;
-
- netif_addr_lock_bh(dev);
- if (alen != dev->addr_len)
- err = -EINVAL;
- else
- err = __dev_addr_add(&dev->mc_list, &dev->mc_count, addr, alen, glbl);
- if (!err)
- __dev_set_rx_mode(dev);
- netif_addr_unlock_bh(dev);
- return err;
-}
-
-/**
- * dev_mc_sync - Synchronize device's multicast list to another device
- * @to: destination device
- * @from: source device
- *
- * Add newly added addresses to the destination device and release
- * addresses that have no users left. The source device must be
- * locked by netif_tx_lock_bh.
- *
- * This function is intended to be called from the dev->set_multicast_list
- * or dev->set_rx_mode function of layered software devices.
- */
-int dev_mc_sync(struct net_device *to, struct net_device *from)
-{
- int err = 0;
-
- netif_addr_lock_bh(to);
- err = __dev_addr_sync(&to->mc_list, &to->mc_count,
- &from->mc_list, &from->mc_count);
- if (!err)
- __dev_set_rx_mode(to);
- netif_addr_unlock_bh(to);
-
- return err;
-}
-EXPORT_SYMBOL(dev_mc_sync);
-
-
-/**
- * dev_mc_unsync - Remove synchronized addresses from the destination
- * device
- * @to: destination device
- * @from: source device
- *
- * Remove all addresses that were added to the destination device by
- * dev_mc_sync(). This function is intended to be called from the
- * dev->stop function of layered software devices.
- */
-void dev_mc_unsync(struct net_device *to, struct net_device *from)
-{
- netif_addr_lock_bh(from);
- netif_addr_lock(to);
-
- __dev_addr_unsync(&to->mc_list, &to->mc_count,
- &from->mc_list, &from->mc_count);
- __dev_set_rx_mode(to);
-
- netif_addr_unlock(to);
- netif_addr_unlock_bh(from);
-}
-EXPORT_SYMBOL(dev_mc_unsync);
-
-#ifdef CONFIG_PROC_FS
-static int dev_mc_seq_show(struct seq_file *seq, void *v)
-{
- struct dev_addr_list *m;
- struct net_device *dev = v;
-
- if (v == SEQ_START_TOKEN)
- return 0;
-
- netif_addr_lock_bh(dev);
- for (m = dev->mc_list; m; m = m->next) {
- int i;
-
- seq_printf(seq, "%-4d %-15s %-5d %-5d ", dev->ifindex,
- dev->name, m->dmi_users, m->dmi_gusers);
-
- for (i = 0; i < m->dmi_addrlen; i++)
- seq_printf(seq, "%02x", m->dmi_addr[i]);
-
- seq_putc(seq, '\n');
- }
- netif_addr_unlock_bh(dev);
- return 0;
-}
-
-static const struct seq_operations dev_mc_seq_ops = {
- .start = dev_seq_start,
- .next = dev_seq_next,
- .stop = dev_seq_stop,
- .show = dev_mc_seq_show,
-};
-
-static int dev_mc_seq_open(struct inode *inode, struct file *file)
-{
- return seq_open_net(inode, file, &dev_mc_seq_ops,
- sizeof(struct seq_net_private));
-}
-
-static const struct file_operations dev_mc_seq_fops = {
- .owner = THIS_MODULE,
- .open = dev_mc_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release_net,
-};
-
-#endif
-
-static int __net_init dev_mc_net_init(struct net *net)
-{
- if (!proc_net_fops_create(net, "dev_mcast", 0, &dev_mc_seq_fops))
- return -ENOMEM;
- return 0;
-}
-
-static void __net_exit dev_mc_net_exit(struct net *net)
-{
- proc_net_remove(net, "dev_mcast");
-}
-
-static struct pernet_operations __net_initdata dev_mc_net_ops = {
- .init = dev_mc_net_init,
- .exit = dev_mc_net_exit,
-};
-
-void __init dev_mcast_init(void)
-{
- register_pernet_subsys(&dev_mc_net_ops);
-}
-
-EXPORT_SYMBOL(dev_mc_add);
-EXPORT_SYMBOL(dev_mc_delete);
diff --git a/net/core/dst.c b/net/core/dst.c
index f307bc18f6a0..9920722cc82b 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -44,7 +44,7 @@ static atomic_t dst_total = ATOMIC_INIT(0);
*/
static struct {
spinlock_t lock;
- struct dst_entry *list;
+ struct dst_entry *list;
unsigned long timer_inc;
unsigned long timer_expires;
} dst_garbage = {
@@ -52,7 +52,7 @@ static struct {
.timer_inc = DST_GC_MAX,
};
static void dst_gc_task(struct work_struct *work);
-static void ___dst_free(struct dst_entry * dst);
+static void ___dst_free(struct dst_entry *dst);
static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task);
@@ -136,8 +136,8 @@ loop:
}
expires = dst_garbage.timer_expires;
/*
- * if the next desired timer is more than 4 seconds in the future
- * then round the timer to whole seconds
+ * if the next desired timer is more than 4 seconds in the
+ * future then round the timer to whole seconds
*/
if (expires > 4*HZ)
expires = round_jiffies_relative(expires);
@@ -152,7 +152,8 @@ loop:
" expires: %lu elapsed: %lu us\n",
atomic_read(&dst_total), delayed, work_performed,
expires,
- elapsed.tv_sec * USEC_PER_SEC + elapsed.tv_nsec / NSEC_PER_USEC);
+ elapsed.tv_sec * USEC_PER_SEC +
+ elapsed.tv_nsec / NSEC_PER_USEC);
#endif
}
@@ -163,9 +164,9 @@ int dst_discard(struct sk_buff *skb)
}
EXPORT_SYMBOL(dst_discard);
-void * dst_alloc(struct dst_ops * ops)
+void *dst_alloc(struct dst_ops *ops)
{
- struct dst_entry * dst;
+ struct dst_entry *dst;
if (ops->gc && atomic_read(&ops->entries) > ops->gc_thresh) {
if (ops->gc(ops))
@@ -185,19 +186,20 @@ void * dst_alloc(struct dst_ops * ops)
atomic_inc(&ops->entries);
return dst;
}
+EXPORT_SYMBOL(dst_alloc);
-static void ___dst_free(struct dst_entry * dst)
+static void ___dst_free(struct dst_entry *dst)
{
/* The first case (dev==NULL) is required, when
protocol module is unloaded.
*/
- if (dst->dev == NULL || !(dst->dev->flags&IFF_UP)) {
+ if (dst->dev == NULL || !(dst->dev->flags&IFF_UP))
dst->input = dst->output = dst_discard;
- }
dst->obsolete = 2;
}
+EXPORT_SYMBOL(__dst_free);
-void __dst_free(struct dst_entry * dst)
+void __dst_free(struct dst_entry *dst)
{
spin_lock_bh(&dst_garbage.lock);
___dst_free(dst);
@@ -262,15 +264,16 @@ again:
}
return NULL;
}
+EXPORT_SYMBOL(dst_destroy);
void dst_release(struct dst_entry *dst)
{
if (dst) {
- int newrefcnt;
+ int newrefcnt;
smp_mb__before_atomic_dec();
- newrefcnt = atomic_dec_return(&dst->__refcnt);
- WARN_ON(newrefcnt < 0);
+ newrefcnt = atomic_dec_return(&dst->__refcnt);
+ WARN_ON(newrefcnt < 0);
}
}
EXPORT_SYMBOL(dst_release);
@@ -283,8 +286,8 @@ EXPORT_SYMBOL(dst_release);
*
* Commented and originally written by Alexey.
*/
-static inline void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
- int unregister)
+static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
+ int unregister)
{
if (dst->ops->ifdown)
dst->ops->ifdown(dst, dev, unregister);
@@ -306,7 +309,8 @@ static inline void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
}
}
-static int dst_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
+static int dst_dev_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
{
struct net_device *dev = ptr;
struct dst_entry *dst, *last = NULL;
@@ -329,9 +333,8 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event, void
last->next = dst;
else
dst_busy_list = dst;
- for (; dst; dst = dst->next) {
+ for (; dst; dst = dst->next)
dst_ifdown(dst, dev, event != NETDEV_DOWN);
- }
mutex_unlock(&dst_gc_mutex);
break;
}
@@ -346,7 +349,3 @@ void __init dst_init(void)
{
register_netdevice_notifier(&dst_dev_notifier);
}
-
-EXPORT_SYMBOL(__dst_free);
-EXPORT_SYMBOL(dst_alloc);
-EXPORT_SYMBOL(dst_destroy);
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 9d55c57f318a..a0f4964033d2 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -18,8 +18,8 @@
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/bitops.h>
+#include <linux/uaccess.h>
#include <linux/slab.h>
-#include <asm/uaccess.h>
/*
* Some useful ethtool_ops methods that're device independent.
@@ -31,6 +31,7 @@ u32 ethtool_op_get_link(struct net_device *dev)
{
return netif_carrier_ok(dev) ? 1 : 0;
}
+EXPORT_SYMBOL(ethtool_op_get_link);
u32 ethtool_op_get_rx_csum(struct net_device *dev)
{
@@ -63,6 +64,7 @@ int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data)
return 0;
}
+EXPORT_SYMBOL(ethtool_op_set_tx_hw_csum);
int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data)
{
@@ -73,11 +75,13 @@ int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data)
return 0;
}
+EXPORT_SYMBOL(ethtool_op_set_tx_ipv6_csum);
u32 ethtool_op_get_sg(struct net_device *dev)
{
return (dev->features & NETIF_F_SG) != 0;
}
+EXPORT_SYMBOL(ethtool_op_get_sg);
int ethtool_op_set_sg(struct net_device *dev, u32 data)
{
@@ -88,11 +92,13 @@ int ethtool_op_set_sg(struct net_device *dev, u32 data)
return 0;
}
+EXPORT_SYMBOL(ethtool_op_set_sg);
u32 ethtool_op_get_tso(struct net_device *dev)
{
return (dev->features & NETIF_F_TSO) != 0;
}
+EXPORT_SYMBOL(ethtool_op_get_tso);
int ethtool_op_set_tso(struct net_device *dev, u32 data)
{
@@ -103,11 +109,13 @@ int ethtool_op_set_tso(struct net_device *dev, u32 data)
return 0;
}
+EXPORT_SYMBOL(ethtool_op_set_tso);
u32 ethtool_op_get_ufo(struct net_device *dev)
{
return (dev->features & NETIF_F_UFO) != 0;
}
+EXPORT_SYMBOL(ethtool_op_get_ufo);
int ethtool_op_set_ufo(struct net_device *dev, u32 data)
{
@@ -117,12 +125,13 @@ int ethtool_op_set_ufo(struct net_device *dev, u32 data)
dev->features &= ~NETIF_F_UFO;
return 0;
}
+EXPORT_SYMBOL(ethtool_op_set_ufo);
/* the following list of flags are the same as their associated
* NETIF_F_xxx values in include/linux/netdevice.h
*/
static const u32 flags_dup_features =
- (ETH_FLAG_LRO | ETH_FLAG_NTUPLE);
+ (ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH);
u32 ethtool_op_get_flags(struct net_device *dev)
{
@@ -133,6 +142,7 @@ u32 ethtool_op_get_flags(struct net_device *dev)
return dev->features & flags_dup_features;
}
+EXPORT_SYMBOL(ethtool_op_get_flags);
int ethtool_op_set_flags(struct net_device *dev, u32 data)
{
@@ -153,9 +163,15 @@ int ethtool_op_set_flags(struct net_device *dev, u32 data)
features &= ~NETIF_F_NTUPLE;
}
+ if (data & ETH_FLAG_RXHASH)
+ features |= NETIF_F_RXHASH;
+ else
+ features &= ~NETIF_F_RXHASH;
+
dev->features = features;
return 0;
}
+EXPORT_SYMBOL(ethtool_op_set_flags);
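With ETH_FLAG_RXHASH folded into the generic handlers, a driver that computes receive hashes only needs to advertise NETIF_F_RXHASH and reuse the stock ops; userspace can then toggle the feature through ETHTOOL_SFLAGS. A hedged, partial sketch:

/* Hedged, partial sketch: reuse the stock flag handlers so ETHTOOL_SFLAGS
 * can toggle receive hashing. Other ops are elided. */
static const struct ethtool_ops example_ethtool_ops = {
	.get_flags	= ethtool_op_get_flags,
	.set_flags	= ethtool_op_set_flags,
};

static void example_setup(struct net_device *dev)
{
	dev->features |= NETIF_F_RXHASH;	/* hardware fills skb->rxhash */
	SET_ETHTOOL_OPS(dev, &example_ethtool_ops);
}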
void ethtool_ntuple_flush(struct net_device *dev)
{
@@ -201,7 +217,8 @@ static int ethtool_set_settings(struct net_device *dev, void __user *useraddr)
return dev->ethtool_ops->set_settings(dev, &cmd);
}
-static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev,
+ void __user *useraddr)
{
struct ethtool_drvinfo info;
const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -241,7 +258,7 @@ static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev, void _
}
static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev,
- void __user *useraddr)
+ void __user *useraddr)
{
struct ethtool_sset_info info;
const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -300,7 +317,8 @@ out:
return ret;
}
-static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
+ void __user *useraddr)
{
struct ethtool_rxnfc cmd;
@@ -313,7 +331,8 @@ static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, void __u
return dev->ethtool_ops->set_rxnfc(dev, &cmd);
}
-static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
+ void __user *useraddr)
{
struct ethtool_rxnfc info;
const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -358,8 +377,8 @@ err_out:
}
static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list,
- struct ethtool_rx_ntuple_flow_spec *spec,
- struct ethtool_rx_ntuple_flow_spec_container *fsc)
+ struct ethtool_rx_ntuple_flow_spec *spec,
+ struct ethtool_rx_ntuple_flow_spec_container *fsc)
{
/* don't add filters forever */
@@ -385,7 +404,8 @@ static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list,
list->count++;
}
-static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev,
+ void __user *useraddr)
{
struct ethtool_rx_ntuple cmd;
const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -502,7 +522,7 @@ static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr)
p += ETH_GSTRING_LEN;
num_strings++;
goto unknown_filter;
- };
+ }
/* now the rest of the filters */
switch (fsc->fs.flow_type) {
@@ -510,125 +530,125 @@ static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr)
case UDP_V4_FLOW:
case SCTP_V4_FLOW:
sprintf(p, "\tSrc IP addr: 0x%x\n",
- fsc->fs.h_u.tcp_ip4_spec.ip4src);
+ fsc->fs.h_u.tcp_ip4_spec.ip4src);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tSrc IP mask: 0x%x\n",
- fsc->fs.m_u.tcp_ip4_spec.ip4src);
+ fsc->fs.m_u.tcp_ip4_spec.ip4src);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tDest IP addr: 0x%x\n",
- fsc->fs.h_u.tcp_ip4_spec.ip4dst);
+ fsc->fs.h_u.tcp_ip4_spec.ip4dst);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tDest IP mask: 0x%x\n",
- fsc->fs.m_u.tcp_ip4_spec.ip4dst);
+ fsc->fs.m_u.tcp_ip4_spec.ip4dst);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tSrc Port: %d, mask: 0x%x\n",
- fsc->fs.h_u.tcp_ip4_spec.psrc,
- fsc->fs.m_u.tcp_ip4_spec.psrc);
+ fsc->fs.h_u.tcp_ip4_spec.psrc,
+ fsc->fs.m_u.tcp_ip4_spec.psrc);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tDest Port: %d, mask: 0x%x\n",
- fsc->fs.h_u.tcp_ip4_spec.pdst,
- fsc->fs.m_u.tcp_ip4_spec.pdst);
+ fsc->fs.h_u.tcp_ip4_spec.pdst,
+ fsc->fs.m_u.tcp_ip4_spec.pdst);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tTOS: %d, mask: 0x%x\n",
- fsc->fs.h_u.tcp_ip4_spec.tos,
- fsc->fs.m_u.tcp_ip4_spec.tos);
+ fsc->fs.h_u.tcp_ip4_spec.tos,
+ fsc->fs.m_u.tcp_ip4_spec.tos);
p += ETH_GSTRING_LEN;
num_strings++;
break;
case AH_ESP_V4_FLOW:
case ESP_V4_FLOW:
sprintf(p, "\tSrc IP addr: 0x%x\n",
- fsc->fs.h_u.ah_ip4_spec.ip4src);
+ fsc->fs.h_u.ah_ip4_spec.ip4src);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tSrc IP mask: 0x%x\n",
- fsc->fs.m_u.ah_ip4_spec.ip4src);
+ fsc->fs.m_u.ah_ip4_spec.ip4src);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tDest IP addr: 0x%x\n",
- fsc->fs.h_u.ah_ip4_spec.ip4dst);
+ fsc->fs.h_u.ah_ip4_spec.ip4dst);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tDest IP mask: 0x%x\n",
- fsc->fs.m_u.ah_ip4_spec.ip4dst);
+ fsc->fs.m_u.ah_ip4_spec.ip4dst);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tSPI: %d, mask: 0x%x\n",
- fsc->fs.h_u.ah_ip4_spec.spi,
- fsc->fs.m_u.ah_ip4_spec.spi);
+ fsc->fs.h_u.ah_ip4_spec.spi,
+ fsc->fs.m_u.ah_ip4_spec.spi);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tTOS: %d, mask: 0x%x\n",
- fsc->fs.h_u.ah_ip4_spec.tos,
- fsc->fs.m_u.ah_ip4_spec.tos);
+ fsc->fs.h_u.ah_ip4_spec.tos,
+ fsc->fs.m_u.ah_ip4_spec.tos);
p += ETH_GSTRING_LEN;
num_strings++;
break;
case IP_USER_FLOW:
sprintf(p, "\tSrc IP addr: 0x%x\n",
- fsc->fs.h_u.raw_ip4_spec.ip4src);
+ fsc->fs.h_u.raw_ip4_spec.ip4src);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tSrc IP mask: 0x%x\n",
- fsc->fs.m_u.raw_ip4_spec.ip4src);
+ fsc->fs.m_u.raw_ip4_spec.ip4src);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tDest IP addr: 0x%x\n",
- fsc->fs.h_u.raw_ip4_spec.ip4dst);
+ fsc->fs.h_u.raw_ip4_spec.ip4dst);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tDest IP mask: 0x%x\n",
- fsc->fs.m_u.raw_ip4_spec.ip4dst);
+ fsc->fs.m_u.raw_ip4_spec.ip4dst);
p += ETH_GSTRING_LEN;
num_strings++;
break;
case IPV4_FLOW:
sprintf(p, "\tSrc IP addr: 0x%x\n",
- fsc->fs.h_u.usr_ip4_spec.ip4src);
+ fsc->fs.h_u.usr_ip4_spec.ip4src);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tSrc IP mask: 0x%x\n",
- fsc->fs.m_u.usr_ip4_spec.ip4src);
+ fsc->fs.m_u.usr_ip4_spec.ip4src);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tDest IP addr: 0x%x\n",
- fsc->fs.h_u.usr_ip4_spec.ip4dst);
+ fsc->fs.h_u.usr_ip4_spec.ip4dst);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tDest IP mask: 0x%x\n",
- fsc->fs.m_u.usr_ip4_spec.ip4dst);
+ fsc->fs.m_u.usr_ip4_spec.ip4dst);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tL4 bytes: 0x%x, mask: 0x%x\n",
- fsc->fs.h_u.usr_ip4_spec.l4_4_bytes,
- fsc->fs.m_u.usr_ip4_spec.l4_4_bytes);
+ fsc->fs.h_u.usr_ip4_spec.l4_4_bytes,
+ fsc->fs.m_u.usr_ip4_spec.l4_4_bytes);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tTOS: %d, mask: 0x%x\n",
- fsc->fs.h_u.usr_ip4_spec.tos,
- fsc->fs.m_u.usr_ip4_spec.tos);
+ fsc->fs.h_u.usr_ip4_spec.tos,
+ fsc->fs.m_u.usr_ip4_spec.tos);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tIP Version: %d, mask: 0x%x\n",
- fsc->fs.h_u.usr_ip4_spec.ip_ver,
- fsc->fs.m_u.usr_ip4_spec.ip_ver);
+ fsc->fs.h_u.usr_ip4_spec.ip_ver,
+ fsc->fs.m_u.usr_ip4_spec.ip_ver);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tProtocol: %d, mask: 0x%x\n",
- fsc->fs.h_u.usr_ip4_spec.proto,
- fsc->fs.m_u.usr_ip4_spec.proto);
+ fsc->fs.h_u.usr_ip4_spec.proto,
+ fsc->fs.m_u.usr_ip4_spec.proto);
p += ETH_GSTRING_LEN;
num_strings++;
break;
- };
+ }
sprintf(p, "\tVLAN: %d, mask: 0x%x\n",
- fsc->fs.vlan_tag, fsc->fs.vlan_tag_mask);
+ fsc->fs.vlan_tag, fsc->fs.vlan_tag_mask);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tUser-defined: 0x%Lx\n", fsc->fs.data);
@@ -641,7 +661,7 @@ static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr)
sprintf(p, "\tAction: Drop\n");
else
sprintf(p, "\tAction: Direct to queue %d\n",
- fsc->fs.action);
+ fsc->fs.action);
p += ETH_GSTRING_LEN;
num_strings++;
unknown_filter:
@@ -853,7 +873,8 @@ static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
return ret;
}
-static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev,
+ void __user *useraddr)
{
struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE };
@@ -867,7 +888,8 @@ static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev, void
return 0;
}
-static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev,
+ void __user *useraddr)
{
struct ethtool_coalesce coalesce;
@@ -971,6 +993,7 @@ static int ethtool_set_tx_csum(struct net_device *dev, char __user *useraddr)
return dev->ethtool_ops->set_tx_csum(dev, edata.data);
}
+EXPORT_SYMBOL(ethtool_op_set_tx_csum);
static int ethtool_set_rx_csum(struct net_device *dev, char __user *useraddr)
{
@@ -1042,7 +1065,7 @@ static int ethtool_get_gso(struct net_device *dev, char __user *useraddr)
edata.data = dev->features & NETIF_F_GSO;
if (copy_to_user(useraddr, &edata, sizeof(edata)))
- return -EFAULT;
+ return -EFAULT;
return 0;
}
@@ -1065,7 +1088,7 @@ static int ethtool_get_gro(struct net_device *dev, char __user *useraddr)
edata.data = dev->features & NETIF_F_GRO;
if (copy_to_user(useraddr, &edata, sizeof(edata)))
- return -EFAULT;
+ return -EFAULT;
return 0;
}
@@ -1277,7 +1300,8 @@ static int ethtool_set_value(struct net_device *dev, char __user *useraddr,
return actor(dev, edata.data);
}
-static noinline_for_stack int ethtool_flash_device(struct net_device *dev, char __user *useraddr)
+static noinline_for_stack int ethtool_flash_device(struct net_device *dev,
+ char __user *useraddr)
{
struct ethtool_flash efl;
@@ -1306,11 +1330,11 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
if (!dev->ethtool_ops)
return -EOPNOTSUPP;
- if (copy_from_user(&ethcmd, useraddr, sizeof (ethcmd)))
+ if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
return -EFAULT;
/* Allow some commands to be done by anyone */
- switch(ethcmd) {
+ switch (ethcmd) {
case ETHTOOL_GDRVINFO:
case ETHTOOL_GMSGLVL:
case ETHTOOL_GCOALESCE:
@@ -1338,10 +1362,11 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
return -EPERM;
}
- if (dev->ethtool_ops->begin)
- if ((rc = dev->ethtool_ops->begin(dev)) < 0)
+ if (dev->ethtool_ops->begin) {
+ rc = dev->ethtool_ops->begin(dev);
+ if (rc < 0)
return rc;
-
+ }
old_features = dev->features;
switch (ethcmd) {
@@ -1531,16 +1556,3 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
return rc;
}
-
-EXPORT_SYMBOL(ethtool_op_get_link);
-EXPORT_SYMBOL(ethtool_op_get_sg);
-EXPORT_SYMBOL(ethtool_op_get_tso);
-EXPORT_SYMBOL(ethtool_op_set_sg);
-EXPORT_SYMBOL(ethtool_op_set_tso);
-EXPORT_SYMBOL(ethtool_op_set_tx_csum);
-EXPORT_SYMBOL(ethtool_op_set_tx_hw_csum);
-EXPORT_SYMBOL(ethtool_op_set_tx_ipv6_csum);
-EXPORT_SYMBOL(ethtool_op_set_ufo);
-EXPORT_SYMBOL(ethtool_op_get_ufo);
-EXPORT_SYMBOL(ethtool_op_set_flags);
-EXPORT_SYMBOL(ethtool_op_get_flags);
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index d2c3e7dc2e5f..42e84e08a1be 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -39,6 +39,24 @@ int fib_default_rule_add(struct fib_rules_ops *ops,
}
EXPORT_SYMBOL(fib_default_rule_add);
+u32 fib_default_rule_pref(struct fib_rules_ops *ops)
+{
+ struct list_head *pos;
+ struct fib_rule *rule;
+
+ if (!list_empty(&ops->rules_list)) {
+ pos = ops->rules_list.next;
+ if (pos->next != &ops->rules_list) {
+ rule = list_entry(pos->next, struct fib_rule, list);
+ if (rule->pref)
+ return rule->pref - 1;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(fib_default_rule_pref);
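fib_default_rule_pref() walks past the first (typically local, preference 0) rule and returns the preference just below the entry that follows it; with the usual IPv4 setup of local/main/default at 0/32766/32767, a new rule therefore lands at 32765. A hedged sketch of wiring it in as an ops-template callback; only the relevant fields are shown and the template name is illustrative:

/* Hedged, partial sketch: wire the helper in as the default-preference
 * callback of a rules-ops template. Remaining callbacks are elided. */
static const struct fib_rules_ops example_rules_ops_template = {
	.family		= AF_INET,	/* illustrative */
	.default_pref	= fib_default_rule_pref,
};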
+
static void notify_rule_change(int event, struct fib_rule *rule,
struct fib_rules_ops *ops, struct nlmsghdr *nlh,
u32 pid);
@@ -104,12 +122,12 @@ errout:
}
struct fib_rules_ops *
-fib_rules_register(struct fib_rules_ops *tmpl, struct net *net)
+fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net)
{
struct fib_rules_ops *ops;
int err;
- ops = kmemdup(tmpl, sizeof (*ops), GFP_KERNEL);
+ ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL);
if (ops == NULL)
return ERR_PTR(-ENOMEM);
@@ -124,7 +142,6 @@ fib_rules_register(struct fib_rules_ops *tmpl, struct net *net)
return ops;
}
-
EXPORT_SYMBOL_GPL(fib_rules_register);
void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
@@ -158,7 +175,6 @@ void fib_rules_unregister(struct fib_rules_ops *ops)
call_rcu(&ops->rcu, fib_rules_put_rcu);
}
-
EXPORT_SYMBOL_GPL(fib_rules_unregister);
static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
@@ -221,7 +237,6 @@ out:
return err;
}
-
EXPORT_SYMBOL_GPL(fib_rules_lookup);
static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
@@ -520,6 +535,7 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
return -EMSGSIZE;
frh = nlmsg_data(nlh);
+ frh->family = ops->family;
frh->table = rule->table;
NLA_PUT_U32(skb, FRA_TABLE, rule->table);
frh->res1 = 0;
@@ -614,7 +630,7 @@ static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
break;
cb->args[1] = 0;
- skip:
+skip:
idx++;
}
rcu_read_unlock();
@@ -686,7 +702,6 @@ static int fib_rules_event(struct notifier_block *this, unsigned long event,
struct fib_rules_ops *ops;
ASSERT_RTNL();
- rcu_read_lock();
switch (event) {
case NETDEV_REGISTER:
@@ -700,8 +715,6 @@ static int fib_rules_event(struct notifier_block *this, unsigned long event,
break;
}
- rcu_read_unlock();
-
return NOTIFY_DONE;
}
diff --git a/net/core/filter.c b/net/core/filter.c
index ff943bed21af..da69fb728d32 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -302,6 +302,8 @@ load_b:
A = skb->pkt_type;
continue;
case SKF_AD_IFINDEX:
+ if (!skb->dev)
+ return 0;
A = skb->dev->ifindex;
continue;
case SKF_AD_MARK:
@@ -310,6 +312,11 @@ load_b:
case SKF_AD_QUEUE:
A = skb->queue_mapping;
continue;
+ case SKF_AD_HATYPE:
+ if (!skb->dev)
+ return 0;
+ A = skb->dev->type;
+ continue;
case SKF_AD_NLATTR: {
struct nlattr *nla;
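The new SKF_AD_HATYPE case lets a classic BPF filter read the receiving device's ARP hardware type (and, like SKF_AD_IFINDEX, drops the packet by returning 0 when skb->dev is NULL). A hedged example of a socket-filter program that accepts only Ethernet frames, assuming the SKF_AD_HATYPE offset added by the companion linux/filter.h change:

/* Hedged example: accept only frames from ARPHRD_ETHER devices. Assumes
 * SKF_AD_HATYPE from the companion linux/filter.h change and ARPHRD_ETHER
 * from linux/if_arp.h. */
static struct sock_filter example_hatype_prog[] = {
	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_HATYPE),
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ARPHRD_ETHER, 0, 1),
	BPF_STMT(BPF_RET | BPF_K, 0xffff),	/* accept */
	BPF_STMT(BPF_RET | BPF_K, 0),		/* drop   */
};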
diff --git a/net/core/flow.c b/net/core/flow.c
index 96015871ecea..161900674009 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -26,113 +26,158 @@
#include <linux/security.h>
struct flow_cache_entry {
- struct flow_cache_entry *next;
- u16 family;
- u8 dir;
- u32 genid;
- struct flowi key;
- void *object;
- atomic_t *object_ref;
+ union {
+ struct hlist_node hlist;
+ struct list_head gc_list;
+ } u;
+ u16 family;
+ u8 dir;
+ u32 genid;
+ struct flowi key;
+ struct flow_cache_object *object;
};
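The union above works because an entry lives on exactly one of the two lists at any moment: it leaves the per-cpu hash chain before the same storage is reused to queue it for the garbage-collector work item, as the shrink path below does. A hedged restatement of that invariant:

/* Hedged restatement of the union's invariant, mirroring the shrink path
 * below: unlink from the hash chain, then reuse the storage for the GC
 * list. */
static void example_evict(struct flow_cache_entry *fle,
			  struct list_head *gc_list)
{
	hlist_del(&fle->u.hlist);		 /* off the hash chain */
	list_add_tail(&fle->u.gc_list, gc_list); /* onto the GC list   */
}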
-atomic_t flow_cache_genid = ATOMIC_INIT(0);
-
-static u32 flow_hash_shift;
-#define flow_hash_size (1 << flow_hash_shift)
-static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
-
-#define flow_table(cpu) (per_cpu(flow_tables, cpu))
-
-static struct kmem_cache *flow_cachep __read_mostly;
+struct flow_cache_percpu {
+ struct hlist_head *hash_table;
+ int hash_count;
+ u32 hash_rnd;
+ int hash_rnd_recalc;
+ struct tasklet_struct flush_tasklet;
+};
-static int flow_lwm, flow_hwm;
+struct flow_flush_info {
+ struct flow_cache *cache;
+ atomic_t cpuleft;
+ struct completion completion;
+};
-struct flow_percpu_info {
- int hash_rnd_recalc;
- u32 hash_rnd;
- int count;
+struct flow_cache {
+ u32 hash_shift;
+ unsigned long order;
+ struct flow_cache_percpu *percpu;
+ struct notifier_block hotcpu_notifier;
+ int low_watermark;
+ int high_watermark;
+ struct timer_list rnd_timer;
};
-static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
-#define flow_hash_rnd_recalc(cpu) \
- (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
-#define flow_hash_rnd(cpu) \
- (per_cpu(flow_hash_info, cpu).hash_rnd)
-#define flow_count(cpu) \
- (per_cpu(flow_hash_info, cpu).count)
+atomic_t flow_cache_genid = ATOMIC_INIT(0);
+static struct flow_cache flow_cache_global;
+static struct kmem_cache *flow_cachep;
-static struct timer_list flow_hash_rnd_timer;
+static DEFINE_SPINLOCK(flow_cache_gc_lock);
+static LIST_HEAD(flow_cache_gc_list);
-#define FLOW_HASH_RND_PERIOD (10 * 60 * HZ)
-
-struct flow_flush_info {
- atomic_t cpuleft;
- struct completion completion;
-};
-static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
-
-#define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
+#define flow_cache_hash_size(cache) (1 << (cache)->hash_shift)
+#define FLOW_HASH_RND_PERIOD (10 * 60 * HZ)
static void flow_cache_new_hashrnd(unsigned long arg)
{
+ struct flow_cache *fc = (void *) arg;
int i;
for_each_possible_cpu(i)
- flow_hash_rnd_recalc(i) = 1;
+ per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1;
- flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
- add_timer(&flow_hash_rnd_timer);
+ fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
+ add_timer(&fc->rnd_timer);
+}
+
+static int flow_entry_valid(struct flow_cache_entry *fle)
+{
+ if (atomic_read(&flow_cache_genid) != fle->genid)
+ return 0;
+ if (fle->object && !fle->object->ops->check(fle->object))
+ return 0;
+ return 1;
}
-static void flow_entry_kill(int cpu, struct flow_cache_entry *fle)
+static void flow_entry_kill(struct flow_cache_entry *fle)
{
if (fle->object)
- atomic_dec(fle->object_ref);
+ fle->object->ops->delete(fle->object);
kmem_cache_free(flow_cachep, fle);
- flow_count(cpu)--;
}
-static void __flow_cache_shrink(int cpu, int shrink_to)
+static void flow_cache_gc_task(struct work_struct *work)
{
- struct flow_cache_entry *fle, **flp;
- int i;
+ struct list_head gc_list;
+ struct flow_cache_entry *fce, *n;
- for (i = 0; i < flow_hash_size; i++) {
- int k = 0;
+ INIT_LIST_HEAD(&gc_list);
+ spin_lock_bh(&flow_cache_gc_lock);
+ list_splice_tail_init(&flow_cache_gc_list, &gc_list);
+ spin_unlock_bh(&flow_cache_gc_lock);
- flp = &flow_table(cpu)[i];
- while ((fle = *flp) != NULL && k < shrink_to) {
- k++;
- flp = &fle->next;
- }
- while ((fle = *flp) != NULL) {
- *flp = fle->next;
- flow_entry_kill(cpu, fle);
- }
+ list_for_each_entry_safe(fce, n, &gc_list, u.gc_list)
+ flow_entry_kill(fce);
+}
+static DECLARE_WORK(flow_cache_gc_work, flow_cache_gc_task);
+
+static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
+ int deleted, struct list_head *gc_list)
+{
+ if (deleted) {
+ fcp->hash_count -= deleted;
+ spin_lock_bh(&flow_cache_gc_lock);
+ list_splice_tail(gc_list, &flow_cache_gc_list);
+ spin_unlock_bh(&flow_cache_gc_lock);
+ schedule_work(&flow_cache_gc_work);
}
}
-static void flow_cache_shrink(int cpu)
+static void __flow_cache_shrink(struct flow_cache *fc,
+ struct flow_cache_percpu *fcp,
+ int shrink_to)
{
- int shrink_to = flow_lwm / flow_hash_size;
+ struct flow_cache_entry *fle;
+ struct hlist_node *entry, *tmp;
+ LIST_HEAD(gc_list);
+ int i, deleted = 0;
+
+ for (i = 0; i < flow_cache_hash_size(fc); i++) {
+ int saved = 0;
+
+ hlist_for_each_entry_safe(fle, entry, tmp,
+ &fcp->hash_table[i], u.hlist) {
+ if (saved < shrink_to &&
+ flow_entry_valid(fle)) {
+ saved++;
+ } else {
+ deleted++;
+ hlist_del(&fle->u.hlist);
+ list_add_tail(&fle->u.gc_list, &gc_list);
+ }
+ }
+ }
- __flow_cache_shrink(cpu, shrink_to);
+ flow_cache_queue_garbage(fcp, deleted, &gc_list);
}
-static void flow_new_hash_rnd(int cpu)
+static void flow_cache_shrink(struct flow_cache *fc,
+ struct flow_cache_percpu *fcp)
{
- get_random_bytes(&flow_hash_rnd(cpu), sizeof(u32));
- flow_hash_rnd_recalc(cpu) = 0;
+ int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);
- __flow_cache_shrink(cpu, 0);
+ __flow_cache_shrink(fc, fcp, shrink_to);
}
-static u32 flow_hash_code(struct flowi *key, int cpu)
+static void flow_new_hash_rnd(struct flow_cache *fc,
+ struct flow_cache_percpu *fcp)
+{
+ get_random_bytes(&fcp->hash_rnd, sizeof(u32));
+ fcp->hash_rnd_recalc = 0;
+ __flow_cache_shrink(fc, fcp, 0);
+}
+
+static u32 flow_hash_code(struct flow_cache *fc,
+ struct flow_cache_percpu *fcp,
+ struct flowi *key)
{
u32 *k = (u32 *) key;
- return (jhash2(k, (sizeof(*key) / sizeof(u32)), flow_hash_rnd(cpu)) &
- (flow_hash_size - 1));
+ return (jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd)
+ & (flow_cache_hash_size(fc) - 1));
}
#if (BITS_PER_LONG == 64)
@@ -165,114 +210,117 @@ static int flow_key_compare(struct flowi *key1, struct flowi *key2)
return 0;
}
-void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
- flow_resolve_t resolver)
+struct flow_cache_object *
+flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
+ flow_resolve_t resolver, void *ctx)
{
- struct flow_cache_entry *fle, **head;
+ struct flow_cache *fc = &flow_cache_global;
+ struct flow_cache_percpu *fcp;
+ struct flow_cache_entry *fle, *tfle;
+ struct hlist_node *entry;
+ struct flow_cache_object *flo;
unsigned int hash;
- int cpu;
local_bh_disable();
- cpu = smp_processor_id();
+ fcp = per_cpu_ptr(fc->percpu, smp_processor_id());
fle = NULL;
+ flo = NULL;
/* Packet really early in init? Making flow_cache_init a
* pre-smp initcall would solve this. --RR */
- if (!flow_table(cpu))
+ if (!fcp->hash_table)
goto nocache;
- if (flow_hash_rnd_recalc(cpu))
- flow_new_hash_rnd(cpu);
- hash = flow_hash_code(key, cpu);
+ if (fcp->hash_rnd_recalc)
+ flow_new_hash_rnd(fc, fcp);
- head = &flow_table(cpu)[hash];
- for (fle = *head; fle; fle = fle->next) {
- if (fle->family == family &&
- fle->dir == dir &&
- flow_key_compare(key, &fle->key) == 0) {
- if (fle->genid == atomic_read(&flow_cache_genid)) {
- void *ret = fle->object;
-
- if (ret)
- atomic_inc(fle->object_ref);
- local_bh_enable();
-
- return ret;
- }
+ hash = flow_hash_code(fc, fcp, key);
+ hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) {
+ if (tfle->family == family &&
+ tfle->dir == dir &&
+ flow_key_compare(key, &tfle->key) == 0) {
+ fle = tfle;
break;
}
}
- if (!fle) {
- if (flow_count(cpu) > flow_hwm)
- flow_cache_shrink(cpu);
+ if (unlikely(!fle)) {
+ if (fcp->hash_count > fc->high_watermark)
+ flow_cache_shrink(fc, fcp);
fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
if (fle) {
- fle->next = *head;
- *head = fle;
fle->family = family;
fle->dir = dir;
memcpy(&fle->key, key, sizeof(*key));
fle->object = NULL;
- flow_count(cpu)++;
+ hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
+ fcp->hash_count++;
}
+ } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
+ flo = fle->object;
+ if (!flo)
+ goto ret_object;
+ flo = flo->ops->get(flo);
+ if (flo)
+ goto ret_object;
+ } else if (fle->object) {
+ flo = fle->object;
+ flo->ops->delete(flo);
+ fle->object = NULL;
}
nocache:
- {
- int err;
- void *obj;
- atomic_t *obj_ref;
-
- err = resolver(net, key, family, dir, &obj, &obj_ref);
-
- if (fle && !err) {
- fle->genid = atomic_read(&flow_cache_genid);
-
- if (fle->object)
- atomic_dec(fle->object_ref);
-
- fle->object = obj;
- fle->object_ref = obj_ref;
- if (obj)
- atomic_inc(fle->object_ref);
- }
- local_bh_enable();
-
- if (err)
- obj = ERR_PTR(err);
- return obj;
+ flo = NULL;
+ if (fle) {
+ flo = fle->object;
+ fle->object = NULL;
}
+ flo = resolver(net, key, family, dir, flo, ctx);
+ if (fle) {
+ fle->genid = atomic_read(&flow_cache_genid);
+ if (!IS_ERR(flo))
+ fle->object = flo;
+ else
+ fle->genid--;
+ } else {
+ if (flo && !IS_ERR(flo))
+ flo->ops->delete(flo);
+ }
+ret_object:
+ local_bh_enable();
+ return flo;
}
static void flow_cache_flush_tasklet(unsigned long data)
{
struct flow_flush_info *info = (void *)data;
- int i;
- int cpu;
-
- cpu = smp_processor_id();
- for (i = 0; i < flow_hash_size; i++) {
- struct flow_cache_entry *fle;
-
- fle = flow_table(cpu)[i];
- for (; fle; fle = fle->next) {
- unsigned genid = atomic_read(&flow_cache_genid);
-
- if (!fle->object || fle->genid == genid)
+ struct flow_cache *fc = info->cache;
+ struct flow_cache_percpu *fcp;
+ struct flow_cache_entry *fle;
+ struct hlist_node *entry, *tmp;
+ LIST_HEAD(gc_list);
+ int i, deleted = 0;
+
+ fcp = per_cpu_ptr(fc->percpu, smp_processor_id());
+ for (i = 0; i < flow_cache_hash_size(fc); i++) {
+ hlist_for_each_entry_safe(fle, entry, tmp,
+ &fcp->hash_table[i], u.hlist) {
+ if (flow_entry_valid(fle))
continue;
- fle->object = NULL;
- atomic_dec(fle->object_ref);
+ deleted++;
+ hlist_del(&fle->u.hlist);
+ list_add_tail(&fle->u.gc_list, &gc_list);
}
}
+ flow_cache_queue_garbage(fcp, deleted, &gc_list);
+
if (atomic_dec_and_test(&info->cpuleft))
complete(&info->completion);
}
-static void flow_cache_flush_per_cpu(void *) __attribute__((__unused__));
static void flow_cache_flush_per_cpu(void *data)
{
struct flow_flush_info *info = data;
@@ -280,8 +328,7 @@ static void flow_cache_flush_per_cpu(void *data)
struct tasklet_struct *tasklet;
cpu = smp_processor_id();
-
- tasklet = flow_flush_tasklet(cpu);
+ tasklet = &per_cpu_ptr(info->cache->percpu, cpu)->flush_tasklet;
tasklet->data = (unsigned long)info;
tasklet_schedule(tasklet);
}
@@ -294,6 +341,7 @@ void flow_cache_flush(void)
/* Don't want cpus going down or up during this. */
get_online_cpus();
mutex_lock(&flow_flush_sem);
+ info.cache = &flow_cache_global;
atomic_set(&info.cpuleft, num_online_cpus());
init_completion(&info.completion);
@@ -307,62 +355,75 @@ void flow_cache_flush(void)
put_online_cpus();
}
-static void __init flow_cache_cpu_prepare(int cpu)
+static void __init flow_cache_cpu_prepare(struct flow_cache *fc,
+ struct flow_cache_percpu *fcp)
{
- struct tasklet_struct *tasklet;
- unsigned long order;
-
- for (order = 0;
- (PAGE_SIZE << order) <
- (sizeof(struct flow_cache_entry *)*flow_hash_size);
- order++)
- /* NOTHING */;
-
- flow_table(cpu) = (struct flow_cache_entry **)
- __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
- if (!flow_table(cpu))
- panic("NET: failed to allocate flow cache order %lu\n", order);
-
- flow_hash_rnd_recalc(cpu) = 1;
- flow_count(cpu) = 0;
-
- tasklet = flow_flush_tasklet(cpu);
- tasklet_init(tasklet, flow_cache_flush_tasklet, 0);
+ fcp->hash_table = (struct hlist_head *)
+ __get_free_pages(GFP_KERNEL|__GFP_ZERO, fc->order);
+ if (!fcp->hash_table)
+ panic("NET: failed to allocate flow cache order %lu\n", fc->order);
+
+ fcp->hash_rnd_recalc = 1;
+ fcp->hash_count = 0;
+ tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
}
static int flow_cache_cpu(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
+ struct flow_cache *fc = container_of(nfb, struct flow_cache, hotcpu_notifier);
+ int cpu = (unsigned long) hcpu;
+ struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
+
if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
- __flow_cache_shrink((unsigned long)hcpu, 0);
+ __flow_cache_shrink(fc, fcp, 0);
return NOTIFY_OK;
}
-static int __init flow_cache_init(void)
+static int flow_cache_init(struct flow_cache *fc)
{
+ unsigned long order;
int i;
- flow_cachep = kmem_cache_create("flow_cache",
- sizeof(struct flow_cache_entry),
- 0, SLAB_PANIC,
- NULL);
- flow_hash_shift = 10;
- flow_lwm = 2 * flow_hash_size;
- flow_hwm = 4 * flow_hash_size;
+ fc->hash_shift = 10;
+ fc->low_watermark = 2 * flow_cache_hash_size(fc);
+ fc->high_watermark = 4 * flow_cache_hash_size(fc);
+
+ for (order = 0;
+ (PAGE_SIZE << order) <
+ (sizeof(struct hlist_head)*flow_cache_hash_size(fc));
+ order++)
+ /* NOTHING */;
+ fc->order = order;
+ fc->percpu = alloc_percpu(struct flow_cache_percpu);
- setup_timer(&flow_hash_rnd_timer, flow_cache_new_hashrnd, 0);
- flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
- add_timer(&flow_hash_rnd_timer);
+ setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
+ (unsigned long) fc);
+ fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
+ add_timer(&fc->rnd_timer);
for_each_possible_cpu(i)
- flow_cache_cpu_prepare(i);
+ flow_cache_cpu_prepare(fc, per_cpu_ptr(fc->percpu, i));
+
+ fc->hotcpu_notifier = (struct notifier_block){
+ .notifier_call = flow_cache_cpu,
+ };
+ register_hotcpu_notifier(&fc->hotcpu_notifier);
- hotcpu_notifier(flow_cache_cpu, 0);
return 0;
}
-module_init(flow_cache_init);
+static int __init flow_cache_init_global(void)
+{
+ flow_cachep = kmem_cache_create("flow_cache",
+ sizeof(struct flow_cache_entry),
+ 0, SLAB_PANIC, NULL);
+
+ return flow_cache_init(&flow_cache_global);
+}
+
+module_init(flow_cache_init_global);
EXPORT_SYMBOL(flow_cache_genid);
EXPORT_SYMBOL(flow_cache_lookup);
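
Note: the flow.c rework above changes the lifetime model. Shrink, flush and hash-rnd renewal no longer free entries inline in softirq context; they only unlink entries from the per-cpu hash onto a private gc_list, and flow_cache_queue_garbage() splices that list onto the global flow_cache_gc_list under flow_cache_gc_lock and schedules flow_cache_gc_work, so ops->delete() and kmem_cache_free() run later in process context. A minimal userspace sketch of this deferred-free pattern, assuming pthreads in place of spinlocks and the kernel workqueue (struct entry, queue_garbage, gc_worker are invented names):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	int key;
	struct entry *next;	/* chains entries on the gc list */
};

static struct entry *gc_list;	/* shared garbage list (flow_cache_gc_list) */
static pthread_mutex_t gc_lock = PTHREAD_MUTEX_INITIALIZER;

/* Fast path: splice an already-unlinked batch onto the shared list and
 * return; mirrors flow_cache_queue_garbage(). */
static void queue_garbage(struct entry *batch)
{
	struct entry *tail = batch;

	if (!batch)
		return;
	while (tail->next)
		tail = tail->next;
	pthread_mutex_lock(&gc_lock);
	tail->next = gc_list;
	gc_list = batch;
	pthread_mutex_unlock(&gc_lock);
}

/* Worker: steal the whole list in one short critical section, then free
 * at leisure; mirrors flow_cache_gc_task(). */
static void *gc_worker(void *unused)
{
	struct entry *batch;

	(void)unused;
	pthread_mutex_lock(&gc_lock);
	batch = gc_list;
	gc_list = NULL;
	pthread_mutex_unlock(&gc_lock);
	while (batch) {
		struct entry *next = batch->next;

		printf("freeing entry %d\n", batch->key);
		free(batch);
		batch = next;
	}
	return NULL;
}

int main(void)
{
	struct entry *batch = NULL;
	pthread_t t;
	int i;

	for (i = 0; i < 3; i++) {	/* simulate shrink unlinking entries */
		struct entry *e = malloc(sizeof(*e));

		e->key = i;
		e->next = batch;
		batch = e;
	}
	queue_garbage(batch);
	pthread_create(&t, NULL, gc_worker, NULL);
	pthread_join(t, NULL);
	return 0;
}

The point of the split is that the hot path only pays for list manipulation under a short lock; freeing, which may be arbitrarily expensive, is batched into the worker.
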
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 59cfc7d8fc45..99e7052d7323 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -14,9 +14,12 @@
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
+#include <linux/nsproxy.h>
#include <net/sock.h>
+#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/wireless.h>
+#include <linux/vmalloc.h>
#include <net/wext.h>
#include "net-sysfs.h"
@@ -466,18 +469,345 @@ static struct attribute_group wireless_group = {
.attrs = wireless_attrs,
};
#endif
-
#endif /* CONFIG_SYSFS */
+#ifdef CONFIG_RPS
+/*
+ * RX queue sysfs structures and functions.
+ */
+struct rx_queue_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct netdev_rx_queue *queue,
+ struct rx_queue_attribute *attr, char *buf);
+ ssize_t (*store)(struct netdev_rx_queue *queue,
+ struct rx_queue_attribute *attr, const char *buf, size_t len);
+};
+#define to_rx_queue_attr(_attr) container_of(_attr, \
+ struct rx_queue_attribute, attr)
+
+#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)
+
+static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
+ struct netdev_rx_queue *queue = to_rx_queue(kobj);
+
+ if (!attribute->show)
+ return -EIO;
+
+ return attribute->show(queue, attribute, buf);
+}
+
+static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
+ struct netdev_rx_queue *queue = to_rx_queue(kobj);
+
+ if (!attribute->store)
+ return -EIO;
+
+ return attribute->store(queue, attribute, buf, count);
+}
+
+static struct sysfs_ops rx_queue_sysfs_ops = {
+ .show = rx_queue_attr_show,
+ .store = rx_queue_attr_store,
+};
+
+static ssize_t show_rps_map(struct netdev_rx_queue *queue,
+ struct rx_queue_attribute *attribute, char *buf)
+{
+ struct rps_map *map;
+ cpumask_var_t mask;
+ size_t len = 0;
+ int i;
+
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ rcu_read_lock();
+ map = rcu_dereference(queue->rps_map);
+ if (map)
+ for (i = 0; i < map->len; i++)
+ cpumask_set_cpu(map->cpus[i], mask);
+
+ len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
+ if (PAGE_SIZE - len < 3) {
+ rcu_read_unlock();
+ free_cpumask_var(mask);
+ return -EINVAL;
+ }
+ rcu_read_unlock();
+
+ free_cpumask_var(mask);
+ len += sprintf(buf + len, "\n");
+ return len;
+}
+
+static void rps_map_release(struct rcu_head *rcu)
+{
+ struct rps_map *map = container_of(rcu, struct rps_map, rcu);
+
+ kfree(map);
+}
+
+static ssize_t store_rps_map(struct netdev_rx_queue *queue,
+ struct rx_queue_attribute *attribute,
+ const char *buf, size_t len)
+{
+ struct rps_map *old_map, *map;
+ cpumask_var_t mask;
+ int err, cpu, i;
+ static DEFINE_SPINLOCK(rps_map_lock);
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
+ if (err) {
+ free_cpumask_var(mask);
+ return err;
+ }
+
+ map = kzalloc(max_t(unsigned,
+ RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
+ GFP_KERNEL);
+ if (!map) {
+ free_cpumask_var(mask);
+ return -ENOMEM;
+ }
+
+ i = 0;
+ for_each_cpu_and(cpu, mask, cpu_online_mask)
+ map->cpus[i++] = cpu;
+
+ if (i)
+ map->len = i;
+ else {
+ kfree(map);
+ map = NULL;
+ }
+
+ spin_lock(&rps_map_lock);
+ old_map = queue->rps_map;
+ rcu_assign_pointer(queue->rps_map, map);
+ spin_unlock(&rps_map_lock);
+
+ if (old_map)
+ call_rcu(&old_map->rcu, rps_map_release);
+
+ free_cpumask_var(mask);
+ return len;
+}
+
+static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
+ struct rx_queue_attribute *attr,
+ char *buf)
+{
+ struct rps_dev_flow_table *flow_table;
+ unsigned int val = 0;
+
+ rcu_read_lock();
+ flow_table = rcu_dereference(queue->rps_flow_table);
+ if (flow_table)
+ val = flow_table->mask + 1;
+ rcu_read_unlock();
+
+ return sprintf(buf, "%u\n", val);
+}
+
+static void rps_dev_flow_table_release_work(struct work_struct *work)
+{
+ struct rps_dev_flow_table *table = container_of(work,
+ struct rps_dev_flow_table, free_work);
+
+ vfree(table);
+}
+
+static void rps_dev_flow_table_release(struct rcu_head *rcu)
+{
+ struct rps_dev_flow_table *table = container_of(rcu,
+ struct rps_dev_flow_table, rcu);
+
+ INIT_WORK(&table->free_work, rps_dev_flow_table_release_work);
+ schedule_work(&table->free_work);
+}
+
+static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
+ struct rx_queue_attribute *attr,
+ const char *buf, size_t len)
+{
+ unsigned int count;
+ char *endp;
+ struct rps_dev_flow_table *table, *old_table;
+ static DEFINE_SPINLOCK(rps_dev_flow_lock);
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ count = simple_strtoul(buf, &endp, 0);
+ if (endp == buf)
+ return -EINVAL;
+
+ if (count) {
+ int i;
+
+ if (count > 1<<30) {
+ /* Enforce a limit to prevent overflow */
+ return -EINVAL;
+ }
+ count = roundup_pow_of_two(count);
+ table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count));
+ if (!table)
+ return -ENOMEM;
+
+ table->mask = count - 1;
+ for (i = 0; i < count; i++)
+ table->flows[i].cpu = RPS_NO_CPU;
+ } else
+ table = NULL;
+
+ spin_lock(&rps_dev_flow_lock);
+ old_table = queue->rps_flow_table;
+ rcu_assign_pointer(queue->rps_flow_table, table);
+ spin_unlock(&rps_dev_flow_lock);
+
+ if (old_table)
+ call_rcu(&old_table->rcu, rps_dev_flow_table_release);
+
+ return len;
+}
+
+static struct rx_queue_attribute rps_cpus_attribute =
+ __ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map);
+
+
+static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute =
+ __ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR,
+ show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
+
+static struct attribute *rx_queue_default_attrs[] = {
+ &rps_cpus_attribute.attr,
+ &rps_dev_flow_table_cnt_attribute.attr,
+ NULL
+};
+
+static void rx_queue_release(struct kobject *kobj)
+{
+ struct netdev_rx_queue *queue = to_rx_queue(kobj);
+ struct netdev_rx_queue *first = queue->first;
+
+ if (queue->rps_map)
+ call_rcu(&queue->rps_map->rcu, rps_map_release);
+
+ if (queue->rps_flow_table)
+ call_rcu(&queue->rps_flow_table->rcu,
+ rps_dev_flow_table_release);
+
+ if (atomic_dec_and_test(&first->count))
+ kfree(first);
+}
+
+static struct kobj_type rx_queue_ktype = {
+ .sysfs_ops = &rx_queue_sysfs_ops,
+ .release = rx_queue_release,
+ .default_attrs = rx_queue_default_attrs,
+};
+
+static int rx_queue_add_kobject(struct net_device *net, int index)
+{
+ struct netdev_rx_queue *queue = net->_rx + index;
+ struct kobject *kobj = &queue->kobj;
+ int error = 0;
+
+ kobj->kset = net->queues_kset;
+ error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
+ "rx-%u", index);
+ if (error) {
+ kobject_put(kobj);
+ return error;
+ }
+
+ kobject_uevent(kobj, KOBJ_ADD);
+
+ return error;
+}
+
+static int rx_queue_register_kobjects(struct net_device *net)
+{
+ int i;
+ int error = 0;
+
+ net->queues_kset = kset_create_and_add("queues",
+ NULL, &net->dev.kobj);
+ if (!net->queues_kset)
+ return -ENOMEM;
+ for (i = 0; i < net->num_rx_queues; i++) {
+ error = rx_queue_add_kobject(net, i);
+ if (error)
+ break;
+ }
+
+ if (error)
+ while (--i >= 0)
+ kobject_put(&net->_rx[i].kobj);
+
+ return error;
+}
+
+static void rx_queue_remove_kobjects(struct net_device *net)
+{
+ int i;
+
+ for (i = 0; i < net->num_rx_queues; i++)
+ kobject_put(&net->_rx[i].kobj);
+ kset_unregister(net->queues_kset);
+}
+#endif /* CONFIG_RPS */
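
Note: store_rps_map() follows the usual sysfs-to-RCU recipe: parse the written cpumask with bitmap_parse(), compact the set bits into the rps_map's cpus[] array, publish the new map with rcu_assign_pointer() under rps_map_lock, and free the old one via call_rcu(). The compaction step is the easy one to get wrong; a small userspace sketch, assuming a hex mask string as sysfs would deliver it:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *buf = "0d";			/* echo 0d > .../rps_cpus: CPUs 0, 2, 3 */
	unsigned long mask = strtoul(buf, NULL, 16);
	unsigned int nbits = sizeof(mask) * 8;
	unsigned int cpus[sizeof(mask) * 8];	/* stands in for map->cpus[] */
	unsigned int len = 0, cpu, i;

	for (cpu = 0; cpu < nbits; cpu++)
		if (mask & (1UL << cpu))
			cpus[len++] = cpu;	/* map->cpus[i++] = cpu */

	printf("rps_map len=%u:", len);
	for (i = 0; i < len; i++)
		printf(" %u", cpus[i]);
	printf("\n");				/* prints: rps_map len=3: 0 2 3 */
	return 0;
}

Readers on the packet path then pick a cpus[] slot from an index derived from the packet hash without ever seeing a half-updated array, because the pointer swap itself is atomic.
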
+
+static const void *net_current_ns(void)
+{
+ return current->nsproxy->net_ns;
+}
+
+static const void *net_initial_ns(void)
+{
+ return &init_net;
+}
+
+static const void *net_netlink_ns(struct sock *sk)
+{
+ return sock_net(sk);
+}
+
+static struct kobj_ns_type_operations net_ns_type_operations = {
+ .type = KOBJ_NS_TYPE_NET,
+ .current_ns = net_current_ns,
+ .netlink_ns = net_netlink_ns,
+ .initial_ns = net_initial_ns,
+};
+
+static void net_kobj_ns_exit(struct net *net)
+{
+ kobj_ns_exit(KOBJ_NS_TYPE_NET, net);
+}
+
+static struct pernet_operations kobj_net_ops = {
+ .exit = net_kobj_ns_exit,
+};
+
+
#ifdef CONFIG_HOTPLUG
static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
{
struct net_device *dev = to_net_dev(d);
int retval;
- if (!net_eq(dev_net(dev), &init_net))
- return 0;
-
/* pass interface to uevent. */
retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
if (retval)
@@ -507,6 +837,13 @@ static void netdev_release(struct device *d)
kfree((char *)dev - dev->padded);
}
+static const void *net_namespace(struct device *d)
+{
+ struct net_device *dev;
+ dev = container_of(d, struct net_device, dev);
+ return dev_net(dev);
+}
+
static struct class net_class = {
.name = "net",
.dev_release = netdev_release,
@@ -516,6 +853,8 @@ static struct class net_class = {
#ifdef CONFIG_HOTPLUG
.dev_uevent = netdev_uevent,
#endif
+ .ns_type = &net_ns_type_operations,
+ .namespace = net_namespace,
};
/* Delete sysfs entries but hold kobject reference until after all
@@ -527,8 +866,9 @@ void netdev_unregister_kobject(struct net_device * net)
kobject_get(&dev->kobj);
- if (!net_eq(dev_net(net), &init_net))
- return;
+#ifdef CONFIG_RPS
+ rx_queue_remove_kobjects(net);
+#endif
device_del(dev);
}
@@ -538,7 +878,9 @@ int netdev_register_kobject(struct net_device *net)
{
struct device *dev = &(net->dev);
const struct attribute_group **groups = net->sysfs_groups;
+ int error = 0;
+ device_initialize(dev);
dev->class = &net_class;
dev->platform_data = net;
dev->groups = groups;
@@ -561,10 +903,19 @@ int netdev_register_kobject(struct net_device *net)
#endif
#endif /* CONFIG_SYSFS */
- if (!net_eq(dev_net(net), &init_net))
- return 0;
+ error = device_add(dev);
+ if (error)
+ return error;
+
+#ifdef CONFIG_RPS
+ error = rx_queue_register_kobjects(net);
+ if (error) {
+ device_del(dev);
+ return error;
+ }
+#endif
- return device_add(dev);
+ return error;
}
int netdev_class_create_file(struct class_attribute *class_attr)
@@ -580,13 +931,9 @@ void netdev_class_remove_file(struct class_attribute *class_attr)
EXPORT_SYMBOL(netdev_class_create_file);
EXPORT_SYMBOL(netdev_class_remove_file);
-void netdev_initialize_kobject(struct net_device *net)
-{
- struct device *device = &(net->dev);
- device_initialize(device);
-}
-
int netdev_kobject_init(void)
{
+ kobj_ns_type_register(&net_ns_type_operations);
+ register_pernet_subsys(&kobj_net_ops);
return class_register(&net_class);
}
diff --git a/net/core/net-sysfs.h b/net/core/net-sysfs.h
index 14e7524260b3..805555e8b187 100644
--- a/net/core/net-sysfs.h
+++ b/net/core/net-sysfs.h
@@ -4,5 +4,4 @@
int netdev_kobject_init(void);
int netdev_register_kobject(struct net_device *);
void netdev_unregister_kobject(struct net_device *);
-void netdev_initialize_kobject(struct net_device *);
#endif
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index bd8c4712ea24..c988e685433a 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -27,6 +27,51 @@ EXPORT_SYMBOL(init_net);
#define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */
+static void net_generic_release(struct rcu_head *rcu)
+{
+ struct net_generic *ng;
+
+ ng = container_of(rcu, struct net_generic, rcu);
+ kfree(ng);
+}
+
+static int net_assign_generic(struct net *net, int id, void *data)
+{
+ struct net_generic *ng, *old_ng;
+
+ BUG_ON(!mutex_is_locked(&net_mutex));
+ BUG_ON(id == 0);
+
+ ng = old_ng = net->gen;
+ if (old_ng->len >= id)
+ goto assign;
+
+ ng = kzalloc(sizeof(struct net_generic) +
+ id * sizeof(void *), GFP_KERNEL);
+ if (ng == NULL)
+ return -ENOMEM;
+
+ /*
+ * Some synchronisation notes:
+ *
+ * net_generic() explores the net->gen array inside an rcu
+ * read section. Besides, once set, the net->gen->ptr[x]
+ * pointer never changes (see rules in netns/generic.h).
+ *
+ * That said, we simply duplicate this array and schedule
+ * the old copy for kfree after a grace period.
+ */
+
+ ng->len = id;
+ memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));
+
+ rcu_assign_pointer(net->gen, ng);
+ call_rcu(&old_ng->rcu, net_generic_release);
+assign:
+ ng->ptr[id - 1] = data;
+ return 0;
+}
+
static int ops_init(const struct pernet_operations *ops, struct net *net)
{
int err;
@@ -469,10 +514,10 @@ EXPORT_SYMBOL_GPL(register_pernet_subsys);
* addition run the exit method for all existing network
* namespaces.
*/
-void unregister_pernet_subsys(struct pernet_operations *module)
+void unregister_pernet_subsys(struct pernet_operations *ops)
{
mutex_lock(&net_mutex);
- unregister_pernet_operations(module);
+ unregister_pernet_operations(ops);
mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);
@@ -526,49 +571,3 @@ void unregister_pernet_device(struct pernet_operations *ops)
mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);
-
-static void net_generic_release(struct rcu_head *rcu)
-{
- struct net_generic *ng;
-
- ng = container_of(rcu, struct net_generic, rcu);
- kfree(ng);
-}
-
-int net_assign_generic(struct net *net, int id, void *data)
-{
- struct net_generic *ng, *old_ng;
-
- BUG_ON(!mutex_is_locked(&net_mutex));
- BUG_ON(id == 0);
-
- ng = old_ng = net->gen;
- if (old_ng->len >= id)
- goto assign;
-
- ng = kzalloc(sizeof(struct net_generic) +
- id * sizeof(void *), GFP_KERNEL);
- if (ng == NULL)
- return -ENOMEM;
-
- /*
- * Some synchronisation notes:
- *
- * The net_generic explores the net->gen array inside rcu
- * read section. Besides once set the net->gen->ptr[x]
- * pointer never changes (see rules in netns/generic.h).
- *
- * That said, we simply duplicate this array and schedule
- * the old copy for kfree after a grace period.
- */
-
- ng->len = id;
- memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));
-
- rcu_assign_pointer(net->gen, ng);
- call_rcu(&old_ng->rcu, net_generic_release);
-assign:
- ng->ptr[id - 1] = data;
- return 0;
-}
-EXPORT_SYMBOL_GPL(net_assign_generic);
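
Note: net_assign_generic() is now private to this file, but the pattern it implements is worth spelling out: the net->gen pointer array only ever grows and existing slots never move, so readers can walk it under rcu_read_lock() while a writer (serialized by net_mutex) replaces the whole array with a larger copy and defers freeing the old one to call_rcu(). A hypothetical userspace analogue, assuming a single writer and no real RCU (the plain assignment and free() stand in for rcu_assign_pointer() and the grace period):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct generic {
	int len;
	void *ptr[];		/* flexible array, like struct net_generic */
};

static struct generic *gen;	/* the published pointer (net->gen) */

static int assign_generic(int id, void *data)
{
	struct generic *ng, *old = gen;

	if (old && old->len >= id) {
		old->ptr[id - 1] = data;	/* slot exists: set in place */
		return 0;
	}
	ng = calloc(1, sizeof(*ng) + id * sizeof(void *));
	if (!ng)
		return -1;
	ng->len = id;
	if (old)
		memcpy(ng->ptr, old->ptr, old->len * sizeof(void *));
	ng->ptr[id - 1] = data;
	gen = ng;	/* kernel: rcu_assign_pointer(net->gen, ng) */
	free(old);	/* kernel: call_rcu() after a grace period */
	return 0;
}

int main(void)
{
	assign_generic(3, (void *)"three");
	assign_generic(5, (void *)"five");
	printf("len=%d id3=%s id5=%s\n", gen->len,
	       (char *)gen->ptr[2], (char *)gen->ptr[4]);
	return 0;
}

Because ng->ptr[id - 1] is written into the copy before it is published here, readers see either the old array or a fully initialized new one; the kernel version can afford to store the slot after publication, since slot id was unused before the call.
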
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index a58f59b97597..94825b109551 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -179,9 +179,8 @@ static void service_arp_queue(struct netpoll_info *npi)
}
}
-void netpoll_poll(struct netpoll *np)
+void netpoll_poll_dev(struct net_device *dev)
{
- struct net_device *dev = np->dev;
const struct net_device_ops *ops;
if (!dev || !netif_running(dev))
@@ -201,6 +200,11 @@ void netpoll_poll(struct netpoll *np)
zap_completion_queue();
}
+void netpoll_poll(struct netpoll *np)
+{
+ netpoll_poll_dev(np->dev);
+}
+
static void refill_skbs(void)
{
struct sk_buff *skb;
@@ -282,7 +286,7 @@ static int netpoll_owner_active(struct net_device *dev)
return 0;
}
-static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
+void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
int status = NETDEV_TX_BUSY;
unsigned long tries;
@@ -308,7 +312,9 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
tries > 0; --tries) {
if (__netif_tx_trylock(txq)) {
if (!netif_tx_queue_stopped(txq)) {
+ dev->priv_flags |= IFF_IN_NETPOLL;
status = ops->ndo_start_xmit(skb, dev);
+ dev->priv_flags &= ~IFF_IN_NETPOLL;
if (status == NETDEV_TX_OK)
txq_trans_update(txq);
}
@@ -756,7 +762,10 @@ int netpoll_setup(struct netpoll *np)
atomic_inc(&npinfo->refcnt);
}
- if (!ndev->netdev_ops->ndo_poll_controller) {
+ npinfo->netpoll = np;
+
+ if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
+ !ndev->netdev_ops->ndo_poll_controller) {
printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
np->name, np->dev_name);
err = -ENOTSUPP;
@@ -878,6 +887,7 @@ void netpoll_cleanup(struct netpoll *np)
}
if (atomic_dec_and_test(&npinfo->refcnt)) {
+ const struct net_device_ops *ops;
skb_queue_purge(&npinfo->arp_tx);
skb_queue_purge(&npinfo->txq);
cancel_rearming_delayed_work(&npinfo->tx_work);
@@ -885,7 +895,11 @@ void netpoll_cleanup(struct netpoll *np)
/* clean after last, unfinished work */
__skb_queue_purge(&npinfo->txq);
kfree(npinfo);
- np->dev->npinfo = NULL;
+ ops = np->dev->netdev_ops;
+ if (ops->ndo_netpoll_cleanup)
+ ops->ndo_netpoll_cleanup(np->dev);
+ else
+ np->dev->npinfo = NULL;
}
}
@@ -908,6 +922,7 @@ void netpoll_set_trap(int trap)
atomic_dec(&trapped);
}
+EXPORT_SYMBOL(netpoll_send_skb);
EXPORT_SYMBOL(netpoll_set_trap);
EXPORT_SYMBOL(netpoll_trap);
EXPORT_SYMBOL(netpoll_print_options);
@@ -915,4 +930,5 @@ EXPORT_SYMBOL(netpoll_parse_options);
EXPORT_SYMBOL(netpoll_setup);
EXPORT_SYMBOL(netpoll_cleanup);
EXPORT_SYMBOL(netpoll_send_udp);
+EXPORT_SYMBOL(netpoll_poll_dev);
EXPORT_SYMBOL(netpoll_poll);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 43923811bd6a..2ad68da418df 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -169,7 +169,7 @@
#include <asm/dma.h>
#include <asm/div64.h> /* do_div */
-#define VERSION "2.72"
+#define VERSION "2.73"
#define IP_NAME_SZ 32
#define MAX_MPLS_LABELS 16 /* This is the max label stack depth */
#define MPLS_STACK_BOTTOM htonl(0x00000100)
@@ -190,6 +190,7 @@
#define F_IPSEC_ON (1<<12) /* ipsec on for flows */
#define F_QUEUE_MAP_RND (1<<13) /* queue map Random */
#define F_QUEUE_MAP_CPU (1<<14) /* queue map mirrors smp_processor_id() */
+#define F_NODE (1<<15) /* Node memory alloc */
/* Thread control flag bits */
#define T_STOP (1<<0) /* Stop run */
@@ -372,6 +373,7 @@ struct pktgen_dev {
u16 queue_map_min;
u16 queue_map_max;
+ int node; /* Memory node */
#ifdef CONFIG_XFRM
__u8 ipsmode; /* IPSEC mode (config) */
@@ -607,6 +609,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
if (pkt_dev->traffic_class)
seq_printf(seq, " traffic_class: 0x%02x\n", pkt_dev->traffic_class);
+ if (pkt_dev->node >= 0)
+ seq_printf(seq, " node: %d\n", pkt_dev->node);
+
seq_printf(seq, " Flags: ");
if (pkt_dev->flags & F_IPV6)
@@ -660,6 +665,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
if (pkt_dev->flags & F_SVID_RND)
seq_printf(seq, "SVID_RND ");
+ if (pkt_dev->flags & F_NODE)
+ seq_printf(seq, "NODE_ALLOC ");
+
seq_puts(seq, "\n");
/* not really stopped, more like last-running-at */
@@ -1074,6 +1082,21 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->dst_mac_count);
return count;
}
+ if (!strcmp(name, "node")) {
+ len = num_arg(&user_buffer[i], 10, &value);
+ if (len < 0)
+ return len;
+
+ i += len;
+
+ if (node_possible(value)) {
+ pkt_dev->node = value;
+ sprintf(pg_result, "OK: node=%d", pkt_dev->node);
+ }
+ else
+ sprintf(pg_result, "ERROR: node not possible");
+ return count;
+ }
if (!strcmp(name, "flag")) {
char f[32];
memset(f, 0, 32);
@@ -1166,12 +1189,18 @@ static ssize_t pktgen_if_write(struct file *file,
else if (strcmp(f, "!IPV6") == 0)
pkt_dev->flags &= ~F_IPV6;
+ else if (strcmp(f, "NODE_ALLOC") == 0)
+ pkt_dev->flags |= F_NODE;
+
+ else if (strcmp(f, "!NODE_ALLOC") == 0)
+ pkt_dev->flags &= ~F_NODE;
+
else {
sprintf(pg_result,
"Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s",
f,
"IPSRC_RND, IPDST_RND, UDPSRC_RND, UDPDST_RND, "
- "MACSRC_RND, MACDST_RND, TXSIZE_RND, IPV6, MPLS_RND, VID_RND, SVID_RND, FLOW_SEQ, IPSEC\n");
+ "MACSRC_RND, MACDST_RND, TXSIZE_RND, IPV6, MPLS_RND, VID_RND, SVID_RND, FLOW_SEQ, IPSEC, NODE_ALLOC\n");
return count;
}
sprintf(pg_result, "OK: flags=0x%x", pkt_dev->flags);
@@ -2572,9 +2601,27 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
mod_cur_headers(pkt_dev);
datalen = (odev->hard_header_len + 16) & ~0xf;
- skb = __netdev_alloc_skb(odev,
- pkt_dev->cur_pkt_size + 64
- + datalen + pkt_dev->pkt_overhead, GFP_NOWAIT);
+
+ if (pkt_dev->flags & F_NODE) {
+ int node;
+
+ if (pkt_dev->node >= 0)
+ node = pkt_dev->node;
+ else
+ node = numa_node_id();
+
+ skb = __alloc_skb(NET_SKB_PAD + pkt_dev->cur_pkt_size + 64
+ + datalen + pkt_dev->pkt_overhead, GFP_NOWAIT, 0, node);
+ if (likely(skb)) {
+ skb_reserve(skb, NET_SKB_PAD);
+ skb->dev = odev;
+ }
+ }
+ else
+ skb = __netdev_alloc_skb(odev,
+ pkt_dev->cur_pkt_size + 64
+ + datalen + pkt_dev->pkt_overhead, GFP_NOWAIT);
+
if (!skb) {
sprintf(pkt_dev->result, "No memory");
return NULL;
@@ -3674,6 +3721,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
pkt_dev->svlan_p = 0;
pkt_dev->svlan_cfi = 0;
pkt_dev->svlan_id = 0xffff;
+ pkt_dev->node = -1;
err = pktgen_setup_dev(pkt_dev, ifname);
if (err)
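
Note: the F_NODE branch trades __netdev_alloc_skb() for a bare __alloc_skb() so it can pass an explicit NUMA node, which means it must reproduce what the wrapper did for free: allocating NET_SKB_PAD extra bytes of headroom, reserving them, and setting skb->dev. A userspace sketch of the headroom bookkeeping, with a hypothetical struct fake_skb standing in for struct sk_buff and an assumed NET_SKB_PAD value:

#include <stdio.h>
#include <stdlib.h>

#define NET_SKB_PAD 32		/* assumed here; the kernel picks its own value */

struct fake_skb {
	unsigned char *head, *data, *tail, *end;
};

static struct fake_skb *alloc_fake_skb(size_t size)
{
	struct fake_skb *skb = malloc(sizeof(*skb));

	if (!skb)
		return NULL;
	skb->head = malloc(NET_SKB_PAD + size);
	skb->data = skb->tail = skb->head;
	skb->end = skb->head + NET_SKB_PAD + size;
	return skb;
}

/* skb_reserve(): open headroom so a later push of link headers fits */
static void fake_skb_reserve(struct fake_skb *skb, size_t len)
{
	skb->data += len;
	skb->tail += len;
}

int main(void)
{
	struct fake_skb *skb = alloc_fake_skb(1500);

	if (!skb)
		return 1;
	fake_skb_reserve(skb, NET_SKB_PAD);
	printf("headroom=%td tailroom=%td\n",
	       skb->data - skb->head, skb->end - skb->tail);
	return 0;	/* prints: headroom=32 tailroom=1500 */
}

Skipping the skb_reserve() half would leave no room to push link-layer headers in front of the payload, which is exactly the kind of cost the node-local allocation is trying to avoid.
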
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index fe776c9ddeca..e4b9870e4706 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -98,7 +98,7 @@ int lockdep_rtnl_is_held(void)
EXPORT_SYMBOL(lockdep_rtnl_is_held);
#endif /* #ifdef CONFIG_PROVE_LOCKING */
-static struct rtnl_link *rtnl_msg_handlers[NPROTO];
+static struct rtnl_link *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];
static inline int rtm_msgindex(int msgtype)
{
@@ -118,7 +118,11 @@ static rtnl_doit_func rtnl_get_doit(int protocol, int msgindex)
{
struct rtnl_link *tab;
- tab = rtnl_msg_handlers[protocol];
+ if (protocol <= RTNL_FAMILY_MAX)
+ tab = rtnl_msg_handlers[protocol];
+ else
+ tab = NULL;
+
if (tab == NULL || tab[msgindex].doit == NULL)
tab = rtnl_msg_handlers[PF_UNSPEC];
@@ -129,7 +133,11 @@ static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex)
{
struct rtnl_link *tab;
- tab = rtnl_msg_handlers[protocol];
+ if (protocol <= RTNL_FAMILY_MAX)
+ tab = rtnl_msg_handlers[protocol];
+ else
+ tab = NULL;
+
if (tab == NULL || tab[msgindex].dumpit == NULL)
tab = rtnl_msg_handlers[PF_UNSPEC];
@@ -159,7 +167,7 @@ int __rtnl_register(int protocol, int msgtype,
struct rtnl_link *tab;
int msgindex;
- BUG_ON(protocol < 0 || protocol >= NPROTO);
+ BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
msgindex = rtm_msgindex(msgtype);
tab = rtnl_msg_handlers[protocol];
@@ -211,7 +219,7 @@ int rtnl_unregister(int protocol, int msgtype)
{
int msgindex;
- BUG_ON(protocol < 0 || protocol >= NPROTO);
+ BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
msgindex = rtm_msgindex(msgtype);
if (rtnl_msg_handlers[protocol] == NULL)
@@ -233,7 +241,7 @@ EXPORT_SYMBOL_GPL(rtnl_unregister);
*/
void rtnl_unregister_all(int protocol)
{
- BUG_ON(protocol < 0 || protocol >= NPROTO);
+ BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
kfree(rtnl_msg_handlers[protocol]);
rtnl_msg_handlers[protocol] = NULL;
@@ -600,17 +608,83 @@ static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
a->rx_compressed = b->rx_compressed;
a->tx_compressed = b->tx_compressed;
-};
+}
+static void copy_rtnl_link_stats64(void *v, const struct net_device_stats *b)
+{
+ struct rtnl_link_stats64 a;
+
+ a.rx_packets = b->rx_packets;
+ a.tx_packets = b->tx_packets;
+ a.rx_bytes = b->rx_bytes;
+ a.tx_bytes = b->tx_bytes;
+ a.rx_errors = b->rx_errors;
+ a.tx_errors = b->tx_errors;
+ a.rx_dropped = b->rx_dropped;
+ a.tx_dropped = b->tx_dropped;
+
+ a.multicast = b->multicast;
+ a.collisions = b->collisions;
+
+ a.rx_length_errors = b->rx_length_errors;
+ a.rx_over_errors = b->rx_over_errors;
+ a.rx_crc_errors = b->rx_crc_errors;
+ a.rx_frame_errors = b->rx_frame_errors;
+ a.rx_fifo_errors = b->rx_fifo_errors;
+ a.rx_missed_errors = b->rx_missed_errors;
+
+ a.tx_aborted_errors = b->tx_aborted_errors;
+ a.tx_carrier_errors = b->tx_carrier_errors;
+ a.tx_fifo_errors = b->tx_fifo_errors;
+ a.tx_heartbeat_errors = b->tx_heartbeat_errors;
+ a.tx_window_errors = b->tx_window_errors;
+
+ a.rx_compressed = b->rx_compressed;
+ a.tx_compressed = b->tx_compressed;
+ memcpy(v, &a, sizeof(a));
+}
+
+/* All VF info */
static inline int rtnl_vfinfo_size(const struct net_device *dev)
{
- if (dev->dev.parent && dev_is_pci(dev->dev.parent))
- return dev_num_vf(dev->dev.parent) *
- sizeof(struct ifla_vf_info);
- else
+ if (dev->dev.parent && dev_is_pci(dev->dev.parent)) {
+
+ int num_vfs = dev_num_vf(dev->dev.parent);
+ size_t size = nlmsg_total_size(sizeof(struct nlattr));
+ size += nlmsg_total_size(num_vfs * sizeof(struct nlattr));
+ size += num_vfs * (sizeof(struct ifla_vf_mac) +
+ sizeof(struct ifla_vf_vlan) +
+ sizeof(struct ifla_vf_tx_rate));
+ return size;
+ } else
return 0;
}
+static size_t rtnl_port_size(const struct net_device *dev)
+{
+ size_t port_size = nla_total_size(4) /* PORT_VF */
+ + nla_total_size(PORT_PROFILE_MAX) /* PORT_PROFILE */
+ + nla_total_size(sizeof(struct ifla_port_vsi))
+ /* PORT_VSI_TYPE */
+ + nla_total_size(PORT_UUID_MAX) /* PORT_INSTANCE_UUID */
+ + nla_total_size(PORT_UUID_MAX) /* PORT_HOST_UUID */
+ nla_total_size(1) /* PORT_VDP_REQUEST */
+ + nla_total_size(2); /* PORT_VDP_RESPONSE */
+ size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
+ size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
+ + port_size;
+ size_t port_self_size = nla_total_size(sizeof(struct nlattr))
+ + port_size;
+
+ if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent)
+ return 0;
+ if (dev_num_vf(dev->dev.parent))
+ return port_self_size + vf_ports_size +
+ vf_port_size * dev_num_vf(dev->dev.parent);
+ else
+ return port_self_size;
+}
+
static inline size_t if_nlmsg_size(const struct net_device *dev)
{
return NLMSG_ALIGN(sizeof(struct ifinfomsg))
@@ -619,6 +693,7 @@ static inline size_t if_nlmsg_size(const struct net_device *dev)
+ nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
+ nla_total_size(sizeof(struct rtnl_link_ifmap))
+ nla_total_size(sizeof(struct rtnl_link_stats))
+ + nla_total_size(sizeof(struct rtnl_link_stats64))
+ nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
+ nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
+ nla_total_size(4) /* IFLA_TXQLEN */
@@ -629,10 +704,83 @@ static inline size_t if_nlmsg_size(const struct net_device *dev)
+ nla_total_size(1) /* IFLA_OPERSTATE */
+ nla_total_size(1) /* IFLA_LINKMODE */
+ nla_total_size(4) /* IFLA_NUM_VF */
- + nla_total_size(rtnl_vfinfo_size(dev)) /* IFLA_VFINFO */
+ + rtnl_vfinfo_size(dev) /* IFLA_VFINFO_LIST */
+ + rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
+ rtnl_link_get_size(dev); /* IFLA_LINKINFO */
}
+static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
+{
+ struct nlattr *vf_ports;
+ struct nlattr *vf_port;
+ int vf;
+ int err;
+
+ vf_ports = nla_nest_start(skb, IFLA_VF_PORTS);
+ if (!vf_ports)
+ return -EMSGSIZE;
+
+ for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
+ vf_port = nla_nest_start(skb, IFLA_VF_PORT);
+ if (!vf_port) {
+ nla_nest_cancel(skb, vf_ports);
+ return -EMSGSIZE;
+ }
+ NLA_PUT_U32(skb, IFLA_PORT_VF, vf);
+ err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
+ if (err) {
+nla_put_failure:
+ nla_nest_cancel(skb, vf_port);
+ continue;
+ }
+ nla_nest_end(skb, vf_port);
+ }
+
+ nla_nest_end(skb, vf_ports);
+
+ return 0;
+}
+
+static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
+{
+ struct nlattr *port_self;
+ int err;
+
+ port_self = nla_nest_start(skb, IFLA_PORT_SELF);
+ if (!port_self)
+ return -EMSGSIZE;
+
+ err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
+ if (err) {
+ nla_nest_cancel(skb, port_self);
+ return err;
+ }
+
+ nla_nest_end(skb, port_self);
+
+ return 0;
+}
+
+static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev)
+{
+ int err;
+
+ if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent)
+ return 0;
+
+ err = rtnl_port_self_fill(skb, dev);
+ if (err)
+ return err;
+
+ if (dev_num_vf(dev->dev.parent)) {
+ err = rtnl_vf_ports_fill(skb, dev);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
int type, u32 pid, u32 seq, u32 change,
unsigned int flags)
@@ -698,17 +846,52 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
stats = dev_get_stats(dev);
copy_rtnl_link_stats(nla_data(attr), stats);
+ attr = nla_reserve(skb, IFLA_STATS64,
+ sizeof(struct rtnl_link_stats64));
+ if (attr == NULL)
+ goto nla_put_failure;
+ copy_rtnl_link_stats64(nla_data(attr), stats);
+
+ if (dev->dev.parent)
+ NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent));
+
if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) {
int i;
- struct ifla_vf_info ivi;
- NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent));
- for (i = 0; i < dev_num_vf(dev->dev.parent); i++) {
+ struct nlattr *vfinfo, *vf;
+ int num_vfs = dev_num_vf(dev->dev.parent);
+
+ vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST);
+ if (!vfinfo)
+ goto nla_put_failure;
+ for (i = 0; i < num_vfs; i++) {
+ struct ifla_vf_info ivi;
+ struct ifla_vf_mac vf_mac;
+ struct ifla_vf_vlan vf_vlan;
+ struct ifla_vf_tx_rate vf_tx_rate;
if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
break;
- NLA_PUT(skb, IFLA_VFINFO, sizeof(ivi), &ivi);
+ vf_mac.vf = vf_vlan.vf = vf_tx_rate.vf = ivi.vf;
+ memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
+ vf_vlan.vlan = ivi.vlan;
+ vf_vlan.qos = ivi.qos;
+ vf_tx_rate.rate = ivi.tx_rate;
+ vf = nla_nest_start(skb, IFLA_VF_INFO);
+ if (!vf) {
+ nla_nest_cancel(skb, vfinfo);
+ goto nla_put_failure;
+ }
+ NLA_PUT(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac);
+ NLA_PUT(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan);
+ NLA_PUT(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate), &vf_tx_rate);
+ nla_nest_end(skb, vf);
}
+ nla_nest_end(skb, vfinfo);
}
+
+ if (rtnl_port_fill(skb, dev))
+ goto nla_put_failure;
+
if (dev->rtnl_link_ops) {
if (rtnl_link_fill(skb, dev) < 0)
goto nla_put_failure;
@@ -769,6 +952,22 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_LINKINFO] = { .type = NLA_NESTED },
[IFLA_NET_NS_PID] = { .type = NLA_U32 },
[IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 },
+ [IFLA_VFINFO_LIST] = { .type = NLA_NESTED },
+ [IFLA_VF_PORTS] = { .type = NLA_NESTED },
+ [IFLA_PORT_SELF] = { .type = NLA_NESTED },
+};
+EXPORT_SYMBOL(ifla_policy);
+
+static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
+ [IFLA_INFO_KIND] = { .type = NLA_STRING },
+ [IFLA_INFO_DATA] = { .type = NLA_NESTED },
+};
+
+static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = {
+ [IFLA_VF_INFO] = { .type = NLA_NESTED },
+};
+
+static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
[IFLA_VF_MAC] = { .type = NLA_BINARY,
.len = sizeof(struct ifla_vf_mac) },
[IFLA_VF_VLAN] = { .type = NLA_BINARY,
@@ -776,11 +975,19 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_VF_TX_RATE] = { .type = NLA_BINARY,
.len = sizeof(struct ifla_vf_tx_rate) },
};
-EXPORT_SYMBOL(ifla_policy);
-static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
- [IFLA_INFO_KIND] = { .type = NLA_STRING },
- [IFLA_INFO_DATA] = { .type = NLA_NESTED },
+static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
+ [IFLA_PORT_VF] = { .type = NLA_U32 },
+ [IFLA_PORT_PROFILE] = { .type = NLA_STRING,
+ .len = PORT_PROFILE_MAX },
+ [IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY,
+ .len = sizeof(struct ifla_port_vsi)},
+ [IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
+ .len = PORT_UUID_MAX },
+ [IFLA_PORT_HOST_UUID] = { .type = NLA_STRING,
+ .len = PORT_UUID_MAX },
+ [IFLA_PORT_REQUEST] = { .type = NLA_U8, },
+ [IFLA_PORT_RESPONSE] = { .type = NLA_U16, },
};
struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
@@ -812,6 +1019,52 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
return 0;
}
+static int do_setvfinfo(struct net_device *dev, struct nlattr *attr)
+{
+ int rem, err = -EINVAL;
+ struct nlattr *vf;
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+ nla_for_each_nested(vf, attr, rem) {
+ switch (nla_type(vf)) {
+ case IFLA_VF_MAC: {
+ struct ifla_vf_mac *ivm;
+ ivm = nla_data(vf);
+ err = -EOPNOTSUPP;
+ if (ops->ndo_set_vf_mac)
+ err = ops->ndo_set_vf_mac(dev, ivm->vf,
+ ivm->mac);
+ break;
+ }
+ case IFLA_VF_VLAN: {
+ struct ifla_vf_vlan *ivv;
+ ivv = nla_data(vf);
+ err = -EOPNOTSUPP;
+ if (ops->ndo_set_vf_vlan)
+ err = ops->ndo_set_vf_vlan(dev, ivv->vf,
+ ivv->vlan,
+ ivv->qos);
+ break;
+ }
+ case IFLA_VF_TX_RATE: {
+ struct ifla_vf_tx_rate *ivt;
+ ivt = nla_data(vf);
+ err = -EOPNOTSUPP;
+ if (ops->ndo_set_vf_tx_rate)
+ err = ops->ndo_set_vf_tx_rate(dev, ivt->vf,
+ ivt->rate);
+ break;
+ }
+ default:
+ err = -EINVAL;
+ break;
+ }
+ if (err)
+ break;
+ }
+ return err;
+}
+
static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
struct nlattr **tb, char *ifname, int modified)
{
@@ -942,37 +1195,61 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
write_unlock_bh(&dev_base_lock);
}
- if (tb[IFLA_VF_MAC]) {
- struct ifla_vf_mac *ivm;
- ivm = nla_data(tb[IFLA_VF_MAC]);
- err = -EOPNOTSUPP;
- if (ops->ndo_set_vf_mac)
- err = ops->ndo_set_vf_mac(dev, ivm->vf, ivm->mac);
- if (err < 0)
- goto errout;
- modified = 1;
+ if (tb[IFLA_VFINFO_LIST]) {
+ struct nlattr *attr;
+ int rem;
+ nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
+ if (nla_type(attr) != IFLA_VF_INFO)
+ goto errout;
+ err = do_setvfinfo(dev, attr);
+ if (err < 0)
+ goto errout;
+ modified = 1;
+ }
}
+ err = 0;
+
+ if (tb[IFLA_VF_PORTS]) {
+ struct nlattr *port[IFLA_PORT_MAX+1];
+ struct nlattr *attr;
+ int vf;
+ int rem;
- if (tb[IFLA_VF_VLAN]) {
- struct ifla_vf_vlan *ivv;
- ivv = nla_data(tb[IFLA_VF_VLAN]);
err = -EOPNOTSUPP;
- if (ops->ndo_set_vf_vlan)
- err = ops->ndo_set_vf_vlan(dev, ivv->vf,
- ivv->vlan,
- ivv->qos);
- if (err < 0)
+ if (!ops->ndo_set_vf_port)
goto errout;
- modified = 1;
+
+ nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
+ if (nla_type(attr) != IFLA_VF_PORT)
+ continue;
+ err = nla_parse_nested(port, IFLA_PORT_MAX,
+ attr, ifla_port_policy);
+ if (err < 0)
+ goto errout;
+ if (!port[IFLA_PORT_VF]) {
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+ vf = nla_get_u32(port[IFLA_PORT_VF]);
+ err = ops->ndo_set_vf_port(dev, vf, port);
+ if (err < 0)
+ goto errout;
+ modified = 1;
+ }
}
err = 0;
- if (tb[IFLA_VF_TX_RATE]) {
- struct ifla_vf_tx_rate *ivt;
- ivt = nla_data(tb[IFLA_VF_TX_RATE]);
+ if (tb[IFLA_PORT_SELF]) {
+ struct nlattr *port[IFLA_PORT_MAX+1];
+
+ err = nla_parse_nested(port, IFLA_PORT_MAX,
+ tb[IFLA_PORT_SELF], ifla_port_policy);
+ if (err < 0)
+ goto errout;
+
err = -EOPNOTSUPP;
- if (ops->ndo_set_vf_tx_rate)
- err = ops->ndo_set_vf_tx_rate(dev, ivt->vf, ivt->rate);
+ if (ops->ndo_set_vf_port)
+ err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
if (err < 0)
goto errout;
modified = 1;
@@ -1336,7 +1613,7 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
if (s_idx == 0)
s_idx = 1;
- for (idx = 1; idx < NPROTO; idx++) {
+ for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
int type = cb->nlh->nlmsg_type-RTM_BASE;
if (idx < s_idx || idx == PF_PACKET)
continue;
@@ -1404,9 +1681,6 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
return 0;
family = ((struct rtgenmsg *)NLMSG_DATA(nlh))->rtgen_family;
- if (family >= NPROTO)
- return -EAFNOSUPPORT;
-
sz_idx = type>>2;
kind = type&3;
@@ -1474,6 +1748,7 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
case NETDEV_POST_INIT:
case NETDEV_REGISTER:
case NETDEV_CHANGE:
+ case NETDEV_PRE_TYPE_CHANGE:
case NETDEV_GOING_DOWN:
case NETDEV_UNREGISTER:
case NETDEV_UNREGISTER_BATCH:
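
Note: rtnl_vf_ports_fill() and rtnl_port_self_fill() lean on the netlink nesting helpers: nla_nest_start() reserves an attribute header whose length is unknown until nla_nest_end() patches it in, and everything emitted in between becomes that attribute's payload. Stripped of the skb machinery, the wire format is a short TLV exercise; a hypothetical flat-buffer sketch (type numbers are illustrative, and real code should use the nla_* helpers):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ALIGN4(x)	(((x) + 3U) & ~3U)

struct attr_hdr {
	uint16_t len;	/* header + payload, before padding */
	uint16_t type;
};

static size_t put_attr(unsigned char *buf, size_t off, uint16_t type,
		       const void *data, uint16_t dlen)
{
	struct attr_hdr h = { (uint16_t)(sizeof(h) + dlen), type };

	memcpy(buf + off, &h, sizeof(h));
	memcpy(buf + off + sizeof(h), data, dlen);
	return off + ALIGN4(h.len);	/* payloads are padded to 4 bytes */
}

int main(void)
{
	unsigned char buf[64] = { 0 };
	size_t nest_start = 0;			/* nla_nest_start() remembers this */
	size_t off = sizeof(struct attr_hdr);	/* leave room for the nest header */
	uint32_t vf = 0;
	struct attr_hdr outer;

	off = put_attr(buf, off, 1 /* e.g. a PORT_VF-style attr */, &vf, sizeof(vf));

	/* nla_nest_end(): patch the outer length now that the payload is known */
	outer.len = (uint16_t)(off - nest_start);
	outer.type = 2;				/* e.g. a VF_PORT-style nest */
	memcpy(buf + nest_start, &outer, sizeof(outer));

	printf("nest len=%u (hdr 4 + inner 8), first inner type=%u\n",
	       outer.len, ((struct attr_hdr *)(buf + sizeof(outer)))->type);
	return 0;
}

This layout is also why a failed inner fill can be undone cheaply with nla_nest_cancel(): the writer simply rewinds to the saved start offset.
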
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 931981774b1a..66d9c416851e 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -117,7 +117,7 @@ static const struct pipe_buf_operations sock_pipe_buf_ops = {
*
* Out of line support code for skb_put(). Not user callable.
*/
-void skb_over_panic(struct sk_buff *skb, int sz, void *here)
+static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
"data:%p tail:%#lx end:%#lx dev:%s\n",
@@ -126,7 +126,6 @@ void skb_over_panic(struct sk_buff *skb, int sz, void *here)
skb->dev ? skb->dev->name : "<NULL>");
BUG();
}
-EXPORT_SYMBOL(skb_over_panic);
/**
* skb_under_panic - private function
@@ -137,7 +136,7 @@ EXPORT_SYMBOL(skb_over_panic);
* Out of line support code for skb_push(). Not user callable.
*/
-void skb_under_panic(struct sk_buff *skb, int sz, void *here)
+static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
"data:%p tail:%#lx end:%#lx dev:%s\n",
@@ -146,7 +145,6 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
skb->dev ? skb->dev->name : "<NULL>");
BUG();
}
-EXPORT_SYMBOL(skb_under_panic);
/* Allocate a new skbuff. We do this ourselves so we can fill in a few
* 'private' fields and also do memory statistics to find all the
@@ -183,12 +181,14 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
if (!skb)
goto out;
+ prefetchw(skb);
size = SKB_DATA_ALIGN(size);
data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
gfp_mask, node);
if (!data)
goto nodata;
+ prefetchw(data + size);
/*
* Only clear those fields we need to clear, not those that we will
@@ -210,15 +210,8 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
/* make sure we initialize shinfo sequentially */
shinfo = skb_shinfo(skb);
+ memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
atomic_set(&shinfo->dataref, 1);
- shinfo->nr_frags = 0;
- shinfo->gso_size = 0;
- shinfo->gso_segs = 0;
- shinfo->gso_type = 0;
- shinfo->ip6_frag_id = 0;
- shinfo->tx_flags.flags = 0;
- skb_frag_list_init(skb);
- memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));
if (fclone) {
struct sk_buff *child = skb + 1;
@@ -507,16 +500,10 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
return 0;
skb_release_head_state(skb);
+
shinfo = skb_shinfo(skb);
+ memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
atomic_set(&shinfo->dataref, 1);
- shinfo->nr_frags = 0;
- shinfo->gso_size = 0;
- shinfo->gso_segs = 0;
- shinfo->gso_type = 0;
- shinfo->ip6_frag_id = 0;
- shinfo->tx_flags.flags = 0;
- skb_frag_list_init(skb);
- memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));
memset(skb, 0, offsetof(struct sk_buff, tail));
skb->data = skb->head + NET_SKB_PAD;
@@ -533,7 +520,8 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
new->transport_header = old->transport_header;
new->network_header = old->network_header;
new->mac_header = old->mac_header;
- skb_dst_set(new, dst_clone(skb_dst(old)));
+ skb_dst_copy(new, old);
+ new->rxhash = old->rxhash;
#ifdef CONFIG_XFRM
new->sp = secpath_get(old->sp);
#endif
@@ -581,6 +569,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
C(len);
C(data_len);
C(mac_len);
+ C(rxhash);
n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
n->cloned = 1;
n->nohdr = 0;
@@ -1051,7 +1040,7 @@ EXPORT_SYMBOL(skb_push);
*/
unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
- return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
+ return skb_pull_inline(skb, len);
}
EXPORT_SYMBOL(skb_pull);
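
Note: both __alloc_skb() and skb_recycle_check() now clear the leading fields of struct skb_shared_info with a single memset() up to offsetof(..., dataref) instead of zeroing nr_frags, gso_size and friends one by one; that works because every field needing a zero is laid out contiguously before dataref. A compact sketch of the idiom, with a hypothetical stand-in struct:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct shared_info {
	unsigned short nr_frags;	/* fields to clear ... */
	unsigned short gso_size;
	unsigned int gso_type;
	int dataref;			/* ... everything from here is set explicitly */
};

int main(void)
{
	struct shared_info si = { 3, 1448, 7, 2 };	/* stale recycled state */

	memset(&si, 0, offsetof(struct shared_info, dataref));
	si.dataref = 1;		/* kernel: atomic_set(&shinfo->dataref, 1) */

	printf("nr_frags=%u gso_size=%u gso_type=%u dataref=%d\n",
	       si.nr_frags, si.gso_size, si.gso_type, si.dataref);
	return 0;	/* prints: nr_frags=0 gso_size=0 gso_type=0 dataref=1 */
}

The prerequisite, which the kernel structure satisfies by layout, is that any field that must survive sits at or after the offsetof() cut line and keeps an explicit initialization.
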
diff --git a/net/core/sock.c b/net/core/sock.c
index c5812bbc2cc9..bf88a167c8f2 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -307,6 +307,11 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
*/
skb_len = skb->len;
+ /* we escape from the rcu protected region, make sure we don't leak
+ * a non-refcounted dst
+ */
+ skb_dst_force(skb);
+
spin_lock_irqsave(&list->lock, flags);
skb->dropcount = atomic_read(&sk->sk_drops);
__skb_queue_tail(list, skb);
@@ -327,6 +332,10 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
skb->dev = NULL;
+ if (sk_rcvqueues_full(sk, skb)) {
+ atomic_inc(&sk->sk_drops);
+ goto discard_and_relse;
+ }
if (nested)
bh_lock_sock_nested(sk);
else
@@ -364,11 +373,11 @@ EXPORT_SYMBOL(sk_reset_txq);
struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
- struct dst_entry *dst = sk->sk_dst_cache;
+ struct dst_entry *dst = __sk_dst_get(sk);
if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
sk_tx_queue_clear(sk);
- sk->sk_dst_cache = NULL;
+ rcu_assign_pointer(sk->sk_dst_cache, NULL);
dst_release(dst);
return NULL;
}
@@ -1157,7 +1166,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
skb_queue_head_init(&newsk->sk_async_wait_queue);
#endif
- rwlock_init(&newsk->sk_dst_lock);
+ spin_lock_init(&newsk->sk_dst_lock);
rwlock_init(&newsk->sk_callback_lock);
lockdep_set_class_and_name(&newsk->sk_callback_lock,
af_callback_keys + newsk->sk_family,
@@ -1207,7 +1216,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
*/
sk_refcnt_debug_inc(newsk);
sk_set_socket(newsk, NULL);
- newsk->sk_sleep = NULL;
+ newsk->sk_wq = NULL;
if (newsk->sk_prot->sockets_allocated)
percpu_counter_inc(newsk->sk_prot->sockets_allocated);
@@ -1227,6 +1236,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
sk->sk_route_caps = dst->dev->features;
if (sk->sk_route_caps & NETIF_F_GSO)
sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
+ sk->sk_route_caps &= ~sk->sk_route_nocaps;
if (sk_can_gso(sk)) {
if (dst->header_len) {
sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
@@ -1395,7 +1405,7 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo)
if (signal_pending(current))
break;
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
break;
if (sk->sk_shutdown & SEND_SHUTDOWN)
@@ -1404,7 +1414,7 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo)
break;
timeo = schedule_timeout(timeo);
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
return timeo;
}
@@ -1531,6 +1541,7 @@ static void __release_sock(struct sock *sk)
do {
struct sk_buff *next = skb->next;
+ WARN_ON_ONCE(skb_dst_is_noref(skb));
skb->next = NULL;
sk_backlog_rcv(sk, skb);
@@ -1570,11 +1581,11 @@ int sk_wait_data(struct sock *sk, long *timeo)
int rc;
DEFINE_WAIT(wait);
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
return rc;
}
EXPORT_SYMBOL(sk_wait_data);
@@ -1796,41 +1807,53 @@ EXPORT_SYMBOL(sock_no_sendpage);
static void sock_def_wakeup(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
- if (sk_has_sleeper(sk))
- wake_up_interruptible_all(sk->sk_sleep);
- read_unlock(&sk->sk_callback_lock);
+ struct socket_wq *wq;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_all(&wq->wait);
+ rcu_read_unlock();
}
static void sock_def_error_report(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
- if (sk_has_sleeper(sk))
- wake_up_interruptible_poll(sk->sk_sleep, POLLERR);
+ struct socket_wq *wq;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_poll(&wq->wait, POLLERR);
sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
- read_unlock(&sk->sk_callback_lock);
+ rcu_read_unlock();
}
static void sock_def_readable(struct sock *sk, int len)
{
- read_lock(&sk->sk_callback_lock);
- if (sk_has_sleeper(sk))
- wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN |
+ struct socket_wq *wq;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
POLLRDNORM | POLLRDBAND);
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
- read_unlock(&sk->sk_callback_lock);
+ rcu_read_unlock();
}
static void sock_def_write_space(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
+ struct socket_wq *wq;
+
+ rcu_read_lock();
/* Do not wake up a writer until he can make "significant"
* progress. --DaveM
*/
if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
- if (sk_has_sleeper(sk))
- wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT |
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
POLLWRNORM | POLLWRBAND);
/* Should agree with poll, otherwise some programs break */
@@ -1838,7 +1861,7 @@ static void sock_def_write_space(struct sock *sk)
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
}
- read_unlock(&sk->sk_callback_lock);
+ rcu_read_unlock();
}
static void sock_def_destruct(struct sock *sk)
@@ -1885,7 +1908,6 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->sk_allocation = GFP_KERNEL;
sk->sk_rcvbuf = sysctl_rmem_default;
sk->sk_sndbuf = sysctl_wmem_default;
- sk->sk_backlog.limit = sk->sk_rcvbuf << 1;
sk->sk_state = TCP_CLOSE;
sk_set_socket(sk, sock);
@@ -1893,12 +1915,12 @@ void sock_init_data(struct socket *sock, struct sock *sk)
if (sock) {
sk->sk_type = sock->type;
- sk->sk_sleep = &sock->wait;
+ sk->sk_wq = sock->wq;
sock->sk = sk;
} else
- sk->sk_sleep = NULL;
+ sk->sk_wq = NULL;
- rwlock_init(&sk->sk_dst_lock);
+ spin_lock_init(&sk->sk_dst_lock);
rwlock_init(&sk->sk_callback_lock);
lockdep_set_class_and_name(&sk->sk_callback_lock,
af_callback_keys + sk->sk_family,
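
Note: sk_receive_skb() gains an early admission check: when the receive queue plus backlog already exceed the socket's budget, the packet is dropped and sk_drops incremented before the socket lock is even taken. The real test lives in sk_rcvqueues_full() and compares queued socket memory against sk->sk_rcvbuf; the sketch below models only the policy, with hypothetical fields and a simplified byte count:

#include <stdio.h>

struct sock_sketch {
	unsigned int rmem_alloc;	/* bytes already in the receive queue */
	unsigned int backlog_len;	/* bytes parked in the backlog */
	unsigned int rcvbuf;		/* the budget (sk->sk_rcvbuf) */
	unsigned int drops;		/* sk->sk_drops */
};

static int rcvqueues_full(const struct sock_sketch *sk, unsigned int len)
{
	return sk->rmem_alloc + sk->backlog_len + len > sk->rcvbuf;
}

static void receive(struct sock_sketch *sk, unsigned int len)
{
	if (rcvqueues_full(sk, len)) {
		sk->drops++;		/* atomic_inc(&sk->sk_drops) */
		return;			/* goto discard_and_relse */
	}
	sk->backlog_len += len;		/* queue for later processing */
}

int main(void)
{
	struct sock_sketch sk = { .rcvbuf = 4096 };
	int i;

	for (i = 0; i < 5; i++)
		receive(&sk, 1500);
	printf("queued=%u drops=%u\n", sk.backlog_len, sk.drops);
	return 0;			/* prints: queued=3000 drops=3 */
}

Dropping before bh_lock_sock() matters under floods: an unbounded backlog would otherwise let a fast sender pin memory and stretch lock hold times on a slow receiver.
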
diff --git a/net/core/stream.c b/net/core/stream.c
index a37debfeb1b2..cc196f42b8d8 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -28,15 +28,19 @@
void sk_stream_write_space(struct sock *sk)
{
struct socket *sock = sk->sk_socket;
+ struct socket_wq *wq;
if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock) {
clear_bit(SOCK_NOSPACE, &sock->flags);
- if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible_poll(sk->sk_sleep, POLLOUT |
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_poll(&wq->wait, POLLOUT |
POLLWRNORM | POLLWRBAND);
- if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
+ if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT);
+ rcu_read_unlock();
}
}
@@ -66,13 +70,13 @@ int sk_stream_wait_connect(struct sock *sk, long *timeo_p)
if (signal_pending(tsk))
return sock_intr_errno(*timeo_p);
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sk->sk_write_pending++;
done = sk_wait_event(sk, timeo_p,
!sk->sk_err &&
!((1 << sk->sk_state) &
~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)));
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
sk->sk_write_pending--;
} while (!done);
return 0;
@@ -96,13 +100,13 @@ void sk_stream_wait_close(struct sock *sk, long timeout)
DEFINE_WAIT(wait);
do {
- prepare_to_wait(sk->sk_sleep, &wait,
+ prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
if (sk_wait_event(sk, &timeout, !sk_stream_closing(sk)))
break;
} while (!signal_pending(current) && timeout);
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
}
}
@@ -126,7 +130,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
while (1) {
set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
goto do_error;
@@ -157,7 +161,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
*timeo_p = current_timeo;
}
out:
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
return err;
do_error:
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index b7b6b8208f75..01eee5d984be 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -11,12 +11,72 @@
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/ratelimit.h>
+#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/ip.h>
#include <net/sock.h>
+#ifdef CONFIG_RPS
+static int rps_sock_flow_sysctl(ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ unsigned int orig_size, size;
+ int ret, i;
+ ctl_table tmp = {
+ .data = &size,
+ .maxlen = sizeof(size),
+ .mode = table->mode
+ };
+ struct rps_sock_flow_table *orig_sock_table, *sock_table;
+ static DEFINE_MUTEX(sock_flow_mutex);
+
+ mutex_lock(&sock_flow_mutex);
+
+ orig_sock_table = rps_sock_flow_table;
+ size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0;
+
+ ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
+
+ if (write) {
+ if (size) {
+ if (size > 1<<30) {
+ /* Enforce limit to prevent overflow */
+ mutex_unlock(&sock_flow_mutex);
+ return -EINVAL;
+ }
+ size = roundup_pow_of_two(size);
+ if (size != orig_size) {
+ sock_table =
+ vmalloc(RPS_SOCK_FLOW_TABLE_SIZE(size));
+ if (!sock_table) {
+ mutex_unlock(&sock_flow_mutex);
+ return -ENOMEM;
+ }
+
+ sock_table->mask = size - 1;
+ } else
+ sock_table = orig_sock_table;
+
+ for (i = 0; i < size; i++)
+ sock_table->ents[i] = RPS_NO_CPU;
+ } else
+ sock_table = NULL;
+
+ if (sock_table != orig_sock_table) {
+ rcu_assign_pointer(rps_sock_flow_table, sock_table);
+ synchronize_rcu();
+ vfree(orig_sock_table);
+ }
+ }
+
+ mutex_unlock(&sock_flow_mutex);
+
+ return ret;
+}
+#endif /* CONFIG_RPS */
+
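The handler above swaps the global flow table with the standard RCU publish-then-reclaim sequence, so readers that dereference rps_sock_flow_table under rcu_read_lock() never see a half-built or freed table:

/* The replace sequence from rps_sock_flow_sysctl(), in isolation: */
rcu_assign_pointer(rps_sock_flow_table, sock_table);	/* publish */
synchronize_rcu();		/* wait for readers of the old table */
vfree(orig_sock_table);		/* now safe to reclaim */

From userspace the knob behaves as the handler implies: a written size is rounded up to a power of two, anything above 1<<30 fails with EINVAL, and writing 0 frees the table, e.g. echo 32768 > /proc/sys/net/core/rps_sock_flow_entries.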
static struct ctl_table net_core_table[] = {
#ifdef CONFIG_NET
{
@@ -62,6 +122,13 @@ static struct ctl_table net_core_table[] = {
.proc_handler = proc_dointvec
},
{
+ .procname = "netdev_tstamp_prequeue",
+ .data = &netdev_tstamp_prequeue,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
.procname = "message_cost",
.data = &net_ratelimit_state.interval,
.maxlen = sizeof(int),
@@ -82,6 +149,14 @@ static struct ctl_table net_core_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec
},
+#ifdef CONFIG_RPS
+ {
+ .procname = "rps_sock_flow_entries",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = rps_sock_flow_sysctl
+ },
+#endif
#endif /* CONFIG_NET */
{
.procname = "netdev_budget",
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index bcd7632299f5..d3235899c7e3 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -208,7 +208,7 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
goto restart_timer;
}
- ccid3_pr_debug("%s(%p, state=%s) - entry \n", dccp_role(sk), sk,
+ ccid3_pr_debug("%s(%p, state=%s) - entry\n", dccp_role(sk), sk,
ccid3_tx_state_name(hc->tx_state));
if (hc->tx_state == TFRC_SSTATE_FBACK)
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 5ef32c2f0d6a..a10a61a1ded2 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -189,7 +189,7 @@ enum {
#define DCCP_MIB_MAX __DCCP_MIB_MAX
struct dccp_mib {
unsigned long mibs[DCCP_MIB_MAX];
-} __SNMP_MIB_ALIGN__;
+};
DECLARE_SNMP_STAT(struct dccp_mib, dccp_statistics);
#define DCCP_INC_STATS(field) SNMP_INC_STATS(dccp_statistics, field)
@@ -223,7 +223,7 @@ static inline void dccp_csum_outgoing(struct sk_buff *skb)
skb->csum = skb_checksum(skb, 0, (cov > skb->len)? skb->len : cov, 0);
}
-extern void dccp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb);
+extern void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb);
extern int dccp_retransmit_skb(struct sock *sk);
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 9ec717426024..58f7bc156850 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -415,7 +415,7 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
dp->dccps_awl, dp->dccps_awh)) {
dccp_pr_debug("invalid ackno: S.AWL=%llu, "
- "P.ackno=%llu, S.AWH=%llu \n",
+ "P.ackno=%llu, S.AWH=%llu\n",
(unsigned long long)dp->dccps_awl,
(unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq,
(unsigned long long)dp->dccps_awh);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 52ffa1cde15a..d9b11ef8694c 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -349,7 +349,7 @@ static inline __sum16 dccp_v4_csum_finish(struct sk_buff *skb,
return csum_tcpudp_magic(src, dst, skb->len, IPPROTO_DCCP, skb->csum);
}
-void dccp_v4_send_check(struct sock *sk, int unused, struct sk_buff *skb)
+void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
const struct inet_sock *inet = inet_sk(sk);
struct dccp_hdr *dh = dccp_hdr(skb);
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 3b11e41a2929..091698899594 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -60,8 +60,7 @@ static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum);
}
-static inline void dccp_v6_send_check(struct sock *sk, int unused_value,
- struct sk_buff *skb)
+static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct dccp_hdr *dh = dccp_hdr(skb);
@@ -293,7 +292,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
&ireq6->loc_addr,
&ireq6->rmt_addr);
ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
- err = ip6_xmit(sk, skb, &fl, opt, 0);
+ err = ip6_xmit(sk, skb, &fl, opt);
err = net_xmit_eval(err);
}
@@ -348,7 +347,7 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) {
if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) {
skb_dst_set(skb, dst);
- ip6_xmit(ctl_sk, skb, &fl, NULL, 0);
+ ip6_xmit(ctl_sk, skb, &fl, NULL);
DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
return;
diff --git a/net/dccp/output.c b/net/dccp/output.c
index fc3f436440b4..aadbdb58758b 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -129,14 +129,14 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
break;
}
- icsk->icsk_af_ops->send_check(sk, 0, skb);
+ icsk->icsk_af_ops->send_check(sk, skb);
if (set_ack)
dccp_event_ack_sent(sk);
DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
- err = icsk->icsk_af_ops->queue_xmit(skb, 0);
+ err = icsk->icsk_af_ops->queue_xmit(skb);
return net_xmit_eval(err);
}
return -ENOBUFS;
@@ -195,15 +195,17 @@ EXPORT_SYMBOL_GPL(dccp_sync_mss);
void dccp_write_space(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
+ struct socket_wq *wq;
- if (sk_has_sleeper(sk))
- wake_up_interruptible(sk->sk_sleep);
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible(&wq->wait);
/* Should agree with poll, otherwise some programs break */
if (sock_writeable(sk))
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
- read_unlock(&sk->sk_callback_lock);
+ rcu_read_unlock();
}
/**
@@ -225,7 +227,7 @@ static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb, int delay)
dccp_pr_debug("delayed send by %d msec\n", delay);
jiffdelay = msecs_to_jiffies(delay);
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sk->sk_write_pending++;
release_sock(sk);
@@ -241,7 +243,7 @@ static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb, int delay)
rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
} while ((delay = rc) > 0);
out:
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
return rc;
do_error:
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index a0e38d8018f5..b03ecf6b2bb0 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -312,7 +312,7 @@ unsigned int dccp_poll(struct file *file, struct socket *sock,
unsigned int mask;
struct sock *sk = sock->sk;
- sock_poll_wait(file, sk->sk_sleep, wait);
+ sock_poll_wait(file, sk_sleep(sk), wait);
if (sk->sk_state == DCCP_LISTEN)
return inet_csk_listen_poll(sk);
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index bbfeb5eae46a..1a9aa05d4dc4 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -38,7 +38,7 @@ static int dccp_write_timeout(struct sock *sk)
if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) {
if (icsk->icsk_retransmits != 0)
- dst_negative_advice(&sk->sk_dst_cache, sk);
+ dst_negative_advice(sk);
retry_until = icsk->icsk_syn_retries ?
: sysctl_dccp_request_retries;
} else {
@@ -63,7 +63,7 @@ static int dccp_write_timeout(struct sock *sk)
Golden words :-).
*/
- dst_negative_advice(&sk->sk_dst_cache, sk);
+ dst_negative_advice(sk);
}
retry_until = sysctl_dccp_retries2;
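dst_negative_advice() now takes the socket itself instead of a pointer to its dst cache slot, letting one helper re-check and replace sk->sk_dst_cache consistently. A hedged sketch of the presumed new form, consistent with the RCU handling of sk->sk_dst_cache visible elsewhere in this diff:

/* Sketch only; assumes sk->sk_dst_cache is now RCU-managed, as the
 * rcu_dereference_check() call sites below suggest.
 */
static inline void sketch_negative_advice(struct sock *sk)
{
	struct dst_entry *ndst, *dst = __sk_dst_get(sk);

	if (dst && dst->ops->negative_advice) {
		ndst = dst->ops->negative_advice(dst);
		if (ndst != dst)
			rcu_assign_pointer(sk->sk_dst_cache, ndst);
	}
}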
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 2b494fac9468..d6b93d19790f 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -446,7 +446,7 @@ static void dn_destruct(struct sock *sk)
skb_queue_purge(&scp->other_xmit_queue);
skb_queue_purge(&scp->other_receive_queue);
- dst_release(xchg(&sk->sk_dst_cache, NULL));
+ dst_release(rcu_dereference_check(sk->sk_dst_cache, 1));
}
static int dn_memory_pressure;
@@ -832,7 +832,7 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
scp->segsize_loc = dst_metric(__sk_dst_get(sk), RTAX_ADVMSS);
dn_send_conn_conf(sk, allocation);
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
for(;;) {
release_sock(sk);
if (scp->state == DN_CC)
@@ -850,9 +850,9 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
err = -EAGAIN;
if (!*timeo)
break;
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
if (err == 0) {
sk->sk_socket->state = SS_CONNECTED;
} else if (scp->state != DN_CC) {
@@ -873,7 +873,7 @@ static int dn_wait_run(struct sock *sk, long *timeo)
if (!*timeo)
return -EALREADY;
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
for(;;) {
release_sock(sk);
if (scp->state == DN_CI || scp->state == DN_CC)
@@ -891,9 +891,9 @@ static int dn_wait_run(struct sock *sk, long *timeo)
err = -ETIMEDOUT;
if (!*timeo)
break;
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
out:
if (err == 0) {
sk->sk_socket->state = SS_CONNECTED;
@@ -1040,7 +1040,7 @@ static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
struct sk_buff *skb = NULL;
int err = 0;
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
for(;;) {
release_sock(sk);
skb = skb_dequeue(&sk->sk_receive_queue);
@@ -1060,9 +1060,9 @@ static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
err = -EAGAIN;
if (!*timeo)
break;
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
return skb == NULL ? ERR_PTR(err) : skb;
}
@@ -1105,7 +1105,7 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
release_sock(sk);
dst = skb_dst(skb);
- dst_release(xchg(&newsk->sk_dst_cache, dst));
+ sk_dst_set(newsk, dst);
skb_dst_set(skb, NULL);
DN_SK(newsk)->state = DN_CR;
@@ -1746,11 +1746,11 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
goto out;
}
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target));
clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
}
skb_queue_walk_safe(queue, skb, n) {
@@ -1956,7 +1956,7 @@ static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
}
if ((flags & MSG_TRYHARD) && sk->sk_dst_cache)
- dst_negative_advice(&sk->sk_dst_cache, sk);
+ dst_negative_advice(sk);
mss = scp->segsize_rem;
fctype = scp->services_rem & NSP_FC_MASK;
@@ -2003,12 +2003,12 @@ static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
goto out;
}
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
sk_wait_event(sk, &timeo,
!dn_queue_too_long(scp, queue, flags));
clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
continue;
}
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index cead68eb254c..4c409b46aa35 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -350,7 +350,7 @@ static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr **ifap, int de
if (dn_db->dev->type == ARPHRD_ETHER) {
if (ifa1->ifa_local != dn_eth2dn(dev->dev_addr)) {
dn_dn2eth(mac_addr, ifa1->ifa_local);
- dev_mc_delete(dev, mac_addr, ETH_ALEN, 0);
+ dev_mc_del(dev, mac_addr);
}
}
@@ -381,7 +381,7 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa)
if (dev->type == ARPHRD_ETHER) {
if (ifa->ifa_local != dn_eth2dn(dev->dev_addr)) {
dn_dn2eth(mac_addr, ifa->ifa_local);
- dev_mc_add(dev, mac_addr, ETH_ALEN, 0);
+ dev_mc_add(dev, mac_addr);
}
}
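These hunks, the dn_eth_up()/dn_eth_down() ones below, and the net/dsa changes later all track the consolidated net/core/dev_addr_lists.c API: the explicit address length and the old "global" flag are gone, and the length is now taken from dev->addr_len. The signatures assumed by these call sites:

int dev_mc_add(struct net_device *dev, unsigned char *addr);
int dev_mc_del(struct net_device *dev, unsigned char *addr);
int dev_uc_add(struct net_device *dev, unsigned char *addr);	/* was dev_unicast_add() */
int dev_uc_del(struct net_device *dev, unsigned char *addr);	/* was dev_unicast_delete() */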
@@ -1001,9 +1001,9 @@ static int dn_eth_up(struct net_device *dev)
struct dn_dev *dn_db = dev->dn_ptr;
if (dn_db->parms.forwarding == 0)
- dev_mc_add(dev, dn_rt_all_end_mcast, ETH_ALEN, 0);
+ dev_mc_add(dev, dn_rt_all_end_mcast);
else
- dev_mc_add(dev, dn_rt_all_rt_mcast, ETH_ALEN, 0);
+ dev_mc_add(dev, dn_rt_all_rt_mcast);
dn_db->use_long = 1;
@@ -1015,9 +1015,9 @@ static void dn_eth_down(struct net_device *dev)
struct dn_dev *dn_db = dev->dn_ptr;
if (dn_db->parms.forwarding == 0)
- dev_mc_delete(dev, dn_rt_all_end_mcast, ETH_ALEN, 0);
+ dev_mc_del(dev, dn_rt_all_end_mcast);
else
- dev_mc_delete(dev, dn_rt_all_rt_mcast, ETH_ALEN, 0);
+ dev_mc_del(dev, dn_rt_all_rt_mcast);
}
static void dn_dev_set_timer(struct net_device *dev);
@@ -1220,17 +1220,14 @@ void dn_dev_down(struct net_device *dev)
void dn_dev_init_pkt(struct sk_buff *skb)
{
- return;
}
void dn_dev_veri_pkt(struct sk_buff *skb)
{
- return;
}
void dn_dev_hello(struct sk_buff *skb)
{
- return;
}
void dn_dev_devices_off(void)
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index deb723dba44b..0363bb95cc7d 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -266,7 +266,8 @@ static int dn_long_output(struct sk_buff *skb)
skb_reset_network_header(skb);
- return NF_HOOK(PF_DECnet, NF_DN_POST_ROUTING, skb, NULL, neigh->dev, dn_neigh_output_packet);
+ return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, skb, NULL,
+ neigh->dev, dn_neigh_output_packet);
}
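The PF_DECnet to NFPROTO_DECNET switches in this file (and the PF_INET to NFPROTO_IPV4 ones throughout net/ipv4 below) move NF_HOOK callers onto the dedicated netfilter protocol namespace. The numeric values match the old address families, so this is a type-cleanliness change rather than a behavioural one; for reference, the enum these constants come from, abridged to the values used in this diff:

/* include/linux/netfilter.h, abridged */
enum {
	NFPROTO_UNSPEC =  0,
	NFPROTO_IPV4   =  2,	/* == PF_INET */
	NFPROTO_DECNET = 12,	/* == PF_DECnet */
};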
static int dn_short_output(struct sk_buff *skb)
@@ -305,7 +306,8 @@ static int dn_short_output(struct sk_buff *skb)
skb_reset_network_header(skb);
- return NF_HOOK(PF_DECnet, NF_DN_POST_ROUTING, skb, NULL, neigh->dev, dn_neigh_output_packet);
+ return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, skb, NULL,
+ neigh->dev, dn_neigh_output_packet);
}
/*
@@ -347,7 +349,8 @@ static int dn_phase3_output(struct sk_buff *skb)
skb_reset_network_header(skb);
- return NF_HOOK(PF_DECnet, NF_DN_POST_ROUTING, skb, NULL, neigh->dev, dn_neigh_output_packet);
+ return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, skb, NULL,
+ neigh->dev, dn_neigh_output_packet);
}
/*
diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c
index 25a37299bc65..b430549e2b91 100644
--- a/net/decnet/dn_nsp_in.c
+++ b/net/decnet/dn_nsp_in.c
@@ -810,7 +810,8 @@ free_out:
int dn_nsp_rx(struct sk_buff *skb)
{
- return NF_HOOK(PF_DECnet, NF_DN_LOCAL_IN, skb, skb->dev, NULL, dn_nsp_rx_packet);
+ return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_IN, skb, skb->dev, NULL,
+ dn_nsp_rx_packet);
}
/*
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 70ebe74027d5..812e6dff6067 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -264,7 +264,6 @@ static struct dst_entry *dn_dst_negative_advice(struct dst_entry *dst)
static void dn_dst_link_failure(struct sk_buff *skb)
{
- return;
}
static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
@@ -518,7 +517,8 @@ static int dn_route_rx_long(struct sk_buff *skb)
ptr++;
cb->hops = *ptr++; /* Visit Count */
- return NF_HOOK(PF_DECnet, NF_DN_PRE_ROUTING, skb, skb->dev, NULL, dn_route_rx_packet);
+ return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, skb, skb->dev, NULL,
+ dn_route_rx_packet);
drop_it:
kfree_skb(skb);
@@ -544,7 +544,8 @@ static int dn_route_rx_short(struct sk_buff *skb)
ptr += 2;
cb->hops = *ptr & 0x3f;
- return NF_HOOK(PF_DECnet, NF_DN_PRE_ROUTING, skb, skb->dev, NULL, dn_route_rx_packet);
+ return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, skb, skb->dev, NULL,
+ dn_route_rx_packet);
drop_it:
kfree_skb(skb);
@@ -646,16 +647,24 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type
switch(flags & DN_RT_CNTL_MSK) {
case DN_RT_PKT_HELO:
- return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_route_ptp_hello);
+ return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
+ skb, skb->dev, NULL,
+ dn_route_ptp_hello);
case DN_RT_PKT_L1RT:
case DN_RT_PKT_L2RT:
- return NF_HOOK(PF_DECnet, NF_DN_ROUTE, skb, skb->dev, NULL, dn_route_discard);
+ return NF_HOOK(NFPROTO_DECNET, NF_DN_ROUTE,
+ skb, skb->dev, NULL,
+ dn_route_discard);
case DN_RT_PKT_ERTH:
- return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_neigh_router_hello);
+ return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
+ skb, skb->dev, NULL,
+ dn_neigh_router_hello);
case DN_RT_PKT_EEDH:
- return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_neigh_endnode_hello);
+ return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
+ skb, skb->dev, NULL,
+ dn_neigh_endnode_hello);
}
} else {
if (dn->parms.state != DN_DEV_S_RU)
@@ -704,7 +713,8 @@ static int dn_output(struct sk_buff *skb)
cb->rt_flags |= DN_RT_F_IE;
cb->hops = 0;
- return NF_HOOK(PF_DECnet, NF_DN_LOCAL_OUT, skb, NULL, dev, neigh->output);
+ return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_OUT, skb, NULL, dev,
+ neigh->output);
error:
if (net_ratelimit())
@@ -753,7 +763,8 @@ static int dn_forward(struct sk_buff *skb)
if (rt->rt_flags & RTCF_DOREDIRECT)
cb->rt_flags |= DN_RT_F_IE;
- return NF_HOOK(PF_DECnet, NF_DN_FORWARD, skb, dev, skb->dev, neigh->output);
+ return NF_HOOK(NFPROTO_DECNET, NF_DN_FORWARD, skb, dev, skb->dev,
+ neigh->output);
drop:
kfree_skb(skb);
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index 7466c546f286..48fdf10be7a1 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -196,7 +196,6 @@ static int dn_fib_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
{
struct dn_fib_rule *r = (struct dn_fib_rule *)rule;
- frh->family = AF_DECnet;
frh->dst_len = r->dst_len;
frh->src_len = r->src_len;
frh->tos = 0;
@@ -212,29 +211,12 @@ nla_put_failure:
return -ENOBUFS;
}
-static u32 dn_fib_rule_default_pref(struct fib_rules_ops *ops)
-{
- struct list_head *pos;
- struct fib_rule *rule;
-
- if (!list_empty(&dn_fib_rules_ops->rules_list)) {
- pos = dn_fib_rules_ops->rules_list.next;
- if (pos->next != &dn_fib_rules_ops->rules_list) {
- rule = list_entry(pos->next, struct fib_rule, list);
- if (rule->pref)
- return rule->pref - 1;
- }
- }
-
- return 0;
-}
-
static void dn_fib_rule_flush_cache(struct fib_rules_ops *ops)
{
dn_rt_cache_flush(-1);
}
-static struct fib_rules_ops dn_fib_rules_ops_template = {
+static const struct fib_rules_ops __net_initdata dn_fib_rules_ops_template = {
.family = AF_DECnet,
.rule_size = sizeof(struct dn_fib_rule),
.addr_size = sizeof(u16),
@@ -243,7 +225,7 @@ static struct fib_rules_ops dn_fib_rules_ops_template = {
.configure = dn_fib_rule_configure,
.compare = dn_fib_rule_compare,
.fill = dn_fib_rule_fill,
- .default_pref = dn_fib_rule_default_pref,
+ .default_pref = fib_default_rule_pref,
.flush_cache = dn_fib_rule_flush_cache,
.nlgroup = RTNLGRP_DECnet_RULE,
.policy = dn_fib_rule_policy,
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 2175e6d5cc8d..8fdca56bb08f 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -67,7 +67,7 @@ static int dsa_slave_open(struct net_device *dev)
return -ENETDOWN;
if (compare_ether_addr(dev->dev_addr, master->dev_addr)) {
- err = dev_unicast_add(master, dev->dev_addr);
+ err = dev_uc_add(master, dev->dev_addr);
if (err < 0)
goto out;
}
@@ -90,7 +90,7 @@ clear_allmulti:
dev_set_allmulti(master, -1);
del_unicast:
if (compare_ether_addr(dev->dev_addr, master->dev_addr))
- dev_unicast_delete(master, dev->dev_addr);
+ dev_uc_del(master, dev->dev_addr);
out:
return err;
}
@@ -101,14 +101,14 @@ static int dsa_slave_close(struct net_device *dev)
struct net_device *master = p->parent->dst->master_netdev;
dev_mc_unsync(master, dev);
- dev_unicast_unsync(master, dev);
+ dev_uc_unsync(master, dev);
if (dev->flags & IFF_ALLMULTI)
dev_set_allmulti(master, -1);
if (dev->flags & IFF_PROMISC)
dev_set_promiscuity(master, -1);
if (compare_ether_addr(dev->dev_addr, master->dev_addr))
- dev_unicast_delete(master, dev->dev_addr);
+ dev_uc_del(master, dev->dev_addr);
return 0;
}
@@ -130,7 +130,7 @@ static void dsa_slave_set_rx_mode(struct net_device *dev)
struct net_device *master = p->parent->dst->master_netdev;
dev_mc_sync(master, dev);
- dev_unicast_sync(master, dev);
+ dev_uc_sync(master, dev);
}
static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
@@ -147,13 +147,13 @@ static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
goto out;
if (compare_ether_addr(addr->sa_data, master->dev_addr)) {
- err = dev_unicast_add(master, addr->sa_data);
+ err = dev_uc_add(master, addr->sa_data);
if (err < 0)
return err;
}
if (compare_ether_addr(dev->dev_addr, master->dev_addr))
- dev_unicast_delete(master, dev->dev_addr);
+ dev_uc_del(master, dev->dev_addr);
out:
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 205a1c12f3c0..61ec0329316c 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -136,7 +136,7 @@ int eth_rebuild_header(struct sk_buff *skb)
default:
printk(KERN_DEBUG
"%s: unable to resolve type %X addresses.\n",
- dev->name, (int)eth->h_proto);
+ dev->name, ntohs(eth->h_proto));
memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
break;
@@ -162,7 +162,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
skb->dev = dev;
skb_reset_mac_header(skb);
- skb_pull(skb, ETH_HLEN);
+ skb_pull_inline(skb, ETH_HLEN);
eth = eth_hdr(skb);
if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 0c94a1ac2946..8e3a1fd938ab 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -250,6 +250,20 @@ config IP_MROUTE
<file:Documentation/networking/multicast.txt>. If you haven't heard
about it, you don't need it.
+config IP_MROUTE_MULTIPLE_TABLES
+ bool "IP: multicast policy routing"
+ depends on IP_MROUTE && IP_ADVANCED_ROUTER
+ select FIB_RULES
+ help
+ Normally, a multicast router runs a userspace daemon and decides
+ what to do with a multicast packet based on the source and
+ destination addresses. If you say Y here, the multicast router
+ will also be able to take interfaces and packet marks into
+ account and run multiple instances of userspace daemons
+ simultaneously, each one handling a single table.
+
+ If unsure, say N.
+
config IP_PIMSM_V1
bool "IP: PIM-SM version 1 support"
depends on IP_MROUTE
@@ -587,9 +601,15 @@ choice
config DEFAULT_HTCP
bool "Htcp" if TCP_CONG_HTCP=y
+ config DEFAULT_HYBLA
+ bool "Hybla" if TCP_CONG_HYBLA=y
+
config DEFAULT_VEGAS
bool "Vegas" if TCP_CONG_VEGAS=y
+ config DEFAULT_VENO
+ bool "Veno" if TCP_CONG_VENO=y
+
config DEFAULT_WESTWOOD
bool "Westwood" if TCP_CONG_WESTWOOD=y
@@ -610,8 +630,10 @@ config DEFAULT_TCP_CONG
default "bic" if DEFAULT_BIC
default "cubic" if DEFAULT_CUBIC
default "htcp" if DEFAULT_HTCP
+ default "hybla" if DEFAULT_HYBLA
default "vegas" if DEFAULT_VEGAS
default "westwood" if DEFAULT_WESTWOOD
+ default "veno" if DEFAULT_VENO
default "reno" if DEFAULT_RENO
default "cubic"
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index f71357422380..551ce564b035 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -154,7 +154,7 @@ void inet_sock_destruct(struct sock *sk)
WARN_ON(sk->sk_forward_alloc);
kfree(inet->opt);
- dst_release(sk->sk_dst_cache);
+ dst_release(rcu_dereference_check(sk->sk_dst_cache, 1));
sk_refcnt_debug_dec(sk);
}
EXPORT_SYMBOL(inet_sock_destruct);
@@ -419,6 +419,8 @@ int inet_release(struct socket *sock)
if (sk) {
long timeout;
+ sock_rps_reset_flow(sk);
+
/* Applications forget to leave groups before exiting */
ip_mc_drop_socket(sk);
@@ -546,7 +548,7 @@ static long inet_wait_for_connect(struct sock *sk, long timeo)
{
DEFINE_WAIT(wait);
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
/* Basic assumption: if someone sets sk->sk_err, he _must_
* change state of the socket from TCP_SYN_*.
@@ -559,9 +561,9 @@ static long inet_wait_for_connect(struct sock *sk, long timeo)
lock_sock(sk);
if (signal_pending(current) || !timeo)
break;
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
return timeo;
}
@@ -720,6 +722,8 @@ int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
{
struct sock *sk = sock->sk;
+ sock_rps_record_flow(sk);
+
/* We may need to bind the socket. */
if (!inet_sk(sk)->inet_num && inet_autobind(sk))
return -EAGAIN;
@@ -728,12 +732,13 @@ int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
}
EXPORT_SYMBOL(inet_sendmsg);
-
static ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
size_t size, int flags)
{
struct sock *sk = sock->sk;
+ sock_rps_record_flow(sk);
+
/* We may need to bind the socket. */
if (!inet_sk(sk)->inet_num && inet_autobind(sk))
return -EAGAIN;
@@ -743,6 +748,22 @@ static ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
return sock_no_sendpage(sock, page, offset, size, flags);
}
+int inet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
+ size_t size, int flags)
+{
+ struct sock *sk = sock->sk;
+ int addr_len = 0;
+ int err;
+
+ sock_rps_record_flow(sk);
+
+ err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
+ flags & ~MSG_DONTWAIT, &addr_len);
+ if (err >= 0)
+ msg->msg_namelen = addr_len;
+ return err;
+}
+EXPORT_SYMBOL(inet_recvmsg);
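The sock_rps_record_flow() calls added to inet_sendmsg(), inet_sendpage() and the new inet_recvmsg() feed the rps_sock_flow_table sized by the sysctl earlier in this diff: they note which CPU the application last touched the socket on, so arriving packets of the same flow can be steered there. A hedged sketch of the recording step; the real helpers live in net/sock.h, compile away without CONFIG_RPS, and are presumed to key off sk->sk_rxhash:

/* Sketch only: record "flow hash -> current CPU" in the global table. */
static inline void sketch_record_flow(u32 hash)
{
	struct rps_sock_flow_table *table;

	rcu_read_lock();
	table = rcu_dereference(rps_sock_flow_table);
	if (table && hash)
		table->ents[hash & table->mask] = raw_smp_processor_id();
	rcu_read_unlock();
}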
int inet_shutdown(struct socket *sock, int how)
{
@@ -872,7 +893,7 @@ const struct proto_ops inet_stream_ops = {
.setsockopt = sock_common_setsockopt,
.getsockopt = sock_common_getsockopt,
.sendmsg = tcp_sendmsg,
- .recvmsg = sock_common_recvmsg,
+ .recvmsg = inet_recvmsg,
.mmap = sock_no_mmap,
.sendpage = tcp_sendpage,
.splice_read = tcp_splice_read,
@@ -899,7 +920,7 @@ const struct proto_ops inet_dgram_ops = {
.setsockopt = sock_common_setsockopt,
.getsockopt = sock_common_getsockopt,
.sendmsg = inet_sendmsg,
- .recvmsg = sock_common_recvmsg,
+ .recvmsg = inet_recvmsg,
.mmap = sock_no_mmap,
.sendpage = inet_sendpage,
#ifdef CONFIG_COMPAT
@@ -929,7 +950,7 @@ static const struct proto_ops inet_sockraw_ops = {
.setsockopt = sock_common_setsockopt,
.getsockopt = sock_common_getsockopt,
.sendmsg = inet_sendmsg,
- .recvmsg = sock_common_recvmsg,
+ .recvmsg = inet_recvmsg,
.mmap = sock_no_mmap,
.sendpage = inet_sendpage,
#ifdef CONFIG_COMPAT
@@ -1302,8 +1323,8 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
goto out_unlock;
- id = ntohl(*(u32 *)&iph->id);
- flush = (u16)((ntohl(*(u32 *)iph) ^ skb_gro_len(skb)) | (id ^ IP_DF));
+ id = ntohl(*(__be32 *)&iph->id);
+ flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id ^ IP_DF));
id >>= 16;
for (p = *head; p; p = p->next) {
@@ -1316,8 +1337,8 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
if ((iph->protocol ^ iph2->protocol) |
(iph->tos ^ iph2->tos) |
- (iph->saddr ^ iph2->saddr) |
- (iph->daddr ^ iph2->daddr)) {
+ ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) |
+ ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) {
NAPI_GRO_CB(p)->same_flow = 0;
continue;
}
@@ -1407,10 +1428,10 @@ EXPORT_SYMBOL_GPL(snmp_fold_field);
int snmp_mib_init(void __percpu *ptr[2], size_t mibsize)
{
BUG_ON(ptr == NULL);
- ptr[0] = __alloc_percpu(mibsize, __alignof__(unsigned long long));
+ ptr[0] = __alloc_percpu(mibsize, __alignof__(unsigned long));
if (!ptr[0])
goto err0;
- ptr[1] = __alloc_percpu(mibsize, __alignof__(unsigned long long));
+ ptr[1] = __alloc_percpu(mibsize, __alignof__(unsigned long));
if (!ptr[1])
goto err1;
return 0;
@@ -1552,9 +1573,13 @@ static int __init inet_init(void)
BUILD_BUG_ON(sizeof(struct inet_skb_parm) > sizeof(dummy_skb->cb));
+ sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
+ if (!sysctl_local_reserved_ports)
+ goto out;
+
rc = proto_register(&tcp_prot, 1);
if (rc)
- goto out;
+ goto out_free_reserved_ports;
rc = proto_register(&udp_prot, 1);
if (rc)
@@ -1653,6 +1678,8 @@ out_unregister_udp_proto:
proto_unregister(&udp_prot);
out_unregister_tcp_proto:
proto_unregister(&tcp_prot);
+out_free_reserved_ports:
+ kfree(sysctl_local_reserved_ports);
goto out;
}
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 6e747065c202..f094b75810db 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -661,13 +661,13 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
#endif
#endif
-#ifdef CONFIG_FDDI
+#if defined(CONFIG_FDDI) || defined(CONFIG_FDDI_MODULE)
case ARPHRD_FDDI:
arp->ar_hrd = htons(ARPHRD_ETHER);
arp->ar_pro = htons(ETH_P_IP);
break;
#endif
-#ifdef CONFIG_TR
+#if defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
case ARPHRD_IEEE802_TR:
arp->ar_hrd = htons(ARPHRD_IEEE802);
arp->ar_pro = htons(ETH_P_IP);
@@ -854,7 +854,7 @@ static int arp_process(struct sk_buff *skb)
}
if (arp->ar_op == htons(ARPOP_REQUEST) &&
- ip_route_input(skb, tip, sip, 0, dev) == 0) {
+ ip_route_input_noref(skb, tip, sip, 0, dev) == 0) {
rt = skb_rtable(skb);
addr_type = rt->rt_type;
@@ -1051,7 +1051,7 @@ static int arp_req_set(struct net *net, struct arpreq *r,
return -EINVAL;
}
switch (dev->type) {
-#ifdef CONFIG_FDDI
+#if defined(CONFIG_FDDI) || defined(CONFIG_FDDI_MODULE)
case ARPHRD_FDDI:
/*
* According to RFC 1390, FDDI devices should accept ARP
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index c97cd9ff697e..3a92a76ae41d 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -290,8 +290,6 @@ void cipso_v4_cache_invalidate(void)
cipso_v4_cache[iter].size = 0;
spin_unlock_bh(&cipso_v4_cache[iter].lock);
}
-
- return;
}
/**
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 90e3d6379a42..382bc768ed56 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1096,10 +1096,10 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
case NETDEV_DOWN:
ip_mc_down(in_dev);
break;
- case NETDEV_BONDING_OLDTYPE:
+ case NETDEV_PRE_TYPE_CHANGE:
ip_mc_unmap(in_dev);
break;
- case NETDEV_BONDING_NEWTYPE:
+ case NETDEV_POST_TYPE_CHANGE:
ip_mc_remap(in_dev);
break;
case NETDEV_CHANGEMTU:
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index ca2d07b1c706..76daeb5ff564 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -213,7 +213,6 @@ static int fib4_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
{
struct fib4_rule *rule4 = (struct fib4_rule *) rule;
- frh->family = AF_INET;
frh->dst_len = rule4->dst_len;
frh->src_len = rule4->src_len;
frh->tos = rule4->tos;
@@ -234,23 +233,6 @@ nla_put_failure:
return -ENOBUFS;
}
-static u32 fib4_rule_default_pref(struct fib_rules_ops *ops)
-{
- struct list_head *pos;
- struct fib_rule *rule;
-
- if (!list_empty(&ops->rules_list)) {
- pos = ops->rules_list.next;
- if (pos->next != &ops->rules_list) {
- rule = list_entry(pos->next, struct fib_rule, list);
- if (rule->pref)
- return rule->pref - 1;
- }
- }
-
- return 0;
-}
-
static size_t fib4_rule_nlmsg_payload(struct fib_rule *rule)
{
return nla_total_size(4) /* dst */
@@ -263,7 +245,7 @@ static void fib4_rule_flush_cache(struct fib_rules_ops *ops)
rt_cache_flush(ops->fro_net, -1);
}
-static struct fib_rules_ops fib4_rules_ops_template = {
+static const struct fib_rules_ops __net_initdata fib4_rules_ops_template = {
.family = AF_INET,
.rule_size = sizeof(struct fib4_rule),
.addr_size = sizeof(u32),
@@ -272,7 +254,7 @@ static struct fib_rules_ops fib4_rules_ops_template = {
.configure = fib4_rule_configure,
.compare = fib4_rule_compare,
.fill = fib4_rule_fill,
- .default_pref = fib4_rule_default_pref,
+ .default_pref = fib_default_rule_pref,
.nlmsg_payload = fib4_rule_nlmsg_payload,
.flush_cache = fib4_rule_flush_cache,
.nlgroup = RTNLGRP_IPV4_RULE,
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index c98f115fb0fd..79d057a939ba 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1022,8 +1022,6 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
rcu_assign_pointer(t->trie, (struct node *)tn);
tnode_free_flush();
-
- return;
}
/* only used from updater-side */
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index ac4dec132735..d65e9215bcd7 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -331,9 +331,10 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
if (ip_append_data(sk, icmp_glue_bits, icmp_param,
icmp_param->data_len+icmp_param->head_len,
icmp_param->head_len,
- ipc, rt, MSG_DONTWAIT) < 0)
+ ipc, rt, MSG_DONTWAIT) < 0) {
+ ICMP_INC_STATS_BH(sock_net(sk), ICMP_MIB_OUTERRORS);
ip_flush_pending_frames(sk);
- else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
+ } else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
struct icmphdr *icmph = icmp_hdr(skb);
__wsum csum = 0;
struct sk_buff *skb1;
@@ -586,20 +587,20 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
err = __ip_route_output_key(net, &rt2, &fl);
else {
struct flowi fl2 = {};
- struct dst_entry *odst;
+ unsigned long orefdst;
fl2.fl4_dst = fl.fl4_src;
if (ip_route_output_key(net, &rt2, &fl2))
goto relookup_failed;
/* Ugh! */
- odst = skb_dst(skb_in);
+ orefdst = skb_in->_skb_refdst; /* save old refdst */
err = ip_route_input(skb_in, fl.fl4_dst, fl.fl4_src,
RT_TOS(tos), rt2->u.dst.dev);
dst_release(&rt2->u.dst);
rt2 = skb_rtable(skb_in);
- skb_dst_set(skb_in, odst);
+ skb_in->_skb_refdst = orefdst; /* restore old refdst */
}
if (err)
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 15d3eeda92f5..5fff865a4fa7 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -998,7 +998,7 @@ static void ip_mc_filter_add(struct in_device *in_dev, __be32 addr)
--ANK
*/
if (arp_mc_map(addr, buf, dev, 0) == 0)
- dev_mc_add(dev, buf, dev->addr_len, 0);
+ dev_mc_add(dev, buf);
}
/*
@@ -1011,7 +1011,7 @@ static void ip_mc_filter_del(struct in_device *in_dev, __be32 addr)
struct net_device *dev = in_dev->dev;
if (arp_mc_map(addr, buf, dev, 0) == 0)
- dev_mc_delete(dev, buf, dev->addr_len, 0);
+ dev_mc_del(dev, buf);
}
#ifdef CONFIG_IP_MULTICAST
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 8da6429269dd..70eb3507c406 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -37,6 +37,9 @@ struct local_ports sysctl_local_ports __read_mostly = {
.range = { 32768, 61000 },
};
+unsigned long *sysctl_local_reserved_ports;
+EXPORT_SYMBOL(sysctl_local_reserved_ports);
+
void inet_get_local_port_range(int *low, int *high)
{
unsigned seq;
@@ -108,6 +111,8 @@ again:
smallest_size = -1;
do {
+ if (inet_is_reserved_local_port(rover))
+ goto next_nolock;
head = &hashinfo->bhash[inet_bhashfn(net, rover,
hashinfo->bhash_size)];
spin_lock(&head->lock);
@@ -130,6 +135,7 @@ again:
break;
next:
spin_unlock(&head->lock);
+ next_nolock:
if (++rover > high)
rover = low;
} while (--remaining > 0);
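This autobind loop and the __inet_hash_connect() loop below both skip ports marked in the new sysctl_local_reserved_ports bitmap, which inet_init() later in this diff allocates as 65536 / 8 bytes, one bit per port. The check is presumably a plain bitmap test:

/* Presumed helper behind the new checks above and below: */
static inline int inet_is_reserved_local_port(int port)
{
	return test_bit(port, sysctl_local_reserved_ports);
}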
@@ -234,7 +240,7 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
* having to remove and re-insert us on the wait queue.
*/
for (;;) {
- prepare_to_wait_exclusive(sk->sk_sleep, &wait,
+ prepare_to_wait_exclusive(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
release_sock(sk);
if (reqsk_queue_empty(&icsk->icsk_accept_queue))
@@ -253,7 +259,7 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
if (!timeo)
break;
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
return err;
}
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 2b79377b468d..d3e160a88219 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -456,6 +456,8 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
local_bh_disable();
for (i = 1; i <= remaining; i++) {
port = low + (i + offset) % remaining;
+ if (inet_is_reserved_local_port(port))
+ continue;
head = &hinfo->bhash[inet_bhashfn(net, port,
hinfo->bhash_size)];
spin_lock(&head->lock);
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index af10942b326c..56cdf68a074c 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -112,8 +112,8 @@ int ip_forward(struct sk_buff *skb)
skb->priority = rt_tos2priority(iph->tos);
- return NF_HOOK(PF_INET, NF_INET_FORWARD, skb, skb->dev, rt->u.dst.dev,
- ip_forward_finish);
+ return NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev,
+ rt->u.dst.dev, ip_forward_finish);
sr_failed:
/*
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index fe381d12ecdd..32618e11076d 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -502,7 +502,6 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
t->err_time = jiffies;
out:
rcu_read_unlock();
- return;
}
static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
@@ -538,7 +537,6 @@ static int ipgre_rcv(struct sk_buff *skb)
struct ip_tunnel *tunnel;
int offset = 4;
__be16 gre_proto;
- unsigned int len;
if (!pskb_may_pull(skb, 16))
goto drop_nolock;
@@ -629,8 +627,6 @@ static int ipgre_rcv(struct sk_buff *skb)
tunnel->i_seqno = seqno + 1;
}
- len = skb->len;
-
/* Warning: All skb pointers will be invalidated! */
if (tunnel->dev->type == ARPHRD_ETHER) {
if (!pskb_may_pull(skb, ETH_HLEN)) {
@@ -644,11 +640,7 @@ static int ipgre_rcv(struct sk_buff *skb)
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
}
- stats->rx_packets++;
- stats->rx_bytes += len;
- skb->dev = tunnel->dev;
- skb_dst_drop(skb);
- nf_reset(skb);
+ skb_tunnel_rx(skb, tunnel->dev);
skb_reset_network_header(skb);
ipgre_ecn_decapsulate(iph, skb);
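Here, and in ipip_rcv() below, a five-line receive epilogue collapses into skb_tunnel_rx(). Reconstructed from the lines it replaces (a sketch, not the canonical include/net/dst.h definition), the helper accounts the packet to the tunnel device, retargets the skb at it, and drops state that belonged to the outer header:

static inline void sketch_tunnel_rx(struct sk_buff *skb,
				    struct net_device *dev)
{
	skb->dev = dev;
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	skb_dst_drop(skb);	/* the outer route no longer applies */
	nf_reset(skb);
}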
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index f8ab7a380d4a..d930dc5e4d85 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -266,7 +266,7 @@ int ip_local_deliver(struct sk_buff *skb)
return 0;
}
- return NF_HOOK(PF_INET, NF_INET_LOCAL_IN, skb, skb->dev, NULL,
+ return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN, skb, skb->dev, NULL,
ip_local_deliver_finish);
}
@@ -331,8 +331,8 @@ static int ip_rcv_finish(struct sk_buff *skb)
* how the packet travels inside Linux networking.
*/
if (skb_dst(skb) == NULL) {
- int err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos,
- skb->dev);
+ int err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
+ iph->tos, skb->dev);
if (unlikely(err)) {
if (err == -EHOSTUNREACH)
IP_INC_STATS_BH(dev_net(skb->dev),
@@ -444,7 +444,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
/* Must drop socket now because of tproxy. */
skb_orphan(skb);
- return NF_HOOK(PF_INET, NF_INET_PRE_ROUTING, skb, dev, NULL,
+ return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, dev, NULL,
ip_rcv_finish);
inhdr_error:
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 4c09a31fd140..ba9836c488ed 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -238,7 +238,6 @@ void ip_options_fragment(struct sk_buff * skb)
opt->rr_needaddr = 0;
opt->ts_needaddr = 0;
opt->ts_needtime = 0;
- return;
}
/*
@@ -601,6 +600,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
unsigned char *optptr = skb_network_header(skb) + opt->srr;
struct rtable *rt = skb_rtable(skb);
struct rtable *rt2;
+ unsigned long orefdst;
int err;
if (!opt->srr)
@@ -624,16 +624,16 @@ int ip_options_rcv_srr(struct sk_buff *skb)
}
memcpy(&nexthop, &optptr[srrptr-1], 4);
- rt = skb_rtable(skb);
+ orefdst = skb->_skb_refdst;
skb_dst_set(skb, NULL);
err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev);
rt2 = skb_rtable(skb);
if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) {
- ip_rt_put(rt2);
- skb_dst_set(skb, &rt->u.dst);
+ skb_dst_drop(skb);
+ skb->_skb_refdst = orefdst;
return -EINVAL;
}
- ip_rt_put(rt);
+ refdst_drop(orefdst);
if (rt2->rt_type != RTN_LOCAL)
break;
/* Superfast 8) loopback forward */
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index d1bcc9f21d4f..9a4a6c96cb0d 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -96,8 +96,8 @@ int __ip_local_out(struct sk_buff *skb)
iph->tot_len = htons(skb->len);
ip_send_check(iph);
- return nf_hook(PF_INET, NF_INET_LOCAL_OUT, skb, NULL, skb_dst(skb)->dev,
- dst_output);
+ return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
+ skb_dst(skb)->dev, dst_output);
}
int ip_local_out(struct sk_buff *skb)
@@ -272,8 +272,8 @@ int ip_mc_output(struct sk_buff *skb)
) {
struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
if (newskb)
- NF_HOOK(PF_INET, NF_INET_POST_ROUTING, newskb,
- NULL, newskb->dev,
+ NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
+ newskb, NULL, newskb->dev,
ip_dev_loopback_xmit);
}
@@ -288,12 +288,12 @@ int ip_mc_output(struct sk_buff *skb)
if (rt->rt_flags&RTCF_BROADCAST) {
struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
if (newskb)
- NF_HOOK(PF_INET, NF_INET_POST_ROUTING, newskb, NULL,
- newskb->dev, ip_dev_loopback_xmit);
+ NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb,
+ NULL, newskb->dev, ip_dev_loopback_xmit);
}
- return NF_HOOK_COND(PF_INET, NF_INET_POST_ROUTING, skb, NULL, skb->dev,
- ip_finish_output,
+ return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL,
+ skb->dev, ip_finish_output,
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
@@ -306,22 +306,24 @@ int ip_output(struct sk_buff *skb)
skb->dev = dev;
skb->protocol = htons(ETH_P_IP);
- return NF_HOOK_COND(PF_INET, NF_INET_POST_ROUTING, skb, NULL, dev,
+ return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL, dev,
ip_finish_output,
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
-int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
+int ip_queue_xmit(struct sk_buff *skb)
{
struct sock *sk = skb->sk;
struct inet_sock *inet = inet_sk(sk);
struct ip_options *opt = inet->opt;
struct rtable *rt;
struct iphdr *iph;
+ int res;
/* Skip all of this if the packet is already routed,
* f.e. by something like SCTP.
*/
+ rcu_read_lock();
rt = skb_rtable(skb);
if (rt != NULL)
goto packet_routed;
@@ -359,7 +361,7 @@ int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
}
sk_setup_caps(sk, &rt->u.dst);
}
- skb_dst_set(skb, dst_clone(&rt->u.dst));
+ skb_dst_set_noref(skb, &rt->u.dst);
packet_routed:
if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
@@ -370,7 +372,7 @@ packet_routed:
skb_reset_network_header(skb);
iph = ip_hdr(skb);
*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
- if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok)
+ if (ip_dont_fragment(sk, &rt->u.dst) && !skb->local_df)
iph->frag_off = htons(IP_DF);
else
iph->frag_off = 0;
@@ -391,9 +393,12 @@ packet_routed:
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
- return ip_local_out(skb);
+ res = ip_local_out(skb);
+ rcu_read_unlock();
+ return res;
no_route:
+ rcu_read_unlock();
IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
kfree_skb(skb);
return -EHOSTUNREACH;
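The skb_dst_set_noref() call is why ip_queue_xmit() now wraps the whole transmit path in rcu_read_lock()/rcu_read_unlock(): the skb borrows the route without taking a reference, so the dst is only guaranteed to stay alive inside the RCU section. The same raw-word view of skb->_skb_refdst is what the icmp_send() and ip_options_rcv_srr() hunks earlier save and restore wholesale via orefdst. A hedged sketch of the presumed encoding:

/* Presumed encoding: the low bit of skb->_skb_refdst marks a dst that
 * is borrowed under RCU rather than refcounted; refdst_drop() only
 * puts a reference when the bit is clear.
 */
#define SKB_DST_NOREF	1UL

static inline void sketch_dst_set_noref(struct sk_buff *skb,
					struct dst_entry *dst)
{
	WARN_ON(!rcu_read_lock_held());
	skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
}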
@@ -469,6 +474,10 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
hlen = iph->ihl * 4;
mtu = dst_mtu(&rt->u.dst) - hlen; /* Size of data space */
+#ifdef CONFIG_BRIDGE_NETFILTER
+ if (skb->nf_bridge)
+ mtu -= nf_bridge_mtu_reduction(skb);
+#endif
IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
/* When frag_list is given, use it. First, check its validity:
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 1e64dabbd232..ce231780a2b1 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -287,12 +287,8 @@ int ip_ra_control(struct sock *sk, unsigned char on,
void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
__be16 port, u32 info, u8 *payload)
{
- struct inet_sock *inet = inet_sk(sk);
struct sock_exterr_skb *serr;
- if (!inet->recverr)
- return;
-
skb = skb_clone(skb, GFP_ATOMIC);
if (!skb)
return;
@@ -958,6 +954,22 @@ e_inval:
return -EINVAL;
}
+/**
+ * ip_queue_rcv_skb - Queue an skb into the socket receive queue
+ * @sk: socket
+ * @skb: buffer
+ *
+ * Queues an skb into the socket receive queue. If the IP_CMSG_PKTINFO
+ * option is not set, drop the skb dst entry now, while its cache line is hot.
+ */
+int ip_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+ if (!(inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO))
+ skb_dst_drop(skb);
+ return sock_queue_rcv_skb(sk, skb);
+}
+EXPORT_SYMBOL(ip_queue_rcv_skb);
+
int ip_setsockopt(struct sock *sk, int level,
int optname, char __user *optval, unsigned int optlen)
{
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 067ce9e043dc..b9d84e800cf4 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -976,7 +976,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
/* Is it a reply for the device we are configuring? */
if (b->xid != ic_dev_xid) {
if (net_ratelimit())
- printk(KERN_ERR "DHCP/BOOTP: Ignoring delayed packet \n");
+ printk(KERN_ERR "DHCP/BOOTP: Ignoring delayed packet\n");
goto drop_unlock;
}
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 0b27b14dcc9d..7fd636711037 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -374,11 +374,8 @@ static int ipip_rcv(struct sk_buff *skb)
skb->protocol = htons(ETH_P_IP);
skb->pkt_type = PACKET_HOST;
- tunnel->dev->stats.rx_packets++;
- tunnel->dev->stats.rx_bytes += skb->len;
- skb->dev = tunnel->dev;
- skb_dst_drop(skb);
- nf_reset(skb);
+ skb_tunnel_rx(skb, tunnel->dev);
+
ipip_ecn_decapsulate(iph, skb);
netif_rx(skb);
rcu_read_unlock();
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 9d4f6d1340a4..45889103b3e2 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -22,7 +22,7 @@
* overflow.
* Carlos Picoto : PIMv1 Support
* Pavlin Ivanov Radoslavov: PIMv2 Registers must checksum only PIM header
- * Relax this requrement to work with older peers.
+ * Relax this requirement to work with older peers.
*
*/
@@ -63,11 +63,40 @@
#include <net/ipip.h>
#include <net/checksum.h>
#include <net/netlink.h>
+#include <net/fib_rules.h>
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
#define CONFIG_IP_PIMSM 1
#endif
+struct mr_table {
+ struct list_head list;
+#ifdef CONFIG_NET_NS
+ struct net *net;
+#endif
+ u32 id;
+ struct sock *mroute_sk;
+ struct timer_list ipmr_expire_timer;
+ struct list_head mfc_unres_queue;
+ struct list_head mfc_cache_array[MFC_LINES];
+ struct vif_device vif_table[MAXVIFS];
+ int maxvif;
+ atomic_t cache_resolve_queue_len;
+ int mroute_do_assert;
+ int mroute_do_pim;
+#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
+ int mroute_reg_vif_num;
+#endif
+};
+
+struct ipmr_rule {
+ struct fib_rule common;
+};
+
+struct ipmr_result {
+ struct mr_table *mrt;
+};
+
/* Big lock, protecting vif table, mrt cache and mroute socket state.
Note that the changes are semaphored via rtnl_lock.
*/
@@ -78,9 +107,7 @@ static DEFINE_RWLOCK(mrt_lock);
* Multicast router control variables
*/
-#define VIF_EXISTS(_net, _idx) ((_net)->ipv4.vif_table[_idx].dev != NULL)
-
-static struct mfc_cache *mfc_unres_queue; /* Queue of unresolved entries */
+#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)
/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);
@@ -95,12 +122,215 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
static struct kmem_cache *mrt_cachep __read_mostly;
-static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local);
-static int ipmr_cache_report(struct net *net,
+static struct mr_table *ipmr_new_table(struct net *net, u32 id);
+static int ip_mr_forward(struct net *net, struct mr_table *mrt,
+ struct sk_buff *skb, struct mfc_cache *cache,
+ int local);
+static int ipmr_cache_report(struct mr_table *mrt,
struct sk_buff *pkt, vifi_t vifi, int assert);
-static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm);
+static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+ struct mfc_cache *c, struct rtmsg *rtm);
+static void ipmr_expire_process(unsigned long arg);
+
+#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
+#define ipmr_for_each_table(mrt, net) \
+ list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)
+
+static struct mr_table *ipmr_get_table(struct net *net, u32 id)
+{
+ struct mr_table *mrt;
+
+ ipmr_for_each_table(mrt, net) {
+ if (mrt->id == id)
+ return mrt;
+ }
+ return NULL;
+}
+
+static int ipmr_fib_lookup(struct net *net, struct flowi *flp,
+ struct mr_table **mrt)
+{
+ struct ipmr_result res;
+ struct fib_lookup_arg arg = { .result = &res, };
+ int err;
+
+ err = fib_rules_lookup(net->ipv4.mr_rules_ops, flp, 0, &arg);
+ if (err < 0)
+ return err;
+ *mrt = res.mrt;
+ return 0;
+}
+
+static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
+ int flags, struct fib_lookup_arg *arg)
+{
+ struct ipmr_result *res = arg->result;
+ struct mr_table *mrt;
-static struct timer_list ipmr_expire_timer;
+ switch (rule->action) {
+ case FR_ACT_TO_TBL:
+ break;
+ case FR_ACT_UNREACHABLE:
+ return -ENETUNREACH;
+ case FR_ACT_PROHIBIT:
+ return -EACCES;
+ case FR_ACT_BLACKHOLE:
+ default:
+ return -EINVAL;
+ }
+
+ mrt = ipmr_get_table(rule->fr_net, rule->table);
+ if (mrt == NULL)
+ return -EAGAIN;
+ res->mrt = mrt;
+ return 0;
+}
+
+static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
+{
+ return 1;
+}
+
+static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
+ FRA_GENERIC_POLICY,
+};
+
+static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
+ struct fib_rule_hdr *frh, struct nlattr **tb)
+{
+ return 0;
+}
+
+static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
+ struct nlattr **tb)
+{
+ return 1;
+}
+
+static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
+ struct fib_rule_hdr *frh)
+{
+ frh->dst_len = 0;
+ frh->src_len = 0;
+ frh->tos = 0;
+ return 0;
+}
+
+static const struct fib_rules_ops __net_initdata ipmr_rules_ops_template = {
+ .family = RTNL_FAMILY_IPMR,
+ .rule_size = sizeof(struct ipmr_rule),
+ .addr_size = sizeof(u32),
+ .action = ipmr_rule_action,
+ .match = ipmr_rule_match,
+ .configure = ipmr_rule_configure,
+ .compare = ipmr_rule_compare,
+ .default_pref = fib_default_rule_pref,
+ .fill = ipmr_rule_fill,
+ .nlgroup = RTNLGRP_IPV4_RULE,
+ .policy = ipmr_rule_policy,
+ .owner = THIS_MODULE,
+};
+
+static int __net_init ipmr_rules_init(struct net *net)
+{
+ struct fib_rules_ops *ops;
+ struct mr_table *mrt;
+ int err;
+
+ ops = fib_rules_register(&ipmr_rules_ops_template, net);
+ if (IS_ERR(ops))
+ return PTR_ERR(ops);
+
+ INIT_LIST_HEAD(&net->ipv4.mr_tables);
+
+ mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
+ if (mrt == NULL) {
+ err = -ENOMEM;
+ goto err1;
+ }
+
+ err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
+ if (err < 0)
+ goto err2;
+
+ net->ipv4.mr_rules_ops = ops;
+ return 0;
+
+err2:
+ kfree(mrt);
+err1:
+ fib_rules_unregister(ops);
+ return err;
+}
+
+static void __net_exit ipmr_rules_exit(struct net *net)
+{
+ struct mr_table *mrt, *next;
+
+ list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list)
+ kfree(mrt);
+ fib_rules_unregister(net->ipv4.mr_rules_ops);
+}
+#else
+#define ipmr_for_each_table(mrt, net) \
+ for (mrt = net->ipv4.mrt; mrt; mrt = NULL)
+
+static struct mr_table *ipmr_get_table(struct net *net, u32 id)
+{
+ return net->ipv4.mrt;
+}
+
+static int ipmr_fib_lookup(struct net *net, struct flowi *flp,
+ struct mr_table **mrt)
+{
+ *mrt = net->ipv4.mrt;
+ return 0;
+}
+
+static int __net_init ipmr_rules_init(struct net *net)
+{
+ net->ipv4.mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
+ return net->ipv4.mrt ? 0 : -ENOMEM;
+}
+
+static void __net_exit ipmr_rules_exit(struct net *net)
+{
+ kfree(net->ipv4.mrt);
+}
+#endif
+
+static struct mr_table *ipmr_new_table(struct net *net, u32 id)
+{
+ struct mr_table *mrt;
+ unsigned int i;
+
+ mrt = ipmr_get_table(net, id);
+ if (mrt != NULL)
+ return mrt;
+
+ mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
+ if (mrt == NULL)
+ return NULL;
+ write_pnet(&mrt->net, net);
+ mrt->id = id;
+
+ /* Forwarding cache */
+ for (i = 0; i < MFC_LINES; i++)
+ INIT_LIST_HEAD(&mrt->mfc_cache_array[i]);
+
+ INIT_LIST_HEAD(&mrt->mfc_unres_queue);
+
+ setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
+ (unsigned long)mrt);
+
+#ifdef CONFIG_IP_PIMSM
+ mrt->mroute_reg_vif_num = -1;
+#endif
+#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
+ list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
+#endif
+ return mrt;
+}
/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
@@ -201,12 +431,22 @@ failure:
static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct net *net = dev_net(dev);
+ struct mr_table *mrt;
+ struct flowi fl = {
+ .oif = dev->ifindex,
+ .iif = skb->skb_iif,
+ .mark = skb->mark,
+ };
+ int err;
+
+ err = ipmr_fib_lookup(net, &fl, &mrt);
+ if (err < 0)
+ return err;
read_lock(&mrt_lock);
dev->stats.tx_bytes += skb->len;
dev->stats.tx_packets++;
- ipmr_cache_report(net, skb, net->ipv4.mroute_reg_vif_num,
- IGMPMSG_WHOLEPKT);
+ ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
read_unlock(&mrt_lock);
kfree_skb(skb);
return NETDEV_TX_OK;
@@ -226,12 +466,18 @@ static void reg_vif_setup(struct net_device *dev)
dev->features |= NETIF_F_NETNS_LOCAL;
}
-static struct net_device *ipmr_reg_vif(struct net *net)
+static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
struct net_device *dev;
struct in_device *in_dev;
+ char name[IFNAMSIZ];
+
+ if (mrt->id == RT_TABLE_DEFAULT)
+ sprintf(name, "pimreg");
+ else
+ sprintf(name, "pimreg%u", mrt->id);
- dev = alloc_netdev(0, "pimreg", reg_vif_setup);
+ dev = alloc_netdev(0, name, reg_vif_setup);
if (dev == NULL)
return NULL;
@@ -276,17 +522,17 @@ failure:
* @notify: Set to 1, if the caller is a notifier_call
*/
-static int vif_delete(struct net *net, int vifi, int notify,
+static int vif_delete(struct mr_table *mrt, int vifi, int notify,
struct list_head *head)
{
struct vif_device *v;
struct net_device *dev;
struct in_device *in_dev;
- if (vifi < 0 || vifi >= net->ipv4.maxvif)
+ if (vifi < 0 || vifi >= mrt->maxvif)
return -EADDRNOTAVAIL;
- v = &net->ipv4.vif_table[vifi];
+ v = &mrt->vif_table[vifi];
write_lock_bh(&mrt_lock);
dev = v->dev;
@@ -298,17 +544,17 @@ static int vif_delete(struct net *net, int vifi, int notify,
}
#ifdef CONFIG_IP_PIMSM
- if (vifi == net->ipv4.mroute_reg_vif_num)
- net->ipv4.mroute_reg_vif_num = -1;
+ if (vifi == mrt->mroute_reg_vif_num)
+ mrt->mroute_reg_vif_num = -1;
#endif
- if (vifi+1 == net->ipv4.maxvif) {
+ if (vifi+1 == mrt->maxvif) {
int tmp;
for (tmp=vifi-1; tmp>=0; tmp--) {
- if (VIF_EXISTS(net, tmp))
+ if (VIF_EXISTS(mrt, tmp))
break;
}
- net->ipv4.maxvif = tmp+1;
+ mrt->maxvif = tmp+1;
}
write_unlock_bh(&mrt_lock);
@@ -329,7 +575,6 @@ static int vif_delete(struct net *net, int vifi, int notify,
static inline void ipmr_cache_free(struct mfc_cache *c)
{
- release_net(mfc_net(c));
kmem_cache_free(mrt_cachep, c);
}
@@ -337,13 +582,13 @@ static inline void ipmr_cache_free(struct mfc_cache *c)
and reporting error to netlink readers.
*/
-static void ipmr_destroy_unres(struct mfc_cache *c)
+static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
{
+ struct net *net = read_pnet(&mrt->net);
struct sk_buff *skb;
struct nlmsgerr *e;
- struct net *net = mfc_net(c);
- atomic_dec(&net->ipv4.cache_resolve_queue_len);
+ atomic_dec(&mrt->cache_resolve_queue_len);
while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
if (ip_hdr(skb)->version == 0) {
@@ -364,42 +609,40 @@ static void ipmr_destroy_unres(struct mfc_cache *c)
}
-/* Single timer process for all the unresolved queue. */
+/* Timer process for the unresolved queue. */
-static void ipmr_expire_process(unsigned long dummy)
+static void ipmr_expire_process(unsigned long arg)
{
+ struct mr_table *mrt = (struct mr_table *)arg;
unsigned long now;
unsigned long expires;
- struct mfc_cache *c, **cp;
+ struct mfc_cache *c, *next;
if (!spin_trylock(&mfc_unres_lock)) {
- mod_timer(&ipmr_expire_timer, jiffies+HZ/10);
+ mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10);
return;
}
- if (mfc_unres_queue == NULL)
+ if (list_empty(&mrt->mfc_unres_queue))
goto out;
now = jiffies;
expires = 10*HZ;
- cp = &mfc_unres_queue;
- while ((c=*cp) != NULL) {
+ list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
if (time_after(c->mfc_un.unres.expires, now)) {
unsigned long interval = c->mfc_un.unres.expires - now;
if (interval < expires)
expires = interval;
- cp = &c->next;
continue;
}
- *cp = c->next;
-
- ipmr_destroy_unres(c);
+ list_del(&c->list);
+ ipmr_destroy_unres(mrt, c);
}
- if (mfc_unres_queue != NULL)
- mod_timer(&ipmr_expire_timer, jiffies + expires);
+ if (!list_empty(&mrt->mfc_unres_queue))
+ mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
out:
spin_unlock(&mfc_unres_lock);
@@ -407,17 +650,17 @@ out:
/* Fill oifs list. It is called under write locked mrt_lock. */
-static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls)
+static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,
+ unsigned char *ttls)
{
int vifi;
- struct net *net = mfc_net(cache);
cache->mfc_un.res.minvif = MAXVIFS;
cache->mfc_un.res.maxvif = 0;
memset(cache->mfc_un.res.ttls, 255, MAXVIFS);
- for (vifi = 0; vifi < net->ipv4.maxvif; vifi++) {
- if (VIF_EXISTS(net, vifi) &&
+ for (vifi = 0; vifi < mrt->maxvif; vifi++) {
+ if (VIF_EXISTS(mrt, vifi) &&
ttls[vifi] && ttls[vifi] < 255) {
cache->mfc_un.res.ttls[vifi] = ttls[vifi];
if (cache->mfc_un.res.minvif > vifi)
@@ -428,16 +671,17 @@ static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls)
}
}
-static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock)
+static int vif_add(struct net *net, struct mr_table *mrt,
+ struct vifctl *vifc, int mrtsock)
{
int vifi = vifc->vifc_vifi;
- struct vif_device *v = &net->ipv4.vif_table[vifi];
+ struct vif_device *v = &mrt->vif_table[vifi];
struct net_device *dev;
struct in_device *in_dev;
int err;
/* Is vif busy ? */
- if (VIF_EXISTS(net, vifi))
+ if (VIF_EXISTS(mrt, vifi))
return -EADDRINUSE;
switch (vifc->vifc_flags) {
@@ -447,9 +691,9 @@ static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock)
* Special Purpose VIF in PIM
* All the packets will be sent to the daemon
*/
- if (net->ipv4.mroute_reg_vif_num >= 0)
+ if (mrt->mroute_reg_vif_num >= 0)
return -EADDRINUSE;
- dev = ipmr_reg_vif(net);
+ dev = ipmr_reg_vif(net, mrt);
if (!dev)
return -ENOBUFS;
err = dev_set_allmulti(dev, 1);
@@ -525,49 +769,47 @@ static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock)
v->dev = dev;
#ifdef CONFIG_IP_PIMSM
if (v->flags&VIFF_REGISTER)
- net->ipv4.mroute_reg_vif_num = vifi;
+ mrt->mroute_reg_vif_num = vifi;
#endif
- if (vifi+1 > net->ipv4.maxvif)
- net->ipv4.maxvif = vifi+1;
+ if (vifi+1 > mrt->maxvif)
+ mrt->maxvif = vifi+1;
write_unlock_bh(&mrt_lock);
return 0;
}
-static struct mfc_cache *ipmr_cache_find(struct net *net,
+static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
__be32 origin,
__be32 mcastgrp)
{
int line = MFC_HASH(mcastgrp, origin);
struct mfc_cache *c;
- for (c = net->ipv4.mfc_cache_array[line]; c; c = c->next) {
- if (c->mfc_origin==origin && c->mfc_mcastgrp==mcastgrp)
- break;
+ list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
+ if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp)
+ return c;
}
- return c;
+ return NULL;
}
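The lookup above is converted from open-coded next pointers to list_head buckets, returning directly from the loop instead of break-then-return. A standalone model of the bucket scan (the hash mix is a placeholder; the kernel's MFC_HASH differs):

#include <stdint.h>
#include <stddef.h>

#define LINES 64	/* stands in for MFC_LINES */

struct mfc {
	struct mfc *next;
	uint32_t origin;
	uint32_t mcastgrp;
};

static unsigned int mfc_hash(uint32_t grp, uint32_t orig)
{
	return (grp ^ orig) % LINES;	/* placeholder mix */
}

/* Scan one bucket for an exact (origin, group) match. */
static struct mfc *cache_find(struct mfc *const *buckets,
			      uint32_t origin, uint32_t mcastgrp)
{
	struct mfc *c;

	for (c = buckets[mfc_hash(mcastgrp, origin)]; c != NULL; c = c->next) {
		if (c->origin == origin && c->mcastgrp == mcastgrp)
			return c;
	}
	return NULL;
}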
/*
* Allocate a multicast cache entry
*/
-static struct mfc_cache *ipmr_cache_alloc(struct net *net)
+static struct mfc_cache *ipmr_cache_alloc(void)
{
struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
if (c == NULL)
return NULL;
c->mfc_un.res.minvif = MAXVIFS;
- mfc_net_set(c, net);
return c;
}
-static struct mfc_cache *ipmr_cache_alloc_unres(struct net *net)
+static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
if (c == NULL)
return NULL;
skb_queue_head_init(&c->mfc_un.unres.unresolved);
c->mfc_un.unres.expires = jiffies + 10*HZ;
- mfc_net_set(c, net);
return c;
}
@@ -575,7 +817,8 @@ static struct mfc_cache *ipmr_cache_alloc_unres(struct net *net)
* A cache entry has gone into a resolved state from queued
*/
-static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
+static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
+ struct mfc_cache *uc, struct mfc_cache *c)
{
struct sk_buff *skb;
struct nlmsgerr *e;
@@ -588,7 +831,7 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
if (ip_hdr(skb)->version == 0) {
struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
- if (ipmr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
+ if (__ipmr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
nlh->nlmsg_len = (skb_tail_pointer(skb) -
(u8 *)nlh);
} else {
@@ -600,9 +843,9 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
memset(&e->msg, 0, sizeof(e->msg));
}
- rtnl_unicast(skb, mfc_net(c), NETLINK_CB(skb).pid);
+ rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
} else
- ip_mr_forward(skb, c, 0);
+ ip_mr_forward(net, mrt, skb, c, 0);
}
}
@@ -613,7 +856,7 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
* Called under mrt_lock.
*/
-static int ipmr_cache_report(struct net *net,
+static int ipmr_cache_report(struct mr_table *mrt,
struct sk_buff *pkt, vifi_t vifi, int assert)
{
struct sk_buff *skb;
@@ -646,7 +889,7 @@ static int ipmr_cache_report(struct net *net,
memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
msg->im_msgtype = IGMPMSG_WHOLEPKT;
msg->im_mbz = 0;
- msg->im_vif = net->ipv4.mroute_reg_vif_num;
+ msg->im_vif = mrt->mroute_reg_vif_num;
ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
sizeof(struct iphdr));
@@ -678,7 +921,7 @@ static int ipmr_cache_report(struct net *net,
skb->transport_header = skb->network_header;
}
- if (net->ipv4.mroute_sk == NULL) {
+ if (mrt->mroute_sk == NULL) {
kfree_skb(skb);
return -EINVAL;
}
@@ -686,7 +929,7 @@ static int ipmr_cache_report(struct net *net,
/*
* Deliver to mrouted
*/
- ret = sock_queue_rcv_skb(net->ipv4.mroute_sk, skb);
+ ret = sock_queue_rcv_skb(mrt->mroute_sk, skb);
if (ret < 0) {
if (net_ratelimit())
printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");
@@ -701,27 +944,29 @@ static int ipmr_cache_report(struct net *net,
*/
static int
-ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb)
+ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
{
+ bool found = false;
int err;
struct mfc_cache *c;
const struct iphdr *iph = ip_hdr(skb);
spin_lock_bh(&mfc_unres_lock);
- for (c=mfc_unres_queue; c; c=c->next) {
- if (net_eq(mfc_net(c), net) &&
- c->mfc_mcastgrp == iph->daddr &&
- c->mfc_origin == iph->saddr)
+ list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
+ if (c->mfc_mcastgrp == iph->daddr &&
+ c->mfc_origin == iph->saddr) {
+ found = true;
break;
+ }
}
- if (c == NULL) {
+ if (!found) {
/*
* Create a new entry if allowable
*/
- if (atomic_read(&net->ipv4.cache_resolve_queue_len) >= 10 ||
- (c = ipmr_cache_alloc_unres(net)) == NULL) {
+ if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
+ (c = ipmr_cache_alloc_unres()) == NULL) {
spin_unlock_bh(&mfc_unres_lock);
kfree_skb(skb);
@@ -738,7 +983,7 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb)
/*
* Reflect first query at mrouted.
*/
- err = ipmr_cache_report(net, skb, vifi, IGMPMSG_NOCACHE);
+ err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
if (err < 0) {
/* If the report failed throw the cache entry
out - Brad Parker
@@ -750,11 +995,11 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb)
return err;
}
- atomic_inc(&net->ipv4.cache_resolve_queue_len);
- c->next = mfc_unres_queue;
- mfc_unres_queue = c;
+ atomic_inc(&mrt->cache_resolve_queue_len);
+ list_add(&c->list, &mrt->mfc_unres_queue);
- mod_timer(&ipmr_expire_timer, c->mfc_un.unres.expires);
+ if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
+ mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
}
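Note the changed arming rule above: the old single shared timer was re-armed unconditionally on every enqueue, which could push an earlier pending deadline back, since a fresh entry always expires last (jiffies + 10*HZ). The per-table timer is now armed only on the empty-to-non-empty transition. A tiny standalone model of that rule (illustrative names):

#include <stdbool.h>

struct unres_queue {
	int len;
	unsigned long deadline;	/* pending timer deadline when armed */
	bool armed;
};

/* New entries always expire last, so an already-armed deadline is
 * necessarily sooner or equal and must be left alone. */
static void enqueue_unresolved(struct unres_queue *q, unsigned long expires)
{
	if (++q->len == 1) {
		q->deadline = expires;
		q->armed = true;
	}
}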
/*
@@ -776,19 +1021,18 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb)
* MFC cache manipulation by user space mroute daemon
*/
-static int ipmr_mfc_delete(struct net *net, struct mfcctl *mfc)
+static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc)
{
int line;
- struct mfc_cache *c, **cp;
+ struct mfc_cache *c, *next;
line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
- for (cp = &net->ipv4.mfc_cache_array[line];
- (c = *cp) != NULL; cp = &c->next) {
+ list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) {
if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
write_lock_bh(&mrt_lock);
- *cp = c->next;
+ list_del(&c->list);
write_unlock_bh(&mrt_lock);
ipmr_cache_free(c);
@@ -798,27 +1042,30 @@ static int ipmr_mfc_delete(struct net *net, struct mfcctl *mfc)
return -ENOENT;
}
-static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock)
+static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
+ struct mfcctl *mfc, int mrtsock)
{
+ bool found = false;
int line;
- struct mfc_cache *uc, *c, **cp;
+ struct mfc_cache *uc, *c;
if (mfc->mfcc_parent >= MAXVIFS)
return -ENFILE;
line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
- for (cp = &net->ipv4.mfc_cache_array[line];
- (c = *cp) != NULL; cp = &c->next) {
+ list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
- c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr)
+ c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
+ found = true;
break;
+ }
}
- if (c != NULL) {
+ if (found) {
write_lock_bh(&mrt_lock);
c->mfc_parent = mfc->mfcc_parent;
- ipmr_update_thresholds(c, mfc->mfcc_ttls);
+ ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
if (!mrtsock)
c->mfc_flags |= MFC_STATIC;
write_unlock_bh(&mrt_lock);
@@ -828,43 +1075,42 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock)
if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
return -EINVAL;
- c = ipmr_cache_alloc(net);
+ c = ipmr_cache_alloc();
if (c == NULL)
return -ENOMEM;
c->mfc_origin = mfc->mfcc_origin.s_addr;
c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
c->mfc_parent = mfc->mfcc_parent;
- ipmr_update_thresholds(c, mfc->mfcc_ttls);
+ ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
if (!mrtsock)
c->mfc_flags |= MFC_STATIC;
write_lock_bh(&mrt_lock);
- c->next = net->ipv4.mfc_cache_array[line];
- net->ipv4.mfc_cache_array[line] = c;
+ list_add(&c->list, &mrt->mfc_cache_array[line]);
write_unlock_bh(&mrt_lock);
/*
* Check to see if we resolved a queued list. If so we
* need to send on the frames and tidy up.
*/
+ found = false;
spin_lock_bh(&mfc_unres_lock);
- for (cp = &mfc_unres_queue; (uc=*cp) != NULL;
- cp = &uc->next) {
- if (net_eq(mfc_net(uc), net) &&
- uc->mfc_origin == c->mfc_origin &&
+ list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
+ if (uc->mfc_origin == c->mfc_origin &&
uc->mfc_mcastgrp == c->mfc_mcastgrp) {
- *cp = uc->next;
- atomic_dec(&net->ipv4.cache_resolve_queue_len);
+ list_del(&uc->list);
+ atomic_dec(&mrt->cache_resolve_queue_len);
+ found = true;
break;
}
}
- if (mfc_unres_queue == NULL)
- del_timer(&ipmr_expire_timer);
+ if (list_empty(&mrt->mfc_unres_queue))
+ del_timer(&mrt->ipmr_expire_timer);
spin_unlock_bh(&mfc_unres_lock);
- if (uc) {
- ipmr_cache_resolve(uc, c);
+ if (found) {
+ ipmr_cache_resolve(net, mrt, uc, c);
ipmr_cache_free(uc);
}
return 0;
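One idiom worth calling out in the hunk above: list_for_each_entry() never leaves its cursor NULL; after a full traversal it points at the head's containing structure, so the old "if (uc)" test has to become an explicit flag. A kernel-style fragment of the pattern (relies on <linux/list.h>, not standalone; 'uc' is only valid while 'found' is set):

bool found = false;

list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
	if (uc->mfc_origin == c->mfc_origin &&
	    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
		found = true;
		break;
	}
}
if (found) {
	/* safe to dereference uc here */
}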
@@ -874,53 +1120,41 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock)
* Close the multicast socket, and clear the vif tables etc
*/
-static void mroute_clean_tables(struct net *net)
+static void mroute_clean_tables(struct mr_table *mrt)
{
int i;
LIST_HEAD(list);
+ struct mfc_cache *c, *next;
/*
* Shut down all active vif entries
*/
- for (i = 0; i < net->ipv4.maxvif; i++) {
- if (!(net->ipv4.vif_table[i].flags&VIFF_STATIC))
- vif_delete(net, i, 0, &list);
+ for (i = 0; i < mrt->maxvif; i++) {
+ if (!(mrt->vif_table[i].flags&VIFF_STATIC))
+ vif_delete(mrt, i, 0, &list);
}
unregister_netdevice_many(&list);
/*
* Wipe the cache
*/
- for (i=0; i<MFC_LINES; i++) {
- struct mfc_cache *c, **cp;
-
- cp = &net->ipv4.mfc_cache_array[i];
- while ((c = *cp) != NULL) {
- if (c->mfc_flags&MFC_STATIC) {
- cp = &c->next;
+ for (i = 0; i < MFC_LINES; i++) {
+ list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
+ if (c->mfc_flags&MFC_STATIC)
continue;
- }
write_lock_bh(&mrt_lock);
- *cp = c->next;
+ list_del(&c->list);
write_unlock_bh(&mrt_lock);
ipmr_cache_free(c);
}
}
- if (atomic_read(&net->ipv4.cache_resolve_queue_len) != 0) {
- struct mfc_cache *c, **cp;
-
+ if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
spin_lock_bh(&mfc_unres_lock);
- cp = &mfc_unres_queue;
- while ((c = *cp) != NULL) {
- if (!net_eq(mfc_net(c), net)) {
- cp = &c->next;
- continue;
- }
- *cp = c->next;
-
- ipmr_destroy_unres(c);
+ list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
+ list_del(&c->list);
+ ipmr_destroy_unres(mrt, c);
}
spin_unlock_bh(&mfc_unres_lock);
}
@@ -929,16 +1163,19 @@ static void mroute_clean_tables(struct net *net)
static void mrtsock_destruct(struct sock *sk)
{
struct net *net = sock_net(sk);
+ struct mr_table *mrt;
rtnl_lock();
- if (sk == net->ipv4.mroute_sk) {
- IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
+ ipmr_for_each_table(mrt, net) {
+ if (sk == mrt->mroute_sk) {
+ IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
- write_lock_bh(&mrt_lock);
- net->ipv4.mroute_sk = NULL;
- write_unlock_bh(&mrt_lock);
+ write_lock_bh(&mrt_lock);
+ mrt->mroute_sk = NULL;
+ write_unlock_bh(&mrt_lock);
- mroute_clean_tables(net);
+ mroute_clean_tables(mrt);
+ }
}
rtnl_unlock();
}
@@ -956,9 +1193,14 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
struct vifctl vif;
struct mfcctl mfc;
struct net *net = sock_net(sk);
+ struct mr_table *mrt;
+
+ mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
+ if (mrt == NULL)
+ return -ENOENT;
if (optname != MRT_INIT) {
- if (sk != net->ipv4.mroute_sk && !capable(CAP_NET_ADMIN))
+ if (sk != mrt->mroute_sk && !capable(CAP_NET_ADMIN))
return -EACCES;
}
@@ -971,7 +1213,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
return -ENOPROTOOPT;
rtnl_lock();
- if (net->ipv4.mroute_sk) {
+ if (mrt->mroute_sk) {
rtnl_unlock();
return -EADDRINUSE;
}
@@ -979,7 +1221,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
ret = ip_ra_control(sk, 1, mrtsock_destruct);
if (ret == 0) {
write_lock_bh(&mrt_lock);
- net->ipv4.mroute_sk = sk;
+ mrt->mroute_sk = sk;
write_unlock_bh(&mrt_lock);
IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
@@ -987,7 +1229,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
rtnl_unlock();
return ret;
case MRT_DONE:
- if (sk != net->ipv4.mroute_sk)
+ if (sk != mrt->mroute_sk)
return -EACCES;
return ip_ra_control(sk, 0, NULL);
case MRT_ADD_VIF:
@@ -1000,9 +1242,9 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
return -ENFILE;
rtnl_lock();
if (optname == MRT_ADD_VIF) {
- ret = vif_add(net, &vif, sk == net->ipv4.mroute_sk);
+ ret = vif_add(net, mrt, &vif, sk == mrt->mroute_sk);
} else {
- ret = vif_delete(net, vif.vifc_vifi, 0, NULL);
+ ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
}
rtnl_unlock();
return ret;
@@ -1019,9 +1261,9 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
return -EFAULT;
rtnl_lock();
if (optname == MRT_DEL_MFC)
- ret = ipmr_mfc_delete(net, &mfc);
+ ret = ipmr_mfc_delete(mrt, &mfc);
else
- ret = ipmr_mfc_add(net, &mfc, sk == net->ipv4.mroute_sk);
+ ret = ipmr_mfc_add(net, mrt, &mfc, sk == mrt->mroute_sk);
rtnl_unlock();
return ret;
/*
@@ -1032,7 +1274,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
int v;
if (get_user(v,(int __user *)optval))
return -EFAULT;
- net->ipv4.mroute_do_assert = (v) ? 1 : 0;
+ mrt->mroute_do_assert = (v) ? 1 : 0;
return 0;
}
#ifdef CONFIG_IP_PIMSM
@@ -1046,14 +1288,35 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
rtnl_lock();
ret = 0;
- if (v != net->ipv4.mroute_do_pim) {
- net->ipv4.mroute_do_pim = v;
- net->ipv4.mroute_do_assert = v;
+ if (v != mrt->mroute_do_pim) {
+ mrt->mroute_do_pim = v;
+ mrt->mroute_do_assert = v;
}
rtnl_unlock();
return ret;
}
#endif
+#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
+ case MRT_TABLE:
+ {
+ u32 v;
+
+ if (optlen != sizeof(u32))
+ return -EINVAL;
+ if (get_user(v, (u32 __user *)optval))
+ return -EFAULT;
+ if (sk == mrt->mroute_sk)
+ return -EBUSY;
+
+ rtnl_lock();
+ ret = 0;
+ if (!ipmr_new_table(net, v))
+ ret = -ENOMEM;
+ raw_sk(sk)->ipmr_table = v;
+ rtnl_unlock();
+ return ret;
+ }
+#endif
/*
* Spurious command, or MRT_VERSION which you cannot
* set.
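For context, a hedged userspace sketch of how a routing daemon might use the new MRT_TABLE option (CONFIG_IP_MROUTE_MULTIPLE_TABLES only) to select a table before MRT_INIT. Per the hunk above, the optlen must be exactly a u32, the call fails with EBUSY once the socket is already the active mroute socket, and the table is created on demand. The numeric fallback value and the table id below are assumptions; check the installed linux/mroute.h. Raw IGMP sockets also require root/CAP_NET_ADMIN.

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef MRT_TABLE
#define MRT_TABLE 209	/* assumption: MRT_BASE (200) + 9 */
#endif

int main(void)
{
	unsigned int table = 123;	/* hypothetical table id */
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* Must happen before MRT_INIT on this socket. */
	if (setsockopt(fd, IPPROTO_IP, MRT_TABLE, &table, sizeof(table)) < 0)
		perror("setsockopt(MRT_TABLE)");
	return 0;
}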
@@ -1072,6 +1335,11 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int
int olr;
int val;
struct net *net = sock_net(sk);
+ struct mr_table *mrt;
+
+ mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
+ if (mrt == NULL)
+ return -ENOENT;
if (optname != MRT_VERSION &&
#ifdef CONFIG_IP_PIMSM
@@ -1093,10 +1361,10 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int
val = 0x0305;
#ifdef CONFIG_IP_PIMSM
else if (optname == MRT_PIM)
- val = net->ipv4.mroute_do_pim;
+ val = mrt->mroute_do_pim;
#endif
else
- val = net->ipv4.mroute_do_assert;
+ val = mrt->mroute_do_assert;
if (copy_to_user(optval, &val, olr))
return -EFAULT;
return 0;
@@ -1113,16 +1381,21 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
struct vif_device *vif;
struct mfc_cache *c;
struct net *net = sock_net(sk);
+ struct mr_table *mrt;
+
+ mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
+ if (mrt == NULL)
+ return -ENOENT;
switch (cmd) {
case SIOCGETVIFCNT:
if (copy_from_user(&vr, arg, sizeof(vr)))
return -EFAULT;
- if (vr.vifi >= net->ipv4.maxvif)
+ if (vr.vifi >= mrt->maxvif)
return -EINVAL;
read_lock(&mrt_lock);
- vif = &net->ipv4.vif_table[vr.vifi];
- if (VIF_EXISTS(net, vr.vifi)) {
+ vif = &mrt->vif_table[vr.vifi];
+ if (VIF_EXISTS(mrt, vr.vifi)) {
vr.icount = vif->pkt_in;
vr.ocount = vif->pkt_out;
vr.ibytes = vif->bytes_in;
@@ -1140,7 +1413,7 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
return -EFAULT;
read_lock(&mrt_lock);
- c = ipmr_cache_find(net, sr.src.s_addr, sr.grp.s_addr);
+ c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
if (c) {
sr.pktcnt = c->mfc_un.res.pkt;
sr.bytecnt = c->mfc_un.res.bytes;
@@ -1163,16 +1436,20 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v
{
struct net_device *dev = ptr;
struct net *net = dev_net(dev);
+ struct mr_table *mrt;
struct vif_device *v;
int ct;
LIST_HEAD(list);
if (event != NETDEV_UNREGISTER)
return NOTIFY_DONE;
- v = &net->ipv4.vif_table[0];
- for (ct = 0; ct < net->ipv4.maxvif; ct++, v++) {
- if (v->dev == dev)
- vif_delete(net, ct, 1, &list);
+
+ ipmr_for_each_table(mrt, net) {
+ v = &mrt->vif_table[0];
+ for (ct = 0; ct < mrt->maxvif; ct++, v++) {
+ if (v->dev == dev)
+ vif_delete(mrt, ct, 1, &list);
+ }
}
unregister_netdevice_many(&list);
return NOTIFY_DONE;
@@ -1231,11 +1508,11 @@ static inline int ipmr_forward_finish(struct sk_buff *skb)
* Processing handlers for ipmr_forward
*/
-static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
+static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
+ struct sk_buff *skb, struct mfc_cache *c, int vifi)
{
- struct net *net = mfc_net(c);
const struct iphdr *iph = ip_hdr(skb);
- struct vif_device *vif = &net->ipv4.vif_table[vifi];
+ struct vif_device *vif = &mrt->vif_table[vifi];
struct net_device *dev;
struct rtable *rt;
int encap = 0;
@@ -1249,7 +1526,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
vif->bytes_out += skb->len;
vif->dev->stats.tx_bytes += skb->len;
vif->dev->stats.tx_packets++;
- ipmr_cache_report(net, skb, vifi, IGMPMSG_WHOLEPKT);
+ ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
goto out_free;
}
#endif
@@ -1323,21 +1600,20 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
* not mrouter) cannot join to more than one interface - it will
* result in receiving multiple packets.
*/
- NF_HOOK(PF_INET, NF_INET_FORWARD, skb, skb->dev, dev,
+ NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev, dev,
ipmr_forward_finish);
return;
out_free:
kfree_skb(skb);
- return;
}
-static int ipmr_find_vif(struct net_device *dev)
+static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
{
- struct net *net = dev_net(dev);
int ct;
- for (ct = net->ipv4.maxvif-1; ct >= 0; ct--) {
- if (net->ipv4.vif_table[ct].dev == dev)
+
+ for (ct = mrt->maxvif-1; ct >= 0; ct--) {
+ if (mrt->vif_table[ct].dev == dev)
break;
}
return ct;
@@ -1345,11 +1621,12 @@ static int ipmr_find_vif(struct net_device *dev)
/* "local" means that we should preserve one skb (for local delivery) */
-static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local)
+static int ip_mr_forward(struct net *net, struct mr_table *mrt,
+ struct sk_buff *skb, struct mfc_cache *cache,
+ int local)
{
int psend = -1;
int vif, ct;
- struct net *net = mfc_net(cache);
vif = cache->mfc_parent;
cache->mfc_un.res.pkt++;
@@ -1358,7 +1635,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
/*
* Wrong interface: drop packet and (maybe) send PIM assert.
*/
- if (net->ipv4.vif_table[vif].dev != skb->dev) {
+ if (mrt->vif_table[vif].dev != skb->dev) {
int true_vifi;
if (skb_rtable(skb)->fl.iif == 0) {
@@ -1377,26 +1654,26 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
}
cache->mfc_un.res.wrong_if++;
- true_vifi = ipmr_find_vif(skb->dev);
+ true_vifi = ipmr_find_vif(mrt, skb->dev);
- if (true_vifi >= 0 && net->ipv4.mroute_do_assert &&
+ if (true_vifi >= 0 && mrt->mroute_do_assert &&
/* pimsm uses asserts, when switching from RPT to SPT,
so that we cannot check that packet arrived on an oif.
It is bad, but otherwise we would need to move pretty
large chunk of pimd to kernel. Ough... --ANK
*/
- (net->ipv4.mroute_do_pim ||
+ (mrt->mroute_do_pim ||
cache->mfc_un.res.ttls[true_vifi] < 255) &&
time_after(jiffies,
cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
cache->mfc_un.res.last_assert = jiffies;
- ipmr_cache_report(net, skb, true_vifi, IGMPMSG_WRONGVIF);
+ ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
}
goto dont_forward;
}
- net->ipv4.vif_table[vif].pkt_in++;
- net->ipv4.vif_table[vif].bytes_in += skb->len;
+ mrt->vif_table[vif].pkt_in++;
+ mrt->vif_table[vif].bytes_in += skb->len;
/*
* Forward the frame
@@ -1406,7 +1683,8 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
if (psend != -1) {
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2)
- ipmr_queue_xmit(skb2, cache, psend);
+ ipmr_queue_xmit(net, mrt, skb2, cache,
+ psend);
}
psend = ct;
}
@@ -1415,9 +1693,9 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
if (local) {
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2)
- ipmr_queue_xmit(skb2, cache, psend);
+ ipmr_queue_xmit(net, mrt, skb2, cache, psend);
} else {
- ipmr_queue_xmit(skb, cache, psend);
+ ipmr_queue_xmit(net, mrt, skb, cache, psend);
return 0;
}
}
@@ -1438,6 +1716,8 @@ int ip_mr_input(struct sk_buff *skb)
struct mfc_cache *cache;
struct net *net = dev_net(skb->dev);
int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
+ struct mr_table *mrt;
+ int err;
/* Packet is looped back after forward, it should not be
forwarded second time, but still can be delivered locally.
@@ -1445,6 +1725,10 @@ int ip_mr_input(struct sk_buff *skb)
if (IPCB(skb)->flags&IPSKB_FORWARDED)
goto dont_forward;
+ err = ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt);
+ if (err < 0)
+ return err;
+
if (!local) {
if (IPCB(skb)->opt.router_alert) {
if (ip_call_ra_chain(skb))
@@ -1457,9 +1741,9 @@ int ip_mr_input(struct sk_buff *skb)
that we can forward NO IGMP messages.
*/
read_lock(&mrt_lock);
- if (net->ipv4.mroute_sk) {
+ if (mrt->mroute_sk) {
nf_reset(skb);
- raw_rcv(net->ipv4.mroute_sk, skb);
+ raw_rcv(mrt->mroute_sk, skb);
read_unlock(&mrt_lock);
return 0;
}
@@ -1468,7 +1752,7 @@ int ip_mr_input(struct sk_buff *skb)
}
read_lock(&mrt_lock);
- cache = ipmr_cache_find(net, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
+ cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
/*
* No usable cache entry
@@ -1486,19 +1770,19 @@ int ip_mr_input(struct sk_buff *skb)
skb = skb2;
}
- vif = ipmr_find_vif(skb->dev);
+ vif = ipmr_find_vif(mrt, skb->dev);
if (vif >= 0) {
- int err = ipmr_cache_unresolved(net, vif, skb);
+ int err2 = ipmr_cache_unresolved(mrt, vif, skb);
read_unlock(&mrt_lock);
- return err;
+ return err2;
}
read_unlock(&mrt_lock);
kfree_skb(skb);
return -ENODEV;
}
- ip_mr_forward(skb, cache, local);
+ ip_mr_forward(net, mrt, skb, cache, local);
read_unlock(&mrt_lock);
@@ -1515,11 +1799,11 @@ dont_forward:
}
#ifdef CONFIG_IP_PIMSM
-static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen)
+static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
+ unsigned int pimlen)
{
struct net_device *reg_dev = NULL;
struct iphdr *encap;
- struct net *net = dev_net(skb->dev);
encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
/*
@@ -1534,8 +1818,8 @@ static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen)
return 1;
read_lock(&mrt_lock);
- if (net->ipv4.mroute_reg_vif_num >= 0)
- reg_dev = net->ipv4.vif_table[net->ipv4.mroute_reg_vif_num].dev;
+ if (mrt->mroute_reg_vif_num >= 0)
+ reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
if (reg_dev)
dev_hold(reg_dev);
read_unlock(&mrt_lock);
@@ -1546,14 +1830,12 @@ static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen)
skb->mac_header = skb->network_header;
skb_pull(skb, (u8*)encap - skb->data);
skb_reset_network_header(skb);
- skb->dev = reg_dev;
skb->protocol = htons(ETH_P_IP);
skb->ip_summed = 0;
skb->pkt_type = PACKET_HOST;
- skb_dst_drop(skb);
- reg_dev->stats.rx_bytes += skb->len;
- reg_dev->stats.rx_packets++;
- nf_reset(skb);
+
+ skb_tunnel_rx(skb, reg_dev);
+
netif_rx(skb);
dev_put(reg_dev);
@@ -1570,17 +1852,21 @@ int pim_rcv_v1(struct sk_buff * skb)
{
struct igmphdr *pim;
struct net *net = dev_net(skb->dev);
+ struct mr_table *mrt;
if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
goto drop;
pim = igmp_hdr(skb);
- if (!net->ipv4.mroute_do_pim ||
+ if (ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt) < 0)
+ goto drop;
+
+ if (!mrt->mroute_do_pim ||
pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
goto drop;
- if (__pim_rcv(skb, sizeof(*pim))) {
+ if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
kfree_skb(skb);
}
@@ -1592,6 +1878,8 @@ drop:
static int pim_rcv(struct sk_buff * skb)
{
struct pimreghdr *pim;
+ struct net *net = dev_net(skb->dev);
+ struct mr_table *mrt;
if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
goto drop;
@@ -1603,7 +1891,10 @@ static int pim_rcv(struct sk_buff * skb)
csum_fold(skb_checksum(skb, 0, skb->len, 0))))
goto drop;
- if (__pim_rcv(skb, sizeof(*pim))) {
+ if (ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt) < 0)
+ goto drop;
+
+ if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
kfree_skb(skb);
}
@@ -1611,12 +1902,11 @@ drop:
}
#endif
-static int
-ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
+static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+ struct mfc_cache *c, struct rtmsg *rtm)
{
int ct;
struct rtnexthop *nhp;
- struct net *net = mfc_net(c);
u8 *b = skb_tail_pointer(skb);
struct rtattr *mp_head;
@@ -1624,19 +1914,19 @@ ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
if (c->mfc_parent > MAXVIFS)
return -ENOENT;
- if (VIF_EXISTS(net, c->mfc_parent))
- RTA_PUT(skb, RTA_IIF, 4, &net->ipv4.vif_table[c->mfc_parent].dev->ifindex);
+ if (VIF_EXISTS(mrt, c->mfc_parent))
+ RTA_PUT(skb, RTA_IIF, 4, &mrt->vif_table[c->mfc_parent].dev->ifindex);
mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
- if (VIF_EXISTS(net, ct) && c->mfc_un.res.ttls[ct] < 255) {
+ if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
goto rtattr_failure;
nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
nhp->rtnh_flags = 0;
nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
- nhp->rtnh_ifindex = net->ipv4.vif_table[ct].dev->ifindex;
+ nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
nhp->rtnh_len = sizeof(*nhp);
}
}
@@ -1654,11 +1944,16 @@ int ipmr_get_route(struct net *net,
struct sk_buff *skb, struct rtmsg *rtm, int nowait)
{
int err;
+ struct mr_table *mrt;
struct mfc_cache *cache;
struct rtable *rt = skb_rtable(skb);
+ mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
+ if (mrt == NULL)
+ return -ENOENT;
+
read_lock(&mrt_lock);
- cache = ipmr_cache_find(net, rt->rt_src, rt->rt_dst);
+ cache = ipmr_cache_find(mrt, rt->rt_src, rt->rt_dst);
if (cache == NULL) {
struct sk_buff *skb2;
@@ -1672,7 +1967,7 @@ int ipmr_get_route(struct net *net,
}
dev = skb->dev;
- if (dev == NULL || (vif = ipmr_find_vif(dev)) < 0) {
+ if (dev == NULL || (vif = ipmr_find_vif(mrt, dev)) < 0) {
read_unlock(&mrt_lock);
return -ENODEV;
}
@@ -1689,24 +1984,107 @@ int ipmr_get_route(struct net *net,
iph->saddr = rt->rt_src;
iph->daddr = rt->rt_dst;
iph->version = 0;
- err = ipmr_cache_unresolved(net, vif, skb2);
+ err = ipmr_cache_unresolved(mrt, vif, skb2);
read_unlock(&mrt_lock);
return err;
}
if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
cache->mfc_flags |= MFC_NOTIFY;
- err = ipmr_fill_mroute(skb, cache, rtm);
+ err = __ipmr_fill_mroute(mrt, skb, cache, rtm);
read_unlock(&mrt_lock);
return err;
}
+static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+ u32 pid, u32 seq, struct mfc_cache *c)
+{
+ struct nlmsghdr *nlh;
+ struct rtmsg *rtm;
+
+ nlh = nlmsg_put(skb, pid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI);
+ if (nlh == NULL)
+ return -EMSGSIZE;
+
+ rtm = nlmsg_data(nlh);
+ rtm->rtm_family = RTNL_FAMILY_IPMR;
+ rtm->rtm_dst_len = 32;
+ rtm->rtm_src_len = 32;
+ rtm->rtm_tos = 0;
+ rtm->rtm_table = mrt->id;
+ NLA_PUT_U32(skb, RTA_TABLE, mrt->id);
+ rtm->rtm_type = RTN_MULTICAST;
+ rtm->rtm_scope = RT_SCOPE_UNIVERSE;
+ rtm->rtm_protocol = RTPROT_UNSPEC;
+ rtm->rtm_flags = 0;
+
+ NLA_PUT_BE32(skb, RTA_SRC, c->mfc_origin);
+ NLA_PUT_BE32(skb, RTA_DST, c->mfc_mcastgrp);
+
+ if (__ipmr_fill_mroute(mrt, skb, c, rtm) < 0)
+ goto nla_put_failure;
+
+ return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+}
+
+static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(skb->sk);
+ struct mr_table *mrt;
+ struct mfc_cache *mfc;
+ unsigned int t = 0, s_t;
+ unsigned int h = 0, s_h;
+ unsigned int e = 0, s_e;
+
+ s_t = cb->args[0];
+ s_h = cb->args[1];
+ s_e = cb->args[2];
+
+ read_lock(&mrt_lock);
+ ipmr_for_each_table(mrt, net) {
+ if (t < s_t)
+ goto next_table;
+ if (t > s_t)
+ s_h = 0;
+ for (h = s_h; h < MFC_LINES; h++) {
+ list_for_each_entry(mfc, &mrt->mfc_cache_array[h], list) {
+ if (e < s_e)
+ goto next_entry;
+ if (ipmr_fill_mroute(mrt, skb,
+ NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq,
+ mfc) < 0)
+ goto done;
+next_entry:
+ e++;
+ }
+ e = s_e = 0;
+ }
+ s_h = 0;
+next_table:
+ t++;
+ }
+done:
+ read_unlock(&mrt_lock);
+
+ cb->args[2] = e;
+ cb->args[1] = h;
+ cb->args[0] = t;
+
+ return skb->len;
+}
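The dump callback above persists a (table, hash line, entry) triple in cb->args[] so an interrupted netlink dump resumes at the entry that failed to fit. A standalone model of that skip-forward pattern, simplified to index directly (the kernel counts past list entries instead, since list positions are not indexable):

#define NTABLES 4
#define NLINES 64

/* Supplied by the surrounding (imagined) program: */
extern unsigned int nentries(unsigned int t, unsigned int h);
extern int emit(unsigned int t, unsigned int h, unsigned int e); /* <0: buffer full */

struct dump_state { unsigned int t, h, e; };	/* models cb->args[0..2] */

static void dump_resume(struct dump_state *st)
{
	unsigned int t, h = 0, e = 0;

	for (t = st->t; t < NTABLES; t++) {
		for (h = (t == st->t) ? st->h : 0; h < NLINES; h++) {
			e = (t == st->t && h == st->h) ? st->e : 0;
			for (; e < nentries(t, h); e++) {
				if (emit(t, h, e) < 0)
					goto out;	/* retry 'e' next call */
			}
		}
	}
out:
	st->t = t;
	st->h = h;
	st->e = e;
}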
+
#ifdef CONFIG_PROC_FS
/*
* The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif
*/
struct ipmr_vif_iter {
struct seq_net_private p;
+ struct mr_table *mrt;
int ct;
};
@@ -1714,11 +2092,13 @@ static struct vif_device *ipmr_vif_seq_idx(struct net *net,
struct ipmr_vif_iter *iter,
loff_t pos)
{
- for (iter->ct = 0; iter->ct < net->ipv4.maxvif; ++iter->ct) {
- if (!VIF_EXISTS(net, iter->ct))
+ struct mr_table *mrt = iter->mrt;
+
+ for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
+ if (!VIF_EXISTS(mrt, iter->ct))
continue;
if (pos-- == 0)
- return &net->ipv4.vif_table[iter->ct];
+ return &mrt->vif_table[iter->ct];
}
return NULL;
}
@@ -1726,7 +2106,15 @@ static struct vif_device *ipmr_vif_seq_idx(struct net *net,
static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(mrt_lock)
{
+ struct ipmr_vif_iter *iter = seq->private;
struct net *net = seq_file_net(seq);
+ struct mr_table *mrt;
+
+ mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
+ if (mrt == NULL)
+ return ERR_PTR(-ENOENT);
+
+ iter->mrt = mrt;
read_lock(&mrt_lock);
return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1)
@@ -1737,15 +2125,16 @@ static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct ipmr_vif_iter *iter = seq->private;
struct net *net = seq_file_net(seq);
+ struct mr_table *mrt = iter->mrt;
++*pos;
if (v == SEQ_START_TOKEN)
return ipmr_vif_seq_idx(net, iter, 0);
- while (++iter->ct < net->ipv4.maxvif) {
- if (!VIF_EXISTS(net, iter->ct))
+ while (++iter->ct < mrt->maxvif) {
+ if (!VIF_EXISTS(mrt, iter->ct))
continue;
- return &net->ipv4.vif_table[iter->ct];
+ return &mrt->vif_table[iter->ct];
}
return NULL;
}
@@ -1758,7 +2147,8 @@ static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
{
- struct net *net = seq_file_net(seq);
+ struct ipmr_vif_iter *iter = seq->private;
+ struct mr_table *mrt = iter->mrt;
if (v == SEQ_START_TOKEN) {
seq_puts(seq,
@@ -1769,7 +2159,7 @@ static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
seq_printf(seq,
"%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
- vif - net->ipv4.vif_table,
+ vif - mrt->vif_table,
name, vif->bytes_in, vif->pkt_in,
vif->bytes_out, vif->pkt_out,
vif->flags, vif->local, vif->remote);
@@ -1800,7 +2190,8 @@ static const struct file_operations ipmr_vif_fops = {
struct ipmr_mfc_iter {
struct seq_net_private p;
- struct mfc_cache **cache;
+ struct mr_table *mrt;
+ struct list_head *cache;
int ct;
};
@@ -1808,22 +2199,22 @@ struct ipmr_mfc_iter {
static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
struct ipmr_mfc_iter *it, loff_t pos)
{
+ struct mr_table *mrt = it->mrt;
struct mfc_cache *mfc;
- it->cache = net->ipv4.mfc_cache_array;
read_lock(&mrt_lock);
- for (it->ct = 0; it->ct < MFC_LINES; it->ct++)
- for (mfc = net->ipv4.mfc_cache_array[it->ct];
- mfc; mfc = mfc->next)
+ for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
+ it->cache = &mrt->mfc_cache_array[it->ct];
+ list_for_each_entry(mfc, it->cache, list)
if (pos-- == 0)
return mfc;
+ }
read_unlock(&mrt_lock);
- it->cache = &mfc_unres_queue;
spin_lock_bh(&mfc_unres_lock);
- for (mfc = mfc_unres_queue; mfc; mfc = mfc->next)
- if (net_eq(mfc_net(mfc), net) &&
- pos-- == 0)
+ it->cache = &mrt->mfc_unres_queue;
+ list_for_each_entry(mfc, it->cache, list)
+ if (pos-- == 0)
return mfc;
spin_unlock_bh(&mfc_unres_lock);
@@ -1836,7 +2227,13 @@ static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
struct ipmr_mfc_iter *it = seq->private;
struct net *net = seq_file_net(seq);
+ struct mr_table *mrt;
+ mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
+ if (mrt == NULL)
+ return ERR_PTR(-ENOENT);
+
+ it->mrt = mrt;
it->cache = NULL;
it->ct = 0;
return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
@@ -1848,37 +2245,36 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
struct mfc_cache *mfc = v;
struct ipmr_mfc_iter *it = seq->private;
struct net *net = seq_file_net(seq);
+ struct mr_table *mrt = it->mrt;
++*pos;
if (v == SEQ_START_TOKEN)
return ipmr_mfc_seq_idx(net, seq->private, 0);
- if (mfc->next)
- return mfc->next;
+ if (mfc->list.next != it->cache)
+ return list_entry(mfc->list.next, struct mfc_cache, list);
- if (it->cache == &mfc_unres_queue)
+ if (it->cache == &mrt->mfc_unres_queue)
goto end_of_list;
- BUG_ON(it->cache != net->ipv4.mfc_cache_array);
+ BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]);
while (++it->ct < MFC_LINES) {
- mfc = net->ipv4.mfc_cache_array[it->ct];
- if (mfc)
- return mfc;
+ it->cache = &mrt->mfc_cache_array[it->ct];
+ if (list_empty(it->cache))
+ continue;
+ return list_first_entry(it->cache, struct mfc_cache, list);
}
/* exhausted cache_array, show unresolved */
read_unlock(&mrt_lock);
- it->cache = &mfc_unres_queue;
+ it->cache = &mrt->mfc_unres_queue;
it->ct = 0;
spin_lock_bh(&mfc_unres_lock);
- mfc = mfc_unres_queue;
- while (mfc && !net_eq(mfc_net(mfc), net))
- mfc = mfc->next;
- if (mfc)
- return mfc;
+ if (!list_empty(it->cache))
+ return list_first_entry(it->cache, struct mfc_cache, list);
end_of_list:
spin_unlock_bh(&mfc_unres_lock);
@@ -1890,18 +2286,17 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
struct ipmr_mfc_iter *it = seq->private;
- struct net *net = seq_file_net(seq);
+ struct mr_table *mrt = it->mrt;
- if (it->cache == &mfc_unres_queue)
+ if (it->cache == &mrt->mfc_unres_queue)
spin_unlock_bh(&mfc_unres_lock);
- else if (it->cache == net->ipv4.mfc_cache_array)
+ else if (it->cache == &mrt->mfc_cache_array[it->ct])
read_unlock(&mrt_lock);
}
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
int n;
- struct net *net = seq_file_net(seq);
if (v == SEQ_START_TOKEN) {
seq_puts(seq,
@@ -1909,20 +2304,21 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
} else {
const struct mfc_cache *mfc = v;
const struct ipmr_mfc_iter *it = seq->private;
+ const struct mr_table *mrt = it->mrt;
- seq_printf(seq, "%08lX %08lX %-3hd",
- (unsigned long) mfc->mfc_mcastgrp,
- (unsigned long) mfc->mfc_origin,
+ seq_printf(seq, "%08X %08X %-3hd",
+ (__force u32) mfc->mfc_mcastgrp,
+ (__force u32) mfc->mfc_origin,
mfc->mfc_parent);
- if (it->cache != &mfc_unres_queue) {
+ if (it->cache != &mrt->mfc_unres_queue) {
seq_printf(seq, " %8lu %8lu %8lu",
mfc->mfc_un.res.pkt,
mfc->mfc_un.res.bytes,
mfc->mfc_un.res.wrong_if);
for (n = mfc->mfc_un.res.minvif;
n < mfc->mfc_un.res.maxvif; n++ ) {
- if (VIF_EXISTS(net, n) &&
+ if (VIF_EXISTS(mrt, n) &&
mfc->mfc_un.res.ttls[n] < 255)
seq_printf(seq,
" %2d:%-3d",
@@ -1974,27 +2370,11 @@ static const struct net_protocol pim_protocol = {
*/
static int __net_init ipmr_net_init(struct net *net)
{
- int err = 0;
+ int err;
- net->ipv4.vif_table = kcalloc(MAXVIFS, sizeof(struct vif_device),
- GFP_KERNEL);
- if (!net->ipv4.vif_table) {
- err = -ENOMEM;
+ err = ipmr_rules_init(net);
+ if (err < 0)
goto fail;
- }
-
- /* Forwarding cache */
- net->ipv4.mfc_cache_array = kcalloc(MFC_LINES,
- sizeof(struct mfc_cache *),
- GFP_KERNEL);
- if (!net->ipv4.mfc_cache_array) {
- err = -ENOMEM;
- goto fail_mfc_cache;
- }
-
-#ifdef CONFIG_IP_PIMSM
- net->ipv4.mroute_reg_vif_num = -1;
-#endif
#ifdef CONFIG_PROC_FS
err = -ENOMEM;
@@ -2009,10 +2389,8 @@ static int __net_init ipmr_net_init(struct net *net)
proc_cache_fail:
proc_net_remove(net, "ip_mr_vif");
proc_vif_fail:
- kfree(net->ipv4.mfc_cache_array);
+ ipmr_rules_exit(net);
#endif
-fail_mfc_cache:
- kfree(net->ipv4.vif_table);
fail:
return err;
}
@@ -2023,8 +2401,7 @@ static void __net_exit ipmr_net_exit(struct net *net)
proc_net_remove(net, "ip_mr_cache");
proc_net_remove(net, "ip_mr_vif");
#endif
- kfree(net->ipv4.mfc_cache_array);
- kfree(net->ipv4.vif_table);
+ ipmr_rules_exit(net);
}
static struct pernet_operations ipmr_net_ops = {
@@ -2047,7 +2424,6 @@ int __init ip_mr_init(void)
if (err)
goto reg_pernet_fail;
- setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0);
err = register_netdevice_notifier(&ip_mr_notifier);
if (err)
goto reg_notif_fail;
@@ -2058,6 +2434,7 @@ int __init ip_mr_init(void)
goto add_proto_fail;
}
#endif
+ rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE, NULL, ipmr_rtm_dumproute);
return 0;
#ifdef CONFIG_IP_PIMSM_V2
@@ -2065,7 +2442,6 @@ add_proto_fail:
unregister_netdevice_notifier(&ip_mr_notifier);
#endif
reg_notif_fail:
- del_timer(&ipmr_expire_timer);
unregister_pernet_subsys(&ipmr_net_ops);
reg_pernet_fail:
kmem_cache_destroy(mrt_cachep);
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 82fb43c5c59e..07de855e2175 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -17,7 +17,7 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
const struct iphdr *iph = ip_hdr(skb);
struct rtable *rt;
struct flowi fl = {};
- struct dst_entry *odst;
+ unsigned long orefdst;
unsigned int hh_len;
unsigned int type;
@@ -51,14 +51,14 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
if (ip_route_output_key(net, &rt, &fl) != 0)
return -1;
- odst = skb_dst(skb);
+ orefdst = skb->_skb_refdst;
if (ip_route_input(skb, iph->daddr, iph->saddr,
RT_TOS(iph->tos), rt->u.dst.dev) != 0) {
dst_release(&rt->u.dst);
return -1;
}
dst_release(&rt->u.dst);
- dst_release(odst);
+ refdst_drop(orefdst);
}
if (skb_dst(skb)->error)
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index f07d77f65751..1ac01b128621 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -49,12 +49,7 @@ MODULE_DESCRIPTION("arptables core");
#endif
#ifdef CONFIG_NETFILTER_DEBUG
-#define ARP_NF_ASSERT(x) \
-do { \
- if (!(x)) \
- printk("ARP_NF_ASSERT: %s:%s:%u\n", \
- __func__, __FILE__, __LINE__); \
-} while(0)
+#define ARP_NF_ASSERT(x) WARN_ON(!(x))
#else
#define ARP_NF_ASSERT(x)
#endif
@@ -224,10 +219,10 @@ static inline int arp_checkentry(const struct arpt_arp *arp)
}
static unsigned int
-arpt_error(struct sk_buff *skb, const struct xt_target_param *par)
+arpt_error(struct sk_buff *skb, const struct xt_action_param *par)
{
if (net_ratelimit())
- printk("arp_tables: error: '%s'\n",
+ pr_err("arp_tables: error: '%s'\n",
(const char *)par->targinfo);
return NF_DROP;
@@ -260,12 +255,11 @@ unsigned int arpt_do_table(struct sk_buff *skb,
static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
unsigned int verdict = NF_DROP;
const struct arphdr *arp;
- bool hotdrop = false;
struct arpt_entry *e, *back;
const char *indev, *outdev;
void *table_base;
const struct xt_table_info *private;
- struct xt_target_param tgpar;
+ struct xt_action_param acpar;
if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
return NF_DROP;
@@ -280,10 +274,11 @@ unsigned int arpt_do_table(struct sk_buff *skb,
e = get_entry(table_base, private->hook_entry[hook]);
back = get_entry(table_base, private->underflow[hook]);
- tgpar.in = in;
- tgpar.out = out;
- tgpar.hooknum = hook;
- tgpar.family = NFPROTO_ARP;
+ acpar.in = in;
+ acpar.out = out;
+ acpar.hooknum = hook;
+ acpar.family = NFPROTO_ARP;
+ acpar.hotdrop = false;
arp = arp_hdr(skb);
do {
@@ -333,9 +328,9 @@ unsigned int arpt_do_table(struct sk_buff *skb,
/* Targets which reenter must return
* abs. verdicts
*/
- tgpar.target = t->u.kernel.target;
- tgpar.targinfo = t->data;
- verdict = t->u.kernel.target->target(skb, &tgpar);
+ acpar.target = t->u.kernel.target;
+ acpar.targinfo = t->data;
+ verdict = t->u.kernel.target->target(skb, &acpar);
/* Target might have changed stuff. */
arp = arp_hdr(skb);
@@ -345,10 +340,10 @@ unsigned int arpt_do_table(struct sk_buff *skb,
else
/* Verdict */
break;
- } while (!hotdrop);
+ } while (!acpar.hotdrop);
xt_info_rdunlock_bh();
- if (hotdrop)
+ if (acpar.hotdrop)
return NF_DROP;
else
return verdict;
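The conversion above folds xt_target_param (and, in the ip_tables hunks below, xt_match_param) into a single per-invocation xt_action_param, with hotdrop a plain member instead of a pointer into the caller's stack frame. An illustrative shape only; the real struct lives in include/linux/netfilter/x_tables.h and carries more fields:

#include <stdbool.h>
#include <stdint.h>

struct action_param_sketch {
	const void *matchinfo;	/* per-rule match data */
	const void *targinfo;	/* per-rule target data */
	unsigned int hooknum;
	uint8_t family;
	bool hotdrop;		/* a match sets this to force a drop */
};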
@@ -390,7 +385,7 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
int visited = e->comefrom & (1 << hook);
if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) {
- printk("arptables: loop hook %u pos %u %08X.\n",
+ pr_notice("arptables: loop hook %u pos %u %08X.\n",
hook, pos, e->comefrom);
return 0;
}
@@ -523,13 +518,11 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
return ret;
t = arpt_get_target(e);
- target = try_then_request_module(xt_find_target(NFPROTO_ARP,
- t->u.user.name,
- t->u.user.revision),
- "arpt_%s", t->u.user.name);
- if (IS_ERR(target) || !target) {
+ target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
+ t->u.user.revision);
+ if (IS_ERR(target)) {
duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
- ret = target ? PTR_ERR(target) : -ENOENT;
+ ret = PTR_ERR(target);
goto out;
}
t->u.kernel.target = target;
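The switch to xt_request_find_target() above removes the "NULL or ERR_PTR" ambiguity: the helper returns either a valid pointer or an error pointer, never NULL, so callers test IS_ERR() alone. A standalone model of that kernel convention (mirrors include/linux/err.h; the lookup function is a stand-in):

#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	/* negative errnos occupy the top page of the address space */
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

extern void *lookup(const char *name);	/* may return NULL */

static void *request_find(const char *name)
{
	void *t = lookup(name);

	return t ? t : ERR_PTR(-ENOENT);	/* never NULL */
}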
@@ -651,6 +644,9 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
if (ret != 0)
break;
++i;
+ if (strcmp(arpt_get_target(iter)->u.user.name,
+ XT_ERROR_TARGET) == 0)
+ ++newinfo->stacksize;
}
duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret);
if (ret != 0)
@@ -1252,14 +1248,12 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
entry_offset = (void *)e - (void *)base;
t = compat_arpt_get_target(e);
- target = try_then_request_module(xt_find_target(NFPROTO_ARP,
- t->u.user.name,
- t->u.user.revision),
- "arpt_%s", t->u.user.name);
- if (IS_ERR(target) || !target) {
+ target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
+ t->u.user.revision);
+ if (IS_ERR(target)) {
duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
t->u.user.name);
- ret = target ? PTR_ERR(target) : -ENOENT;
+ ret = PTR_ERR(target);
goto out;
}
t->u.kernel.target = target;
@@ -1778,8 +1772,7 @@ struct xt_table *arpt_register_table(struct net *net,
{
int ret;
struct xt_table_info *newinfo;
- struct xt_table_info bootstrap
- = { 0, 0, 0, { 0 }, { 0 }, { } };
+ struct xt_table_info bootstrap = {0};
void *loc_cpu_entry;
struct xt_table *new_table;
@@ -1830,22 +1823,23 @@ void arpt_unregister_table(struct xt_table *table)
}
/* The built-in targets: standard (NULL) and error. */
-static struct xt_target arpt_standard_target __read_mostly = {
- .name = ARPT_STANDARD_TARGET,
- .targetsize = sizeof(int),
- .family = NFPROTO_ARP,
+static struct xt_target arpt_builtin_tg[] __read_mostly = {
+ {
+ .name = ARPT_STANDARD_TARGET,
+ .targetsize = sizeof(int),
+ .family = NFPROTO_ARP,
#ifdef CONFIG_COMPAT
- .compatsize = sizeof(compat_int_t),
- .compat_from_user = compat_standard_from_user,
- .compat_to_user = compat_standard_to_user,
+ .compatsize = sizeof(compat_int_t),
+ .compat_from_user = compat_standard_from_user,
+ .compat_to_user = compat_standard_to_user,
#endif
-};
-
-static struct xt_target arpt_error_target __read_mostly = {
- .name = ARPT_ERROR_TARGET,
- .target = arpt_error,
- .targetsize = ARPT_FUNCTION_MAXNAMELEN,
- .family = NFPROTO_ARP,
+ },
+ {
+ .name = ARPT_ERROR_TARGET,
+ .target = arpt_error,
+ .targetsize = ARPT_FUNCTION_MAXNAMELEN,
+ .family = NFPROTO_ARP,
+ },
};
static struct nf_sockopt_ops arpt_sockopts = {
@@ -1889,12 +1883,9 @@ static int __init arp_tables_init(void)
goto err1;
/* Noone else will be downing sem now, so we won't sleep */
- ret = xt_register_target(&arpt_standard_target);
+ ret = xt_register_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg));
if (ret < 0)
goto err2;
- ret = xt_register_target(&arpt_error_target);
- if (ret < 0)
- goto err3;
/* Register setsockopt */
ret = nf_register_sockopt(&arpt_sockopts);
@@ -1905,9 +1896,7 @@ static int __init arp_tables_init(void)
return 0;
err4:
- xt_unregister_target(&arpt_error_target);
-err3:
- xt_unregister_target(&arpt_standard_target);
+ xt_unregister_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg));
err2:
unregister_pernet_subsys(&arp_tables_net_ops);
err1:
@@ -1917,8 +1906,7 @@ err1:
static void __exit arp_tables_fini(void)
{
nf_unregister_sockopt(&arpt_sockopts);
- xt_unregister_target(&arpt_error_target);
- xt_unregister_target(&arpt_standard_target);
+ xt_unregister_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg));
unregister_pernet_subsys(&arp_tables_net_ops);
}
diff --git a/net/ipv4/netfilter/arpt_mangle.c b/net/ipv4/netfilter/arpt_mangle.c
index b0d5b1d0a769..e1be7dd1171b 100644
--- a/net/ipv4/netfilter/arpt_mangle.c
+++ b/net/ipv4/netfilter/arpt_mangle.c
@@ -9,7 +9,7 @@ MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>");
MODULE_DESCRIPTION("arptables arp payload mangle target");
static unsigned int
-target(struct sk_buff *skb, const struct xt_target_param *par)
+target(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct arpt_mangle *mangle = par->targinfo;
const struct arphdr *arp;
@@ -54,7 +54,7 @@ target(struct sk_buff *skb, const struct xt_target_param *par)
return mangle->target;
}
-static bool checkentry(const struct xt_tgchk_param *par)
+static int checkentry(const struct xt_tgchk_param *par)
{
const struct arpt_mangle *mangle = par->targinfo;
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index e2787048aa0a..a4e5fc5df4bf 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -161,8 +161,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
break;
case IPQ_COPY_PACKET:
- if ((entry->skb->ip_summed == CHECKSUM_PARTIAL ||
- entry->skb->ip_summed == CHECKSUM_COMPLETE) &&
+ if (entry->skb->ip_summed == CHECKSUM_PARTIAL &&
(*errp = skb_checksum_help(entry->skb))) {
read_unlock_bh(&queue_lock);
return NULL;
@@ -462,7 +461,6 @@ __ipq_rcv_skb(struct sk_buff *skb)
if (flags & NLM_F_ACK)
netlink_ack(skb, nlh, 0);
- return;
}
static void
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index b29c66df8d1f..63958f3394a5 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -39,24 +39,19 @@ MODULE_DESCRIPTION("IPv4 packet filter");
/*#define DEBUG_IP_FIREWALL_USER*/
#ifdef DEBUG_IP_FIREWALL
-#define dprintf(format, args...) printk(format , ## args)
+#define dprintf(format, args...) pr_info(format , ## args)
#else
#define dprintf(format, args...)
#endif
#ifdef DEBUG_IP_FIREWALL_USER
-#define duprintf(format, args...) printk(format , ## args)
+#define duprintf(format, args...) pr_info(format , ## args)
#else
#define duprintf(format, args...)
#endif
#ifdef CONFIG_NETFILTER_DEBUG
-#define IP_NF_ASSERT(x) \
-do { \
- if (!(x)) \
- printk("IP_NF_ASSERT: %s:%s:%u\n", \
- __func__, __FILE__, __LINE__); \
-} while(0)
+#define IP_NF_ASSERT(x) WARN_ON(!(x))
#else
#define IP_NF_ASSERT(x)
#endif
@@ -165,30 +160,14 @@ ip_checkentry(const struct ipt_ip *ip)
}
static unsigned int
-ipt_error(struct sk_buff *skb, const struct xt_target_param *par)
+ipt_error(struct sk_buff *skb, const struct xt_action_param *par)
{
if (net_ratelimit())
- printk("ip_tables: error: `%s'\n",
- (const char *)par->targinfo);
+ pr_info("error: `%s'\n", (const char *)par->targinfo);
return NF_DROP;
}
-/* Performance critical - called for every packet */
-static inline bool
-do_match(const struct ipt_entry_match *m, const struct sk_buff *skb,
- struct xt_match_param *par)
-{
- par->match = m->u.kernel.match;
- par->matchinfo = m->data;
-
- /* Stop iteration if it doesn't match */
- if (!m->u.kernel.match->match(skb, par))
- return true;
- else
- return false;
-}
-
/* Performance critical */
static inline struct ipt_entry *
get_entry(const void *base, unsigned int offset)
@@ -322,19 +301,16 @@ ipt_do_table(struct sk_buff *skb,
const struct net_device *out,
struct xt_table *table)
{
-#define tb_comefrom ((struct ipt_entry *)table_base)->comefrom
-
static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
const struct iphdr *ip;
- bool hotdrop = false;
/* Initializing verdict to NF_DROP keeps gcc happy. */
unsigned int verdict = NF_DROP;
const char *indev, *outdev;
const void *table_base;
- struct ipt_entry *e, *back;
+ struct ipt_entry *e, **jumpstack;
+ unsigned int *stackptr, origptr, cpu;
const struct xt_table_info *private;
- struct xt_match_param mtpar;
- struct xt_target_param tgpar;
+ struct xt_action_param acpar;
/* Initialization */
ip = ip_hdr(skb);
@@ -346,40 +322,47 @@ ipt_do_table(struct sk_buff *skb,
* things we don't know, ie. tcp syn flag or ports). If the
* rule is also a fragment-specific rule, non-fragments won't
* match it. */
- mtpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
- mtpar.thoff = ip_hdrlen(skb);
- mtpar.hotdrop = &hotdrop;
- mtpar.in = tgpar.in = in;
- mtpar.out = tgpar.out = out;
- mtpar.family = tgpar.family = NFPROTO_IPV4;
- mtpar.hooknum = tgpar.hooknum = hook;
+ acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
+ acpar.thoff = ip_hdrlen(skb);
+ acpar.hotdrop = false;
+ acpar.in = in;
+ acpar.out = out;
+ acpar.family = NFPROTO_IPV4;
+ acpar.hooknum = hook;
IP_NF_ASSERT(table->valid_hooks & (1 << hook));
xt_info_rdlock_bh();
private = table->private;
- table_base = private->entries[smp_processor_id()];
+ cpu = smp_processor_id();
+ table_base = private->entries[cpu];
+ jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
+ stackptr = &private->stackptr[cpu];
+ origptr = *stackptr;
e = get_entry(table_base, private->hook_entry[hook]);
- /* For return from builtin chain */
- back = get_entry(table_base, private->underflow[hook]);
+ pr_debug("Entering %s(hook %u); sp at %u (UF %p)\n",
+ table->name, hook, origptr,
+ get_entry(table_base, private->underflow[hook]));
do {
const struct ipt_entry_target *t;
const struct xt_entry_match *ematch;
IP_NF_ASSERT(e);
- IP_NF_ASSERT(back);
if (!ip_packet_match(ip, indev, outdev,
- &e->ip, mtpar.fragoff)) {
+ &e->ip, acpar.fragoff)) {
no_match:
e = ipt_next_entry(e);
continue;
}
- xt_ematch_foreach(ematch, e)
- if (do_match(ematch, skb, &mtpar) != 0)
+ xt_ematch_foreach(ematch, e) {
+ acpar.match = ematch->u.kernel.match;
+ acpar.matchinfo = ematch->data;
+ if (!acpar.match->match(skb, &acpar))
goto no_match;
+ }
ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
@@ -404,41 +387,38 @@ ipt_do_table(struct sk_buff *skb,
verdict = (unsigned)(-v) - 1;
break;
}
- e = back;
- back = get_entry(table_base, back->comefrom);
+ if (*stackptr == 0) {
+ e = get_entry(table_base,
+ private->underflow[hook]);
+ pr_debug("Underflow (this is normal) "
+ "to %p\n", e);
+ } else {
+ e = jumpstack[--*stackptr];
+ pr_debug("Pulled %p out from pos %u\n",
+ e, *stackptr);
+ e = ipt_next_entry(e);
+ }
continue;
}
if (table_base + v != ipt_next_entry(e) &&
!(e->ip.flags & IPT_F_GOTO)) {
- /* Save old back ptr in next entry */
- struct ipt_entry *next = ipt_next_entry(e);
- next->comefrom = (void *)back - table_base;
- /* set back pointer to next entry */
- back = next;
+ if (*stackptr >= private->stacksize) {
+ verdict = NF_DROP;
+ break;
+ }
+ jumpstack[(*stackptr)++] = e;
+ pr_debug("Pushed %p into pos %u\n",
+ e, *stackptr - 1);
}
e = get_entry(table_base, v);
continue;
}
- /* Targets which reenter must return
- abs. verdicts */
- tgpar.target = t->u.kernel.target;
- tgpar.targinfo = t->data;
-
+ acpar.target = t->u.kernel.target;
+ acpar.targinfo = t->data;
-#ifdef CONFIG_NETFILTER_DEBUG
- tb_comefrom = 0xeeeeeeec;
-#endif
- verdict = t->u.kernel.target->target(skb, &tgpar);
-#ifdef CONFIG_NETFILTER_DEBUG
- if (tb_comefrom != 0xeeeeeeec && verdict == IPT_CONTINUE) {
- printk("Target %s reentered!\n",
- t->u.kernel.target->name);
- verdict = NF_DROP;
- }
- tb_comefrom = 0x57acc001;
-#endif
+ verdict = t->u.kernel.target->target(skb, &acpar);
/* Target might have changed stuff. */
ip = ip_hdr(skb);
if (verdict == IPT_CONTINUE)
@@ -446,18 +426,18 @@ ipt_do_table(struct sk_buff *skb,
else
/* Verdict */
break;
- } while (!hotdrop);
+ } while (!acpar.hotdrop);
xt_info_rdunlock_bh();
-
+ pr_debug("Exiting %s; resetting sp from %u to %u\n",
+ __func__, *stackptr, origptr);
+ *stackptr = origptr;
#ifdef DEBUG_ALLOW_ALL
return NF_ACCEPT;
#else
- if (hotdrop)
+ if (acpar.hotdrop)
return NF_DROP;
else return verdict;
#endif
-
-#undef tb_comefrom
}
/* Figures out from what hook each rule can be called: returns 0 if
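The hunk above replaces the old scheme of threading a return path through rule memory (the 'back' pointer stored via comefrom) with a per-CPU explicit jump stack, sized in the translate_table hunks by counting XT_ERROR_TARGET entries, since each user-defined chain starts with one and that bounds call depth. A standalone sketch of the stack discipline (illustrative; in the kernel, the popped entry is then advanced with ipt_next_entry()):

#define STACKSIZE 16	/* the kernel derives this from the chain count */

struct rule;		/* opaque for this sketch */

struct walk_stack {
	const struct rule *slot[STACKSIZE];
	unsigned int sp;
};

/* On a jump to another chain, remember where to come back to;
 * overflow maps to "drop the packet", as in the hunk above. */
static int push_call(struct walk_stack *w, const struct rule *from)
{
	if (w->sp >= STACKSIZE)
		return -1;
	w->slot[w->sp++] = from;
	return 0;
}

/* RETURN pops the caller; an empty stack falls back to the
 * built-in chain's underflow rule. */
static const struct rule *pop_return(struct walk_stack *w,
				     const struct rule *underflow)
{
	return w->sp ? w->slot[--w->sp] : underflow;
}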
@@ -486,7 +466,7 @@ mark_source_chains(const struct xt_table_info *newinfo,
int visited = e->comefrom & (1 << hook);
if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
- printk("iptables: loop hook %u pos %u %08X.\n",
+ pr_err("iptables: loop hook %u pos %u %08X.\n",
hook, pos, e->comefrom);
return 0;
}
@@ -591,7 +571,7 @@ check_entry(const struct ipt_entry *e, const char *name)
const struct ipt_entry_target *t;
if (!ip_checkentry(&e->ip)) {
- duprintf("ip_tables: ip check failed %p %s.\n", e, name);
+ duprintf("ip check failed %p %s.\n", e, par->match->name);
return -EINVAL;
}
@@ -618,8 +598,7 @@ check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par)
ret = xt_check_match(par, m->u.match_size - sizeof(*m),
ip->proto, ip->invflags & IPT_INV_PROTO);
if (ret < 0) {
- duprintf("ip_tables: check failed for `%s'.\n",
- par.match->name);
+ duprintf("check failed for `%s'.\n", par->match->name);
return ret;
}
return 0;
@@ -631,12 +610,11 @@ find_check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par)
struct xt_match *match;
int ret;
- match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
- m->u.user.revision),
- "ipt_%s", m->u.user.name);
- if (IS_ERR(match) || !match) {
+ match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
+ m->u.user.revision);
+ if (IS_ERR(match)) {
duprintf("find_check_match: `%s' not found\n", m->u.user.name);
- return match ? PTR_ERR(match) : -ENOENT;
+ return PTR_ERR(match);
}
m->u.kernel.match = match;
@@ -667,7 +645,7 @@ static int check_target(struct ipt_entry *e, struct net *net, const char *name)
ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
if (ret < 0) {
- duprintf("ip_tables: check failed for `%s'.\n",
+ duprintf("check failed for `%s'.\n",
t->u.kernel.target->name);
return ret;
}
@@ -703,13 +681,11 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
}
t = ipt_get_target(e);
- target = try_then_request_module(xt_find_target(AF_INET,
- t->u.user.name,
- t->u.user.revision),
- "ipt_%s", t->u.user.name);
- if (IS_ERR(target) || !target) {
+ target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
+ t->u.user.revision);
+ if (IS_ERR(target)) {
duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
- ret = target ? PTR_ERR(target) : -ENOENT;
+ ret = PTR_ERR(target);
goto cleanup_matches;
}
t->u.kernel.target = target;
@@ -843,6 +819,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
if (ret != 0)
return ret;
++i;
+ if (strcmp(ipt_get_target(iter)->u.user.name,
+ XT_ERROR_TARGET) == 0)
+ ++newinfo->stacksize;
}
if (i != repl->num_entries) {
@@ -1311,7 +1290,7 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
if (ret != 0)
goto free_newinfo;
- duprintf("ip_tables: Translated table\n");
+ duprintf("Translated table\n");
ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
tmp.num_counters, tmp.counters);
@@ -1476,13 +1455,12 @@ compat_find_calc_match(struct ipt_entry_match *m,
{
struct xt_match *match;
- match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
- m->u.user.revision),
- "ipt_%s", m->u.user.name);
- if (IS_ERR(match) || !match) {
+ match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
+ m->u.user.revision);
+ if (IS_ERR(match)) {
duprintf("compat_check_calc_match: `%s' not found\n",
m->u.user.name);
- return match ? PTR_ERR(match) : -ENOENT;
+ return PTR_ERR(match);
}
m->u.kernel.match = match;
*size += xt_compat_match_offset(match);
@@ -1549,14 +1527,12 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
}
t = compat_ipt_get_target(e);
- target = try_then_request_module(xt_find_target(AF_INET,
- t->u.user.name,
- t->u.user.revision),
- "ipt_%s", t->u.user.name);
- if (IS_ERR(target) || !target) {
+ target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
+ t->u.user.revision);
+ if (IS_ERR(target)) {
duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
t->u.user.name);
- ret = target ? PTR_ERR(target) : -ENOENT;
+ ret = PTR_ERR(target);
goto release_matches;
}
t->u.kernel.target = target;
@@ -2094,8 +2070,7 @@ struct xt_table *ipt_register_table(struct net *net,
{
int ret;
struct xt_table_info *newinfo;
- struct xt_table_info bootstrap
- = { 0, 0, 0, { 0 }, { 0 }, { } };
+ struct xt_table_info bootstrap = {0};
void *loc_cpu_entry;
struct xt_table *new_table;
@@ -2157,7 +2132,7 @@ icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
}
static bool
-icmp_match(const struct sk_buff *skb, const struct xt_match_param *par)
+icmp_match(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct icmphdr *ic;
struct icmphdr _icmph;
@@ -2173,7 +2148,7 @@ icmp_match(const struct sk_buff *skb, const struct xt_match_param *par)
* can't. Hence, no choice but to drop.
*/
duprintf("Dropping evil ICMP tinygram.\n");
- *par->hotdrop = true;
+ par->hotdrop = true;
return false;
}
@@ -2184,31 +2159,31 @@ icmp_match(const struct sk_buff *skb, const struct xt_match_param *par)
!!(icmpinfo->invflags&IPT_ICMP_INV));
}
-static bool icmp_checkentry(const struct xt_mtchk_param *par)
+static int icmp_checkentry(const struct xt_mtchk_param *par)
{
const struct ipt_icmp *icmpinfo = par->matchinfo;
/* Must specify no unknown invflags */
- return !(icmpinfo->invflags & ~IPT_ICMP_INV);
+ return (icmpinfo->invflags & ~IPT_ICMP_INV) ? -EINVAL : 0;
}
-/* The built-in targets: standard (NULL) and error. */
-static struct xt_target ipt_standard_target __read_mostly = {
- .name = IPT_STANDARD_TARGET,
- .targetsize = sizeof(int),
- .family = NFPROTO_IPV4,
+static struct xt_target ipt_builtin_tg[] __read_mostly = {
+ {
+ .name = IPT_STANDARD_TARGET,
+ .targetsize = sizeof(int),
+ .family = NFPROTO_IPV4,
#ifdef CONFIG_COMPAT
- .compatsize = sizeof(compat_int_t),
- .compat_from_user = compat_standard_from_user,
- .compat_to_user = compat_standard_to_user,
+ .compatsize = sizeof(compat_int_t),
+ .compat_from_user = compat_standard_from_user,
+ .compat_to_user = compat_standard_to_user,
#endif
-};
-
-static struct xt_target ipt_error_target __read_mostly = {
- .name = IPT_ERROR_TARGET,
- .target = ipt_error,
- .targetsize = IPT_FUNCTION_MAXNAMELEN,
- .family = NFPROTO_IPV4,
+ },
+ {
+ .name = IPT_ERROR_TARGET,
+ .target = ipt_error,
+ .targetsize = IPT_FUNCTION_MAXNAMELEN,
+ .family = NFPROTO_IPV4,
+ },
};
static struct nf_sockopt_ops ipt_sockopts = {
@@ -2228,13 +2203,15 @@ static struct nf_sockopt_ops ipt_sockopts = {
.owner = THIS_MODULE,
};
-static struct xt_match icmp_matchstruct __read_mostly = {
- .name = "icmp",
- .match = icmp_match,
- .matchsize = sizeof(struct ipt_icmp),
- .checkentry = icmp_checkentry,
- .proto = IPPROTO_ICMP,
- .family = NFPROTO_IPV4,
+static struct xt_match ipt_builtin_mt[] __read_mostly = {
+ {
+ .name = "icmp",
+ .match = icmp_match,
+ .matchsize = sizeof(struct ipt_icmp),
+ .checkentry = icmp_checkentry,
+ .proto = IPPROTO_ICMP,
+ .family = NFPROTO_IPV4,
+ },
};
static int __net_init ip_tables_net_init(struct net *net)
@@ -2261,13 +2238,10 @@ static int __init ip_tables_init(void)
goto err1;
/* No one else will be downing sem now, so we won't sleep */
- ret = xt_register_target(&ipt_standard_target);
+ ret = xt_register_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
if (ret < 0)
goto err2;
- ret = xt_register_target(&ipt_error_target);
- if (ret < 0)
- goto err3;
- ret = xt_register_match(&icmp_matchstruct);
+ ret = xt_register_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
if (ret < 0)
goto err4;
@@ -2276,15 +2250,13 @@ static int __init ip_tables_init(void)
if (ret < 0)
goto err5;
- printk(KERN_INFO "ip_tables: (C) 2000-2006 Netfilter Core Team\n");
+ pr_info("(C) 2000-2006 Netfilter Core Team\n");
return 0;
err5:
- xt_unregister_match(&icmp_matchstruct);
+ xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
err4:
- xt_unregister_target(&ipt_error_target);
-err3:
- xt_unregister_target(&ipt_standard_target);
+ xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
err2:
unregister_pernet_subsys(&ip_tables_net_ops);
err1:
@@ -2295,10 +2267,8 @@ static void __exit ip_tables_fini(void)
{
nf_unregister_sockopt(&ipt_sockopts);
- xt_unregister_match(&icmp_matchstruct);
- xt_unregister_target(&ipt_error_target);
- xt_unregister_target(&ipt_standard_target);
-
+ xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
+ xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
unregister_pernet_subsys(&ip_tables_net_ops);
}
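The ip_tables.c hunks above capture three API changes at once: the old xt_match_param/xt_target_param pair is folded into a single struct xt_action_param whose hotdrop field is a plain member rather than a bool pointer, ->checkentry now reports errors as a negative errno instead of a bool, and the built-in standard/error targets plus the icmp match are registered as arrays via xt_register_targets()/xt_register_matches() with ARRAY_SIZE(). A minimal sketch of a match written against the reworked API; the names example_mt/example_mt_check and the discard-port test are illustrative, not from the patch:

#include <linux/udp.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>

static bool example_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	struct udphdr _hdr;
	const struct udphdr *uh;

	uh = skb_header_pointer(skb, par->thoff, sizeof(_hdr), &_hdr);
	if (uh == NULL) {
		/* asked to inspect a truncated header: drop, directly */
		par->hotdrop = true;	/* was: *par->hotdrop = true */
		return false;
	}
	return ntohs(uh->dest) == 9;	/* hypothetical match condition */
}

static int example_mt_check(const struct xt_mtchk_param *par)
{
	const struct ipt_ip *ip = par->entryinfo;

	/* was: return false -- now a negative errno */
	if (ip->invflags & IPT_INV_PROTO)
		return -EINVAL;
	return 0;
}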
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index ab828400ed71..f91c94b9a790 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -9,6 +9,7 @@
* published by the Free Software Foundation.
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/jhash.h>
@@ -88,7 +89,7 @@ clusterip_config_entry_put(struct clusterip_config *c)
list_del(&c->list);
write_unlock_bh(&clusterip_lock);
- dev_mc_delete(c->dev, c->clustermac, ETH_ALEN, 0);
+ dev_mc_del(c->dev, c->clustermac);
dev_put(c->dev);
/* In case anyone still accesses the file, the open/close
@@ -239,8 +240,7 @@ clusterip_hashfn(const struct sk_buff *skb,
break;
default:
if (net_ratelimit())
- printk(KERN_NOTICE "CLUSTERIP: unknown protocol `%u'\n",
- iph->protocol);
+ pr_info("unknown protocol %u\n", iph->protocol);
sport = dport = 0;
}
@@ -262,7 +262,7 @@ clusterip_hashfn(const struct sk_buff *skb,
hashval = 0;
/* This cannot happen, unless the check function wasn't called
* at rule load time */
- printk("CLUSTERIP: unknown mode `%u'\n", config->hash_mode);
+ pr_info("unknown mode %u\n", config->hash_mode);
BUG();
break;
}
@@ -282,7 +282,7 @@ clusterip_responsible(const struct clusterip_config *config, u_int32_t hash)
***********************************************************************/
static unsigned int
-clusterip_tg(struct sk_buff *skb, const struct xt_target_param *par)
+clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ipt_clusterip_tgt_info *cipinfo = par->targinfo;
struct nf_conn *ct;
@@ -295,7 +295,7 @@ clusterip_tg(struct sk_buff *skb, const struct xt_target_param *par)
ct = nf_ct_get(skb, &ctinfo);
if (ct == NULL) {
- printk(KERN_ERR "CLUSTERIP: no conntrack!\n");
+ pr_info("no conntrack!\n");
/* FIXME: need to drop invalid ones, since replies
* to outgoing connections of other nodes will be
* marked as INVALID */
@@ -348,25 +348,24 @@ clusterip_tg(struct sk_buff *skb, const struct xt_target_param *par)
return XT_CONTINUE;
}
-static bool clusterip_tg_check(const struct xt_tgchk_param *par)
+static int clusterip_tg_check(const struct xt_tgchk_param *par)
{
struct ipt_clusterip_tgt_info *cipinfo = par->targinfo;
const struct ipt_entry *e = par->entryinfo;
-
struct clusterip_config *config;
+ int ret;
if (cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP &&
cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP_SPT &&
cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP_SPT_DPT) {
- printk(KERN_WARNING "CLUSTERIP: unknown mode `%u'\n",
- cipinfo->hash_mode);
- return false;
+ pr_info("unknown mode %u\n", cipinfo->hash_mode);
+ return -EINVAL;
}
if (e->ip.dmsk.s_addr != htonl(0xffffffff) ||
e->ip.dst.s_addr == 0) {
- printk(KERN_ERR "CLUSTERIP: Please specify destination IP\n");
- return false;
+ pr_info("Please specify destination IP\n");
+ return -EINVAL;
}
/* FIXME: further sanity checks */
@@ -374,41 +373,41 @@ static bool clusterip_tg_check(const struct xt_tgchk_param *par)
config = clusterip_config_find_get(e->ip.dst.s_addr, 1);
if (!config) {
if (!(cipinfo->flags & CLUSTERIP_FLAG_NEW)) {
- printk(KERN_WARNING "CLUSTERIP: no config found for %pI4, need 'new'\n", &e->ip.dst.s_addr);
- return false;
+ pr_info("no config found for %pI4, need 'new'\n",
+ &e->ip.dst.s_addr);
+ return -EINVAL;
} else {
struct net_device *dev;
if (e->ip.iniface[0] == '\0') {
- printk(KERN_WARNING "CLUSTERIP: Please specify an interface name\n");
- return false;
+ pr_info("Please specify an interface name\n");
+ return -EINVAL;
}
dev = dev_get_by_name(&init_net, e->ip.iniface);
if (!dev) {
- printk(KERN_WARNING "CLUSTERIP: no such interface %s\n", e->ip.iniface);
- return false;
+ pr_info("no such interface %s\n",
+ e->ip.iniface);
+ return -ENOENT;
}
config = clusterip_config_init(cipinfo,
e->ip.dst.s_addr, dev);
if (!config) {
- printk(KERN_WARNING "CLUSTERIP: cannot allocate config\n");
+ pr_info("cannot allocate config\n");
dev_put(dev);
- return false;
+ return -ENOMEM;
}
- dev_mc_add(config->dev,config->clustermac, ETH_ALEN, 0);
+ dev_mc_add(config->dev, config->clustermac);
}
}
cipinfo->config = config;
- if (nf_ct_l3proto_try_module_get(par->target->family) < 0) {
- printk(KERN_WARNING "can't load conntrack support for "
- "proto=%u\n", par->target->family);
- return false;
- }
-
- return true;
+ ret = nf_ct_l3proto_try_module_get(par->family);
+ if (ret < 0)
+ pr_info("cannot load conntrack support for proto=%u\n",
+ par->family);
+ return ret;
}
/* drop reference count of cluster config when rule is deleted */
@@ -422,7 +421,7 @@ static void clusterip_tg_destroy(const struct xt_tgdtor_param *par)
clusterip_config_put(cipinfo->config);
- nf_ct_l3proto_module_put(par->target->family);
+ nf_ct_l3proto_module_put(par->family);
}
#ifdef CONFIG_COMPAT
@@ -479,8 +478,8 @@ static void arp_print(struct arp_payload *payload)
}
hbuffer[--k]='\0';
- printk("src %pI4@%s, dst %pI4\n",
- &payload->src_ip, hbuffer, &payload->dst_ip);
+ pr_debug("src %pI4@%s, dst %pI4\n",
+ &payload->src_ip, hbuffer, &payload->dst_ip);
}
#endif
@@ -519,7 +518,7 @@ arp_mangle(unsigned int hook,
* this wouldn't work, since we didn't subscribe the mcast group on
* other interfaces */
if (c->dev != out) {
- pr_debug("CLUSTERIP: not mangling arp reply on different "
+ pr_debug("not mangling arp reply on different "
"interface: cip'%s'-skb'%s'\n",
c->dev->name, out->name);
clusterip_config_put(c);
@@ -530,7 +529,7 @@ arp_mangle(unsigned int hook,
memcpy(payload->src_hw, c->clustermac, arp->ar_hln);
#ifdef DEBUG
- pr_debug(KERN_DEBUG "CLUSTERIP mangled arp reply: ");
+ pr_debug("mangled arp reply: ");
arp_print(payload);
#endif
@@ -601,7 +600,8 @@ static void *clusterip_seq_next(struct seq_file *s, void *v, loff_t *pos)
static void clusterip_seq_stop(struct seq_file *s, void *v)
{
- kfree(v);
+ if (!IS_ERR(v))
+ kfree(v);
}
static int clusterip_seq_show(struct seq_file *s, void *v)
@@ -706,13 +706,13 @@ static int __init clusterip_tg_init(void)
#ifdef CONFIG_PROC_FS
clusterip_procdir = proc_mkdir("ipt_CLUSTERIP", init_net.proc_net);
if (!clusterip_procdir) {
- printk(KERN_ERR "CLUSTERIP: Unable to proc dir entry\n");
+ pr_err("Unable to proc dir entry\n");
ret = -ENOMEM;
goto cleanup_hook;
}
#endif /* CONFIG_PROC_FS */
- printk(KERN_NOTICE "ClusterIP Version %s loaded successfully\n",
+ pr_info("ClusterIP Version %s loaded successfully\n",
CLUSTERIP_VERSION);
return 0;
@@ -727,8 +727,7 @@ cleanup_target:
static void __exit clusterip_tg_exit(void)
{
- printk(KERN_NOTICE "ClusterIP Version %s unloading\n",
- CLUSTERIP_VERSION);
+ pr_info("ClusterIP Version %s unloading\n", CLUSTERIP_VERSION);
#ifdef CONFIG_PROC_FS
remove_proc_entry(clusterip_procdir->name, clusterip_procdir->parent);
#endif
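The printk-to-pr_* conversions in this file, and in the ipt_* modules that follow, lean on the pr_fmt() define each hunk adds at the top of the source file. Roughly how it behaves, assuming the object is built as ipt_CLUSTERIP:

/* must be defined before the first include that pulls in printk */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>

static void demo(void)
{
	/* expands to printk(KERN_INFO "ipt_CLUSTERIP" ": " "unknown mode %u\n", 3),
	 * which is why the hand-written "CLUSTERIP: " prefixes can go */
	pr_info("unknown mode %u\n", 3);
}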
diff --git a/net/ipv4/netfilter/ipt_ECN.c b/net/ipv4/netfilter/ipt_ECN.c
index ea5cea2415c1..4bf3dc49ad1e 100644
--- a/net/ipv4/netfilter/ipt_ECN.c
+++ b/net/ipv4/netfilter/ipt_ECN.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/module.h>
#include <linux/skbuff.h>
@@ -77,7 +77,7 @@ set_ect_tcp(struct sk_buff *skb, const struct ipt_ECN_info *einfo)
}
static unsigned int
-ecn_tg(struct sk_buff *skb, const struct xt_target_param *par)
+ecn_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ipt_ECN_info *einfo = par->targinfo;
@@ -93,28 +93,25 @@ ecn_tg(struct sk_buff *skb, const struct xt_target_param *par)
return XT_CONTINUE;
}
-static bool ecn_tg_check(const struct xt_tgchk_param *par)
+static int ecn_tg_check(const struct xt_tgchk_param *par)
{
const struct ipt_ECN_info *einfo = par->targinfo;
const struct ipt_entry *e = par->entryinfo;
if (einfo->operation & IPT_ECN_OP_MASK) {
- printk(KERN_WARNING "ECN: unsupported ECN operation %x\n",
- einfo->operation);
- return false;
+ pr_info("unsupported ECN operation %x\n", einfo->operation);
+ return -EINVAL;
}
if (einfo->ip_ect & ~IPT_ECN_IP_MASK) {
- printk(KERN_WARNING "ECN: new ECT codepoint %x out of mask\n",
- einfo->ip_ect);
- return false;
+ pr_info("new ECT codepoint %x out of mask\n", einfo->ip_ect);
+ return -EINVAL;
}
if ((einfo->operation & (IPT_ECN_OP_SET_ECE|IPT_ECN_OP_SET_CWR)) &&
(e->ip.proto != IPPROTO_TCP || (e->ip.invflags & XT_INV_PROTO))) {
- printk(KERN_WARNING "ECN: cannot use TCP operations on a "
- "non-tcp rule\n");
- return false;
+ pr_info("cannot use TCP operations on a non-tcp rule\n");
+ return -EINVAL;
}
- return true;
+ return 0;
}
static struct xt_target ecn_tg_reg __read_mostly = {
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c
index ee128efa1c8d..5234f4f3499a 100644
--- a/net/ipv4/netfilter/ipt_LOG.c
+++ b/net/ipv4/netfilter/ipt_LOG.c
@@ -9,7 +9,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
@@ -367,7 +367,7 @@ static struct nf_loginfo default_loginfo = {
.type = NF_LOG_TYPE_LOG,
.u = {
.log = {
- .level = 0,
+ .level = 5,
.logflags = NF_LOG_MASK,
},
},
@@ -425,7 +425,7 @@ ipt_log_packet(u_int8_t pf,
}
static unsigned int
-log_tg(struct sk_buff *skb, const struct xt_target_param *par)
+log_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ipt_log_info *loginfo = par->targinfo;
struct nf_loginfo li;
@@ -439,20 +439,19 @@ log_tg(struct sk_buff *skb, const struct xt_target_param *par)
return XT_CONTINUE;
}
-static bool log_tg_check(const struct xt_tgchk_param *par)
+static int log_tg_check(const struct xt_tgchk_param *par)
{
const struct ipt_log_info *loginfo = par->targinfo;
if (loginfo->level >= 8) {
- pr_debug("LOG: level %u >= 8\n", loginfo->level);
- return false;
+ pr_debug("level %u >= 8\n", loginfo->level);
+ return -EINVAL;
}
if (loginfo->prefix[sizeof(loginfo->prefix)-1] != '\0') {
- pr_debug("LOG: prefix term %i\n",
- loginfo->prefix[sizeof(loginfo->prefix)-1]);
- return false;
+ pr_debug("prefix is not null-terminated\n");
+ return -EINVAL;
}
- return true;
+ return 0;
}
static struct xt_target log_tg_reg __read_mostly = {
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index 650b54042b01..d2ed9dc74ebc 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -8,7 +8,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
@@ -28,23 +28,23 @@ MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("Xtables: automatic-address SNAT");
/* FIXME: Multiple targets. --RR */
-static bool masquerade_tg_check(const struct xt_tgchk_param *par)
+static int masquerade_tg_check(const struct xt_tgchk_param *par)
{
const struct nf_nat_multi_range_compat *mr = par->targinfo;
if (mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) {
- pr_debug("masquerade_check: bad MAP_IPS.\n");
- return false;
+ pr_debug("bad MAP_IPS.\n");
+ return -EINVAL;
}
if (mr->rangesize != 1) {
- pr_debug("masquerade_check: bad rangesize %u\n", mr->rangesize);
- return false;
+ pr_debug("bad rangesize %u\n", mr->rangesize);
+ return -EINVAL;
}
- return true;
+ return 0;
}
static unsigned int
-masquerade_tg(struct sk_buff *skb, const struct xt_target_param *par)
+masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
struct nf_conn *ct;
struct nf_conn_nat *nat;
@@ -72,7 +72,7 @@ masquerade_tg(struct sk_buff *skb, const struct xt_target_param *par)
rt = skb_rtable(skb);
newsrc = inet_select_addr(par->out, rt->rt_gateway, RT_SCOPE_UNIVERSE);
if (!newsrc) {
- printk("MASQUERADE: %s ate my IP address\n", par->out->name);
+ pr_info("%s ate my IP address\n", par->out->name);
return NF_DROP;
}
diff --git a/net/ipv4/netfilter/ipt_NETMAP.c b/net/ipv4/netfilter/ipt_NETMAP.c
index 7c29582d4ec8..f43867d1697f 100644
--- a/net/ipv4/netfilter/ipt_NETMAP.c
+++ b/net/ipv4/netfilter/ipt_NETMAP.c
@@ -9,7 +9,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/ip.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -22,23 +22,23 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Svenning Soerensen <svenning@post5.tele.dk>");
MODULE_DESCRIPTION("Xtables: 1:1 NAT mapping of IPv4 subnets");
-static bool netmap_tg_check(const struct xt_tgchk_param *par)
+static int netmap_tg_check(const struct xt_tgchk_param *par)
{
const struct nf_nat_multi_range_compat *mr = par->targinfo;
if (!(mr->range[0].flags & IP_NAT_RANGE_MAP_IPS)) {
- pr_debug("NETMAP:check: bad MAP_IPS.\n");
- return false;
+ pr_debug("bad MAP_IPS.\n");
+ return -EINVAL;
}
if (mr->rangesize != 1) {
- pr_debug("NETMAP:check: bad rangesize %u.\n", mr->rangesize);
- return false;
+ pr_debug("bad rangesize %u.\n", mr->rangesize);
+ return -EINVAL;
}
- return true;
+ return 0;
}
static unsigned int
-netmap_tg(struct sk_buff *skb, const struct xt_target_param *par)
+netmap_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
diff --git a/net/ipv4/netfilter/ipt_REDIRECT.c b/net/ipv4/netfilter/ipt_REDIRECT.c
index 698e5e78685b..18a0656505a0 100644
--- a/net/ipv4/netfilter/ipt_REDIRECT.c
+++ b/net/ipv4/netfilter/ipt_REDIRECT.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/ip.h>
#include <linux/timer.h>
@@ -26,23 +26,23 @@ MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("Xtables: Connection redirection to localhost");
/* FIXME: Take multiple ranges --RR */
-static bool redirect_tg_check(const struct xt_tgchk_param *par)
+static int redirect_tg_check(const struct xt_tgchk_param *par)
{
const struct nf_nat_multi_range_compat *mr = par->targinfo;
if (mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) {
- pr_debug("redirect_check: bad MAP_IPS.\n");
- return false;
+ pr_debug("bad MAP_IPS.\n");
+ return -EINVAL;
}
if (mr->rangesize != 1) {
- pr_debug("redirect_check: bad rangesize %u.\n", mr->rangesize);
- return false;
+ pr_debug("bad rangesize %u.\n", mr->rangesize);
+ return -EINVAL;
}
- return true;
+ return 0;
}
static unsigned int
-redirect_tg(struct sk_buff *skb, const struct xt_target_param *par)
+redirect_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index a0e8bcf04159..f5f4a888e4ec 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -9,7 +9,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
@@ -136,13 +136,10 @@ static inline void send_unreach(struct sk_buff *skb_in, int code)
}
static unsigned int
-reject_tg(struct sk_buff *skb, const struct xt_target_param *par)
+reject_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ipt_reject_info *reject = par->targinfo;
- /* WARNING: This code causes reentry within iptables.
- This means that the iptables jump stack is now crap. We
- must return an absolute verdict. --RR */
switch (reject->with) {
case IPT_ICMP_NET_UNREACHABLE:
send_unreach(skb, ICMP_NET_UNREACH);
@@ -175,23 +172,23 @@ reject_tg(struct sk_buff *skb, const struct xt_target_param *par)
return NF_DROP;
}
-static bool reject_tg_check(const struct xt_tgchk_param *par)
+static int reject_tg_check(const struct xt_tgchk_param *par)
{
const struct ipt_reject_info *rejinfo = par->targinfo;
const struct ipt_entry *e = par->entryinfo;
if (rejinfo->with == IPT_ICMP_ECHOREPLY) {
- printk("ipt_REJECT: ECHOREPLY no longer supported.\n");
- return false;
+ pr_info("ECHOREPLY no longer supported.\n");
+ return -EINVAL;
} else if (rejinfo->with == IPT_TCP_RESET) {
/* Must specify that it's a TCP packet */
if (e->ip.proto != IPPROTO_TCP ||
(e->ip.invflags & XT_INV_PROTO)) {
- printk("ipt_REJECT: TCP_RESET invalid for non-tcp\n");
- return false;
+ pr_info("TCP_RESET invalid for non-tcp\n");
+ return -EINVAL;
}
}
- return true;
+ return 0;
}
static struct xt_target reject_tg_reg __read_mostly = {
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index 0dbe697f164f..446e0f467a17 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -29,7 +29,7 @@
* Specify, after how many hundredths of a second the queue should be
* flushed even if it is not full yet.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/socket.h>
@@ -57,8 +57,6 @@ MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NFLOG);
#define ULOG_NL_EVENT 111 /* Harald's favorite number */
#define ULOG_MAXNLGROUPS 32 /* number of nlgroups */
-#define PRINTR(format, args...) do { if (net_ratelimit()) printk(format , ## args); } while (0)
-
static unsigned int nlbufsiz = NLMSG_GOODSIZE;
module_param(nlbufsiz, uint, 0400);
MODULE_PARM_DESC(nlbufsiz, "netlink buffer size");
@@ -91,12 +89,12 @@ static void ulog_send(unsigned int nlgroupnum)
ulog_buff_t *ub = &ulog_buffers[nlgroupnum];
if (timer_pending(&ub->timer)) {
- pr_debug("ipt_ULOG: ulog_send: timer was pending, deleting\n");
+ pr_debug("ulog_send: timer was pending, deleting\n");
del_timer(&ub->timer);
}
if (!ub->skb) {
- pr_debug("ipt_ULOG: ulog_send: nothing to send\n");
+ pr_debug("ulog_send: nothing to send\n");
return;
}
@@ -105,7 +103,7 @@ static void ulog_send(unsigned int nlgroupnum)
ub->lastnlh->nlmsg_type = NLMSG_DONE;
NETLINK_CB(ub->skb).dst_group = nlgroupnum + 1;
- pr_debug("ipt_ULOG: throwing %d packets to netlink group %u\n",
+ pr_debug("throwing %d packets to netlink group %u\n",
ub->qlen, nlgroupnum + 1);
netlink_broadcast(nflognl, ub->skb, 0, nlgroupnum + 1, GFP_ATOMIC);
@@ -118,7 +116,7 @@ static void ulog_send(unsigned int nlgroupnum)
/* timer function to flush queue in flushtimeout time */
static void ulog_timer(unsigned long data)
{
- pr_debug("ipt_ULOG: timer function called, calling ulog_send\n");
+ pr_debug("timer function called, calling ulog_send\n");
/* lock to protect against somebody modifying our structure
* from ipt_ulog_target at the same time */
@@ -139,7 +137,7 @@ static struct sk_buff *ulog_alloc_skb(unsigned int size)
n = max(size, nlbufsiz);
skb = alloc_skb(n, GFP_ATOMIC);
if (!skb) {
- PRINTR("ipt_ULOG: can't alloc whole buffer %ub!\n", n);
+ pr_debug("cannot alloc whole buffer %ub!\n", n);
if (n > size) {
/* try to allocate only as much as we need for
@@ -147,8 +145,7 @@ static struct sk_buff *ulog_alloc_skb(unsigned int size)
skb = alloc_skb(size, GFP_ATOMIC);
if (!skb)
- PRINTR("ipt_ULOG: can't even allocate %ub\n",
- size);
+ pr_debug("cannot even allocate %ub\n", size);
}
}
@@ -199,8 +196,7 @@ static void ipt_ulog_packet(unsigned int hooknum,
goto alloc_failure;
}
- pr_debug("ipt_ULOG: qlen %d, qthreshold %Zu\n", ub->qlen,
- loginfo->qthreshold);
+ pr_debug("qlen %d, qthreshold %Zu\n", ub->qlen, loginfo->qthreshold);
/* NLMSG_PUT contains a hidden goto nlmsg_failure !!! */
nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, ULOG_NL_EVENT,
@@ -273,16 +269,14 @@ static void ipt_ulog_packet(unsigned int hooknum,
return;
nlmsg_failure:
- PRINTR("ipt_ULOG: error during NLMSG_PUT\n");
-
+ pr_debug("error during NLMSG_PUT\n");
alloc_failure:
- PRINTR("ipt_ULOG: Error building netlink message\n");
-
+ pr_debug("Error building netlink message\n");
spin_unlock_bh(&ulog_lock);
}
static unsigned int
-ulog_tg(struct sk_buff *skb, const struct xt_target_param *par)
+ulog_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
ipt_ulog_packet(par->hooknum, skb, par->in, par->out,
par->targinfo, NULL);
@@ -314,21 +308,20 @@ static void ipt_logfn(u_int8_t pf,
ipt_ulog_packet(hooknum, skb, in, out, &loginfo, prefix);
}
-static bool ulog_tg_check(const struct xt_tgchk_param *par)
+static int ulog_tg_check(const struct xt_tgchk_param *par)
{
const struct ipt_ulog_info *loginfo = par->targinfo;
if (loginfo->prefix[sizeof(loginfo->prefix) - 1] != '\0') {
- pr_debug("ipt_ULOG: prefix term %i\n",
- loginfo->prefix[sizeof(loginfo->prefix) - 1]);
- return false;
+ pr_debug("prefix not null-terminated\n");
+ return -EINVAL;
}
if (loginfo->qthreshold > ULOG_MAX_QLEN) {
- pr_debug("ipt_ULOG: queue threshold %Zu > MAX_QLEN\n",
+ pr_debug("queue threshold %Zu > MAX_QLEN\n",
loginfo->qthreshold);
- return false;
+ return -EINVAL;
}
- return true;
+ return 0;
}
#ifdef CONFIG_COMPAT
@@ -390,10 +383,10 @@ static int __init ulog_tg_init(void)
{
int ret, i;
- pr_debug("ipt_ULOG: init module\n");
+ pr_debug("init module\n");
if (nlbufsiz > 128*1024) {
- printk("Netlink buffer has to be <= 128kB\n");
+ pr_warning("Netlink buffer has to be <= 128kB\n");
return -EINVAL;
}
@@ -423,7 +416,7 @@ static void __exit ulog_tg_exit(void)
ulog_buff_t *ub;
int i;
- pr_debug("ipt_ULOG: cleanup_module\n");
+ pr_debug("cleanup_module\n");
if (nflog)
nf_log_unregister(&ipt_ulog_logger);
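One behavioural note on the ipt_ULOG hunks: the deleted PRINTR macro wrapped an unconditional printk in net_ratelimit(), while pr_debug() emits nothing unless DEBUG or dynamic debug is enabled, so the allocation-failure messages trade runtime rate-limiting for compile-time silence. An illustrative side-by-side, not taken from the patch:

#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/skbuff.h>

static struct sk_buff *alloc_report(unsigned int n)
{
	struct sk_buff *skb = alloc_skb(n, GFP_ATOMIC);

	if (skb == NULL) {
		/* old PRINTR style: always built in, rate-limited at runtime */
		if (net_ratelimit())
			printk(KERN_WARNING "ulog: cannot alloc %ub\n", n);
		/* new style: compiled out unless DEBUG/dynamic debug is on */
		pr_debug("cannot alloc whole buffer %ub!\n", n);
	}
	return skb;
}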
diff --git a/net/ipv4/netfilter/ipt_addrtype.c b/net/ipv4/netfilter/ipt_addrtype.c
index 3b216be3bc9f..db8bff0fb86d 100644
--- a/net/ipv4/netfilter/ipt_addrtype.c
+++ b/net/ipv4/netfilter/ipt_addrtype.c
@@ -8,7 +8,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
@@ -30,7 +30,7 @@ static inline bool match_type(struct net *net, const struct net_device *dev,
}
static bool
-addrtype_mt_v0(const struct sk_buff *skb, const struct xt_match_param *par)
+addrtype_mt_v0(const struct sk_buff *skb, struct xt_action_param *par)
{
struct net *net = dev_net(par->in ? par->in : par->out);
const struct ipt_addrtype_info *info = par->matchinfo;
@@ -48,7 +48,7 @@ addrtype_mt_v0(const struct sk_buff *skb, const struct xt_match_param *par)
}
static bool
-addrtype_mt_v1(const struct sk_buff *skb, const struct xt_match_param *par)
+addrtype_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
{
struct net *net = dev_net(par->in ? par->in : par->out);
const struct ipt_addrtype_info_v1 *info = par->matchinfo;
@@ -70,34 +70,34 @@ addrtype_mt_v1(const struct sk_buff *skb, const struct xt_match_param *par)
return ret;
}
-static bool addrtype_mt_checkentry_v1(const struct xt_mtchk_param *par)
+static int addrtype_mt_checkentry_v1(const struct xt_mtchk_param *par)
{
struct ipt_addrtype_info_v1 *info = par->matchinfo;
if (info->flags & IPT_ADDRTYPE_LIMIT_IFACE_IN &&
info->flags & IPT_ADDRTYPE_LIMIT_IFACE_OUT) {
- printk(KERN_ERR "ipt_addrtype: both incoming and outgoing "
- "interface limitation cannot be selected\n");
- return false;
+ pr_info("both incoming and outgoing "
+ "interface limitation cannot be selected\n");
+ return -EINVAL;
}
if (par->hook_mask & ((1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_IN)) &&
info->flags & IPT_ADDRTYPE_LIMIT_IFACE_OUT) {
- printk(KERN_ERR "ipt_addrtype: output interface limitation "
- "not valid in PRE_ROUTING and INPUT\n");
- return false;
+ pr_info("output interface limitation "
+ "not valid in PREROUTING and INPUT\n");
+ return -EINVAL;
}
if (par->hook_mask & ((1 << NF_INET_POST_ROUTING) |
(1 << NF_INET_LOCAL_OUT)) &&
info->flags & IPT_ADDRTYPE_LIMIT_IFACE_IN) {
- printk(KERN_ERR "ipt_addrtype: input interface limitation "
- "not valid in POST_ROUTING and OUTPUT\n");
- return false;
+ pr_info("input interface limitation "
+ "not valid in POSTROUTING and OUTPUT\n");
+ return -EINVAL;
}
- return true;
+ return 0;
}
static struct xt_match addrtype_mt_reg[] __read_mostly = {
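The checkentry above also shows the usual hook-restriction pattern: par->hook_mask carries one bit per netfilter hook the rule is being attached to, which lets a match refuse placements where the data it needs does not exist yet. A reduced sketch (the helper name is hypothetical):

#include <linux/netfilter.h>

/* true when the rule sits where no output device is known yet,
 * which is why IPT_ADDRTYPE_LIMIT_IFACE_OUT is rejected there */
static bool attached_before_routing(unsigned int hook_mask)
{
	return hook_mask & ((1 << NF_INET_PRE_ROUTING) |
			    (1 << NF_INET_LOCAL_IN));
}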
diff --git a/net/ipv4/netfilter/ipt_ah.c b/net/ipv4/netfilter/ipt_ah.c
index 0104c0b399de..14a2aa8b8a14 100644
--- a/net/ipv4/netfilter/ipt_ah.c
+++ b/net/ipv4/netfilter/ipt_ah.c
@@ -5,7 +5,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/module.h>
#include <linux/skbuff.h>
@@ -18,25 +18,19 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yon Uriarte <yon@astaro.de>");
MODULE_DESCRIPTION("Xtables: IPv4 IPsec-AH SPI match");
-#ifdef DEBUG_CONNTRACK
-#define duprintf(format, args...) printk(format , ## args)
-#else
-#define duprintf(format, args...)
-#endif
-
/* Returns 1 if the spi is matched by the range, 0 otherwise */
static inline bool
spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, bool invert)
{
bool r;
- duprintf("ah spi_match:%c 0x%x <= 0x%x <= 0x%x",invert? '!':' ',
- min,spi,max);
+ pr_debug("spi_match:%c 0x%x <= 0x%x <= 0x%x\n",
+ invert ? '!' : ' ', min, spi, max);
r=(spi >= min && spi <= max) ^ invert;
- duprintf(" result %s\n",r? "PASS" : "FAILED");
+ pr_debug(" result %s\n", r ? "PASS" : "FAILED");
return r;
}
-static bool ah_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+static bool ah_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
struct ip_auth_hdr _ahdr;
const struct ip_auth_hdr *ah;
@@ -51,8 +45,8 @@ static bool ah_mt(const struct sk_buff *skb, const struct xt_match_param *par)
/* We've been asked to examine this packet, and we
* can't. Hence, no choice but to drop.
*/
- duprintf("Dropping evil AH tinygram.\n");
- *par->hotdrop = true;
+ pr_debug("Dropping evil AH tinygram.\n");
+ par->hotdrop = true;
return 0;
}
@@ -61,16 +55,16 @@ static bool ah_mt(const struct sk_buff *skb, const struct xt_match_param *par)
!!(ahinfo->invflags & IPT_AH_INV_SPI));
}
-static bool ah_mt_check(const struct xt_mtchk_param *par)
+static int ah_mt_check(const struct xt_mtchk_param *par)
{
const struct ipt_ah *ahinfo = par->matchinfo;
/* Must specify no unknown invflags */
if (ahinfo->invflags & ~IPT_AH_INV_MASK) {
- duprintf("ipt_ah: unknown flags %X\n", ahinfo->invflags);
- return false;
+ pr_debug("unknown flags %X\n", ahinfo->invflags);
+ return -EINVAL;
}
- return true;
+ return 0;
}
static struct xt_match ah_mt_reg __read_mostly = {
diff --git a/net/ipv4/netfilter/ipt_ecn.c b/net/ipv4/netfilter/ipt_ecn.c
index 2a1e56b71908..af6e9c778345 100644
--- a/net/ipv4/netfilter/ipt_ecn.c
+++ b/net/ipv4/netfilter/ipt_ecn.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/ip.h>
#include <net/ip.h>
@@ -67,7 +67,7 @@ static inline bool match_tcp(const struct sk_buff *skb,
return true;
}
-static bool ecn_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+static bool ecn_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ipt_ecn_info *info = par->matchinfo;
@@ -78,32 +78,31 @@ static bool ecn_mt(const struct sk_buff *skb, const struct xt_match_param *par)
if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR)) {
if (ip_hdr(skb)->protocol != IPPROTO_TCP)
return false;
- if (!match_tcp(skb, info, par->hotdrop))
+ if (!match_tcp(skb, info, &par->hotdrop))
return false;
}
return true;
}
-static bool ecn_mt_check(const struct xt_mtchk_param *par)
+static int ecn_mt_check(const struct xt_mtchk_param *par)
{
const struct ipt_ecn_info *info = par->matchinfo;
const struct ipt_ip *ip = par->entryinfo;
if (info->operation & IPT_ECN_OP_MATCH_MASK)
- return false;
+ return -EINVAL;
if (info->invert & IPT_ECN_OP_MATCH_MASK)
- return false;
+ return -EINVAL;
if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR) &&
ip->proto != IPPROTO_TCP) {
- printk(KERN_WARNING "ipt_ecn: can't match TCP bits in rule for"
- " non-tcp packets\n");
- return false;
+ pr_info("cannot match TCP bits in rule for non-tcp packets\n");
+ return -EINVAL;
}
- return true;
+ return 0;
}
static struct xt_match ecn_mt_reg __read_mostly = {
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index 55392466daa4..c37641e819f2 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -89,7 +89,7 @@ static int __init iptable_filter_init(void)
int ret;
if (forward < 0 || forward > NF_MAX_VERDICT) {
- printk("iptables forward must be 0 or 1\n");
+ pr_err("iptables forward must be 0 or 1\n");
return -EINVAL;
}
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 2bb1f87051c4..5a03c02af999 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -382,32 +382,32 @@ static int __init nf_conntrack_l3proto_ipv4_init(void)
ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_tcp4);
if (ret < 0) {
- printk("nf_conntrack_ipv4: can't register tcp.\n");
+ pr_err("nf_conntrack_ipv4: can't register tcp.\n");
goto cleanup_sockopt;
}
ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_udp4);
if (ret < 0) {
- printk("nf_conntrack_ipv4: can't register udp.\n");
+ pr_err("nf_conntrack_ipv4: can't register udp.\n");
goto cleanup_tcp;
}
ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_icmp);
if (ret < 0) {
- printk("nf_conntrack_ipv4: can't register icmp.\n");
+ pr_err("nf_conntrack_ipv4: can't register icmp.\n");
goto cleanup_udp;
}
ret = nf_conntrack_l3proto_register(&nf_conntrack_l3proto_ipv4);
if (ret < 0) {
- printk("nf_conntrack_ipv4: can't register ipv4\n");
+ pr_err("nf_conntrack_ipv4: can't register ipv4\n");
goto cleanup_icmp;
}
ret = nf_register_hooks(ipv4_conntrack_ops,
ARRAY_SIZE(ipv4_conntrack_ops));
if (ret < 0) {
- printk("nf_conntrack_ipv4: can't register hooks.\n");
+ pr_err("nf_conntrack_ipv4: can't register hooks.\n");
goto cleanup_ipv4;
}
#if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
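The init path above follows the standard goto-unwind idiom: each successful registration gains a cleanup label, and a failure jumps to the label that undoes everything registered so far, in reverse order. A generic sketch with placeholder names (register_a/register_b are not kernel symbols):

static int __init demo_init(void)
{
	int ret;

	ret = register_a();
	if (ret < 0)
		goto out;		/* nothing to undo yet */

	ret = register_b();
	if (ret < 0)
		goto cleanup_a;		/* undo only what succeeded */

	return 0;

cleanup_a:
	unregister_a();
out:
	return ret;
}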
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 2fb7b76da94f..244f7cb08d68 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -336,12 +336,12 @@ static int ct_cpu_seq_show(struct seq_file *seq, void *v)
const struct ip_conntrack_stat *st = v;
if (v == SEQ_START_TOKEN) {
- seq_printf(seq, "entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete\n");
+ seq_printf(seq, "entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete search_restart\n");
return 0;
}
seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x "
- "%08x %08x %08x %08x %08x %08x %08x %08x \n",
+ "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
nr_conntracks,
st->searched,
st->found,
@@ -358,7 +358,8 @@ static int ct_cpu_seq_show(struct seq_file *seq, void *v)
st->expect_new,
st->expect_create,
- st->expect_delete
+ st->expect_delete,
+ st->search_restart
);
return 0;
}
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index 7e8e6fc75413..5045196d853c 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -10,7 +10,6 @@
*/
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/tcp.h>
#include <net/tcp.h>
@@ -44,7 +43,7 @@ static int set_addr(struct sk_buff *skb,
addroff, sizeof(buf),
(char *) &buf, sizeof(buf))) {
if (net_ratelimit())
- printk("nf_nat_h323: nf_nat_mangle_tcp_packet"
+ pr_notice("nf_nat_h323: nf_nat_mangle_tcp_packet"
" error\n");
return -1;
}
@@ -60,7 +59,7 @@ static int set_addr(struct sk_buff *skb,
addroff, sizeof(buf),
(char *) &buf, sizeof(buf))) {
if (net_ratelimit())
- printk("nf_nat_h323: nf_nat_mangle_udp_packet"
+ pr_notice("nf_nat_h323: nf_nat_mangle_udp_packet"
" error\n");
return -1;
}
@@ -216,7 +215,7 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
/* Run out of expectations */
if (i >= H323_RTP_CHANNEL_MAX) {
if (net_ratelimit())
- printk("nf_nat_h323: out of expectations\n");
+ pr_notice("nf_nat_h323: out of expectations\n");
return 0;
}
@@ -235,7 +234,7 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
if (nated_port == 0) { /* No port available */
if (net_ratelimit())
- printk("nf_nat_h323: out of RTP ports\n");
+ pr_notice("nf_nat_h323: out of RTP ports\n");
return 0;
}
@@ -292,7 +291,7 @@ static int nat_t120(struct sk_buff *skb, struct nf_conn *ct,
if (nated_port == 0) { /* No port available */
if (net_ratelimit())
- printk("nf_nat_h323: out of TCP ports\n");
+ pr_notice("nf_nat_h323: out of TCP ports\n");
return 0;
}
@@ -342,7 +341,7 @@ static int nat_h245(struct sk_buff *skb, struct nf_conn *ct,
if (nated_port == 0) { /* No port available */
if (net_ratelimit())
- printk("nf_nat_q931: out of TCP ports\n");
+ pr_notice("nf_nat_q931: out of TCP ports\n");
return 0;
}
@@ -426,7 +425,7 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct,
if (nated_port == 0) { /* No port available */
if (net_ratelimit())
- printk("nf_nat_ras: out of TCP ports\n");
+ pr_notice("nf_nat_ras: out of TCP ports\n");
return 0;
}
@@ -508,7 +507,7 @@ static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct,
if (nated_port == 0) { /* No port available */
if (net_ratelimit())
- printk("nf_nat_q931: out of TCP ports\n");
+ pr_notice("nf_nat_q931: out of TCP ports\n");
return 0;
}
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
index 26de2c1f7fab..98ed78281aee 100644
--- a/net/ipv4/netfilter/nf_nat_rule.c
+++ b/net/ipv4/netfilter/nf_nat_rule.c
@@ -7,6 +7,7 @@
*/
/* Everything about the rules for NAT. */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/ip.h>
#include <linux/netfilter.h>
@@ -38,7 +39,7 @@ static const struct xt_table nat_table = {
/* Source NAT */
static unsigned int
-ipt_snat_target(struct sk_buff *skb, const struct xt_target_param *par)
+ipt_snat_target(struct sk_buff *skb, const struct xt_action_param *par)
{
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
@@ -57,7 +58,7 @@ ipt_snat_target(struct sk_buff *skb, const struct xt_target_param *par)
}
static unsigned int
-ipt_dnat_target(struct sk_buff *skb, const struct xt_target_param *par)
+ipt_dnat_target(struct sk_buff *skb, const struct xt_action_param *par)
{
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
@@ -74,28 +75,28 @@ ipt_dnat_target(struct sk_buff *skb, const struct xt_target_param *par)
return nf_nat_setup_info(ct, &mr->range[0], IP_NAT_MANIP_DST);
}
-static bool ipt_snat_checkentry(const struct xt_tgchk_param *par)
+static int ipt_snat_checkentry(const struct xt_tgchk_param *par)
{
const struct nf_nat_multi_range_compat *mr = par->targinfo;
/* Must be a valid range */
if (mr->rangesize != 1) {
- printk("SNAT: multiple ranges no longer supported\n");
- return false;
+ pr_info("SNAT: multiple ranges no longer supported\n");
+ return -EINVAL;
}
- return true;
+ return 0;
}
-static bool ipt_dnat_checkentry(const struct xt_tgchk_param *par)
+static int ipt_dnat_checkentry(const struct xt_tgchk_param *par)
{
const struct nf_nat_multi_range_compat *mr = par->targinfo;
/* Must be a valid range */
if (mr->rangesize != 1) {
- printk("DNAT: multiple ranges no longer supported\n");
- return false;
+ pr_info("DNAT: multiple ranges no longer supported\n");
+ return -EINVAL;
}
- return true;
+ return 0;
}
unsigned int
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index 4d85b6e55f29..1679e2c0963d 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -401,7 +401,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
*octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
if (*octets == NULL) {
if (net_ratelimit())
- printk("OOM in bsalg (%d)\n", __LINE__);
+ pr_notice("OOM in bsalg (%d)\n", __LINE__);
return 0;
}
@@ -452,7 +452,7 @@ static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
*oid = kmalloc(size * sizeof(unsigned long), GFP_ATOMIC);
if (*oid == NULL) {
if (net_ratelimit())
- printk("OOM in bsalg (%d)\n", __LINE__);
+ pr_notice("OOM in bsalg (%d)\n", __LINE__);
return 0;
}
@@ -729,7 +729,7 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
if (*obj == NULL) {
kfree(id);
if (net_ratelimit())
- printk("OOM in bsalg (%d)\n", __LINE__);
+ pr_notice("OOM in bsalg (%d)\n", __LINE__);
return 0;
}
(*obj)->syntax.l[0] = l;
@@ -746,7 +746,7 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
kfree(p);
kfree(id);
if (net_ratelimit())
- printk("OOM in bsalg (%d)\n", __LINE__);
+ pr_notice("OOM in bsalg (%d)\n", __LINE__);
return 0;
}
memcpy((*obj)->syntax.c, p, len);
@@ -761,7 +761,7 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
if (*obj == NULL) {
kfree(id);
if (net_ratelimit())
- printk("OOM in bsalg (%d)\n", __LINE__);
+ pr_notice("OOM in bsalg (%d)\n", __LINE__);
return 0;
}
if (!asn1_null_decode(ctx, end)) {
@@ -782,7 +782,7 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
kfree(lp);
kfree(id);
if (net_ratelimit())
- printk("OOM in bsalg (%d)\n", __LINE__);
+ pr_notice("OOM in bsalg (%d)\n", __LINE__);
return 0;
}
memcpy((*obj)->syntax.ul, lp, len);
@@ -803,7 +803,7 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
kfree(p);
kfree(id);
if (net_ratelimit())
- printk("OOM in bsalg (%d)\n", __LINE__);
+ pr_notice("OOM in bsalg (%d)\n", __LINE__);
return 0;
}
memcpy((*obj)->syntax.uc, p, len);
@@ -821,7 +821,7 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
if (*obj == NULL) {
kfree(id);
if (net_ratelimit())
- printk("OOM in bsalg (%d)\n", __LINE__);
+ pr_notice("OOM in bsalg (%d)\n", __LINE__);
return 0;
}
(*obj)->syntax.ul[0] = ul;
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c
index c39c9cf6bee6..beb25819c9c9 100644
--- a/net/ipv4/netfilter/nf_nat_standalone.c
+++ b/net/ipv4/netfilter/nf_nat_standalone.c
@@ -138,9 +138,8 @@ nf_nat_fn(unsigned int hooknum,
ret = nf_nat_rule_find(skb, hooknum, in, out,
ct);
- if (ret != NF_ACCEPT) {
+ if (ret != NF_ACCEPT)
return ret;
- }
} else
pr_debug("Already setup manip %s for ct %p\n",
maniptype == IP_NAT_MANIP_SRC ? "SRC" : "DST",
@@ -294,12 +293,12 @@ static int __init nf_nat_standalone_init(void)
#endif
ret = nf_nat_rule_init();
if (ret < 0) {
- printk("nf_nat_init: can't setup rules.\n");
+ pr_err("nf_nat_init: can't setup rules.\n");
goto cleanup_decode_session;
}
ret = nf_register_hooks(nf_nat_ops, ARRAY_SIZE(nf_nat_ops));
if (ret < 0) {
- printk("nf_nat_init: can't register hooks.\n");
+ pr_err("nf_nat_init: can't register hooks.\n");
goto cleanup_rule_init;
}
return ret;
diff --git a/net/ipv4/netfilter/nf_nat_tftp.c b/net/ipv4/netfilter/nf_nat_tftp.c
index b096e81500ae..7274a43c7a12 100644
--- a/net/ipv4/netfilter/nf_nat_tftp.c
+++ b/net/ipv4/netfilter/nf_nat_tftp.c
@@ -6,7 +6,6 @@
*/
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/udp.h>
#include <net/netfilter/nf_nat_helper.h>
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 4f1f337f4337..3dc9914c1dce 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -251,6 +251,7 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TCPSackShiftFallback", LINUX_MIB_SACKSHIFTFALLBACK),
SNMP_MIB_ITEM("TCPBacklogDrop", LINUX_MIB_TCPBACKLOGDROP),
SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP),
+ SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index cc6f097fbd5f..2c7a1639388a 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -290,7 +290,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
{
/* Charge it to the socket. */
- if (sock_queue_rcv_skb(sk, skb) < 0) {
+ if (ip_queue_rcv_skb(sk, skb) < 0) {
kfree_skb(skb);
return NET_RX_DROP;
}
@@ -381,8 +381,8 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
icmp_out_count(net, ((struct icmphdr *)
skb_transport_header(skb))->type);
- err = NF_HOOK(PF_INET, NF_INET_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
- dst_output);
+ err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
+ rt->u.dst.dev, dst_output);
if (err > 0)
err = net_xmit_errno(err);
if (err)
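Two independent changes in raw.c: NF_HOOK() now takes an NFPROTO_* constant rather than a PF_* one (numerically identical for IPv4, but the netfilter core is keyed on nfproto values), and receive charging goes through ip_queue_rcv_skb(). A simplified sketch of that helper's expected shape, modeled on the version this series adds in net/ipv4/ip_sockglue.c, where IP_CMSG_PKTINFO is a file-local flag:

int ip_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	/* the dst is only read on the receive path to fill in IP_PKTINFO,
	 * so drop it before queueing unless that cmsg was requested; this
	 * keeps refcounted dsts off socket queues and pairs with the
	 * noref route lookups introduced in route.c below */
	if (!(inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO))
		skb_dst_drop(skb);
	return sock_queue_rcv_skb(sk, skb);
}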
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index cb562fdd9b9a..560acc677ce4 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -129,7 +129,6 @@ static int ip_rt_gc_elasticity __read_mostly = 8;
static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly = 256;
-static int ip_rt_secret_interval __read_mostly = 10 * 60 * HZ;
static int rt_chain_length_max __read_mostly = 20;
static struct delayed_work expires_work;
@@ -258,10 +257,9 @@ static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
(__raw_get_cpu_var(rt_cache_stat).field++)
static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
- int genid)
+ int genid)
{
- return jhash_3words((__force u32)(__be32)(daddr),
- (__force u32)(__be32)(saddr),
+ return jhash_3words((__force u32)daddr, (__force u32)saddr,
idx, genid)
& rt_hash_mask;
}
@@ -378,12 +376,13 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
struct rtable *r = v;
int len;
- seq_printf(seq, "%s\t%08lX\t%08lX\t%8X\t%d\t%u\t%d\t"
- "%08lX\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
+ seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
+ "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
r->u.dst.dev ? r->u.dst.dev->name : "*",
- (unsigned long)r->rt_dst, (unsigned long)r->rt_gateway,
+ (__force u32)r->rt_dst,
+ (__force u32)r->rt_gateway,
r->rt_flags, atomic_read(&r->u.dst.__refcnt),
- r->u.dst.__use, 0, (unsigned long)r->rt_src,
+ r->u.dst.__use, 0, (__force u32)r->rt_src,
(dst_metric(&r->u.dst, RTAX_ADVMSS) ?
(int)dst_metric(&r->u.dst, RTAX_ADVMSS) + 40 : 0),
dst_metric(&r->u.dst, RTAX_WINDOW),
@@ -685,18 +684,17 @@ static inline bool rt_caching(const struct net *net)
static inline bool compare_hash_inputs(const struct flowi *fl1,
const struct flowi *fl2)
{
- return (__force u32)(((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
- (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr) |
+ return ((((__force u32)fl1->nl_u.ip4_u.daddr ^ (__force u32)fl2->nl_u.ip4_u.daddr) |
+ ((__force u32)fl1->nl_u.ip4_u.saddr ^ (__force u32)fl2->nl_u.ip4_u.saddr) |
(fl1->iif ^ fl2->iif)) == 0);
}
static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
{
- return ((__force u32)((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
- (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr)) |
+ return (((__force u32)fl1->nl_u.ip4_u.daddr ^ (__force u32)fl2->nl_u.ip4_u.daddr) |
+ ((__force u32)fl1->nl_u.ip4_u.saddr ^ (__force u32)fl2->nl_u.ip4_u.saddr) |
(fl1->mark ^ fl2->mark) |
- (*(u16 *)&fl1->nl_u.ip4_u.tos ^
- *(u16 *)&fl2->nl_u.ip4_u.tos) |
+ (*(u16 *)&fl1->nl_u.ip4_u.tos ^ *(u16 *)&fl2->nl_u.ip4_u.tos) |
(fl1->oif ^ fl2->oif) |
(fl1->iif ^ fl2->iif)) == 0;
}
@@ -919,32 +917,11 @@ void rt_cache_flush_batch(void)
rt_do_flush(!in_softirq());
}
-/*
- * We change rt_genid and let gc do the cleanup
- */
-static void rt_secret_rebuild(unsigned long __net)
-{
- struct net *net = (struct net *)__net;
- rt_cache_invalidate(net);
- mod_timer(&net->ipv4.rt_secret_timer, jiffies + ip_rt_secret_interval);
-}
-
-static void rt_secret_rebuild_oneshot(struct net *net)
-{
- del_timer_sync(&net->ipv4.rt_secret_timer);
- rt_cache_invalidate(net);
- if (ip_rt_secret_interval)
- mod_timer(&net->ipv4.rt_secret_timer, jiffies + ip_rt_secret_interval);
-}
-
static void rt_emergency_hash_rebuild(struct net *net)
{
- if (net_ratelimit()) {
+ if (net_ratelimit())
printk(KERN_WARNING "Route hash chain too long!\n");
- printk(KERN_WARNING "Adjust your secret_interval!\n");
- }
-
- rt_secret_rebuild_oneshot(net);
+ rt_cache_invalidate(net);
}
/*
@@ -2300,8 +2277,8 @@ martian_source:
goto e_inval;
}
-int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
- u8 tos, struct net_device *dev)
+int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ u8 tos, struct net_device *dev, bool noref)
{
struct rtable * rth;
unsigned hash;
@@ -2319,18 +2296,23 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
rcu_read_lock();
for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
rth = rcu_dereference(rth->u.dst.rt_next)) {
- if (((rth->fl.fl4_dst ^ daddr) |
- (rth->fl.fl4_src ^ saddr) |
+ if ((((__force u32)rth->fl.fl4_dst ^ (__force u32)daddr) |
+ ((__force u32)rth->fl.fl4_src ^ (__force u32)saddr) |
(rth->fl.iif ^ iif) |
rth->fl.oif |
(rth->fl.fl4_tos ^ tos)) == 0 &&
rth->fl.mark == skb->mark &&
net_eq(dev_net(rth->u.dst.dev), net) &&
!rt_is_expired(rth)) {
- dst_use(&rth->u.dst, jiffies);
+ if (noref) {
+ dst_use_noref(&rth->u.dst, jiffies);
+ skb_dst_set_noref(skb, &rth->u.dst);
+ } else {
+ dst_use(&rth->u.dst, jiffies);
+ skb_dst_set(skb, &rth->u.dst);
+ }
RT_CACHE_STAT_INC(in_hit);
rcu_read_unlock();
- skb_dst_set(skb, &rth->u.dst);
return 0;
}
RT_CACHE_STAT_INC(in_hlist_search);
@@ -2373,6 +2355,7 @@ skip_cache:
}
return ip_route_input_slow(skb, daddr, saddr, tos, dev);
}
+EXPORT_SYMBOL(ip_route_input_common);
static int __mkroute_output(struct rtable **result,
struct fib_result *res,
@@ -3056,7 +3039,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
continue;
if (rt_is_expired(rt))
continue;
- skb_dst_set(skb, dst_clone(&rt->u.dst));
+ skb_dst_set_noref(skb, &rt->u.dst);
if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, RTM_NEWROUTE,
1, NLM_F_MULTI) <= 0) {
@@ -3102,48 +3085,6 @@ static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
return -EINVAL;
}
-static void rt_secret_reschedule(int old)
-{
- struct net *net;
- int new = ip_rt_secret_interval;
- int diff = new - old;
-
- if (!diff)
- return;
-
- rtnl_lock();
- for_each_net(net) {
- int deleted = del_timer_sync(&net->ipv4.rt_secret_timer);
- long time;
-
- if (!new)
- continue;
-
- if (deleted) {
- time = net->ipv4.rt_secret_timer.expires - jiffies;
-
- if (time <= 0 || (time += diff) <= 0)
- time = 0;
- } else
- time = new;
-
- mod_timer(&net->ipv4.rt_secret_timer, jiffies + time);
- }
- rtnl_unlock();
-}
-
-static int ipv4_sysctl_rt_secret_interval(ctl_table *ctl, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
-{
- int old = ip_rt_secret_interval;
- int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
-
- rt_secret_reschedule(old);
-
- return ret;
-}
-
static ctl_table ipv4_route_table[] = {
{
.procname = "gc_thresh",
@@ -3252,13 +3193,6 @@ static ctl_table ipv4_route_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
- {
- .procname = "secret_interval",
- .data = &ip_rt_secret_interval,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = ipv4_sysctl_rt_secret_interval,
- },
{ }
};
@@ -3337,34 +3271,15 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
};
#endif
-
-static __net_init int rt_secret_timer_init(struct net *net)
+static __net_init int rt_genid_init(struct net *net)
{
- atomic_set(&net->ipv4.rt_genid,
- (int) ((num_physpages ^ (num_physpages>>8)) ^
- (jiffies ^ (jiffies >> 7))));
-
- net->ipv4.rt_secret_timer.function = rt_secret_rebuild;
- net->ipv4.rt_secret_timer.data = (unsigned long)net;
- init_timer_deferrable(&net->ipv4.rt_secret_timer);
-
- if (ip_rt_secret_interval) {
- net->ipv4.rt_secret_timer.expires =
- jiffies + net_random() % ip_rt_secret_interval +
- ip_rt_secret_interval;
- add_timer(&net->ipv4.rt_secret_timer);
- }
+ get_random_bytes(&net->ipv4.rt_genid,
+ sizeof(net->ipv4.rt_genid));
return 0;
}
-static __net_exit void rt_secret_timer_exit(struct net *net)
-{
- del_timer_sync(&net->ipv4.rt_secret_timer);
-}
-
-static __net_initdata struct pernet_operations rt_secret_timer_ops = {
- .init = rt_secret_timer_init,
- .exit = rt_secret_timer_exit,
+static __net_initdata struct pernet_operations rt_genid_ops = {
+ .init = rt_genid_init,
};
@@ -3425,9 +3340,6 @@ int __init ip_rt_init(void)
schedule_delayed_work(&expires_work,
net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
- if (register_pernet_subsys(&rt_secret_timer_ops))
- printk(KERN_ERR "Unable to setup rt_secret_timer\n");
-
if (ip_rt_proc_init())
printk(KERN_ERR "Unable to create route proc files\n");
#ifdef CONFIG_XFRM
@@ -3439,6 +3351,7 @@ int __init ip_rt_init(void)
#ifdef CONFIG_SYSCTL
register_pernet_subsys(&sysctl_route_ops);
#endif
+ register_pernet_subsys(&rt_genid_ops);
return rc;
}
@@ -3454,5 +3367,4 @@ void __init ip_static_sysctl_init(void)
#endif
EXPORT_SYMBOL(__ip_select_ident);
-EXPORT_SYMBOL(ip_route_input);
EXPORT_SYMBOL(ip_route_output_key);
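With the secret-interval timer removed, route cache invalidation rests entirely on the per-netns generation counter: rt_genid_init() above seeds it randomly when a namespace comes up, each cached entry remembers the genid it was created under, and lookups skip entries whose genid no longer matches (rt_is_expired()). Flushing is then an O(1) counter bump, roughly as the existing rt_cache_invalidate() does:

static void rt_cache_invalidate(struct net *net)
{
	unsigned char shuffle;

	/* bump by a random non-zero amount so stale entries cannot
	 * land on the new generation even across wraparound */
	get_random_bytes(&shuffle, sizeof(shuffle));
	atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
}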
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 1cd5c15174b8..d96c1da4b17c 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -299,6 +299,13 @@ static struct ctl_table ipv4_table[] = {
.mode = 0644,
.proc_handler = ipv4_local_port_range,
},
+ {
+ .procname = "ip_local_reserved_ports",
+ .data = NULL, /* initialized in sysctl_ipv4_init */
+ .maxlen = 65536,
+ .mode = 0644,
+ .proc_handler = proc_do_large_bitmap,
+ },
#ifdef CONFIG_IP_MULTICAST
{
.procname = "igmp_max_memberships",
@@ -736,6 +743,16 @@ static __net_initdata struct pernet_operations ipv4_sysctl_ops = {
static __init int sysctl_ipv4_init(void)
{
struct ctl_table_header *hdr;
+ struct ctl_table *i;
+
+ for (i = ipv4_table; i->procname; i++) {
+ if (strcmp(i->procname, "ip_local_reserved_ports") == 0) {
+ i->data = sysctl_local_reserved_ports;
+ break;
+ }
+ }
+ if (!i->procname)
+ return -EINVAL;
hdr = register_sysctl_paths(net_ipv4_ctl_path, ipv4_table);
if (hdr == NULL)
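ip_local_reserved_ports is a 65536-bit bitmap edited through proc_do_large_bitmap, which parses comma-separated port ranges (e.g. "8080,10000-10100"). Kernel-side consumers are expected to consult it with a trivial test_bit() accessor; a sketch of the inline this series is expected to add to include/net/inet_sock.h:

extern unsigned long *sysctl_local_reserved_ports;

static inline int inet_is_reserved_local_port(int port)
{
	/* autobinding skips reserved ports; an explicit bind() may still use them */
	return test_bit(port, sysctl_local_reserved_ports);
}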
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0f8caf64caa3..6596b4feeddc 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -378,7 +378,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
struct sock *sk = sock->sk;
struct tcp_sock *tp = tcp_sk(sk);
- sock_poll_wait(file, sk->sk_sleep, wait);
+ sock_poll_wait(file, sk_sleep(sk), wait);
if (sk->sk_state == TCP_LISTEN)
return inet_csk_listen_poll(sk);
@@ -2215,7 +2215,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
default:
/* fallthru */
break;
- };
+ }
if (optlen < sizeof(int))
return -EINVAL;
@@ -2298,7 +2298,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
if (sock_flag(sk, SOCK_KEEPOPEN) &&
!((1 << sk->sk_state) &
(TCPF_CLOSE | TCPF_LISTEN))) {
- __u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
+ u32 elapsed = keepalive_time_elapsed(tp);
if (tp->keepalive_time > elapsed)
elapsed = tp->keepalive_time - elapsed;
else
@@ -2721,7 +2721,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
struct tcphdr *th2;
unsigned int len;
unsigned int thlen;
- unsigned int flags;
+ __be32 flags;
unsigned int mss = 1;
unsigned int hlen;
unsigned int off;
@@ -2771,10 +2771,10 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
found:
flush = NAPI_GRO_CB(p)->flush;
- flush |= flags & TCP_FLAG_CWR;
- flush |= (flags ^ tcp_flag_word(th2)) &
- ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH);
- flush |= th->ack_seq ^ th2->ack_seq;
+ flush |= (__force int)(flags & TCP_FLAG_CWR);
+ flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
+ ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
+ flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
for (i = sizeof(*th); i < thlen; i += 4)
flush |= *(u32 *)((u8 *)th + i) ^
*(u32 *)((u8 *)th2 + i);
@@ -2795,8 +2795,9 @@ found:
out_check_final:
flush = len < mss;
- flush |= flags & (TCP_FLAG_URG | TCP_FLAG_PSH | TCP_FLAG_RST |
- TCP_FLAG_SYN | TCP_FLAG_FIN);
+ flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
+ TCP_FLAG_RST | TCP_FLAG_SYN |
+ TCP_FLAG_FIN));
if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
pp = head;
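The GRO hunk above retypes flags from unsigned int to __be32 because the TCP_FLAG_* constants are endian-annotated, and folds them into the plain-int flush accumulator with (__force int) casts. A two-line sketch of the pattern, assuming sparse checking; only zero-vs-nonzero matters for the flush decision, not byte order:

	__be32 word = tcp_flag_word(th);	/* 4th 32-bit word of the TCP header */
	int flush = 0;

	/* __force tells sparse the cross-endian use is intentional */
	flush |= (__force int)(word & TCP_FLAG_CWR);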
@@ -2839,7 +2840,6 @@ static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool * __percpu *pool)
if (p->md5_desc.tfm)
crypto_free_hash(p->md5_desc.tfm);
kfree(p);
- p = NULL;
}
}
free_percpu(pool);
@@ -2937,25 +2937,40 @@ retry:
EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
-struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
+
+/**
+ * tcp_get_md5sig_pool - get md5sig_pool for this user
+ *
+ * We use a percpu structure, so if we succeed, we exit with preemption
+ * and BH disabled, to make sure another thread or softirq handler
+ * won't try to get the same context.
+ */
+struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
{
struct tcp_md5sig_pool * __percpu *p;
- spin_lock_bh(&tcp_md5sig_pool_lock);
+
+ local_bh_disable();
+
+ spin_lock(&tcp_md5sig_pool_lock);
p = tcp_md5sig_pool;
if (p)
tcp_md5sig_users++;
- spin_unlock_bh(&tcp_md5sig_pool_lock);
- return (p ? *per_cpu_ptr(p, cpu) : NULL);
-}
+ spin_unlock(&tcp_md5sig_pool_lock);
+
+ if (p)
+ return *per_cpu_ptr(p, smp_processor_id());
-EXPORT_SYMBOL(__tcp_get_md5sig_pool);
+ local_bh_enable();
+ return NULL;
+}
+EXPORT_SYMBOL(tcp_get_md5sig_pool);
-void __tcp_put_md5sig_pool(void)
+void tcp_put_md5sig_pool(void)
{
+ local_bh_enable();
tcp_free_md5sig_pool();
}
-
-EXPORT_SYMBOL(__tcp_put_md5sig_pool);
+EXPORT_SYMBOL(tcp_put_md5sig_pool);
int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
struct tcphdr *th)
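With the rewrite above, tcp_get_md5sig_pool() returns the current CPU's pool with BH disabled, and tcp_put_md5sig_pool() re-enables BH, so the pair must bracket a short, non-sleeping section. A minimal caller sketch (not from this patch):

	struct tcp_md5sig_pool *hp;

	hp = tcp_get_md5sig_pool();		/* BH disabled on success */
	if (hp) {
		/* ... hash the segment with hp->md5_desc on this CPU ... */
		tcp_put_md5sig_pool();		/* re-enables BH */
	}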
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index f240f57b2199..3e6dafcb1071 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3710,7 +3710,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
}
if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
- dst_confirm(sk->sk_dst_cache);
+ dst_confirm(__sk_dst_get(sk));
return 1;
@@ -3845,12 +3845,13 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
/* 16-bit multiple */
opt_rx->cookie_plus = opsize;
*hvpp = ptr;
+ break;
default:
/* ignore option */
break;
- };
+ }
break;
- };
+ }
ptr += opsize-2;
length -= opsize;
@@ -4319,7 +4320,7 @@ static void tcp_ofo_queue(struct sock *sk)
}
if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
- SOCK_DEBUG(sk, "ofo packet was already received \n");
+ SOCK_DEBUG(sk, "ofo packet was already received\n");
__skb_unlink(skb, &tp->out_of_order_queue);
__kfree_skb(skb);
continue;
@@ -4367,6 +4368,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq)
goto drop;
+ skb_dst_drop(skb);
__skb_pull(skb, th->doff * 4);
TCP_ECN_accept_cwr(tp, skb);
@@ -5833,7 +5835,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
if (tp->snd_una == tp->write_seq) {
tcp_set_state(sk, TCP_FIN_WAIT2);
sk->sk_shutdown |= SEND_SHUTDOWN;
- dst_confirm(sk->sk_dst_cache);
+ dst_confirm(__sk_dst_get(sk));
if (!sock_flag(sk, SOCK_DEAD))
/* Wake up lingering close() */
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 3c23e70885f4..202cf09c4cd4 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -519,26 +519,31 @@ out:
sock_put(sk);
}
-/* This routine computes an IPv4 TCP checksum. */
-void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
+static void __tcp_v4_send_check(struct sk_buff *skb,
+ __be32 saddr, __be32 daddr)
{
- struct inet_sock *inet = inet_sk(sk);
struct tcphdr *th = tcp_hdr(skb);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- th->check = ~tcp_v4_check(len, inet->inet_saddr,
- inet->inet_daddr, 0);
+ th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
skb->csum_start = skb_transport_header(skb) - skb->head;
skb->csum_offset = offsetof(struct tcphdr, check);
} else {
- th->check = tcp_v4_check(len, inet->inet_saddr,
- inet->inet_daddr,
+ th->check = tcp_v4_check(skb->len, saddr, daddr,
csum_partial(th,
th->doff << 2,
skb->csum));
}
}
+/* This routine computes an IPv4 TCP checksum. */
+void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
+{
+ struct inet_sock *inet = inet_sk(sk);
+
+ __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
+}
+
int tcp_v4_gso_send_check(struct sk_buff *skb)
{
const struct iphdr *iph;
@@ -551,10 +556,8 @@ int tcp_v4_gso_send_check(struct sk_buff *skb)
th = tcp_hdr(skb);
th->check = 0;
- th->check = ~tcp_v4_check(skb->len, iph->saddr, iph->daddr, 0);
- skb->csum_start = skb_transport_header(skb) - skb->head;
- skb->csum_offset = offsetof(struct tcphdr, check);
skb->ip_summed = CHECKSUM_PARTIAL;
+ __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
return 0;
}
@@ -763,13 +766,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
skb = tcp_make_synack(sk, dst, req, rvp);
if (skb) {
- struct tcphdr *th = tcp_hdr(skb);
-
- th->check = tcp_v4_check(skb->len,
- ireq->loc_addr,
- ireq->rmt_addr,
- csum_partial(th, skb->len,
- skb->csum));
+ __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
ireq->rmt_addr,
@@ -894,7 +891,7 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
kfree(newkey);
return -ENOMEM;
}
- sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
+ sk_nocaps_add(sk, NETIF_F_GSO_MASK);
}
if (tcp_alloc_md5sig_pool(sk) == NULL) {
kfree(newkey);
@@ -1024,7 +1021,7 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
return -EINVAL;
tp->md5sig_info = p;
- sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
+ sk_nocaps_add(sk, NETIF_F_GSO_MASK);
}
newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, sk->sk_allocation);
@@ -1289,8 +1286,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
goto drop_and_release;
/* Secret recipe starts with IP addresses */
- *mess++ ^= daddr;
- *mess++ ^= saddr;
+ *mess++ ^= (__force u32)daddr;
+ *mess++ ^= (__force u32)saddr;
/* plus variable length Initiator Cookie */
c = (u8 *)mess;
@@ -1465,7 +1462,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
if (newkey != NULL)
tcp_v4_md5_do_add(newsk, newinet->inet_daddr,
newkey, key->keylen);
- newsk->sk_route_caps &= ~NETIF_F_GSO_MASK;
+ sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
}
#endif
@@ -1675,6 +1672,8 @@ process:
skb->dev = NULL;
+ sock_rps_save_rxhash(sk, skb->rxhash);
+
bh_lock_sock_nested(sk);
ret = 0;
if (!sock_owned_by_user(sk)) {
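tcp_v4_rcv() now records skb->rxhash on the socket before processing. The body of sock_rps_save_rxhash() is not part of this hunk, so the sketch below is an assumption about its shape: remember the flow hash so Receive Packet Steering keeps the flow on the CPU that last handled it.

	static inline void sock_rps_save_rxhash_sketch(struct sock *sk, u32 rxhash)
	{
	#ifdef CONFIG_RPS
		if (unlikely(sk->sk_rxhash != rxhash))
			sk->sk_rxhash = rxhash;	/* steer future packets consistently */
	#endif
	}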
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 5fabff9ac6d6..794c2e122a41 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -672,6 +672,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
inet_rsk(req)->acked = 1;
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
return NULL;
}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 0dda86e72ad8..b4ed957f201a 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -350,6 +350,7 @@ static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
*/
static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
{
+ skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum = 0;
TCP_SKB_CB(skb)->flags = flags;
@@ -667,7 +668,6 @@ static unsigned tcp_synack_options(struct sock *sk,
u8 cookie_plus = (xvp != NULL && !xvp->cookie_out_never) ?
xvp->cookie_plus :
0;
- bool doing_ts = ireq->tstamp_ok;
#ifdef CONFIG_TCP_MD5SIG
*md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
@@ -680,7 +680,7 @@ static unsigned tcp_synack_options(struct sock *sk,
* rather than TS in order to fit in better with old,
* buggy kernels, but that was deemed to be unnecessary.
*/
- doing_ts &= !ireq->sack_ok;
+ ireq->tstamp_ok &= !ireq->sack_ok;
}
#else
*md5 = NULL;
@@ -695,7 +695,7 @@ static unsigned tcp_synack_options(struct sock *sk,
opts->options |= OPTION_WSCALE;
remaining -= TCPOLEN_WSCALE_ALIGNED;
}
- if (likely(doing_ts)) {
+ if (likely(ireq->tstamp_ok)) {
opts->options |= OPTION_TS;
opts->tsval = TCP_SKB_CB(skb)->when;
opts->tsecr = req->ts_recent;
@@ -703,7 +703,7 @@ static unsigned tcp_synack_options(struct sock *sk,
}
if (likely(ireq->sack_ok)) {
opts->options |= OPTION_SACK_ADVERTISE;
- if (unlikely(!doing_ts))
+ if (unlikely(!ireq->tstamp_ok))
remaining -= TCPOLEN_SACKPERM_ALIGNED;
}
@@ -711,7 +711,7 @@ static unsigned tcp_synack_options(struct sock *sk,
* If the <SYN> options fit, the same options should fit now!
*/
if (*md5 == NULL &&
- doing_ts &&
+ ireq->tstamp_ok &&
cookie_plus > TCPOLEN_COOKIE_BASE) {
int need = cookie_plus; /* has TCPOLEN_COOKIE_BASE */
@@ -860,7 +860,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
th->urg_ptr = htons(tp->snd_up - tcb->seq);
th->urg = 1;
} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
- th->urg_ptr = 0xFFFF;
+ th->urg_ptr = htons(0xFFFF);
th->urg = 1;
}
}
@@ -872,13 +872,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
#ifdef CONFIG_TCP_MD5SIG
/* Calculate the MD5 hash, as we have all we need now */
if (md5) {
- sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
+ sk_nocaps_add(sk, NETIF_F_GSO_MASK);
tp->af_specific->calc_md5_hash(opts.hash_location,
md5, sk, NULL, skb);
}
#endif
- icsk->icsk_af_ops->send_check(sk, skb->len, skb);
+ icsk->icsk_af_ops->send_check(sk, skb);
if (likely(tcb->flags & TCPCB_FLAG_ACK))
tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
@@ -887,9 +887,10 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
tcp_event_data_sent(tp, skb, sk);
if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
- TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
+ TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
+ tcp_skb_pcount(skb));
- err = icsk->icsk_af_ops->queue_xmit(skb, 0);
+ err = icsk->icsk_af_ops->queue_xmit(skb);
if (likely(err <= 0))
return err;
@@ -2484,7 +2485,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
*tail-- ^= TCP_SKB_CB(skb)->seq + 1;
/* recommended */
- *tail-- ^= ((th->dest << 16) | th->source);
+ *tail-- ^= (((__force u32)th->dest << 16) | (__force u32)th->source);
*tail-- ^= (u32)(unsigned long)cvp; /* per sockopt */
sha_transform((__u32 *)&xvp->cookie_bakery[0],
@@ -2502,7 +2503,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
th->window = htons(min(req->rcv_wnd, 65535U));
tcp_options_write((__be32 *)(th + 1), tp, &opts);
th->doff = (tcp_header_size >> 2);
- TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
+ TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, tcp_skb_pcount(skb));
#ifdef CONFIG_TCP_MD5SIG
/* Okay, we have all we need - do the md5 hash if needed */
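The urg_ptr assignment in the tcp_output.c hunks above is an endianness-annotation fix: th->urg_ptr is __be16, so the constant must pass through htons(). Since 0xFFFF is the same byte pattern either way, behavior is unchanged; the gain is that sparse no longer flags a bare integer store. A sketch:

	struct tcphdr th_sketch = { 0 };

	th_sketch.urg_ptr = htons(0xFFFF);	/* annotated network byte order */
	/* th_sketch.urg_ptr = 0xFFFF; -- same bits, but sparse would warn */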
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 8a0ab2977f1f..440a5c6004f6 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -172,14 +172,14 @@ static int tcp_write_timeout(struct sock *sk)
if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
if (icsk->icsk_retransmits)
- dst_negative_advice(&sk->sk_dst_cache, sk);
+ dst_negative_advice(sk);
retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
} else {
if (retransmits_timed_out(sk, sysctl_tcp_retries1)) {
/* Black hole detection */
tcp_mtu_probing(icsk, sk);
- dst_negative_advice(&sk->sk_dst_cache, sk);
+ dst_negative_advice(sk);
}
retry_until = sysctl_tcp_retries2;
@@ -517,7 +517,7 @@ static void tcp_keepalive_timer (unsigned long data)
struct sock *sk = (struct sock *) data;
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
- __u32 elapsed;
+ u32 elapsed;
/* Only process if socket is not in use. */
bh_lock_sock(sk);
@@ -554,7 +554,7 @@ static void tcp_keepalive_timer (unsigned long data)
if (tp->packets_out || tcp_send_head(sk))
goto resched;
- elapsed = tcp_time_stamp - tp->rcv_tstamp;
+ elapsed = keepalive_time_elapsed(tp);
if (elapsed >= keepalive_time_when(tp)) {
if (icsk->icsk_probes_out >= keepalive_probes(tp)) {
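Both keepalive paths now call keepalive_time_elapsed() instead of open-coding tcp_time_stamp - tp->rcv_tstamp, so the two call sites (tcp.c and tcp_timer.c) cannot drift apart. A sketch of the helper's assumed shape; the real one may also consult the time of the last received ACK:

	static inline u32 keepalive_time_elapsed_sketch(const struct tcp_sock *tp)
	{
		/* time, in tcp_time_stamp units, since the peer was last heard */
		return tcp_time_stamp - tp->rcv_tstamp;
	}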
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 8fef859db35d..9de6a698f91d 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -233,7 +233,8 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
*/
do {
if (low <= snum && snum <= high &&
- !test_bit(snum >> udptable->log, bitmap))
+ !test_bit(snum >> udptable->log, bitmap) &&
+ !inet_is_reserved_local_port(snum))
goto found;
snum += rand;
} while (snum != first);
@@ -307,13 +308,13 @@ static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
static unsigned int udp4_portaddr_hash(struct net *net, __be32 saddr,
unsigned int port)
{
- return jhash_1word(saddr, net_hash_mix(net)) ^ port;
+ return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
}
int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
unsigned int hash2_nulladdr =
- udp4_portaddr_hash(sock_net(sk), INADDR_ANY, snum);
+ udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
unsigned int hash2_partial =
udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);
@@ -466,14 +467,14 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
daddr, hnum, dif,
hslot2, slot2);
if (!result) {
- hash2 = udp4_portaddr_hash(net, INADDR_ANY, hnum);
+ hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
slot2 = hash2 & udptable->mask;
hslot2 = &udptable->hash2[slot2];
if (hslot->count < hslot2->count)
goto begin;
result = udp4_lib_lookup2(net, saddr, sport,
- INADDR_ANY, hnum, dif,
+ htonl(INADDR_ANY), hnum, dif,
hslot2, slot2);
}
rcu_read_unlock();
@@ -1062,10 +1063,10 @@ static unsigned int first_packet_length(struct sock *sk)
spin_unlock_bh(&rcvq->lock);
if (!skb_queue_empty(&list_kill)) {
- lock_sock(sk);
+ lock_sock_bh(sk);
__skb_queue_purge(&list_kill);
sk_mem_reclaim_partial(sk);
- release_sock(sk);
+ unlock_sock_bh(sk);
}
return res;
}
@@ -1196,10 +1197,10 @@ out:
return err;
csum_copy_err:
- lock_sock(sk);
+ lock_sock_bh(sk);
if (!skb_kill_datagram(sk, skb, flags))
UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
- release_sock(sk);
+ unlock_sock_bh(sk);
if (noblock)
return -EAGAIN;
@@ -1217,6 +1218,7 @@ int udp_disconnect(struct sock *sk, int flags)
sk->sk_state = TCP_CLOSE;
inet->inet_daddr = 0;
inet->inet_dport = 0;
+ sock_rps_save_rxhash(sk, 0);
sk->sk_bound_dev_if = 0;
if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
inet_reset_saddr(sk);
@@ -1258,8 +1260,12 @@ EXPORT_SYMBOL(udp_lib_unhash);
static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
- int rc = sock_queue_rcv_skb(sk, skb);
+ int rc;
+ if (inet_sk(sk)->inet_daddr)
+ sock_rps_save_rxhash(sk, skb->rxhash);
+
+ rc = ip_queue_rcv_skb(sk, skb);
if (rc < 0) {
int is_udplite = IS_UDPLITE(sk);
@@ -1367,6 +1373,10 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
goto drop;
}
+
+ if (sk_rcvqueues_full(sk, skb))
+ goto drop;
+
rc = 0;
bh_lock_sock(sk);
@@ -1527,6 +1537,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
uh = udp_hdr(skb);
ulen = ntohs(uh->len);
+ saddr = ip_hdr(skb)->saddr;
+ daddr = ip_hdr(skb)->daddr;
+
if (ulen > skb->len)
goto short_packet;
@@ -1540,9 +1553,6 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
if (udp4_csum_init(skb, uh, proto))
goto csum_error;
- saddr = ip_hdr(skb)->saddr;
- daddr = ip_hdr(skb)->daddr;
-
if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
return __udp4_lib_mcast_deliver(net, skb, uh,
saddr, daddr, udptable);
@@ -1615,9 +1625,9 @@ int udp_rcv(struct sk_buff *skb)
void udp_destroy_sock(struct sock *sk)
{
- lock_sock(sk);
+ lock_sock_bh(sk);
udp_flush_pending_frames(sk);
- release_sock(sk);
+ unlock_sock_bh(sk);
}
/*
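udp.c switches several short critical sections from lock_sock()/release_sock() to lock_sock_bh()/unlock_sock_bh(). The assumed semantics, as a sketch: take only the socket spinlock with BHs disabled, which is legitimate when the caller knows the socket is not owned by a user context, and skips the full ownership hand-off of lock_sock():

	static inline void lock_sock_bh_sketch(struct sock *sk)
	{
		spin_lock_bh(&sk->sk_lock.slock);	/* no owner hand-off */
	}

	static inline void unlock_sock_bh_sketch(struct sock *sk)
	{
		spin_unlock_bh(&sk->sk_lock.slock);
	}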
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index c791bb63203f..ad8fbb871aa0 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -27,8 +27,8 @@ static inline int xfrm4_rcv_encap_finish(struct sk_buff *skb)
if (skb_dst(skb) == NULL) {
const struct iphdr *iph = ip_hdr(skb);
- if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos,
- skb->dev))
+ if (ip_route_input_noref(skb, iph->daddr, iph->saddr,
+ iph->tos, skb->dev))
goto drop;
}
return dst_input(skb);
@@ -61,7 +61,7 @@ int xfrm4_transport_finish(struct sk_buff *skb, int async)
iph->tot_len = htons(skb->len);
ip_send_check(iph);
- NF_HOOK(PF_INET, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
+ NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
xfrm4_rcv_encap_finish);
return 0;
}
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index c908bd99bcba..571aa96a175c 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -86,7 +86,7 @@ static int xfrm4_output_finish(struct sk_buff *skb)
int xfrm4_output(struct sk_buff *skb)
{
- return NF_HOOK_COND(PF_INET, NF_INET_POST_ROUTING, skb,
+ return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb,
NULL, skb_dst(skb)->dev, xfrm4_output_finish,
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index e4a1483fba77..1705476670ef 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -59,27 +59,6 @@ static int xfrm4_get_saddr(struct net *net,
return 0;
}
-static struct dst_entry *
-__xfrm4_find_bundle(struct flowi *fl, struct xfrm_policy *policy)
-{
- struct dst_entry *dst;
-
- read_lock_bh(&policy->lock);
- for (dst = policy->bundles; dst; dst = dst->next) {
- struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
- if (xdst->u.rt.fl.oif == fl->oif && /*XXX*/
- xdst->u.rt.fl.fl4_dst == fl->fl4_dst &&
- xdst->u.rt.fl.fl4_src == fl->fl4_src &&
- xdst->u.rt.fl.fl4_tos == fl->fl4_tos &&
- xfrm_bundle_ok(policy, xdst, fl, AF_INET, 0)) {
- dst_clone(dst);
- break;
- }
- }
- read_unlock_bh(&policy->lock);
- return dst;
-}
-
static int xfrm4_get_tos(struct flowi *fl)
{
return fl->fl4_tos;
@@ -259,7 +238,6 @@ static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
.dst_ops = &xfrm4_dst_ops,
.dst_lookup = xfrm4_dst_lookup,
.get_saddr = xfrm4_get_saddr,
- .find_bundle = __xfrm4_find_bundle,
.decode_session = _decode_session4,
.get_tos = xfrm4_get_tos,
.init_path = xfrm4_init_path,
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index a578096152ab..36d7437ac054 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -229,6 +229,20 @@ config IPV6_MROUTE
Experimental support for IPv6 multicast forwarding.
If unsure, say N.
+config IPV6_MROUTE_MULTIPLE_TABLES
+ bool "IPv6: multicast policy routing"
+ depends on IPV6_MROUTE
+ select FIB_RULES
+ help
+ Normally, a multicast router runs a userspace daemon and decides
+ what to do with a multicast packet based on the source and
+ destination addresses. If you say Y here, the multicast router
+ will also be able to take interfaces and packet marks into
+ account and run multiple instances of userspace daemons
+ simultaneously, each one handling a single table.
+
+ If unsure, say N.
+
config IPV6_PIMSM_V2
bool "IPv6: PIM-SM version 2 support (EXPERIMENTAL)"
depends on IPV6_MROUTE
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 413054f02aab..e1a698df5706 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -82,7 +82,7 @@
#include <linux/random.h>
#endif
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <linux/proc_fs.h>
@@ -98,7 +98,11 @@
#endif
#define INFINITY_LIFE_TIME 0xFFFFFFFF
-#define TIME_DELTA(a,b) ((unsigned long)((long)(a) - (long)(b)))
+#define TIME_DELTA(a, b) ((unsigned long)((long)(a) - (long)(b)))
+
+#define ADDRCONF_TIMER_FUZZ_MINUS (HZ > 50 ? HZ/50 : 1)
+#define ADDRCONF_TIMER_FUZZ (HZ / 4)
+#define ADDRCONF_TIMER_FUZZ_MAX (HZ)
#ifdef CONFIG_SYSCTL
static void addrconf_sysctl_register(struct inet6_dev *idev);
@@ -127,8 +131,8 @@ static int ipv6_count_addresses(struct inet6_dev *idev);
/*
* Configured unicast address hash table
*/
-static struct inet6_ifaddr *inet6_addr_lst[IN6_ADDR_HSIZE];
-static DEFINE_RWLOCK(addrconf_hash_lock);
+static struct hlist_head inet6_addr_lst[IN6_ADDR_HSIZE];
+static DEFINE_SPINLOCK(addrconf_hash_lock);
static void addrconf_verify(unsigned long);
@@ -138,8 +142,8 @@ static DEFINE_SPINLOCK(addrconf_verify_lock);
static void addrconf_join_anycast(struct inet6_ifaddr *ifp);
static void addrconf_leave_anycast(struct inet6_ifaddr *ifp);
-static void addrconf_bonding_change(struct net_device *dev,
- unsigned long event);
+static void addrconf_type_change(struct net_device *dev,
+ unsigned long event);
static int addrconf_ifdown(struct net_device *dev, int how);
static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags);
@@ -152,8 +156,8 @@ static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
static void inet6_prefix_notify(int event, struct inet6_dev *idev,
struct prefix_info *pinfo);
-static int ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
- struct net_device *dev);
+static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
+ struct net_device *dev);
static ATOMIC_NOTIFIER_HEAD(inet6addr_chain);
@@ -250,8 +254,7 @@ static void addrconf_del_timer(struct inet6_ifaddr *ifp)
__in6_ifa_put(ifp);
}
-enum addrconf_timer_t
-{
+enum addrconf_timer_t {
AC_NONE,
AC_DAD,
AC_RS,
@@ -271,7 +274,8 @@ static void addrconf_mod_timer(struct inet6_ifaddr *ifp,
case AC_RS:
ifp->timer.function = addrconf_rs_timer;
break;
- default:;
+ default:
+ break;
}
ifp->timer.expires = jiffies + when;
add_timer(&ifp->timer);
@@ -318,7 +322,7 @@ void in6_dev_finish_destroy(struct inet6_dev *idev)
{
struct net_device *dev = idev->dev;
- WARN_ON(idev->addr_list != NULL);
+ WARN_ON(!list_empty(&idev->addr_list));
WARN_ON(idev->mc_list != NULL);
#ifdef NET_REFCNT_DEBUG
@@ -326,7 +330,7 @@ void in6_dev_finish_destroy(struct inet6_dev *idev)
#endif
dev_put(dev);
if (!idev->dead) {
- printk("Freeing alive inet6 device %p\n", idev);
+ pr_warning("Freeing alive inet6 device %p\n", idev);
return;
}
snmp6_free_dev(idev);
@@ -351,6 +355,8 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
rwlock_init(&ndev->lock);
ndev->dev = dev;
+ INIT_LIST_HEAD(&ndev->addr_list);
+
memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
ndev->cnf.mtu6 = dev->mtu;
ndev->cnf.sysctl = NULL;
@@ -402,6 +408,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
#endif
#ifdef CONFIG_IPV6_PRIVACY
+ INIT_LIST_HEAD(&ndev->tempaddr_list);
setup_timer(&ndev->regen_timer, ipv6_regen_rndid, (unsigned long)ndev);
if ((dev->flags&IFF_LOOPBACK) ||
dev->type == ARPHRD_TUNNEL ||
@@ -439,8 +446,10 @@ static struct inet6_dev * ipv6_find_idev(struct net_device *dev)
ASSERT_RTNL();
- if ((idev = __in6_dev_get(dev)) == NULL) {
- if ((idev = ipv6_add_dev(dev)) == NULL)
+ idev = __in6_dev_get(dev);
+ if (!idev) {
+ idev = ipv6_add_dev(dev);
+ if (!idev)
return NULL;
}
@@ -466,7 +475,8 @@ static void dev_forward_change(struct inet6_dev *idev)
else
ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters);
}
- for (ifa=idev->addr_list; ifa; ifa=ifa->if_next) {
+
+ list_for_each_entry(ifa, &idev->addr_list, if_list) {
if (ifa->flags&IFA_F_TENTATIVE)
continue;
if (idev->cnf.forwarding)
@@ -523,12 +533,16 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
}
#endif
-/* Nobody refers to this ifaddr, destroy it */
+static void inet6_ifa_finish_destroy_rcu(struct rcu_head *head)
+{
+ struct inet6_ifaddr *ifp = container_of(head, struct inet6_ifaddr, rcu);
+ kfree(ifp);
+}
+/* Nobody refers to this ifaddr, destroy it */
void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
{
- WARN_ON(ifp->if_next != NULL);
- WARN_ON(ifp->lst_next != NULL);
+ WARN_ON(!hlist_unhashed(&ifp->addr_lst));
#ifdef NET_REFCNT_DEBUG
printk(KERN_DEBUG "inet6_ifa_finish_destroy\n");
@@ -537,54 +551,46 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
in6_dev_put(ifp->idev);
if (del_timer(&ifp->timer))
- printk("Timer is still running, when freeing ifa=%p\n", ifp);
+ pr_notice("Timer is still running, when freeing ifa=%p\n", ifp);
- if (!ifp->dead) {
- printk("Freeing alive inet6 address %p\n", ifp);
+ if (ifp->state != INET6_IFADDR_STATE_DEAD) {
+ pr_warning("Freeing alive inet6 address %p\n", ifp);
return;
}
dst_release(&ifp->rt->u.dst);
- kfree(ifp);
+ call_rcu(&ifp->rcu, inet6_ifa_finish_destroy_rcu);
}
static void
ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
{
- struct inet6_ifaddr *ifa, **ifap;
+ struct list_head *p;
int ifp_scope = ipv6_addr_src_scope(&ifp->addr);
/*
* Each device address list is sorted in order of scope -
* global before linklocal.
*/
- for (ifap = &idev->addr_list; (ifa = *ifap) != NULL;
- ifap = &ifa->if_next) {
+ list_for_each(p, &idev->addr_list) {
+ struct inet6_ifaddr *ifa
+ = list_entry(p, struct inet6_ifaddr, if_list);
if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr))
break;
}
- ifp->if_next = *ifap;
- *ifap = ifp;
+ list_add_tail(&ifp->if_list, p);
}
-/*
- * Hash function taken from net_alias.c
- */
-static u8 ipv6_addr_hash(const struct in6_addr *addr)
+static u32 ipv6_addr_hash(const struct in6_addr *addr)
{
- __u32 word;
-
/*
* We perform the hash function over the last 64 bits of the address.
* This will include the IEEE address token on links that support it.
*/
-
- word = (__force u32)(addr->s6_addr32[2] ^ addr->s6_addr32[3]);
- word ^= (word >> 16);
- word ^= (word >> 8);
-
- return ((word ^ (word >> 4)) & 0x0f);
+ return jhash_2words((__force u32)addr->s6_addr32[2],
+ (__force u32)addr->s6_addr32[3], 0)
+ & (IN6_ADDR_HSIZE - 1);
}
/* On success it returns ifp with increased reference count */
@@ -595,7 +601,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
{
struct inet6_ifaddr *ifa = NULL;
struct rt6_info *rt;
- int hash;
+ unsigned int hash;
int err = 0;
int addr_type = ipv6_addr_type(addr);
@@ -616,7 +622,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
goto out2;
}
- write_lock(&addrconf_hash_lock);
+ spin_lock(&addrconf_hash_lock);
/* Ignore adding duplicate addresses on an interface */
if (ipv6_chk_same_addr(dev_net(idev->dev), addr, idev->dev)) {
@@ -642,7 +648,9 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
ipv6_addr_copy(&ifa->addr, addr);
spin_lock_init(&ifa->lock);
+ spin_lock_init(&ifa->state_lock);
init_timer(&ifa->timer);
+ INIT_HLIST_NODE(&ifa->addr_lst);
ifa->timer.data = (unsigned long) ifa;
ifa->scope = scope;
ifa->prefix_len = pfxlen;
@@ -669,10 +677,8 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
/* Add to big hash table */
hash = ipv6_addr_hash(addr);
- ifa->lst_next = inet6_addr_lst[hash];
- inet6_addr_lst[hash] = ifa;
- in6_ifa_hold(ifa);
- write_unlock(&addrconf_hash_lock);
+ hlist_add_head_rcu(&ifa->addr_lst, &inet6_addr_lst[hash]);
+ spin_unlock(&addrconf_hash_lock);
write_lock(&idev->lock);
/* Add to inet6_dev unicast addr list. */
@@ -680,8 +686,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
#ifdef CONFIG_IPV6_PRIVACY
if (ifa->flags&IFA_F_TEMPORARY) {
- ifa->tmp_next = idev->tempaddr_list;
- idev->tempaddr_list = ifa;
+ list_add(&ifa->tmp_list, &idev->tempaddr_list);
in6_ifa_hold(ifa);
}
#endif
@@ -700,7 +705,7 @@ out2:
return ifa;
out:
- write_unlock(&addrconf_hash_lock);
+ spin_unlock(&addrconf_hash_lock);
goto out2;
}
@@ -708,52 +713,44 @@ out:
static void ipv6_del_addr(struct inet6_ifaddr *ifp)
{
- struct inet6_ifaddr *ifa, **ifap;
+ struct inet6_ifaddr *ifa, *ifn;
struct inet6_dev *idev = ifp->idev;
+ int state;
int hash;
int deleted = 0, onlink = 0;
unsigned long expires = jiffies;
hash = ipv6_addr_hash(&ifp->addr);
- ifp->dead = 1;
+ spin_lock_bh(&ifp->state_lock);
+ state = ifp->state;
+ ifp->state = INET6_IFADDR_STATE_DEAD;
+ spin_unlock_bh(&ifp->state_lock);
- write_lock_bh(&addrconf_hash_lock);
- for (ifap = &inet6_addr_lst[hash]; (ifa=*ifap) != NULL;
- ifap = &ifa->lst_next) {
- if (ifa == ifp) {
- *ifap = ifa->lst_next;
- __in6_ifa_put(ifp);
- ifa->lst_next = NULL;
- break;
- }
- }
- write_unlock_bh(&addrconf_hash_lock);
+ if (state == INET6_IFADDR_STATE_DEAD)
+ goto out;
+
+ spin_lock_bh(&addrconf_hash_lock);
+ hlist_del_init_rcu(&ifp->addr_lst);
+ spin_unlock_bh(&addrconf_hash_lock);
write_lock_bh(&idev->lock);
#ifdef CONFIG_IPV6_PRIVACY
if (ifp->flags&IFA_F_TEMPORARY) {
- for (ifap = &idev->tempaddr_list; (ifa=*ifap) != NULL;
- ifap = &ifa->tmp_next) {
- if (ifa == ifp) {
- *ifap = ifa->tmp_next;
- if (ifp->ifpub) {
- in6_ifa_put(ifp->ifpub);
- ifp->ifpub = NULL;
- }
- __in6_ifa_put(ifp);
- ifa->tmp_next = NULL;
- break;
- }
+ list_del(&ifp->tmp_list);
+ if (ifp->ifpub) {
+ in6_ifa_put(ifp->ifpub);
+ ifp->ifpub = NULL;
}
+ __in6_ifa_put(ifp);
}
#endif
- for (ifap = &idev->addr_list; (ifa=*ifap) != NULL;) {
+ list_for_each_entry_safe(ifa, ifn, &idev->addr_list, if_list) {
if (ifa == ifp) {
- *ifap = ifa->if_next;
+ list_del_init(&ifp->if_list);
__in6_ifa_put(ifp);
- ifa->if_next = NULL;
+
if (!(ifp->flags & IFA_F_PERMANENT) || onlink > 0)
break;
deleted = 1;
@@ -786,7 +783,6 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
}
}
}
- ifap = &ifa->if_next;
}
write_unlock_bh(&idev->lock);
@@ -830,6 +826,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
dst_release(&rt->u.dst);
}
+out:
in6_ifa_put(ifp);
}
@@ -1165,7 +1162,7 @@ int ipv6_dev_get_saddr(struct net *net, struct net_device *dst_dev,
continue;
read_lock_bh(&idev->lock);
- for (score->ifa = idev->addr_list; score->ifa; score->ifa = score->ifa->if_next) {
+ list_for_each_entry(score->ifa, &idev->addr_list, if_list) {
int i;
/*
@@ -1243,7 +1240,6 @@ try_nextdev:
in6_ifa_put(hiscore->ifa);
return 0;
}
-
EXPORT_SYMBOL(ipv6_dev_get_saddr);
int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
@@ -1253,12 +1249,14 @@ int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
int err = -EADDRNOTAVAIL;
rcu_read_lock();
- if ((idev = __in6_dev_get(dev)) != NULL) {
+ idev = __in6_dev_get(dev);
+ if (idev) {
struct inet6_ifaddr *ifp;
read_lock_bh(&idev->lock);
- for (ifp=idev->addr_list; ifp; ifp=ifp->if_next) {
- if (ifp->scope == IFA_LINK && !(ifp->flags & banned_flags)) {
+ list_for_each_entry(ifp, &idev->addr_list, if_list) {
+ if (ifp->scope == IFA_LINK &&
+ !(ifp->flags & banned_flags)) {
ipv6_addr_copy(addr, &ifp->addr);
err = 0;
break;
@@ -1276,7 +1274,7 @@ static int ipv6_count_addresses(struct inet6_dev *idev)
struct inet6_ifaddr *ifp;
read_lock_bh(&idev->lock);
- for (ifp=idev->addr_list; ifp; ifp=ifp->if_next)
+ list_for_each_entry(ifp, &idev->addr_list, if_list)
cnt++;
read_unlock_bh(&idev->lock);
return cnt;
@@ -1285,41 +1283,44 @@ static int ipv6_count_addresses(struct inet6_dev *idev)
int ipv6_chk_addr(struct net *net, struct in6_addr *addr,
struct net_device *dev, int strict)
{
- struct inet6_ifaddr * ifp;
- u8 hash = ipv6_addr_hash(addr);
+ struct inet6_ifaddr *ifp;
+ struct hlist_node *node;
+ unsigned int hash = ipv6_addr_hash(addr);
- read_lock_bh(&addrconf_hash_lock);
- for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) {
+ rcu_read_lock_bh();
+ hlist_for_each_entry_rcu(ifp, node, &inet6_addr_lst[hash], addr_lst) {
if (!net_eq(dev_net(ifp->idev->dev), net))
continue;
if (ipv6_addr_equal(&ifp->addr, addr) &&
- !(ifp->flags&IFA_F_TENTATIVE)) {
- if (dev == NULL || ifp->idev->dev == dev ||
- !(ifp->scope&(IFA_LINK|IFA_HOST) || strict))
- break;
+ !(ifp->flags&IFA_F_TENTATIVE) &&
+ (dev == NULL || ifp->idev->dev == dev ||
+ !(ifp->scope&(IFA_LINK|IFA_HOST) || strict))) {
+ rcu_read_unlock_bh();
+ return 1;
}
}
- read_unlock_bh(&addrconf_hash_lock);
- return ifp != NULL;
+
+ rcu_read_unlock_bh();
+ return 0;
}
EXPORT_SYMBOL(ipv6_chk_addr);
-static
-int ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
- struct net_device *dev)
+static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
+ struct net_device *dev)
{
- struct inet6_ifaddr * ifp;
- u8 hash = ipv6_addr_hash(addr);
+ unsigned int hash = ipv6_addr_hash(addr);
+ struct inet6_ifaddr *ifp;
+ struct hlist_node *node;
- for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) {
+ hlist_for_each_entry(ifp, node, &inet6_addr_lst[hash], addr_lst) {
if (!net_eq(dev_net(ifp->idev->dev), net))
continue;
if (ipv6_addr_equal(&ifp->addr, addr)) {
if (dev == NULL || ifp->idev->dev == dev)
- break;
+ return true;
}
}
- return ifp != NULL;
+ return false;
}
int ipv6_chk_prefix(struct in6_addr *addr, struct net_device *dev)
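The address hash is now an hlist protected by RCU for readers, with writers serialized on addrconf_hash_lock (now a spinlock). The reader pattern used throughout the rest of this file, as a standalone sketch (ifp, node, hash and addr declared as in the lookups above):

	rcu_read_lock_bh();
	hlist_for_each_entry_rcu(ifp, node, &inet6_addr_lst[hash], addr_lst) {
		if (ipv6_addr_equal(&ifp->addr, addr)) {
			in6_ifa_hold(ifp);	/* take a ref before leaving RCU */
			break;
		}
	}
	rcu_read_unlock_bh();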
@@ -1333,7 +1334,7 @@ int ipv6_chk_prefix(struct in6_addr *addr, struct net_device *dev)
idev = __in6_dev_get(dev);
if (idev) {
read_lock_bh(&idev->lock);
- for (ifa = idev->addr_list; ifa; ifa = ifa->if_next) {
+ list_for_each_entry(ifa, &idev->addr_list, if_list) {
onlink = ipv6_prefix_equal(addr, &ifa->addr,
ifa->prefix_len);
if (onlink)
@@ -1350,24 +1351,26 @@ EXPORT_SYMBOL(ipv6_chk_prefix);
struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr,
struct net_device *dev, int strict)
{
- struct inet6_ifaddr * ifp;
- u8 hash = ipv6_addr_hash(addr);
+ struct inet6_ifaddr *ifp, *result = NULL;
+ unsigned int hash = ipv6_addr_hash(addr);
+ struct hlist_node *node;
- read_lock_bh(&addrconf_hash_lock);
- for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) {
+ rcu_read_lock_bh();
+ hlist_for_each_entry_rcu_bh(ifp, node, &inet6_addr_lst[hash], addr_lst) {
if (!net_eq(dev_net(ifp->idev->dev), net))
continue;
if (ipv6_addr_equal(&ifp->addr, addr)) {
if (dev == NULL || ifp->idev->dev == dev ||
!(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) {
+ result = ifp;
in6_ifa_hold(ifp);
break;
}
}
}
- read_unlock_bh(&addrconf_hash_lock);
+ rcu_read_unlock_bh();
- return ifp;
+ return result;
}
/* Gets referenced address, destroys ifaddr */
@@ -1403,10 +1406,27 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
ipv6_del_addr(ifp);
}
+static int addrconf_dad_end(struct inet6_ifaddr *ifp)
+{
+ int err = -ENOENT;
+
+ spin_lock(&ifp->state_lock);
+ if (ifp->state == INET6_IFADDR_STATE_DAD) {
+ ifp->state = INET6_IFADDR_STATE_POSTDAD;
+ err = 0;
+ }
+ spin_unlock(&ifp->state_lock);
+
+ return err;
+}
+
void addrconf_dad_failure(struct inet6_ifaddr *ifp)
{
struct inet6_dev *idev = ifp->idev;
+ if (addrconf_dad_end(ifp))
+ return;
+
if (net_ratelimit())
printk(KERN_INFO "%s: IPv6 duplicate address %pI6c detected!\n",
ifp->idev->dev->name, &ifp->addr);
@@ -1570,7 +1590,7 @@ static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
struct inet6_ifaddr *ifp;
read_lock_bh(&idev->lock);
- for (ifp=idev->addr_list; ifp; ifp=ifp->if_next) {
+ list_for_each_entry(ifp, &idev->addr_list, if_list) {
if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) {
memcpy(eui, ifp->addr.s6_addr+8, 8);
err = 0;
@@ -1738,7 +1758,8 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
ASSERT_RTNL();
- if ((idev = ipv6_find_idev(dev)) == NULL)
+ idev = ipv6_find_idev(dev);
+ if (!idev)
return NULL;
/* Add default multicast route */
@@ -1971,7 +1992,7 @@ ok:
#ifdef CONFIG_IPV6_PRIVACY
read_lock_bh(&in6_dev->lock);
/* update all temporary addresses in the list */
- for (ift=in6_dev->tempaddr_list; ift; ift=ift->tmp_next) {
+ list_for_each_entry(ift, &in6_dev->tempaddr_list, tmp_list) {
/*
* When adjusting the lifetimes of an existing
* temporary address, only lower the lifetimes.
@@ -2174,7 +2195,7 @@ static int inet6_addr_del(struct net *net, int ifindex, struct in6_addr *pfx,
return -ENXIO;
read_lock_bh(&idev->lock);
- for (ifp = idev->addr_list; ifp; ifp=ifp->if_next) {
+ list_for_each_entry(ifp, &idev->addr_list, if_list) {
if (ifp->prefix_len == plen &&
ipv6_addr_equal(pfx, &ifp->addr)) {
in6_ifa_hold(ifp);
@@ -2185,7 +2206,7 @@ static int inet6_addr_del(struct net *net, int ifindex, struct in6_addr *pfx,
/* If the last address is deleted administratively,
disable IPv6 on this interface.
*/
- if (idev->addr_list == NULL)
+ if (list_empty(&idev->addr_list))
addrconf_ifdown(idev->dev, 1);
return 0;
}
@@ -2446,7 +2467,8 @@ static void addrconf_ip6_tnl_config(struct net_device *dev)
ASSERT_RTNL();
- if ((idev = addrconf_add_dev(dev)) == NULL) {
+ idev = addrconf_add_dev(dev);
+ if (!idev) {
printk(KERN_DEBUG "init ip6-ip6: add_dev failed\n");
return;
}
@@ -2461,7 +2483,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
int run_pending = 0;
int err;
- switch(event) {
+ switch (event) {
case NETDEV_REGISTER:
if (!idev && dev->mtu >= IPV6_MIN_MTU) {
idev = ipv6_add_dev(dev);
@@ -2469,6 +2491,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
return notifier_from_errno(-ENOMEM);
}
break;
+
case NETDEV_UP:
case NETDEV_CHANGE:
if (dev->flags & IFF_SLAVE)
@@ -2498,10 +2521,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
}
if (idev) {
- if (idev->if_flags & IF_READY) {
+ if (idev->if_flags & IF_READY)
/* device is already configured. */
break;
- }
idev->if_flags |= IF_READY;
}
@@ -2513,7 +2535,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
run_pending = 1;
}
- switch(dev->type) {
+ switch (dev->type) {
#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
case ARPHRD_SIT:
addrconf_sit_config(dev);
@@ -2530,25 +2552,30 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
addrconf_dev_config(dev);
break;
}
+
if (idev) {
if (run_pending)
addrconf_dad_run(idev);
- /* If the MTU changed during the interface down, when the
- interface up, the changed MTU must be reflected in the
- idev as well as routers.
+ /*
+ * If the MTU changed while the interface was down, the
+ * changed MTU must be reflected in the idev as well as
+ * in routers when the interface comes back up.
*/
- if (idev->cnf.mtu6 != dev->mtu && dev->mtu >= IPV6_MIN_MTU) {
+ if (idev->cnf.mtu6 != dev->mtu &&
+ dev->mtu >= IPV6_MIN_MTU) {
rt6_mtu_change(dev, dev->mtu);
idev->cnf.mtu6 = dev->mtu;
}
idev->tstamp = jiffies;
inet6_ifinfo_notify(RTM_NEWLINK, idev);
- /* If the changed mtu during down is lower than IPV6_MIN_MTU
- stop IPv6 on this interface.
+
+ /*
+ * If the MTU changed while the interface was down and is
+ * now lower than IPV6_MIN_MTU, stop IPv6 on this interface.
*/
if (dev->mtu < IPV6_MIN_MTU)
- addrconf_ifdown(dev, event != NETDEV_DOWN);
+ addrconf_ifdown(dev, 1);
}
break;
@@ -2565,7 +2592,10 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
break;
}
- /* MTU falled under IPV6_MIN_MTU. Stop IPv6 on this interface. */
+ /*
+ * MTU fell below IPV6_MIN_MTU.
+ * Stop IPv6 on this interface.
+ */
case NETDEV_DOWN:
case NETDEV_UNREGISTER:
@@ -2585,9 +2615,10 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
return notifier_from_errno(err);
}
break;
- case NETDEV_BONDING_OLDTYPE:
- case NETDEV_BONDING_NEWTYPE:
- addrconf_bonding_change(dev, event);
+
+ case NETDEV_PRE_TYPE_CHANGE:
+ case NETDEV_POST_TYPE_CHANGE:
+ addrconf_type_change(dev, event);
break;
}
@@ -2599,28 +2630,28 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
*/
static struct notifier_block ipv6_dev_notf = {
.notifier_call = addrconf_notify,
- .priority = 0
};
-static void addrconf_bonding_change(struct net_device *dev, unsigned long event)
+static void addrconf_type_change(struct net_device *dev, unsigned long event)
{
struct inet6_dev *idev;
ASSERT_RTNL();
idev = __in6_dev_get(dev);
- if (event == NETDEV_BONDING_NEWTYPE)
+ if (event == NETDEV_POST_TYPE_CHANGE)
ipv6_mc_remap(idev);
- else if (event == NETDEV_BONDING_OLDTYPE)
+ else if (event == NETDEV_PRE_TYPE_CHANGE)
ipv6_mc_unmap(idev);
}
static int addrconf_ifdown(struct net_device *dev, int how)
{
- struct inet6_dev *idev;
- struct inet6_ifaddr *ifa, *keep_list, **bifa;
struct net *net = dev_net(dev);
- int i;
+ struct inet6_dev *idev;
+ struct inet6_ifaddr *ifa;
+ LIST_HEAD(keep_list);
+ int state;
ASSERT_RTNL();
@@ -2631,8 +2662,9 @@ static int addrconf_ifdown(struct net_device *dev, int how)
if (idev == NULL)
return -ENODEV;
- /* Step 1: remove reference to ipv6 device from parent device.
- Do not dev_put!
+ /*
+ * Step 1: remove reference to ipv6 device from parent device.
+ * Do not dev_put!
*/
if (how) {
idev->dead = 1;
@@ -2645,41 +2677,21 @@ static int addrconf_ifdown(struct net_device *dev, int how)
}
- /* Step 2: clear hash table */
- for (i=0; i<IN6_ADDR_HSIZE; i++) {
- bifa = &inet6_addr_lst[i];
-
- write_lock_bh(&addrconf_hash_lock);
- while ((ifa = *bifa) != NULL) {
- if (ifa->idev == idev &&
- (how || !(ifa->flags&IFA_F_PERMANENT) ||
- ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) {
- *bifa = ifa->lst_next;
- ifa->lst_next = NULL;
- __in6_ifa_put(ifa);
- continue;
- }
- bifa = &ifa->lst_next;
- }
- write_unlock_bh(&addrconf_hash_lock);
- }
-
write_lock_bh(&idev->lock);
- /* Step 3: clear flags for stateless addrconf */
+ /* Step 2: clear flags for stateless addrconf */
if (!how)
idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY);
- /* Step 4: clear address list */
#ifdef CONFIG_IPV6_PRIVACY
if (how && del_timer(&idev->regen_timer))
in6_dev_put(idev);
- /* clear tempaddr list */
- while ((ifa = idev->tempaddr_list) != NULL) {
- idev->tempaddr_list = ifa->tmp_next;
- ifa->tmp_next = NULL;
- ifa->dead = 1;
+ /* Step 3: clear tempaddr list */
+ while (!list_empty(&idev->tempaddr_list)) {
+ ifa = list_first_entry(&idev->tempaddr_list,
+ struct inet6_ifaddr, tmp_list);
+ list_del(&ifa->tmp_list);
write_unlock_bh(&idev->lock);
spin_lock_bh(&ifa->lock);
@@ -2692,23 +2704,18 @@ static int addrconf_ifdown(struct net_device *dev, int how)
write_lock_bh(&idev->lock);
}
#endif
- keep_list = NULL;
- bifa = &keep_list;
- while ((ifa = idev->addr_list) != NULL) {
- idev->addr_list = ifa->if_next;
- ifa->if_next = NULL;
+ while (!list_empty(&idev->addr_list)) {
+ ifa = list_first_entry(&idev->addr_list,
+ struct inet6_ifaddr, if_list);
addrconf_del_timer(ifa);
/* If just doing link down and the address is permanent
   and not link-local, then retain it. */
- if (how == 0 &&
+ if (!how &&
(ifa->flags&IFA_F_PERMANENT) &&
!(ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) {
-
- /* Move to holding list */
- *bifa = ifa;
- bifa = &ifa->if_next;
+ list_move_tail(&ifa->if_list, &keep_list);
/* If not doing DAD on this address, just keep it. */
if ((dev->flags&(IFF_NOARP|IFF_LOOPBACK)) ||
@@ -2722,25 +2729,45 @@ static int addrconf_ifdown(struct net_device *dev, int how)
/* Flag it for later restoration when link comes up */
ifa->flags |= IFA_F_TENTATIVE;
+ ifa->state = INET6_IFADDR_STATE_DAD;
+
+ write_unlock_bh(&idev->lock);
+
in6_ifa_hold(ifa);
} else {
- ifa->dead = 1;
+ list_del(&ifa->if_list);
+
+ /* clear hash table */
+ spin_lock_bh(&addrconf_hash_lock);
+ hlist_del_init_rcu(&ifa->addr_lst);
+ spin_unlock_bh(&addrconf_hash_lock);
+
+ write_unlock_bh(&idev->lock);
+ spin_lock_bh(&ifa->state_lock);
+ state = ifa->state;
+ ifa->state = INET6_IFADDR_STATE_DEAD;
+ spin_unlock_bh(&ifa->state_lock);
+
+ if (state == INET6_IFADDR_STATE_DEAD)
+ goto put_ifa;
}
- write_unlock_bh(&idev->lock);
__ipv6_ifa_notify(RTM_DELADDR, ifa);
- atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa);
+ if (ifa->state == INET6_IFADDR_STATE_DEAD)
+ atomic_notifier_call_chain(&inet6addr_chain,
+ NETDEV_DOWN, ifa);
+
+put_ifa:
in6_ifa_put(ifa);
write_lock_bh(&idev->lock);
}
- idev->addr_list = keep_list;
+ list_splice(&keep_list, &idev->addr_list);
write_unlock_bh(&idev->lock);
/* Step 5: Discard multicast list */
-
if (how)
ipv6_mc_destroy_dev(idev);
else
@@ -2748,8 +2775,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
idev->tstamp = jiffies;
- /* Shot the device (if unregistered) */
-
+ /* Last: Shoot the device (if unregistered) */
if (how) {
addrconf_sysctl_unregister(idev);
neigh_parms_release(&nd_tbl, idev->nd_parms);
@@ -2827,10 +2853,10 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags)
net_srandom(ifp->addr.s6_addr32[3]);
read_lock_bh(&idev->lock);
- if (ifp->dead)
+ spin_lock(&ifp->lock);
+ if (ifp->state == INET6_IFADDR_STATE_DEAD)
goto out;
- spin_lock(&ifp->lock);
if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
idev->cnf.accept_dad < 1 ||
!(ifp->flags&IFA_F_TENTATIVE) ||
@@ -2860,12 +2886,12 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags)
* Optimistic nodes can start receiving
* Frames right away
*/
- if(ifp->flags & IFA_F_OPTIMISTIC)
+ if (ifp->flags & IFA_F_OPTIMISTIC)
ip6_ins_rt(ifp->rt);
addrconf_dad_kick(ifp);
- spin_unlock(&ifp->lock);
out:
+ spin_unlock(&ifp->lock);
read_unlock_bh(&idev->lock);
}
@@ -2875,6 +2901,9 @@ static void addrconf_dad_timer(unsigned long data)
struct inet6_dev *idev = ifp->idev;
struct in6_addr mcaddr;
+ if (!ifp->probes && addrconf_dad_end(ifp))
+ goto out;
+
read_lock(&idev->lock);
if (idev->dead || !(idev->if_flags & IF_READY)) {
read_unlock(&idev->lock);
@@ -2882,6 +2911,12 @@ static void addrconf_dad_timer(unsigned long data)
}
spin_lock(&ifp->lock);
+ if (ifp->state == INET6_IFADDR_STATE_DEAD) {
+ spin_unlock(&ifp->lock);
+ read_unlock(&idev->lock);
+ goto out;
+ }
+
if (ifp->probes == 0) {
/*
* DAD was successful
@@ -2910,7 +2945,7 @@ out:
static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
{
- struct net_device * dev = ifp->idev->dev;
+ struct net_device *dev = ifp->idev->dev;
/*
* Configure the address for reception. Now it is valid.
@@ -2941,18 +2976,17 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
}
}
-static void addrconf_dad_run(struct inet6_dev *idev) {
+static void addrconf_dad_run(struct inet6_dev *idev)
+{
struct inet6_ifaddr *ifp;
read_lock_bh(&idev->lock);
- for (ifp = idev->addr_list; ifp; ifp = ifp->if_next) {
+ list_for_each_entry(ifp, &idev->addr_list, if_list) {
spin_lock(&ifp->lock);
- if (!(ifp->flags & IFA_F_TENTATIVE)) {
- spin_unlock(&ifp->lock);
- continue;
- }
+ if (ifp->flags & IFA_F_TENTATIVE &&
+ ifp->state == INET6_IFADDR_STATE_DAD)
+ addrconf_dad_kick(ifp);
spin_unlock(&ifp->lock);
- addrconf_dad_kick(ifp);
}
read_unlock_bh(&idev->lock);
}
@@ -2970,36 +3004,35 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq)
struct net *net = seq_file_net(seq);
for (state->bucket = 0; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
- ifa = inet6_addr_lst[state->bucket];
-
- while (ifa && !net_eq(dev_net(ifa->idev->dev), net))
- ifa = ifa->lst_next;
- if (ifa)
- break;
+ struct hlist_node *n;
+ hlist_for_each_entry_rcu_bh(ifa, n, &inet6_addr_lst[state->bucket],
+ addr_lst)
+ if (net_eq(dev_net(ifa->idev->dev), net))
+ return ifa;
}
- return ifa;
+ return NULL;
}
-static struct inet6_ifaddr *if6_get_next(struct seq_file *seq, struct inet6_ifaddr *ifa)
+static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
+ struct inet6_ifaddr *ifa)
{
struct if6_iter_state *state = seq->private;
struct net *net = seq_file_net(seq);
+ struct hlist_node *n = &ifa->addr_lst;
- ifa = ifa->lst_next;
-try_again:
- if (ifa) {
- if (!net_eq(dev_net(ifa->idev->dev), net)) {
- ifa = ifa->lst_next;
- goto try_again;
- }
- }
+ hlist_for_each_entry_continue_rcu_bh(ifa, n, addr_lst)
+ if (net_eq(dev_net(ifa->idev->dev), net))
+ return ifa;
- if (!ifa && ++state->bucket < IN6_ADDR_HSIZE) {
- ifa = inet6_addr_lst[state->bucket];
- goto try_again;
+ while (++state->bucket < IN6_ADDR_HSIZE) {
+ hlist_for_each_entry_rcu_bh(ifa, n,
+ &inet6_addr_lst[state->bucket], addr_lst) {
+ if (net_eq(dev_net(ifa->idev->dev), net))
+ return ifa;
+ }
}
- return ifa;
+ return NULL;
}
static struct inet6_ifaddr *if6_get_idx(struct seq_file *seq, loff_t pos)
@@ -3007,15 +3040,15 @@ static struct inet6_ifaddr *if6_get_idx(struct seq_file *seq, loff_t pos)
struct inet6_ifaddr *ifa = if6_get_first(seq);
if (ifa)
- while(pos && (ifa = if6_get_next(seq, ifa)) != NULL)
+ while (pos && (ifa = if6_get_next(seq, ifa)) != NULL)
--pos;
return pos ? NULL : ifa;
}
static void *if6_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(addrconf_hash_lock)
+ __acquires(rcu_bh)
{
- read_lock_bh(&addrconf_hash_lock);
+ rcu_read_lock_bh();
return if6_get_idx(seq, *pos);
}
@@ -3029,9 +3062,9 @@ static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
}
static void if6_seq_stop(struct seq_file *seq, void *v)
- __releases(addrconf_hash_lock)
+ __releases(rcu_bh)
{
- read_unlock_bh(&addrconf_hash_lock);
+ rcu_read_unlock_bh();
}
static int if6_seq_show(struct seq_file *seq, void *v)
@@ -3101,10 +3134,12 @@ void if6_proc_exit(void)
int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr)
{
int ret = 0;
- struct inet6_ifaddr * ifp;
- u8 hash = ipv6_addr_hash(addr);
- read_lock_bh(&addrconf_hash_lock);
- for (ifp = inet6_addr_lst[hash]; ifp; ifp = ifp->lst_next) {
+ struct inet6_ifaddr *ifp = NULL;
+ struct hlist_node *n;
+ unsigned int hash = ipv6_addr_hash(addr);
+
+ rcu_read_lock_bh();
+ hlist_for_each_entry_rcu_bh(ifp, n, &inet6_addr_lst[hash], addr_lst) {
if (!net_eq(dev_net(ifp->idev->dev), net))
continue;
if (ipv6_addr_equal(&ifp->addr, addr) &&
@@ -3113,7 +3148,7 @@ int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr)
break;
}
}
- read_unlock_bh(&addrconf_hash_lock);
+ rcu_read_unlock_bh();
return ret;
}
#endif
@@ -3124,43 +3159,35 @@ int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr)
static void addrconf_verify(unsigned long foo)
{
+ unsigned long now, next, next_sec, next_sched;
struct inet6_ifaddr *ifp;
- unsigned long now, next;
+ struct hlist_node *node;
int i;
- spin_lock_bh(&addrconf_verify_lock);
+ rcu_read_lock_bh();
+ spin_lock(&addrconf_verify_lock);
now = jiffies;
- next = now + ADDR_CHECK_FREQUENCY;
+ next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
del_timer(&addr_chk_timer);
- for (i=0; i < IN6_ADDR_HSIZE; i++) {
-
+ for (i = 0; i < IN6_ADDR_HSIZE; i++) {
restart:
- read_lock(&addrconf_hash_lock);
- for (ifp=inet6_addr_lst[i]; ifp; ifp=ifp->lst_next) {
+ hlist_for_each_entry_rcu_bh(ifp, node,
+ &inet6_addr_lst[i], addr_lst) {
unsigned long age;
-#ifdef CONFIG_IPV6_PRIVACY
- unsigned long regen_advance;
-#endif
if (ifp->flags & IFA_F_PERMANENT)
continue;
spin_lock(&ifp->lock);
- age = (now - ifp->tstamp) / HZ;
-
-#ifdef CONFIG_IPV6_PRIVACY
- regen_advance = ifp->idev->cnf.regen_max_retry *
- ifp->idev->cnf.dad_transmits *
- ifp->idev->nd_parms->retrans_time / HZ;
-#endif
+ /* We try to batch several events at once. */
+ age = (now - ifp->tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
if (ifp->valid_lft != INFINITY_LIFE_TIME &&
age >= ifp->valid_lft) {
spin_unlock(&ifp->lock);
in6_ifa_hold(ifp);
- read_unlock(&addrconf_hash_lock);
ipv6_del_addr(ifp);
goto restart;
} else if (ifp->prefered_lft == INFINITY_LIFE_TIME) {
@@ -3182,7 +3209,6 @@ restart:
if (deprecate) {
in6_ifa_hold(ifp);
- read_unlock(&addrconf_hash_lock);
ipv6_ifa_notify(0, ifp);
in6_ifa_put(ifp);
@@ -3191,6 +3217,10 @@ restart:
#ifdef CONFIG_IPV6_PRIVACY
} else if ((ifp->flags&IFA_F_TEMPORARY) &&
!(ifp->flags&IFA_F_TENTATIVE)) {
+ unsigned long regen_advance = ifp->idev->cnf.regen_max_retry *
+ ifp->idev->cnf.dad_transmits *
+ ifp->idev->nd_parms->retrans_time / HZ;
+
if (age >= ifp->prefered_lft - regen_advance) {
struct inet6_ifaddr *ifpub = ifp->ifpub;
if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
@@ -3200,7 +3230,7 @@ restart:
in6_ifa_hold(ifp);
in6_ifa_hold(ifpub);
spin_unlock(&ifp->lock);
- read_unlock(&addrconf_hash_lock);
+
spin_lock(&ifpub->lock);
ifpub->regen_count = 0;
spin_unlock(&ifpub->lock);
@@ -3220,12 +3250,26 @@ restart:
spin_unlock(&ifp->lock);
}
}
- read_unlock(&addrconf_hash_lock);
}
- addr_chk_timer.expires = time_before(next, jiffies + HZ) ? jiffies + HZ : next;
+ next_sec = round_jiffies_up(next);
+ next_sched = next;
+
+ /* If rounded timeout is accurate enough, accept it. */
+ if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
+ next_sched = next_sec;
+
+ /* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
+ if (time_before(next_sched, jiffies + ADDRCONF_TIMER_FUZZ_MAX))
+ next_sched = jiffies + ADDRCONF_TIMER_FUZZ_MAX;
+
+ ADBG((KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
+ now, next, next_sec, next_sched));
+
+ addr_chk_timer.expires = next_sched;
add_timer(&addr_chk_timer);
- spin_unlock_bh(&addrconf_verify_lock);
+ spin_unlock(&addrconf_verify_lock);
+ rcu_read_unlock_bh();
}
static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local)
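addrconf_verify() now batches wakeups: ages are padded by ADDRCONF_TIMER_FUZZ_MINUS so nearby expirations coalesce, and the rearm time prefers a second-aligned boundary when that costs less than ADDRCONF_TIMER_FUZZ. The scheduling policy above, extracted as a sketch:

	static unsigned long addrconf_next_sched_sketch(unsigned long now,
							unsigned long next)
	{
		unsigned long next_sec = round_jiffies_up(next);
		unsigned long sched = next;

		/* prefer a second-aligned expiry if it is accurate enough */
		if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
			sched = next_sec;

		/* but never rearm sooner than ADDRCONF_TIMER_FUZZ_MAX from now */
		if (time_before(sched, now + ADDRCONF_TIMER_FUZZ_MAX))
			sched = now + ADDRCONF_TIMER_FUZZ_MAX;

		return sched;
	}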
@@ -3515,8 +3559,7 @@ static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
return nlmsg_end(skb, nlh);
}
-enum addr_type_t
-{
+enum addr_type_t {
UNICAST_ADDR,
MULTICAST_ADDR,
ANYCAST_ADDR,
@@ -3527,7 +3570,6 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
struct netlink_callback *cb, enum addr_type_t type,
int s_ip_idx, int *p_ip_idx)
{
- struct inet6_ifaddr *ifa;
struct ifmcaddr6 *ifmca;
struct ifacaddr6 *ifaca;
int err = 1;
@@ -3535,11 +3577,12 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
read_lock_bh(&idev->lock);
switch (type) {
- case UNICAST_ADDR:
+ case UNICAST_ADDR: {
+ struct inet6_ifaddr *ifa;
+
/* unicast address incl. temp addr */
- for (ifa = idev->addr_list; ifa;
- ifa = ifa->if_next, ip_idx++) {
- if (ip_idx < s_ip_idx)
+ list_for_each_entry(ifa, &idev->addr_list, if_list) {
+ if (++ip_idx < s_ip_idx)
continue;
err = inet6_fill_ifaddr(skb, ifa,
NETLINK_CB(cb->skb).pid,
@@ -3550,6 +3593,7 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
break;
}
break;
+ }
case MULTICAST_ADDR:
/* multicast address */
for (ifmca = idev->mc_list; ifmca;
@@ -3614,7 +3658,8 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
if (h > s_h || idx > s_idx)
s_ip_idx = 0;
ip_idx = 0;
- if ((idev = __in6_dev_get(dev)) == NULL)
+ idev = __in6_dev_get(dev);
+ if (!idev)
goto cont;
if (in6_dump_addrs(idev, skb, cb, type,
@@ -3681,12 +3726,14 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr* nlh,
if (ifm->ifa_index)
dev = __dev_get_by_index(net, ifm->ifa_index);
- if ((ifa = ipv6_get_ifaddr(net, addr, dev, 1)) == NULL) {
+ ifa = ipv6_get_ifaddr(net, addr, dev, 1);
+ if (!ifa) {
err = -EADDRNOTAVAIL;
goto errout;
}
- if ((skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL)) == NULL) {
+ skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL);
+ if (!skb) {
err = -ENOBUFS;
goto errout_ifa;
}
@@ -3811,7 +3858,7 @@ static inline void __snmp6_fill_stats(u64 *stats, void __percpu **mib,
static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
int bytes)
{
- switch(attrtype) {
+ switch (attrtype) {
case IFLA_INET6_STATS:
__snmp6_fill_stats(stats, (void __percpu **)idev->stats.ipv6, IPSTATS_MIB_MAX, bytes);
break;
@@ -4047,7 +4094,9 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
addrconf_leave_anycast(ifp);
addrconf_leave_solict(ifp->idev, &ifp->addr);
dst_hold(&ifp->rt->u.dst);
- if (ip6_del_rt(ifp->rt))
+
+ if (ifp->state == INET6_IFADDR_STATE_DEAD &&
+ ip6_del_rt(ifp->rt))
dst_free(&ifp->rt->u.dst);
break;
}
@@ -4163,211 +4212,211 @@ static struct addrconf_sysctl_table
.sysctl_header = NULL,
.addrconf_vars = {
{
- .procname = "forwarding",
- .data = &ipv6_devconf.forwarding,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = addrconf_sysctl_forward,
+ .procname = "forwarding",
+ .data = &ipv6_devconf.forwarding,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = addrconf_sysctl_forward,
},
{
- .procname = "hop_limit",
- .data = &ipv6_devconf.hop_limit,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "hop_limit",
+ .data = &ipv6_devconf.hop_limit,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
{
- .procname = "mtu",
- .data = &ipv6_devconf.mtu6,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "mtu",
+ .data = &ipv6_devconf.mtu6,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
{
- .procname = "accept_ra",
- .data = &ipv6_devconf.accept_ra,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "accept_ra",
+ .data = &ipv6_devconf.accept_ra,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
{
- .procname = "accept_redirects",
- .data = &ipv6_devconf.accept_redirects,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "accept_redirects",
+ .data = &ipv6_devconf.accept_redirects,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
{
- .procname = "autoconf",
- .data = &ipv6_devconf.autoconf,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "autoconf",
+ .data = &ipv6_devconf.autoconf,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
{
- .procname = "dad_transmits",
- .data = &ipv6_devconf.dad_transmits,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "dad_transmits",
+ .data = &ipv6_devconf.dad_transmits,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
{
- .procname = "router_solicitations",
- .data = &ipv6_devconf.rtr_solicits,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "router_solicitations",
+ .data = &ipv6_devconf.rtr_solicits,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
{
- .procname = "router_solicitation_interval",
- .data = &ipv6_devconf.rtr_solicit_interval,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
+ .procname = "router_solicitation_interval",
+ .data = &ipv6_devconf.rtr_solicit_interval,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
},
{
- .procname = "router_solicitation_delay",
- .data = &ipv6_devconf.rtr_solicit_delay,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
+ .procname = "router_solicitation_delay",
+ .data = &ipv6_devconf.rtr_solicit_delay,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
},
{
- .procname = "force_mld_version",
- .data = &ipv6_devconf.force_mld_version,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "force_mld_version",
+ .data = &ipv6_devconf.force_mld_version,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
#ifdef CONFIG_IPV6_PRIVACY
{
- .procname = "use_tempaddr",
- .data = &ipv6_devconf.use_tempaddr,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "use_tempaddr",
+ .data = &ipv6_devconf.use_tempaddr,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
{
- .procname = "temp_valid_lft",
- .data = &ipv6_devconf.temp_valid_lft,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "temp_valid_lft",
+ .data = &ipv6_devconf.temp_valid_lft,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
{
- .procname = "temp_prefered_lft",
- .data = &ipv6_devconf.temp_prefered_lft,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "temp_prefered_lft",
+ .data = &ipv6_devconf.temp_prefered_lft,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
{
- .procname = "regen_max_retry",
- .data = &ipv6_devconf.regen_max_retry,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "regen_max_retry",
+ .data = &ipv6_devconf.regen_max_retry,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
{
- .procname = "max_desync_factor",
- .data = &ipv6_devconf.max_desync_factor,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "max_desync_factor",
+ .data = &ipv6_devconf.max_desync_factor,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
#endif
{
- .procname = "max_addresses",
- .data = &ipv6_devconf.max_addresses,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "max_addresses",
+ .data = &ipv6_devconf.max_addresses,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
{
- .procname = "accept_ra_defrtr",
- .data = &ipv6_devconf.accept_ra_defrtr,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "accept_ra_defrtr",
+ .data = &ipv6_devconf.accept_ra_defrtr,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
{
- .procname = "accept_ra_pinfo",
- .data = &ipv6_devconf.accept_ra_pinfo,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "accept_ra_pinfo",
+ .data = &ipv6_devconf.accept_ra_pinfo,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
#ifdef CONFIG_IPV6_ROUTER_PREF
{
- .procname = "accept_ra_rtr_pref",
- .data = &ipv6_devconf.accept_ra_rtr_pref,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "accept_ra_rtr_pref",
+ .data = &ipv6_devconf.accept_ra_rtr_pref,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
{
- .procname = "router_probe_interval",
- .data = &ipv6_devconf.rtr_probe_interval,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
+ .procname = "router_probe_interval",
+ .data = &ipv6_devconf.rtr_probe_interval,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
},
#ifdef CONFIG_IPV6_ROUTE_INFO
{
- .procname = "accept_ra_rt_info_max_plen",
- .data = &ipv6_devconf.accept_ra_rt_info_max_plen,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "accept_ra_rt_info_max_plen",
+ .data = &ipv6_devconf.accept_ra_rt_info_max_plen,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
#endif
#endif
{
- .procname = "proxy_ndp",
- .data = &ipv6_devconf.proxy_ndp,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "proxy_ndp",
+ .data = &ipv6_devconf.proxy_ndp,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
{
- .procname = "accept_source_route",
- .data = &ipv6_devconf.accept_source_route,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "accept_source_route",
+ .data = &ipv6_devconf.accept_source_route,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
{
- .procname = "optimistic_dad",
- .data = &ipv6_devconf.optimistic_dad,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "optimistic_dad",
+ .data = &ipv6_devconf.optimistic_dad,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
#endif
#ifdef CONFIG_IPV6_MROUTE
{
- .procname = "mc_forwarding",
- .data = &ipv6_devconf.mc_forwarding,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = proc_dointvec,
+ .procname = "mc_forwarding",
+ .data = &ipv6_devconf.mc_forwarding,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = proc_dointvec,
},
#endif
{
- .procname = "disable_ipv6",
- .data = &ipv6_devconf.disable_ipv6,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = addrconf_sysctl_disable,
+ .procname = "disable_ipv6",
+ .data = &ipv6_devconf.disable_ipv6,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = addrconf_sysctl_disable,
},
{
- .procname = "accept_dad",
- .data = &ipv6_devconf.accept_dad,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "accept_dad",
+ .data = &ipv6_devconf.accept_dad,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
{
.procname = "force_tllao",
@@ -4403,8 +4452,8 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name,
if (t == NULL)
goto out;
- for (i=0; t->addrconf_vars[i].data; i++) {
- t->addrconf_vars[i].data += (char*)p - (char*)&ipv6_devconf;
+ for (i = 0; t->addrconf_vars[i].data; i++) {
+ t->addrconf_vars[i].data += (char *)p - (char *)&ipv6_devconf;
t->addrconf_vars[i].extra1 = idev; /* embedded; no ref */
t->addrconf_vars[i].extra2 = net;
}
@@ -4541,14 +4590,12 @@ int register_inet6addr_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_register(&inet6addr_chain, nb);
}
-
EXPORT_SYMBOL(register_inet6addr_notifier);
int unregister_inet6addr_notifier(struct notifier_block *nb)
{
- return atomic_notifier_chain_unregister(&inet6addr_chain,nb);
+ return atomic_notifier_chain_unregister(&inet6addr_chain, nb);
}
-
EXPORT_SYMBOL(unregister_inet6addr_notifier);
/*
@@ -4557,11 +4604,12 @@ EXPORT_SYMBOL(unregister_inet6addr_notifier);
int __init addrconf_init(void)
{
- int err;
+ int i, err;
- if ((err = ipv6_addr_label_init()) < 0) {
- printk(KERN_CRIT "IPv6 Addrconf: cannot initialize default policy table: %d.\n",
- err);
+ err = ipv6_addr_label_init();
+ if (err < 0) {
+ printk(KERN_CRIT "IPv6 Addrconf:"
+ " cannot initialize default policy table: %d.\n", err);
return err;
}
@@ -4592,6 +4640,9 @@ int __init addrconf_init(void)
if (err)
goto errlo;
+ for (i = 0; i < IN6_ADDR_HSIZE; i++)
+ INIT_HLIST_HEAD(&inet6_addr_lst[i]);
+
register_netdevice_notifier(&ipv6_dev_notf);
addrconf_verify(0);
@@ -4620,7 +4671,6 @@ errlo:
void addrconf_cleanup(void)
{
- struct inet6_ifaddr *ifa;
struct net_device *dev;
int i;
@@ -4640,20 +4690,10 @@ void addrconf_cleanup(void)
/*
* Check hash table.
*/
- write_lock_bh(&addrconf_hash_lock);
- for (i=0; i < IN6_ADDR_HSIZE; i++) {
- for (ifa=inet6_addr_lst[i]; ifa; ) {
- struct inet6_ifaddr *bifa;
-
- bifa = ifa;
- ifa = ifa->lst_next;
- printk(KERN_DEBUG "bug: IPv6 address leakage detected: ifa=%p\n", bifa);
- /* Do not free it; something is wrong.
- Now we can investigate it with debugger.
- */
- }
- }
- write_unlock_bh(&addrconf_hash_lock);
+ spin_lock_bh(&addrconf_hash_lock);
+ for (i = 0; i < IN6_ADDR_HSIZE; i++)
+ WARN_ON(!hlist_empty(&inet6_addr_lst[i]));
+ spin_unlock_bh(&addrconf_hash_lock);
del_timer(&addr_chk_timer);
rtnl_unlock();
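
The addrconf cleanup above reflects converting the global address hash from an open-coded singly linked list (lst_next) to spinlock-guarded hlist buckets: instead of walking and printing leaked entries, shutdown now simply asserts every bucket is empty. A kernel-style sketch of the add/unlink sides such a conversion implies — the real call sites are outside this hunk, and the addr_lst field name in struct inet6_ifaddr is an assumption:

    static void ipv6_hash_link(struct inet6_ifaddr *ifa, unsigned int hash)
    {
    	/* every insertion goes through the same spinlock... */
    	spin_lock_bh(&addrconf_hash_lock);
    	hlist_add_head(&ifa->addr_lst, &inet6_addr_lst[hash]);
    	spin_unlock_bh(&addrconf_hash_lock);
    }

    static void ipv6_hash_unlink(struct inet6_ifaddr *ifa)
    {
    	/* ...so an empty-bucket check at cleanup is a valid leak test */
    	spin_lock_bh(&addrconf_hash_lock);
    	hlist_del_init(&ifa->addr_lst);
    	spin_unlock_bh(&addrconf_hash_lock);
    }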
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index ae404c9a746c..8c4348cb1950 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -422,10 +422,6 @@ static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh,
ifal->ifal_prefixlen > 128)
return -EINVAL;
- if (ifal->ifal_index &&
- !__dev_get_by_index(net, ifal->ifal_index))
- return -EINVAL;
-
if (!tb[IFAL_ADDRESS])
return -EINVAL;
@@ -441,6 +437,10 @@ static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh,
switch(nlh->nlmsg_type) {
case RTM_NEWADDRLABEL:
+ if (ifal->ifal_index &&
+ !__dev_get_by_index(net, ifal->ifal_index))
+ return -EINVAL;
+
err = ip6addrlbl_add(net, pfx, ifal->ifal_prefixlen,
ifal->ifal_index, label,
nlh->nlmsg_flags & NLM_F_REPLACE);
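
The reordering above narrows the ifal_index existence check to RTM_NEWADDRLABEL, so a label that references a since-removed interface can still be deleted. A toy standalone model of the new validation order (helper names are made up):

    #include <stdbool.h>
    #include <stdio.h>

    enum op { ADD_LABEL, DEL_LABEL };

    static bool dev_exists(int ifindex) { return ifindex == 2; }  /* toy lookup */

    static int addrlabel_newdel(enum op op, int ifindex)
    {
    	/* the existence check now gates only the add path */
    	if (op == ADD_LABEL && ifindex && !dev_exists(ifindex))
    		return -1;  /* -EINVAL */
    	printf("%s label, ifindex %d\n",
    	       op == ADD_LABEL ? "add" : "del", ifindex);
    	return 0;
    }

    int main(void)
    {
    	addrlabel_newdel(ADD_LABEL, 3);  /* rejected: device 3 is gone */
    	addrlabel_newdel(DEL_LABEL, 3);  /* allowed: stale label removed */
    	return 0;
    }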
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 3192aa02ba5d..e733942dafe1 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -200,7 +200,7 @@ lookup_protocol:
inet_sk(sk)->pinet6 = np = inet6_sk_generic(sk);
np->hop_limit = -1;
- np->mcast_hops = -1;
+ np->mcast_hops = IPV6_DEFAULT_MCASTHOPS;
np->mc_loop = 1;
np->pmtudisc = IPV6_PMTUDISC_WANT;
np->ipv6only = net->ipv6.sysctl.bindv6only;
@@ -417,6 +417,9 @@ void inet6_destroy_sock(struct sock *sk)
if ((skb = xchg(&np->pktoptions, NULL)) != NULL)
kfree_skb(skb);
+ if ((skb = xchg(&np->rxpmtu, NULL)) != NULL)
+ kfree_skb(skb);
+
/* Free flowlabels */
fl6_free_socklist(sk);
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 622dc7939a1b..712684687c9a 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -222,6 +222,8 @@ void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
if (!skb)
return;
+ skb->protocol = htons(ETH_P_IPV6);
+
serr = SKB_EXT_ERR(skb);
serr->ee.ee_errno = err;
serr->ee.ee_origin = SO_EE_ORIGIN_ICMP6;
@@ -255,6 +257,8 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi *fl, u32 info)
if (!skb)
return;
+ skb->protocol = htons(ETH_P_IPV6);
+
skb_put(skb, sizeof(struct ipv6hdr));
skb_reset_network_header(skb);
iph = ipv6_hdr(skb);
@@ -278,6 +282,45 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi *fl, u32 info)
kfree_skb(skb);
}
+void ipv6_local_rxpmtu(struct sock *sk, struct flowi *fl, u32 mtu)
+{
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct ipv6hdr *iph;
+ struct sk_buff *skb;
+ struct ip6_mtuinfo *mtu_info;
+
+ if (!np->rxopt.bits.rxpmtu)
+ return;
+
+ skb = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ skb_put(skb, sizeof(struct ipv6hdr));
+ skb_reset_network_header(skb);
+ iph = ipv6_hdr(skb);
+ ipv6_addr_copy(&iph->daddr, &fl->fl6_dst);
+
+ mtu_info = IP6CBMTU(skb);
+ if (!mtu_info) {
+ kfree_skb(skb);
+ return;
+ }
+
+ mtu_info->ip6m_mtu = mtu;
+ mtu_info->ip6m_addr.sin6_family = AF_INET6;
+ mtu_info->ip6m_addr.sin6_port = 0;
+ mtu_info->ip6m_addr.sin6_flowinfo = 0;
+ mtu_info->ip6m_addr.sin6_scope_id = fl->oif;
+ ipv6_addr_copy(&mtu_info->ip6m_addr.sin6_addr, &ipv6_hdr(skb)->daddr);
+
+ __skb_pull(skb, skb_tail_pointer(skb) - skb->data);
+ skb_reset_transport_header(skb);
+
+ skb = xchg(&np->rxpmtu, skb);
+ kfree_skb(skb);
+}
+
/*
* Handle MSG_ERRQUEUE
*/
@@ -319,7 +362,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
sin->sin6_flowinfo = 0;
sin->sin6_port = serr->port;
sin->sin6_scope_id = 0;
- if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6) {
+ if (skb->protocol == htons(ETH_P_IPV6)) {
ipv6_addr_copy(&sin->sin6_addr,
(struct in6_addr *)(nh + serr->addr_offset));
if (np->sndflow)
@@ -341,7 +384,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
sin->sin6_family = AF_INET6;
sin->sin6_flowinfo = 0;
sin->sin6_scope_id = 0;
- if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6) {
+ if (skb->protocol == htons(ETH_P_IPV6)) {
ipv6_addr_copy(&sin->sin6_addr, &ipv6_hdr(skb)->saddr);
if (np->rxopt.all)
datagram_recv_ctl(sk, msg, skb);
@@ -381,6 +424,54 @@ out:
return err;
}
+/*
+ * Handle IPV6_RECVPATHMTU
+ */
+int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len)
+{
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct sk_buff *skb;
+ struct sockaddr_in6 *sin;
+ struct ip6_mtuinfo mtu_info;
+ int err;
+ int copied;
+
+ err = -EAGAIN;
+ skb = xchg(&np->rxpmtu, NULL);
+ if (skb == NULL)
+ goto out;
+
+ copied = skb->len;
+ if (copied > len) {
+ msg->msg_flags |= MSG_TRUNC;
+ copied = len;
+ }
+ err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ if (err)
+ goto out_free_skb;
+
+ sock_recv_timestamp(msg, sk, skb);
+
+ memcpy(&mtu_info, IP6CBMTU(skb), sizeof(mtu_info));
+
+ sin = (struct sockaddr_in6 *)msg->msg_name;
+ if (sin) {
+ sin->sin6_family = AF_INET6;
+ sin->sin6_flowinfo = 0;
+ sin->sin6_port = 0;
+ sin->sin6_scope_id = mtu_info.ip6m_addr.sin6_scope_id;
+ ipv6_addr_copy(&sin->sin6_addr, &mtu_info.ip6m_addr.sin6_addr);
+ }
+
+ put_cmsg(msg, SOL_IPV6, IPV6_PATHMTU, sizeof(mtu_info), &mtu_info);
+
+ err = copied;
+
+out_free_skb:
+ kfree_skb(skb);
+out:
+ return err;
+}
int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
@@ -497,7 +588,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
int datagram_send_ctl(struct net *net,
struct msghdr *msg, struct flowi *fl,
struct ipv6_txoptions *opt,
- int *hlimit, int *tclass)
+ int *hlimit, int *tclass, int *dontfrag)
{
struct in6_pktinfo *src_info;
struct cmsghdr *cmsg;
@@ -737,6 +828,25 @@ int datagram_send_ctl(struct net *net,
break;
}
+
+ case IPV6_DONTFRAG:
+ {
+ int df;
+
+ err = -EINVAL;
+ if (cmsg->cmsg_len != CMSG_LEN(sizeof(int))) {
+ goto exit_f;
+ }
+
+ df = *(int *)CMSG_DATA(cmsg);
+ if (df < 0 || df > 1)
+ goto exit_f;
+
+ err = 0;
+ *dontfrag = df;
+
+ break;
+ }
default:
LIMIT_NETDEBUG(KERN_DEBUG "invalid cmsg type: %d\n",
cmsg->cmsg_type);
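
The new ipv6_local_rxpmtu()/ipv6_recv_rxpmtu() pair above implements a single-slot "latest value wins" mailbox: the writer xchg()s the fresh skb into np->rxpmtu and frees whatever was there, while the reader xchg()s NULL in, so no extra lock is needed and only the newest path-MTU report is retained. A standalone C11 model of the same pattern (names are illustrative, not kernel API):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct msg { unsigned mtu; };

    static _Atomic(struct msg *) slot;	/* models np->rxpmtu */

    static void post(unsigned mtu)		/* models ipv6_local_rxpmtu() */
    {
    	struct msg *m = malloc(sizeof(*m)), *old;
    	m->mtu = mtu;
    	old = atomic_exchange(&slot, m);	/* swap in newest report */
    	free(old);				/* models kfree_skb(skb) */
    }

    static struct msg *take(void)		/* models ipv6_recv_rxpmtu() */
    {
    	return atomic_exchange(&slot, NULL);
    }

    int main(void)
    {
    	post(1500);
    	post(1280);			/* newer report overwrites older */
    	struct msg *m = take();
    	printf("mtu=%u\n", m ? m->mtu : 0);	/* prints 1280 */
    	free(m);
    	return 0;
    }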
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 5e463c43fcc2..8e44f8f9c188 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -208,7 +208,6 @@ static int fib6_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
{
struct fib6_rule *rule6 = (struct fib6_rule *) rule;
- frh->family = AF_INET6;
frh->dst_len = rule6->dst.plen;
frh->src_len = rule6->src.plen;
frh->tos = rule6->tclass;
@@ -238,7 +237,7 @@ static size_t fib6_rule_nlmsg_payload(struct fib_rule *rule)
+ nla_total_size(16); /* src */
}
-static struct fib_rules_ops fib6_rules_ops_template = {
+static const struct fib_rules_ops __net_initdata fib6_rules_ops_template = {
.family = AF_INET6,
.rule_size = sizeof(struct fib6_rule),
.addr_size = sizeof(struct in6_addr),
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 3330a4bd6157..ce7992982557 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -481,8 +481,9 @@ route_done:
len + sizeof(struct icmp6hdr),
sizeof(struct icmp6hdr), hlimit,
np->tclass, NULL, &fl, (struct rt6_info*)dst,
- MSG_DONTWAIT);
+ MSG_DONTWAIT, np->dontfrag);
if (err) {
+ ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
ip6_flush_pending_frames(sk);
goto out_put;
}
@@ -560,9 +561,11 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
sizeof(struct icmp6hdr), hlimit, np->tclass, NULL, &fl,
- (struct rt6_info*)dst, MSG_DONTWAIT);
+ (struct rt6_info*)dst, MSG_DONTWAIT,
+ np->dontfrag);
if (err) {
+ ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
ip6_flush_pending_frames(sk);
goto out_put;
}
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 628db24bcf22..0c5e3c3b7fd5 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -178,7 +178,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
return dst;
}
-int inet6_csk_xmit(struct sk_buff *skb, int ipfragok)
+int inet6_csk_xmit(struct sk_buff *skb)
{
struct sock *sk = skb->sk;
struct inet_sock *inet = inet_sk(sk);
@@ -234,7 +234,7 @@ int inet6_csk_xmit(struct sk_buff *skb, int ipfragok)
/* Restore final destination back after routing done */
ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
- return ip6_xmit(sk, skb, &fl, np->opt, 0);
+ return ip6_xmit(sk, skb, &fl, np->opt);
}
EXPORT_SYMBOL_GPL(inet6_csk_xmit);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 6b82e02158c6..92a122b7795d 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -128,12 +128,24 @@ static __inline__ u32 fib6_new_sernum(void)
/*
* test bit
*/
+#if defined(__LITTLE_ENDIAN)
+# define BITOP_BE32_SWIZZLE (0x1F & ~7)
+#else
+# define BITOP_BE32_SWIZZLE 0
+#endif
static __inline__ __be32 addr_bit_set(void *token, int fn_bit)
{
__be32 *addr = token;
-
- return htonl(1 << ((~fn_bit)&0x1F)) & addr[fn_bit>>5];
+ /*
+ * Here,
+ * 1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f)
+ * is an optimized version of
+ * htonl(1 << ((~fn_bit)&0x1F))
+ * See include/asm-generic/bitops/le.h.
+ */
+ return (__force __be32)(1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f)) &
+ addr[fn_bit >> 5];
}
static __inline__ struct fib6_node * node_alloc(void)
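
The rewritten addr_bit_set() above avoids a byte swap on little-endian hosts: XORing the bit index with BITOP_BE32_SWIZZLE (0x1F & ~7 = 0x18) relocates the bit to where htonl() would have moved the byte containing it; on big-endian the swizzle is 0 and the identity is trivial. A standalone check of that identity on a little-endian host:

    #include <arpa/inet.h>
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BITOP_BE32_SWIZZLE (0x1F & ~7)	/* 0x18 on little endian */

    int main(void)
    {
    	for (int bit = 0; bit < 128; bit++) {
    		uint32_t slow = htonl(1u << (~bit & 0x1F));
    		uint32_t fast = 1u << ((~bit ^ BITOP_BE32_SWIZZLE) & 0x1F);
    		assert(slow == fast);	/* both select the same prefix bit */
    	}
    	puts("swizzle identity holds for all 128 address bits");
    	return 0;
    }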
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 14e23216eb28..13654686aeab 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -360,7 +360,8 @@ fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval,
msg.msg_control = (void*)(fl->opt+1);
flowi.oif = 0;
- err = datagram_send_ctl(net, &msg, &flowi, fl->opt, &junk, &junk);
+ err = datagram_send_ctl(net, &msg, &flowi, fl->opt, &junk,
+ &junk, &junk);
if (err)
goto done;
err = -EINVAL;
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 6aa7ee1295c2..a83e9209cecc 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -143,7 +143,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
/* Must drop socket now because of tproxy. */
skb_orphan(skb);
- return NF_HOOK(PF_INET6, NF_INET_PRE_ROUTING, skb, dev, NULL,
+ return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, dev, NULL,
ip6_rcv_finish);
err:
IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS);
@@ -236,7 +236,7 @@ discard:
int ip6_input(struct sk_buff *skb)
{
- return NF_HOOK(PF_INET6, NF_INET_LOCAL_IN, skb, skb->dev, NULL,
+ return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_IN, skb, skb->dev, NULL,
ip6_input_finish);
}
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 75d5ef830097..cd963f64e27c 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -67,8 +67,8 @@ int __ip6_local_out(struct sk_buff *skb)
len = 0;
ipv6_hdr(skb)->payload_len = htons(len);
- return nf_hook(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb_dst(skb)->dev,
- dst_output);
+ return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
+ skb_dst(skb)->dev, dst_output);
}
int ip6_local_out(struct sk_buff *skb)
@@ -83,22 +83,6 @@ int ip6_local_out(struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(ip6_local_out);
-static int ip6_output_finish(struct sk_buff *skb)
-{
- struct dst_entry *dst = skb_dst(skb);
-
- if (dst->hh)
- return neigh_hh_output(dst->hh, skb);
- else if (dst->neighbour)
- return dst->neighbour->output(skb);
-
- IP6_INC_STATS_BH(dev_net(dst->dev),
- ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
- kfree_skb(skb);
- return -EINVAL;
-
-}
-
/* dev_loopback_xmit for use with netfilter. */
static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
{
@@ -112,8 +96,7 @@ static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
return 0;
}
-
-static int ip6_output2(struct sk_buff *skb)
+static int ip6_finish_output2(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct net_device *dev = dst->dev;
@@ -125,7 +108,7 @@ static int ip6_output2(struct sk_buff *skb)
struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) &&
- ((mroute6_socket(dev_net(dev)) &&
+ ((mroute6_socket(dev_net(dev), skb) &&
!(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
&ipv6_hdr(skb)->saddr))) {
@@ -135,8 +118,8 @@ static int ip6_output2(struct sk_buff *skb)
is not supported in any case.
*/
if (newskb)
- NF_HOOK(PF_INET6, NF_INET_POST_ROUTING, newskb,
- NULL, newskb->dev,
+ NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
+ newskb, NULL, newskb->dev,
ip6_dev_loopback_xmit);
if (ipv6_hdr(skb)->hop_limit == 0) {
@@ -151,8 +134,15 @@ static int ip6_output2(struct sk_buff *skb)
skb->len);
}
- return NF_HOOK(PF_INET6, NF_INET_POST_ROUTING, skb, NULL, skb->dev,
- ip6_output_finish);
+ if (dst->hh)
+ return neigh_hh_output(dst->hh, skb);
+ else if (dst->neighbour)
+ return dst->neighbour->output(skb);
+
+ IP6_INC_STATS_BH(dev_net(dst->dev),
+ ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
+ kfree_skb(skb);
+ return -EINVAL;
}
static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
@@ -163,29 +153,37 @@ static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
}
+static int ip6_finish_output(struct sk_buff *skb)
+{
+ if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
+ dst_allfrag(skb_dst(skb)))
+ return ip6_fragment(skb, ip6_finish_output2);
+ else
+ return ip6_finish_output2(skb);
+}
+
int ip6_output(struct sk_buff *skb)
{
+ struct net_device *dev = skb_dst(skb)->dev;
struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
if (unlikely(idev->cnf.disable_ipv6)) {
- IP6_INC_STATS(dev_net(skb_dst(skb)->dev), idev,
+ IP6_INC_STATS(dev_net(dev), idev,
IPSTATS_MIB_OUTDISCARDS);
kfree_skb(skb);
return 0;
}
- if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
- dst_allfrag(skb_dst(skb)))
- return ip6_fragment(skb, ip6_output2);
- else
- return ip6_output2(skb);
+ return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, dev,
+ ip6_finish_output,
+ !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
/*
- * xmit an sk_buff (used by TCP)
+ * xmit an sk_buff (used by TCP, SCTP and DCCP)
*/
int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
- struct ipv6_txoptions *opt, int ipfragok)
+ struct ipv6_txoptions *opt)
{
struct net *net = sock_net(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
@@ -218,8 +216,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
}
kfree_skb(skb);
skb = skb2;
- if (sk)
- skb_set_owner_w(skb, sk);
+ skb_set_owner_w(skb, sk);
}
if (opt->opt_flen)
ipv6_push_frag_opts(skb, opt, &proto);
@@ -231,10 +228,6 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
skb_reset_network_header(skb);
hdr = ipv6_hdr(skb);
- /* Allow local fragmentation. */
- if (ipfragok)
- skb->local_df = 1;
-
/*
* Fill in the IPv6 header
*/
@@ -261,8 +254,8 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) {
IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_OUT, skb->len);
- return NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev,
- dst_output);
+ return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
+ dst->dev, dst_output);
}
if (net_ratelimit())
@@ -538,7 +531,7 @@ int ip6_forward(struct sk_buff *skb)
hdr->hop_limit--;
IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
- return NF_HOOK(PF_INET6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
+ return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
ip6_forward_finish);
error:
@@ -1109,7 +1102,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
int offset, int len, int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen,
int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi *fl,
- struct rt6_info *rt, unsigned int flags)
+ struct rt6_info *rt, unsigned int flags, int dontfrag)
{
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
@@ -1223,15 +1216,23 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
*/
inet->cork.length += length;
- if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
- (rt->u.dst.dev->features & NETIF_F_UFO)) {
+ if (length > mtu) {
+ int proto = sk->sk_protocol;
+ if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)) {
+ ipv6_local_rxpmtu(sk, fl, mtu - exthdrlen);
+ return -EMSGSIZE;
+ }
- err = ip6_ufo_append_data(sk, getfrag, from, length, hh_len,
- fragheaderlen, transhdrlen, mtu,
- flags);
- if (err)
- goto error;
- return 0;
+ if (proto == IPPROTO_UDP &&
+ (rt->u.dst.dev->features & NETIF_F_UFO)) {
+
+ err = ip6_ufo_append_data(sk, getfrag, from, length,
+ hh_len, fragheaderlen,
+ transhdrlen, mtu, flags);
+ if (err)
+ goto error;
+ return 0;
+ }
}
if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
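
The ip6_output() rework above changes when POST_ROUTING runs: the hook now fires once, on the not-yet-fragmented packet, via NF_HOOK_COND (and is skipped entirely when IP6SKB_REROUTED marks a packet that already traversed it); only afterwards does ip6_finish_output() decide whether to fragment before handing off to the neighbour output in ip6_finish_output2(). A toy userspace model of the conditional-hook control flow (names are illustrative, not kernel API):

    #include <stdbool.h>
    #include <stdio.h>

    typedef int (*okfn_t)(int pkt);

    static int hook_verdict(int pkt) { return pkt >= 0; }	/* toy filter */

    /* models NF_HOOK_COND: run the hook only when cond is true */
    static int run_hook_cond(int pkt, bool cond, okfn_t okfn)
    {
    	if (!cond)		/* e.g. IP6SKB_REROUTED already traversed */
    		return okfn(pkt);
    	if (!hook_verdict(pkt))
    		return -1;	/* NF_DROP */
    	return okfn(pkt);
    }

    static int finish_output(int pkt)	/* models ip6_finish_output() */
    {
    	/* the fragmentation decision now lives behind the hook */
    	printf("xmit %d\n", pkt);
    	return 0;
    }

    int main(void)
    {
    	run_hook_cond(42, true, finish_output);		/* hook, then xmit */
    	run_hook_cond(7, false, finish_output);		/* straight to xmit */
    	return 0;
    }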
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 2599870747ec..8f39893d8081 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -723,14 +723,10 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
skb->protocol = htons(protocol);
skb->pkt_type = PACKET_HOST;
memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
- skb->dev = t->dev;
- skb_dst_drop(skb);
- nf_reset(skb);
- dscp_ecn_decapsulate(t, ipv6h, skb);
+ skb_tunnel_rx(skb, t->dev);
- t->dev->stats.rx_packets++;
- t->dev->stats.rx_bytes += skb->len;
+ dscp_ecn_decapsulate(t, ipv6h, skb);
netif_rx(skb);
rcu_read_unlock();
return 0;
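
ip6_tnl_rcv() above now delegates its decapsulation boilerplate to the new skb_tunnel_rx() helper. A sketch of what that helper is expected to do, inferred from the lines it replaces here (the actual body lives elsewhere in the tree and may differ):

    static inline void skb_tunnel_rx_sketch(struct sk_buff *skb,
    					struct net_device *dev)
    {
    	dev->stats.rx_packets++;	/* stats updates moved into helper */
    	dev->stats.rx_bytes += skb->len;
    	skb->dev = dev;			/* reinjected on the tunnel device */
    	skb_dst_drop(skb);		/* inner packet needs a fresh route */
    	nf_reset(skb);			/* and fresh netfilter state */
    }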
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 3e333268db89..bd9e7d3e9c8e 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -42,6 +42,7 @@
#include <linux/if_arp.h>
#include <net/checksum.h>
#include <net/netlink.h>
+#include <net/fib_rules.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
@@ -51,6 +52,34 @@
#include <linux/netfilter_ipv6.h>
#include <net/ip6_checksum.h>
+struct mr6_table {
+ struct list_head list;
+#ifdef CONFIG_NET_NS
+ struct net *net;
+#endif
+ u32 id;
+ struct sock *mroute6_sk;
+ struct timer_list ipmr_expire_timer;
+ struct list_head mfc6_unres_queue;
+ struct list_head mfc6_cache_array[MFC6_LINES];
+ struct mif_device vif6_table[MAXMIFS];
+ int maxvif;
+ atomic_t cache_resolve_queue_len;
+ int mroute_do_assert;
+ int mroute_do_pim;
+#ifdef CONFIG_IPV6_PIMSM_V2
+ int mroute_reg_vif_num;
+#endif
+};
+
+struct ip6mr_rule {
+ struct fib_rule common;
+};
+
+struct ip6mr_result {
+ struct mr6_table *mrt;
+};
+
/* Big lock, protecting vif table, mrt cache and mroute socket state.
Note that the changes are semaphored via rtnl_lock.
*/
@@ -61,9 +90,7 @@ static DEFINE_RWLOCK(mrt_lock);
* Multicast router control variables
*/
-#define MIF_EXISTS(_net, _idx) ((_net)->ipv6.vif6_table[_idx].dev != NULL)
-
-static struct mfc6_cache *mfc_unres_queue; /* Queue of unresolved entries */
+#define MIF_EXISTS(_mrt, _idx) ((_mrt)->vif6_table[_idx].dev != NULL)
/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);
@@ -78,20 +105,233 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
static struct kmem_cache *mrt_cachep __read_mostly;
-static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache);
-static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt,
+static struct mr6_table *ip6mr_new_table(struct net *net, u32 id);
+static void ip6mr_free_table(struct mr6_table *mrt);
+
+static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
+ struct sk_buff *skb, struct mfc6_cache *cache);
+static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
mifi_t mifi, int assert);
-static int ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm);
-static void mroute_clean_tables(struct net *net);
+static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
+ struct mfc6_cache *c, struct rtmsg *rtm);
+static int ip6mr_rtm_dumproute(struct sk_buff *skb,
+ struct netlink_callback *cb);
+static void mroute_clean_tables(struct mr6_table *mrt);
+static void ipmr_expire_process(unsigned long arg);
+
+#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
+#define ip6mr_for_each_table(mrt, met) \
+ list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)
+
+static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
+{
+ struct mr6_table *mrt;
+
+ ip6mr_for_each_table(mrt, net) {
+ if (mrt->id == id)
+ return mrt;
+ }
+ return NULL;
+}
+
+static int ip6mr_fib_lookup(struct net *net, struct flowi *flp,
+ struct mr6_table **mrt)
+{
+ struct ip6mr_result res;
+ struct fib_lookup_arg arg = { .result = &res, };
+ int err;
+
+ err = fib_rules_lookup(net->ipv6.mr6_rules_ops, flp, 0, &arg);
+ if (err < 0)
+ return err;
+ *mrt = res.mrt;
+ return 0;
+}
+
+static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
+ int flags, struct fib_lookup_arg *arg)
+{
+ struct ip6mr_result *res = arg->result;
+ struct mr6_table *mrt;
+
+ switch (rule->action) {
+ case FR_ACT_TO_TBL:
+ break;
+ case FR_ACT_UNREACHABLE:
+ return -ENETUNREACH;
+ case FR_ACT_PROHIBIT:
+ return -EACCES;
+ case FR_ACT_BLACKHOLE:
+ default:
+ return -EINVAL;
+ }
+
+ mrt = ip6mr_get_table(rule->fr_net, rule->table);
+ if (mrt == NULL)
+ return -EAGAIN;
+ res->mrt = mrt;
+ return 0;
+}
+
+static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
+{
+ return 1;
+}
+
+static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
+ FRA_GENERIC_POLICY,
+};
+
+static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
+ struct fib_rule_hdr *frh, struct nlattr **tb)
+{
+ return 0;
+}
+
+static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
+ struct nlattr **tb)
+{
+ return 1;
+}
+
+static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
+ struct fib_rule_hdr *frh)
+{
+ frh->dst_len = 0;
+ frh->src_len = 0;
+ frh->tos = 0;
+ return 0;
+}
+
+static const struct fib_rules_ops __net_initdata ip6mr_rules_ops_template = {
+ .family = RTNL_FAMILY_IP6MR,
+ .rule_size = sizeof(struct ip6mr_rule),
+ .addr_size = sizeof(struct in6_addr),
+ .action = ip6mr_rule_action,
+ .match = ip6mr_rule_match,
+ .configure = ip6mr_rule_configure,
+ .compare = ip6mr_rule_compare,
+ .default_pref = fib_default_rule_pref,
+ .fill = ip6mr_rule_fill,
+ .nlgroup = RTNLGRP_IPV6_RULE,
+ .policy = ip6mr_rule_policy,
+ .owner = THIS_MODULE,
+};
+
+static int __net_init ip6mr_rules_init(struct net *net)
+{
+ struct fib_rules_ops *ops;
+ struct mr6_table *mrt;
+ int err;
+
+ ops = fib_rules_register(&ip6mr_rules_ops_template, net);
+ if (IS_ERR(ops))
+ return PTR_ERR(ops);
+
+ INIT_LIST_HEAD(&net->ipv6.mr6_tables);
+
+ mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
+ if (mrt == NULL) {
+ err = -ENOMEM;
+ goto err1;
+ }
+
+ err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
+ if (err < 0)
+ goto err2;
+
+ net->ipv6.mr6_rules_ops = ops;
+ return 0;
+
+err2:
+ kfree(mrt);
+err1:
+ fib_rules_unregister(ops);
+ return err;
+}
+
+static void __net_exit ip6mr_rules_exit(struct net *net)
+{
+ struct mr6_table *mrt, *next;
+
+ list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list)
+ ip6mr_free_table(mrt);
+ fib_rules_unregister(net->ipv6.mr6_rules_ops);
+}
+#else
+#define ip6mr_for_each_table(mrt, net) \
+ for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)
+
+static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
+{
+ return net->ipv6.mrt6;
+}
+
+static int ip6mr_fib_lookup(struct net *net, struct flowi *flp,
+ struct mr6_table **mrt)
+{
+ *mrt = net->ipv6.mrt6;
+ return 0;
+}
+
+static int __net_init ip6mr_rules_init(struct net *net)
+{
+ net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT);
+ return net->ipv6.mrt6 ? 0 : -ENOMEM;
+}
-static struct timer_list ipmr_expire_timer;
+static void __net_exit ip6mr_rules_exit(struct net *net)
+{
+ ip6mr_free_table(net->ipv6.mrt6);
+}
+#endif
+
+static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
+{
+ struct mr6_table *mrt;
+ unsigned int i;
+
+ mrt = ip6mr_get_table(net, id);
+ if (mrt != NULL)
+ return mrt;
+
+ mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
+ if (mrt == NULL)
+ return NULL;
+ mrt->id = id;
+ write_pnet(&mrt->net, net);
+
+ /* Forwarding cache */
+ for (i = 0; i < MFC6_LINES; i++)
+ INIT_LIST_HEAD(&mrt->mfc6_cache_array[i]);
+
+ INIT_LIST_HEAD(&mrt->mfc6_unres_queue);
+ setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
+ (unsigned long)mrt);
+
+#ifdef CONFIG_IPV6_PIMSM_V2
+ mrt->mroute_reg_vif_num = -1;
+#endif
+#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
+ list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
+#endif
+ return mrt;
+}
+
+static void ip6mr_free_table(struct mr6_table *mrt)
+{
+ del_timer(&mrt->ipmr_expire_timer);
+ mroute_clean_tables(mrt);
+ kfree(mrt);
+}
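
The block above introduces per-namespace multicast routing tables selected by policy rules: ip6mr_fib_lookup() asks the fib-rules engine which mr6_table applies to a flow, falling back to a single default table when CONFIG_IPV6_MROUTE_MULTIPLE_TABLES is off. A toy standalone model of the id-based table selection (marks, ids and names are made up):

    #include <stdio.h>

    #define RT6_TABLE_DFLT 254

    struct mr6_table_model { unsigned id; const char *name; };

    static struct mr6_table_model tables[] = {
    	{ RT6_TABLE_DFLT, "default" },
    	{ 100, "guest" },
    };

    static struct mr6_table_model *get_table(unsigned id)	/* models ip6mr_get_table() */
    {
    	for (size_t i = 0; i < sizeof(tables) / sizeof(tables[0]); i++)
    		if (tables[i].id == id)
    			return &tables[i];
    	return NULL;
    }

    static struct mr6_table_model *fib_lookup(unsigned fwmark)	/* models ip6mr_fib_lookup() */
    {
    	/* a real rule engine derives the table id from the flow;
    	   here a firewall mark of 1 stands in for a matching rule */
    	return get_table(fwmark == 1 ? 100 : RT6_TABLE_DFLT);
    }

    int main(void)
    {
    	printf("mark 0 -> table %s\n", fib_lookup(0)->name);
    	printf("mark 1 -> table %s\n", fib_lookup(1)->name);
    	return 0;
    }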
#ifdef CONFIG_PROC_FS
struct ipmr_mfc_iter {
struct seq_net_private p;
- struct mfc6_cache **cache;
+ struct mr6_table *mrt;
+ struct list_head *cache;
int ct;
};
@@ -99,22 +339,22 @@ struct ipmr_mfc_iter {
static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
struct ipmr_mfc_iter *it, loff_t pos)
{
+ struct mr6_table *mrt = it->mrt;
struct mfc6_cache *mfc;
- it->cache = net->ipv6.mfc6_cache_array;
read_lock(&mrt_lock);
- for (it->ct = 0; it->ct < MFC6_LINES; it->ct++)
- for (mfc = net->ipv6.mfc6_cache_array[it->ct];
- mfc; mfc = mfc->next)
+ for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) {
+ it->cache = &mrt->mfc6_cache_array[it->ct];
+ list_for_each_entry(mfc, it->cache, list)
if (pos-- == 0)
return mfc;
+ }
read_unlock(&mrt_lock);
- it->cache = &mfc_unres_queue;
spin_lock_bh(&mfc_unres_lock);
- for (mfc = mfc_unres_queue; mfc; mfc = mfc->next)
- if (net_eq(mfc6_net(mfc), net) &&
- pos-- == 0)
+ it->cache = &mrt->mfc6_unres_queue;
+ list_for_each_entry(mfc, it->cache, list)
+ if (pos-- == 0)
return mfc;
spin_unlock_bh(&mfc_unres_lock);
@@ -122,15 +362,13 @@ static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
return NULL;
}
-
-
-
/*
* The /proc interfaces to multicast routing /proc/ip6_mr_cache /proc/ip6_mr_vif
*/
struct ipmr_vif_iter {
struct seq_net_private p;
+ struct mr6_table *mrt;
int ct;
};
@@ -138,11 +376,13 @@ static struct mif_device *ip6mr_vif_seq_idx(struct net *net,
struct ipmr_vif_iter *iter,
loff_t pos)
{
- for (iter->ct = 0; iter->ct < net->ipv6.maxvif; ++iter->ct) {
- if (!MIF_EXISTS(net, iter->ct))
+ struct mr6_table *mrt = iter->mrt;
+
+ for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
+ if (!MIF_EXISTS(mrt, iter->ct))
continue;
if (pos-- == 0)
- return &net->ipv6.vif6_table[iter->ct];
+ return &mrt->vif6_table[iter->ct];
}
return NULL;
}
@@ -150,7 +390,15 @@ static struct mif_device *ip6mr_vif_seq_idx(struct net *net,
static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(mrt_lock)
{
+ struct ipmr_vif_iter *iter = seq->private;
struct net *net = seq_file_net(seq);
+ struct mr6_table *mrt;
+
+ mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
+ if (mrt == NULL)
+ return ERR_PTR(-ENOENT);
+
+ iter->mrt = mrt;
read_lock(&mrt_lock);
return *pos ? ip6mr_vif_seq_idx(net, seq->private, *pos - 1)
@@ -161,15 +409,16 @@ static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct ipmr_vif_iter *iter = seq->private;
struct net *net = seq_file_net(seq);
+ struct mr6_table *mrt = iter->mrt;
++*pos;
if (v == SEQ_START_TOKEN)
return ip6mr_vif_seq_idx(net, iter, 0);
- while (++iter->ct < net->ipv6.maxvif) {
- if (!MIF_EXISTS(net, iter->ct))
+ while (++iter->ct < mrt->maxvif) {
+ if (!MIF_EXISTS(mrt, iter->ct))
continue;
- return &net->ipv6.vif6_table[iter->ct];
+ return &mrt->vif6_table[iter->ct];
}
return NULL;
}
@@ -182,7 +431,8 @@ static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
{
- struct net *net = seq_file_net(seq);
+ struct ipmr_vif_iter *iter = seq->private;
+ struct mr6_table *mrt = iter->mrt;
if (v == SEQ_START_TOKEN) {
seq_puts(seq,
@@ -193,7 +443,7 @@ static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
seq_printf(seq,
"%2td %-10s %8ld %7ld %8ld %7ld %05X\n",
- vif - net->ipv6.vif6_table,
+ vif - mrt->vif6_table,
name, vif->bytes_in, vif->pkt_in,
vif->bytes_out, vif->pkt_out,
vif->flags);
@@ -224,8 +474,15 @@ static const struct file_operations ip6mr_vif_fops = {
static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
+ struct ipmr_mfc_iter *it = seq->private;
struct net *net = seq_file_net(seq);
+ struct mr6_table *mrt;
+ mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
+ if (mrt == NULL)
+ return ERR_PTR(-ENOENT);
+
+ it->mrt = mrt;
return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
: SEQ_START_TOKEN;
}
@@ -235,35 +492,36 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
struct mfc6_cache *mfc = v;
struct ipmr_mfc_iter *it = seq->private;
struct net *net = seq_file_net(seq);
+ struct mr6_table *mrt = it->mrt;
++*pos;
if (v == SEQ_START_TOKEN)
return ipmr_mfc_seq_idx(net, seq->private, 0);
- if (mfc->next)
- return mfc->next;
+ if (mfc->list.next != it->cache)
+ return list_entry(mfc->list.next, struct mfc6_cache, list);
- if (it->cache == &mfc_unres_queue)
+ if (it->cache == &mrt->mfc6_unres_queue)
goto end_of_list;
- BUG_ON(it->cache != net->ipv6.mfc6_cache_array);
+ BUG_ON(it->cache != &mrt->mfc6_cache_array[it->ct]);
while (++it->ct < MFC6_LINES) {
- mfc = net->ipv6.mfc6_cache_array[it->ct];
- if (mfc)
- return mfc;
+ it->cache = &mrt->mfc6_cache_array[it->ct];
+ if (list_empty(it->cache))
+ continue;
+ return list_first_entry(it->cache, struct mfc6_cache, list);
}
/* exhausted cache_array, show unresolved */
read_unlock(&mrt_lock);
- it->cache = &mfc_unres_queue;
+ it->cache = &mrt->mfc6_unres_queue;
it->ct = 0;
spin_lock_bh(&mfc_unres_lock);
- mfc = mfc_unres_queue;
- if (mfc)
- return mfc;
+ if (!list_empty(it->cache))
+ return list_first_entry(it->cache, struct mfc6_cache, list);
end_of_list:
spin_unlock_bh(&mfc_unres_lock);
@@ -275,18 +533,17 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
struct ipmr_mfc_iter *it = seq->private;
- struct net *net = seq_file_net(seq);
+ struct mr6_table *mrt = it->mrt;
- if (it->cache == &mfc_unres_queue)
+ if (it->cache == &mrt->mfc6_unres_queue)
spin_unlock_bh(&mfc_unres_lock);
- else if (it->cache == net->ipv6.mfc6_cache_array)
+ else if (it->cache == mrt->mfc6_cache_array)
read_unlock(&mrt_lock);
}
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
int n;
- struct net *net = seq_file_net(seq);
if (v == SEQ_START_TOKEN) {
seq_puts(seq,
@@ -296,19 +553,20 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
} else {
const struct mfc6_cache *mfc = v;
const struct ipmr_mfc_iter *it = seq->private;
+ struct mr6_table *mrt = it->mrt;
seq_printf(seq, "%pI6 %pI6 %-3hd",
&mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
mfc->mf6c_parent);
- if (it->cache != &mfc_unres_queue) {
+ if (it->cache != &mrt->mfc6_unres_queue) {
seq_printf(seq, " %8lu %8lu %8lu",
mfc->mfc_un.res.pkt,
mfc->mfc_un.res.bytes,
mfc->mfc_un.res.wrong_if);
for (n = mfc->mfc_un.res.minvif;
n < mfc->mfc_un.res.maxvif; n++) {
- if (MIF_EXISTS(net, n) &&
+ if (MIF_EXISTS(mrt, n) &&
mfc->mfc_un.res.ttls[n] < 255)
seq_printf(seq,
" %2d:%-3d",
@@ -355,7 +613,12 @@ static int pim6_rcv(struct sk_buff *skb)
struct ipv6hdr *encap;
struct net_device *reg_dev = NULL;
struct net *net = dev_net(skb->dev);
- int reg_vif_num = net->ipv6.mroute_reg_vif_num;
+ struct mr6_table *mrt;
+ struct flowi fl = {
+ .iif = skb->dev->ifindex,
+ .mark = skb->mark,
+ };
+ int reg_vif_num;
if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
goto drop;
@@ -378,9 +641,13 @@ static int pim6_rcv(struct sk_buff *skb)
ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
goto drop;
+ if (ip6mr_fib_lookup(net, &fl, &mrt) < 0)
+ goto drop;
+ reg_vif_num = mrt->mroute_reg_vif_num;
+
read_lock(&mrt_lock);
if (reg_vif_num >= 0)
- reg_dev = net->ipv6.vif6_table[reg_vif_num].dev;
+ reg_dev = mrt->vif6_table[reg_vif_num].dev;
if (reg_dev)
dev_hold(reg_dev);
read_unlock(&mrt_lock);
@@ -391,14 +658,12 @@ static int pim6_rcv(struct sk_buff *skb)
skb->mac_header = skb->network_header;
skb_pull(skb, (u8 *)encap - skb->data);
skb_reset_network_header(skb);
- skb->dev = reg_dev;
skb->protocol = htons(ETH_P_IPV6);
skb->ip_summed = 0;
skb->pkt_type = PACKET_HOST;
- skb_dst_drop(skb);
- reg_dev->stats.rx_bytes += skb->len;
- reg_dev->stats.rx_packets++;
- nf_reset(skb);
+
+ skb_tunnel_rx(skb, reg_dev);
+
netif_rx(skb);
dev_put(reg_dev);
return 0;
@@ -417,12 +682,22 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct net *net = dev_net(dev);
+ struct mr6_table *mrt;
+ struct flowi fl = {
+ .oif = dev->ifindex,
+ .iif = skb->skb_iif,
+ .mark = skb->mark,
+ };
+ int err;
+
+ err = ip6mr_fib_lookup(net, &fl, &mrt);
+ if (err < 0)
+ return err;
read_lock(&mrt_lock);
dev->stats.tx_bytes += skb->len;
dev->stats.tx_packets++;
- ip6mr_cache_report(net, skb, net->ipv6.mroute_reg_vif_num,
- MRT6MSG_WHOLEPKT);
+ ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
read_unlock(&mrt_lock);
kfree_skb(skb);
return NETDEV_TX_OK;
@@ -442,11 +717,17 @@ static void reg_vif_setup(struct net_device *dev)
dev->features |= NETIF_F_NETNS_LOCAL;
}
-static struct net_device *ip6mr_reg_vif(struct net *net)
+static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
{
struct net_device *dev;
+ char name[IFNAMSIZ];
+
+ if (mrt->id == RT6_TABLE_DFLT)
+ sprintf(name, "pim6reg");
+ else
+ sprintf(name, "pim6reg%u", mrt->id);
- dev = alloc_netdev(0, "pim6reg", reg_vif_setup);
+ dev = alloc_netdev(0, name, reg_vif_setup);
if (dev == NULL)
return NULL;
@@ -478,15 +759,16 @@ failure:
* Delete a VIF entry
*/
-static int mif6_delete(struct net *net, int vifi, struct list_head *head)
+static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
{
struct mif_device *v;
struct net_device *dev;
struct inet6_dev *in6_dev;
- if (vifi < 0 || vifi >= net->ipv6.maxvif)
+
+ if (vifi < 0 || vifi >= mrt->maxvif)
return -EADDRNOTAVAIL;
- v = &net->ipv6.vif6_table[vifi];
+ v = &mrt->vif6_table[vifi];
write_lock_bh(&mrt_lock);
dev = v->dev;
@@ -498,17 +780,17 @@ static int mif6_delete(struct net *net, int vifi, struct list_head *head)
}
#ifdef CONFIG_IPV6_PIMSM_V2
- if (vifi == net->ipv6.mroute_reg_vif_num)
- net->ipv6.mroute_reg_vif_num = -1;
+ if (vifi == mrt->mroute_reg_vif_num)
+ mrt->mroute_reg_vif_num = -1;
#endif
- if (vifi + 1 == net->ipv6.maxvif) {
+ if (vifi + 1 == mrt->maxvif) {
int tmp;
for (tmp = vifi - 1; tmp >= 0; tmp--) {
- if (MIF_EXISTS(net, tmp))
+ if (MIF_EXISTS(mrt, tmp))
break;
}
- net->ipv6.maxvif = tmp + 1;
+ mrt->maxvif = tmp + 1;
}
write_unlock_bh(&mrt_lock);
@@ -528,7 +810,6 @@ static int mif6_delete(struct net *net, int vifi, struct list_head *head)
static inline void ip6mr_cache_free(struct mfc6_cache *c)
{
- release_net(mfc6_net(c));
kmem_cache_free(mrt_cachep, c);
}
@@ -536,12 +817,12 @@ static inline void ip6mr_cache_free(struct mfc6_cache *c)
and reporting error to netlink readers.
*/
-static void ip6mr_destroy_unres(struct mfc6_cache *c)
+static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c)
{
+ struct net *net = read_pnet(&mrt->net);
struct sk_buff *skb;
- struct net *net = mfc6_net(c);
- atomic_dec(&net->ipv6.cache_resolve_queue_len);
+ atomic_dec(&mrt->cache_resolve_queue_len);
while((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
if (ipv6_hdr(skb)->version == 0) {
@@ -559,60 +840,59 @@ static void ip6mr_destroy_unres(struct mfc6_cache *c)
}
-/* Single timer process for all the unresolved queue. */
+/* Timer process for the unresolved queue. */
-static void ipmr_do_expire_process(unsigned long dummy)
+static void ipmr_do_expire_process(struct mr6_table *mrt)
{
unsigned long now = jiffies;
unsigned long expires = 10 * HZ;
- struct mfc6_cache *c, **cp;
-
- cp = &mfc_unres_queue;
+ struct mfc6_cache *c, *next;
- while ((c = *cp) != NULL) {
+ list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
if (time_after(c->mfc_un.unres.expires, now)) {
/* not yet... */
unsigned long interval = c->mfc_un.unres.expires - now;
if (interval < expires)
expires = interval;
- cp = &c->next;
continue;
}
- *cp = c->next;
- ip6mr_destroy_unres(c);
+ list_del(&c->list);
+ ip6mr_destroy_unres(mrt, c);
}
- if (mfc_unres_queue != NULL)
- mod_timer(&ipmr_expire_timer, jiffies + expires);
+ if (!list_empty(&mrt->mfc6_unres_queue))
+ mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
}
-static void ipmr_expire_process(unsigned long dummy)
+static void ipmr_expire_process(unsigned long arg)
{
+ struct mr6_table *mrt = (struct mr6_table *)arg;
+
if (!spin_trylock(&mfc_unres_lock)) {
- mod_timer(&ipmr_expire_timer, jiffies + 1);
+ mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
return;
}
- if (mfc_unres_queue != NULL)
- ipmr_do_expire_process(dummy);
+ if (!list_empty(&mrt->mfc6_unres_queue))
+ ipmr_do_expire_process(mrt);
spin_unlock(&mfc_unres_lock);
}
/* Fill oifs list. It is called under write locked mrt_lock. */
-static void ip6mr_update_thresholds(struct mfc6_cache *cache, unsigned char *ttls)
+static void ip6mr_update_thresholds(struct mr6_table *mrt, struct mfc6_cache *cache,
+ unsigned char *ttls)
{
int vifi;
- struct net *net = mfc6_net(cache);
cache->mfc_un.res.minvif = MAXMIFS;
cache->mfc_un.res.maxvif = 0;
memset(cache->mfc_un.res.ttls, 255, MAXMIFS);
- for (vifi = 0; vifi < net->ipv6.maxvif; vifi++) {
- if (MIF_EXISTS(net, vifi) &&
+ for (vifi = 0; vifi < mrt->maxvif; vifi++) {
+ if (MIF_EXISTS(mrt, vifi) &&
ttls[vifi] && ttls[vifi] < 255) {
cache->mfc_un.res.ttls[vifi] = ttls[vifi];
if (cache->mfc_un.res.minvif > vifi)
@@ -623,16 +903,17 @@ static void ip6mr_update_thresholds(struct mfc6_cache *cache, unsigned char *ttl
}
}
-static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock)
+static int mif6_add(struct net *net, struct mr6_table *mrt,
+ struct mif6ctl *vifc, int mrtsock)
{
int vifi = vifc->mif6c_mifi;
- struct mif_device *v = &net->ipv6.vif6_table[vifi];
+ struct mif_device *v = &mrt->vif6_table[vifi];
struct net_device *dev;
struct inet6_dev *in6_dev;
int err;
/* Is vif busy ? */
- if (MIF_EXISTS(net, vifi))
+ if (MIF_EXISTS(mrt, vifi))
return -EADDRINUSE;
switch (vifc->mif6c_flags) {
@@ -642,9 +923,9 @@ static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock)
* Special Purpose VIF in PIM
* All the packets will be sent to the daemon
*/
- if (net->ipv6.mroute_reg_vif_num >= 0)
+ if (mrt->mroute_reg_vif_num >= 0)
return -EADDRINUSE;
- dev = ip6mr_reg_vif(net);
+ dev = ip6mr_reg_vif(net, mrt);
if (!dev)
return -ENOBUFS;
err = dev_set_allmulti(dev, 1);
@@ -694,50 +975,48 @@ static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock)
v->dev = dev;
#ifdef CONFIG_IPV6_PIMSM_V2
if (v->flags & MIFF_REGISTER)
- net->ipv6.mroute_reg_vif_num = vifi;
+ mrt->mroute_reg_vif_num = vifi;
#endif
- if (vifi + 1 > net->ipv6.maxvif)
- net->ipv6.maxvif = vifi + 1;
+ if (vifi + 1 > mrt->maxvif)
+ mrt->maxvif = vifi + 1;
write_unlock_bh(&mrt_lock);
return 0;
}
-static struct mfc6_cache *ip6mr_cache_find(struct net *net,
+static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt,
struct in6_addr *origin,
struct in6_addr *mcastgrp)
{
int line = MFC6_HASH(mcastgrp, origin);
struct mfc6_cache *c;
- for (c = net->ipv6.mfc6_cache_array[line]; c; c = c->next) {
+ list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
- break;
+ return c;
}
- return c;
+ return NULL;
}
/*
* Allocate a multicast cache entry
*/
-static struct mfc6_cache *ip6mr_cache_alloc(struct net *net)
+static struct mfc6_cache *ip6mr_cache_alloc(void)
{
struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
if (c == NULL)
return NULL;
c->mfc_un.res.minvif = MAXMIFS;
- mfc6_net_set(c, net);
return c;
}
-static struct mfc6_cache *ip6mr_cache_alloc_unres(struct net *net)
+static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
{
struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
if (c == NULL)
return NULL;
skb_queue_head_init(&c->mfc_un.unres.unresolved);
c->mfc_un.unres.expires = jiffies + 10 * HZ;
- mfc6_net_set(c, net);
return c;
}
@@ -745,7 +1024,8 @@ static struct mfc6_cache *ip6mr_cache_alloc_unres(struct net *net)
* A cache entry has gone into a resolved state from queued
*/
-static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c)
+static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
+ struct mfc6_cache *uc, struct mfc6_cache *c)
{
struct sk_buff *skb;
@@ -758,7 +1038,7 @@ static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c)
int err;
struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
- if (ip6mr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
+ if (__ip6mr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
} else {
nlh->nlmsg_type = NLMSG_ERROR;
@@ -766,9 +1046,9 @@ static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c)
skb_trim(skb, nlh->nlmsg_len);
((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE;
}
- err = rtnl_unicast(skb, mfc6_net(uc), NETLINK_CB(skb).pid);
+ err = rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
} else
- ip6_mr_forward(skb, c);
+ ip6_mr_forward(net, mrt, skb, c);
}
}
@@ -779,8 +1059,8 @@ static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c)
* Called under mrt_lock.
*/
-static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi,
- int assert)
+static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
+ mifi_t mifi, int assert)
{
struct sk_buff *skb;
struct mrt6msg *msg;
@@ -816,7 +1096,7 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi,
msg = (struct mrt6msg *)skb_transport_header(skb);
msg->im6_mbz = 0;
msg->im6_msgtype = MRT6MSG_WHOLEPKT;
- msg->im6_mif = net->ipv6.mroute_reg_vif_num;
+ msg->im6_mif = mrt->mroute_reg_vif_num;
msg->im6_pad = 0;
ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);
@@ -851,7 +1131,7 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi,
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
- if (net->ipv6.mroute6_sk == NULL) {
+ if (mrt->mroute6_sk == NULL) {
kfree_skb(skb);
return -EINVAL;
}
@@ -859,7 +1139,7 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi,
/*
* Deliver to user space multicast routing algorithms
*/
- ret = sock_queue_rcv_skb(net->ipv6.mroute6_sk, skb);
+ ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb);
if (ret < 0) {
if (net_ratelimit())
printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n");
@@ -874,26 +1154,28 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi,
*/
static int
-ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb)
+ip6mr_cache_unresolved(struct mr6_table *mrt, mifi_t mifi, struct sk_buff *skb)
{
+ bool found = false;
int err;
struct mfc6_cache *c;
spin_lock_bh(&mfc_unres_lock);
- for (c = mfc_unres_queue; c; c = c->next) {
- if (net_eq(mfc6_net(c), net) &&
- ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
- ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr))
+ list_for_each_entry(c, &mrt->mfc6_unres_queue, list) {
+ if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
+ ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
+ found = true;
break;
+ }
}
- if (c == NULL) {
+ if (!found) {
/*
* Create a new entry if allowable
*/
- if (atomic_read(&net->ipv6.cache_resolve_queue_len) >= 10 ||
- (c = ip6mr_cache_alloc_unres(net)) == NULL) {
+ if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
+ (c = ip6mr_cache_alloc_unres()) == NULL) {
spin_unlock_bh(&mfc_unres_lock);
kfree_skb(skb);
@@ -910,7 +1192,7 @@ ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb)
/*
* Reflect first query at pim6sd
*/
- err = ip6mr_cache_report(net, skb, mifi, MRT6MSG_NOCACHE);
+ err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
if (err < 0) {
/* If the report failed throw the cache entry
out - Brad Parker
@@ -922,11 +1204,10 @@ ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb)
return err;
}
- atomic_inc(&net->ipv6.cache_resolve_queue_len);
- c->next = mfc_unres_queue;
- mfc_unres_queue = c;
+ atomic_inc(&mrt->cache_resolve_queue_len);
+ list_add(&c->list, &mrt->mfc6_unres_queue);
- ipmr_do_expire_process(1);
+ ipmr_do_expire_process(mrt);
}
/*
@@ -948,19 +1229,18 @@ ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb)
* MFC6 cache manipulation by user space
*/
-static int ip6mr_mfc_delete(struct net *net, struct mf6cctl *mfc)
+static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc)
{
int line;
- struct mfc6_cache *c, **cp;
+ struct mfc6_cache *c, *next;
line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
- for (cp = &net->ipv6.mfc6_cache_array[line];
- (c = *cp) != NULL; cp = &c->next) {
+ list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) {
if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
write_lock_bh(&mrt_lock);
- *cp = c->next;
+ list_del(&c->list);
write_unlock_bh(&mrt_lock);
ip6mr_cache_free(c);
@@ -975,6 +1255,7 @@ static int ip6mr_device_event(struct notifier_block *this,
{
struct net_device *dev = ptr;
struct net *net = dev_net(dev);
+ struct mr6_table *mrt;
struct mif_device *v;
int ct;
LIST_HEAD(list);
@@ -982,10 +1263,12 @@ static int ip6mr_device_event(struct notifier_block *this,
if (event != NETDEV_UNREGISTER)
return NOTIFY_DONE;
- v = &net->ipv6.vif6_table[0];
- for (ct = 0; ct < net->ipv6.maxvif; ct++, v++) {
- if (v->dev == dev)
- mif6_delete(net, ct, &list);
+ ip6mr_for_each_table(mrt, net) {
+ v = &mrt->vif6_table[0];
+ for (ct = 0; ct < mrt->maxvif; ct++, v++) {
+ if (v->dev == dev)
+ mif6_delete(mrt, ct, &list);
+ }
}
unregister_netdevice_many(&list);
@@ -1002,26 +1285,11 @@ static struct notifier_block ip6_mr_notifier = {
static int __net_init ip6mr_net_init(struct net *net)
{
- int err = 0;
- net->ipv6.vif6_table = kcalloc(MAXMIFS, sizeof(struct mif_device),
- GFP_KERNEL);
- if (!net->ipv6.vif6_table) {
- err = -ENOMEM;
- goto fail;
- }
-
- /* Forwarding cache */
- net->ipv6.mfc6_cache_array = kcalloc(MFC6_LINES,
- sizeof(struct mfc6_cache *),
- GFP_KERNEL);
- if (!net->ipv6.mfc6_cache_array) {
- err = -ENOMEM;
- goto fail_mfc6_cache;
- }
+ int err;
-#ifdef CONFIG_IPV6_PIMSM_V2
- net->ipv6.mroute_reg_vif_num = -1;
-#endif
+ err = ip6mr_rules_init(net);
+ if (err < 0)
+ goto fail;
#ifdef CONFIG_PROC_FS
err = -ENOMEM;
@@ -1030,16 +1298,15 @@ static int __net_init ip6mr_net_init(struct net *net)
if (!proc_net_fops_create(net, "ip6_mr_cache", 0, &ip6mr_mfc_fops))
goto proc_cache_fail;
#endif
+
return 0;
#ifdef CONFIG_PROC_FS
proc_cache_fail:
proc_net_remove(net, "ip6_mr_vif");
proc_vif_fail:
- kfree(net->ipv6.mfc6_cache_array);
+ ip6mr_rules_exit(net);
#endif
-fail_mfc6_cache:
- kfree(net->ipv6.vif6_table);
fail:
return err;
}
@@ -1050,9 +1317,7 @@ static void __net_exit ip6mr_net_exit(struct net *net)
proc_net_remove(net, "ip6_mr_cache");
proc_net_remove(net, "ip6_mr_vif");
#endif
- mroute_clean_tables(net);
- kfree(net->ipv6.mfc6_cache_array);
- kfree(net->ipv6.vif6_table);
+ ip6mr_rules_exit(net);
}
static struct pernet_operations ip6mr_net_ops = {
@@ -1075,7 +1340,6 @@ int __init ip6_mr_init(void)
if (err)
goto reg_pernet_fail;
- setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0);
err = register_netdevice_notifier(&ip6_mr_notifier);
if (err)
goto reg_notif_fail;
@@ -1086,13 +1350,13 @@ int __init ip6_mr_init(void)
goto add_proto_fail;
}
#endif
+ rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL, ip6mr_rtm_dumproute);
return 0;
#ifdef CONFIG_IPV6_PIMSM_V2
add_proto_fail:
unregister_netdevice_notifier(&ip6_mr_notifier);
#endif
reg_notif_fail:
- del_timer(&ipmr_expire_timer);
unregister_pernet_subsys(&ip6mr_net_ops);
reg_pernet_fail:
kmem_cache_destroy(mrt_cachep);
@@ -1102,15 +1366,16 @@ reg_pernet_fail:
void ip6_mr_cleanup(void)
{
unregister_netdevice_notifier(&ip6_mr_notifier);
- del_timer(&ipmr_expire_timer);
unregister_pernet_subsys(&ip6mr_net_ops);
kmem_cache_destroy(mrt_cachep);
}
-static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock)
+static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
+ struct mf6cctl *mfc, int mrtsock)
{
+ bool found = false;
int line;
- struct mfc6_cache *uc, *c, **cp;
+ struct mfc6_cache *uc, *c;
unsigned char ttls[MAXMIFS];
int i;
@@ -1126,17 +1391,18 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock)
line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
- for (cp = &net->ipv6.mfc6_cache_array[line];
- (c = *cp) != NULL; cp = &c->next) {
+ list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
- ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr))
+ ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
+ found = true;
break;
+ }
}
- if (c != NULL) {
+ if (found) {
write_lock_bh(&mrt_lock);
c->mf6c_parent = mfc->mf6cc_parent;
- ip6mr_update_thresholds(c, ttls);
+ ip6mr_update_thresholds(mrt, c, ttls);
if (!mrtsock)
c->mfc_flags |= MFC_STATIC;
write_unlock_bh(&mrt_lock);
@@ -1146,43 +1412,42 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock)
if (!ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
return -EINVAL;
- c = ip6mr_cache_alloc(net);
+ c = ip6mr_cache_alloc();
if (c == NULL)
return -ENOMEM;
c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
c->mf6c_parent = mfc->mf6cc_parent;
- ip6mr_update_thresholds(c, ttls);
+ ip6mr_update_thresholds(mrt, c, ttls);
if (!mrtsock)
c->mfc_flags |= MFC_STATIC;
write_lock_bh(&mrt_lock);
- c->next = net->ipv6.mfc6_cache_array[line];
- net->ipv6.mfc6_cache_array[line] = c;
+ list_add(&c->list, &mrt->mfc6_cache_array[line]);
write_unlock_bh(&mrt_lock);
/*
* Check to see if we resolved a queued list. If so we
* need to send on the frames and tidy up.
*/
+ found = false;
spin_lock_bh(&mfc_unres_lock);
- for (cp = &mfc_unres_queue; (uc = *cp) != NULL;
- cp = &uc->next) {
- if (net_eq(mfc6_net(uc), net) &&
- ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
+ list_for_each_entry(uc, &mrt->mfc6_unres_queue, list) {
+ if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
- *cp = uc->next;
- atomic_dec(&net->ipv6.cache_resolve_queue_len);
+ list_del(&uc->list);
+ atomic_dec(&mrt->cache_resolve_queue_len);
+ found = true;
break;
}
}
- if (mfc_unres_queue == NULL)
- del_timer(&ipmr_expire_timer);
+ if (list_empty(&mrt->mfc6_unres_queue))
+ del_timer(&mrt->ipmr_expire_timer);
spin_unlock_bh(&mfc_unres_lock);
- if (uc) {
- ip6mr_cache_resolve(uc, c);
+ if (found) {
+ ip6mr_cache_resolve(net, mrt, uc, c);
ip6mr_cache_free(uc);
}
return 0;
@@ -1192,17 +1457,18 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock)
* Close the multicast socket, and clear the vif tables etc
*/
-static void mroute_clean_tables(struct net *net)
+static void mroute_clean_tables(struct mr6_table *mrt)
{
int i;
LIST_HEAD(list);
+ struct mfc6_cache *c, *next;
/*
* Shut down all active vif entries
*/
- for (i = 0; i < net->ipv6.maxvif; i++) {
- if (!(net->ipv6.vif6_table[i].flags & VIFF_STATIC))
- mif6_delete(net, i, &list);
+ for (i = 0; i < mrt->maxvif; i++) {
+ if (!(mrt->vif6_table[i].flags & VIFF_STATIC))
+ mif6_delete(mrt, i, &list);
}
unregister_netdevice_many(&list);
@@ -1210,48 +1476,36 @@ static void mroute_clean_tables(struct net *net)
* Wipe the cache
*/
for (i = 0; i < MFC6_LINES; i++) {
- struct mfc6_cache *c, **cp;
-
- cp = &net->ipv6.mfc6_cache_array[i];
- while ((c = *cp) != NULL) {
- if (c->mfc_flags & MFC_STATIC) {
- cp = &c->next;
+ list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
+ if (c->mfc_flags & MFC_STATIC)
continue;
- }
write_lock_bh(&mrt_lock);
- *cp = c->next;
+ list_del(&c->list);
write_unlock_bh(&mrt_lock);
ip6mr_cache_free(c);
}
}
- if (atomic_read(&net->ipv6.cache_resolve_queue_len) != 0) {
- struct mfc6_cache *c, **cp;
-
+ if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
spin_lock_bh(&mfc_unres_lock);
- cp = &mfc_unres_queue;
- while ((c = *cp) != NULL) {
- if (!net_eq(mfc6_net(c), net)) {
- cp = &c->next;
- continue;
- }
- *cp = c->next;
- ip6mr_destroy_unres(c);
+ list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
+ list_del(&c->list);
+ ip6mr_destroy_unres(mrt, c);
}
spin_unlock_bh(&mfc_unres_lock);
}
}
-static int ip6mr_sk_init(struct sock *sk)
+static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk)
{
int err = 0;
struct net *net = sock_net(sk);
rtnl_lock();
write_lock_bh(&mrt_lock);
- if (likely(net->ipv6.mroute6_sk == NULL)) {
- net->ipv6.mroute6_sk = sk;
+ if (likely(mrt->mroute6_sk == NULL)) {
+ mrt->mroute6_sk = sk;
net->ipv6.devconf_all->mc_forwarding++;
}
else
@@ -1265,24 +1519,43 @@ static int ip6mr_sk_init(struct sock *sk)
int ip6mr_sk_done(struct sock *sk)
{
- int err = 0;
+ int err = -EACCES;
struct net *net = sock_net(sk);
+ struct mr6_table *mrt;
rtnl_lock();
- if (sk == net->ipv6.mroute6_sk) {
- write_lock_bh(&mrt_lock);
- net->ipv6.mroute6_sk = NULL;
- net->ipv6.devconf_all->mc_forwarding--;
- write_unlock_bh(&mrt_lock);
+ ip6mr_for_each_table(mrt, net) {
+ if (sk == mrt->mroute6_sk) {
+ write_lock_bh(&mrt_lock);
+ mrt->mroute6_sk = NULL;
+ net->ipv6.devconf_all->mc_forwarding--;
+ write_unlock_bh(&mrt_lock);
- mroute_clean_tables(net);
- } else
- err = -EACCES;
+ mroute_clean_tables(mrt);
+ err = 0;
+ break;
+ }
+ }
rtnl_unlock();
return err;
}
+struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
+{
+ struct mr6_table *mrt;
+ struct flowi fl = {
+ .iif = skb->skb_iif,
+ .oif = skb->dev->ifindex,
+ .mark = skb->mark,
+ };
+
+ if (ip6mr_fib_lookup(net, &fl, &mrt) < 0)
+ return NULL;
+
+ return mrt->mroute6_sk;
+}
+
/*
* Socket options and virtual interface manipulation. The whole
* virtual interface system is a complete heap, but unfortunately
@@ -1297,9 +1570,14 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
struct mf6cctl mfc;
mifi_t mifi;
struct net *net = sock_net(sk);
+ struct mr6_table *mrt;
+
+ mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
+ if (mrt == NULL)
+ return -ENOENT;
if (optname != MRT6_INIT) {
- if (sk != net->ipv6.mroute6_sk && !capable(CAP_NET_ADMIN))
+ if (sk != mrt->mroute6_sk && !capable(CAP_NET_ADMIN))
return -EACCES;
}
@@ -1311,7 +1589,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
if (optlen < sizeof(int))
return -EINVAL;
- return ip6mr_sk_init(sk);
+ return ip6mr_sk_init(mrt, sk);
case MRT6_DONE:
return ip6mr_sk_done(sk);
@@ -1324,7 +1602,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
if (vif.mif6c_mifi >= MAXMIFS)
return -ENFILE;
rtnl_lock();
- ret = mif6_add(net, &vif, sk == net->ipv6.mroute6_sk);
+ ret = mif6_add(net, mrt, &vif, sk == mrt->mroute6_sk);
rtnl_unlock();
return ret;
@@ -1334,7 +1612,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
return -EFAULT;
rtnl_lock();
- ret = mif6_delete(net, mifi, NULL);
+ ret = mif6_delete(mrt, mifi, NULL);
rtnl_unlock();
return ret;
@@ -1350,10 +1628,9 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
return -EFAULT;
rtnl_lock();
if (optname == MRT6_DEL_MFC)
- ret = ip6mr_mfc_delete(net, &mfc);
+ ret = ip6mr_mfc_delete(mrt, &mfc);
else
- ret = ip6mr_mfc_add(net, &mfc,
- sk == net->ipv6.mroute6_sk);
+ ret = ip6mr_mfc_add(net, mrt, &mfc, sk == mrt->mroute6_sk);
rtnl_unlock();
return ret;
@@ -1365,7 +1642,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
int v;
if (get_user(v, (int __user *)optval))
return -EFAULT;
- net->ipv6.mroute_do_assert = !!v;
+ mrt->mroute_do_assert = !!v;
return 0;
}
@@ -1378,15 +1655,36 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
v = !!v;
rtnl_lock();
ret = 0;
- if (v != net->ipv6.mroute_do_pim) {
- net->ipv6.mroute_do_pim = v;
- net->ipv6.mroute_do_assert = v;
+ if (v != mrt->mroute_do_pim) {
+ mrt->mroute_do_pim = v;
+ mrt->mroute_do_assert = v;
}
rtnl_unlock();
return ret;
}
#endif
+#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
+ case MRT6_TABLE:
+ {
+ u32 v;
+
+ if (optlen != sizeof(u32))
+ return -EINVAL;
+ if (get_user(v, (u32 __user *)optval))
+ return -EFAULT;
+ if (sk == mrt->mroute6_sk)
+ return -EBUSY;
+
+ rtnl_lock();
+ ret = 0;
+ if (!ip6mr_new_table(net, v))
+ ret = -ENOMEM;
+ raw6_sk(sk)->ip6mr_table = v;
+ rtnl_unlock();
+ return ret;
+ }
+#endif
/*
* Spurious command, or MRT6_VERSION which you cannot
* set.
@@ -1406,6 +1704,11 @@ int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
int olr;
int val;
struct net *net = sock_net(sk);
+ struct mr6_table *mrt;
+
+ mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
+ if (mrt == NULL)
+ return -ENOENT;
switch (optname) {
case MRT6_VERSION:
@@ -1413,11 +1716,11 @@ int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
break;
#ifdef CONFIG_IPV6_PIMSM_V2
case MRT6_PIM:
- val = net->ipv6.mroute_do_pim;
+ val = mrt->mroute_do_pim;
break;
#endif
case MRT6_ASSERT:
- val = net->ipv6.mroute_do_assert;
+ val = mrt->mroute_do_assert;
break;
default:
return -ENOPROTOOPT;
@@ -1448,16 +1751,21 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
struct mif_device *vif;
struct mfc6_cache *c;
struct net *net = sock_net(sk);
+ struct mr6_table *mrt;
+
+ mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
+ if (mrt == NULL)
+ return -ENOENT;
switch (cmd) {
case SIOCGETMIFCNT_IN6:
if (copy_from_user(&vr, arg, sizeof(vr)))
return -EFAULT;
- if (vr.mifi >= net->ipv6.maxvif)
+ if (vr.mifi >= mrt->maxvif)
return -EINVAL;
read_lock(&mrt_lock);
- vif = &net->ipv6.vif6_table[vr.mifi];
- if (MIF_EXISTS(net, vr.mifi)) {
+ vif = &mrt->vif6_table[vr.mifi];
+ if (MIF_EXISTS(mrt, vr.mifi)) {
vr.icount = vif->pkt_in;
vr.ocount = vif->pkt_out;
vr.ibytes = vif->bytes_in;
@@ -1475,7 +1783,7 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
return -EFAULT;
read_lock(&mrt_lock);
- c = ip6mr_cache_find(net, &sr.src.sin6_addr, &sr.grp.sin6_addr);
+ c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
if (c) {
sr.pktcnt = c->mfc_un.res.pkt;
sr.bytecnt = c->mfc_un.res.bytes;
@@ -1505,11 +1813,11 @@ static inline int ip6mr_forward2_finish(struct sk_buff *skb)
* Processing handlers for ip6mr_forward
*/
-static int ip6mr_forward2(struct sk_buff *skb, struct mfc6_cache *c, int vifi)
+static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
+ struct sk_buff *skb, struct mfc6_cache *c, int vifi)
{
struct ipv6hdr *ipv6h;
- struct net *net = mfc6_net(c);
- struct mif_device *vif = &net->ipv6.vif6_table[vifi];
+ struct mif_device *vif = &mrt->vif6_table[vifi];
struct net_device *dev;
struct dst_entry *dst;
struct flowi fl;
@@ -1523,7 +1831,7 @@ static int ip6mr_forward2(struct sk_buff *skb, struct mfc6_cache *c, int vifi)
vif->bytes_out += skb->len;
vif->dev->stats.tx_bytes += skb->len;
vif->dev->stats.tx_packets++;
- ip6mr_cache_report(net, skb, vifi, MRT6MSG_WHOLEPKT);
+ ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
goto out_free;
}
#endif
@@ -1570,7 +1878,7 @@ static int ip6mr_forward2(struct sk_buff *skb, struct mfc6_cache *c, int vifi)
IP6CB(skb)->flags |= IP6SKB_FORWARDED;
- return NF_HOOK(PF_INET6, NF_INET_FORWARD, skb, skb->dev, dev,
+ return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dev,
ip6mr_forward2_finish);
out_free:
@@ -1578,22 +1886,22 @@ out_free:
return 0;
}
-static int ip6mr_find_vif(struct net_device *dev)
+static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev)
{
- struct net *net = dev_net(dev);
int ct;
- for (ct = net->ipv6.maxvif - 1; ct >= 0; ct--) {
- if (net->ipv6.vif6_table[ct].dev == dev)
+
+ for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
+ if (mrt->vif6_table[ct].dev == dev)
break;
}
return ct;
}
-static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache)
+static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
+ struct sk_buff *skb, struct mfc6_cache *cache)
{
int psend = -1;
int vif, ct;
- struct net *net = mfc6_net(cache);
vif = cache->mf6c_parent;
cache->mfc_un.res.pkt++;
@@ -1602,30 +1910,30 @@ static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache)
/*
* Wrong interface: drop packet and (maybe) send PIM assert.
*/
- if (net->ipv6.vif6_table[vif].dev != skb->dev) {
+ if (mrt->vif6_table[vif].dev != skb->dev) {
int true_vifi;
cache->mfc_un.res.wrong_if++;
- true_vifi = ip6mr_find_vif(skb->dev);
+ true_vifi = ip6mr_find_vif(mrt, skb->dev);
- if (true_vifi >= 0 && net->ipv6.mroute_do_assert &&
+ if (true_vifi >= 0 && mrt->mroute_do_assert &&
/* pimsm uses asserts, when switching from RPT to SPT,
so that we cannot check that packet arrived on an oif.
It is bad, but otherwise we would need to move pretty
large chunk of pimd to kernel. Ough... --ANK
*/
- (net->ipv6.mroute_do_pim ||
+ (mrt->mroute_do_pim ||
cache->mfc_un.res.ttls[true_vifi] < 255) &&
time_after(jiffies,
cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
cache->mfc_un.res.last_assert = jiffies;
- ip6mr_cache_report(net, skb, true_vifi, MRT6MSG_WRONGMIF);
+ ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
}
goto dont_forward;
}
- net->ipv6.vif6_table[vif].pkt_in++;
- net->ipv6.vif6_table[vif].bytes_in += skb->len;
+ mrt->vif6_table[vif].pkt_in++;
+ mrt->vif6_table[vif].bytes_in += skb->len;
/*
* Forward the frame
@@ -1635,13 +1943,13 @@ static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache)
if (psend != -1) {
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2)
- ip6mr_forward2(skb2, cache, psend);
+ ip6mr_forward2(net, mrt, skb2, cache, psend);
}
psend = ct;
}
}
if (psend != -1) {
- ip6mr_forward2(skb, cache, psend);
+ ip6mr_forward2(net, mrt, skb, cache, psend);
return 0;
}
@@ -1659,9 +1967,19 @@ int ip6_mr_input(struct sk_buff *skb)
{
struct mfc6_cache *cache;
struct net *net = dev_net(skb->dev);
+ struct mr6_table *mrt;
+ struct flowi fl = {
+ .iif = skb->dev->ifindex,
+ .mark = skb->mark,
+ };
+ int err;
+
+ err = ip6mr_fib_lookup(net, &fl, &mrt);
+ if (err < 0)
+ return err;
read_lock(&mrt_lock);
- cache = ip6mr_cache_find(net,
+ cache = ip6mr_cache_find(mrt,
&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
/*
@@ -1670,9 +1988,9 @@ int ip6_mr_input(struct sk_buff *skb)
if (cache == NULL) {
int vif;
- vif = ip6mr_find_vif(skb->dev);
+ vif = ip6mr_find_vif(mrt, skb->dev);
if (vif >= 0) {
- int err = ip6mr_cache_unresolved(net, vif, skb);
+ int err = ip6mr_cache_unresolved(mrt, vif, skb);
read_unlock(&mrt_lock);
return err;
@@ -1682,7 +2000,7 @@ int ip6_mr_input(struct sk_buff *skb)
return -ENODEV;
}
- ip6_mr_forward(skb, cache);
+ ip6_mr_forward(net, mrt, skb, cache);
read_unlock(&mrt_lock);
@@ -1690,12 +2008,11 @@ int ip6_mr_input(struct sk_buff *skb)
}
-static int
-ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm)
+static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
+ struct mfc6_cache *c, struct rtmsg *rtm)
{
int ct;
struct rtnexthop *nhp;
- struct net *net = mfc6_net(c);
u8 *b = skb_tail_pointer(skb);
struct rtattr *mp_head;
@@ -1703,19 +2020,19 @@ ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm)
if (c->mf6c_parent >= MAXMIFS)
return -ENOENT;
- if (MIF_EXISTS(net, c->mf6c_parent))
- RTA_PUT(skb, RTA_IIF, 4, &net->ipv6.vif6_table[c->mf6c_parent].dev->ifindex);
+ if (MIF_EXISTS(mrt, c->mf6c_parent))
+ RTA_PUT(skb, RTA_IIF, 4, &mrt->vif6_table[c->mf6c_parent].dev->ifindex);
mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
- if (MIF_EXISTS(net, ct) && c->mfc_un.res.ttls[ct] < 255) {
+ if (MIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
goto rtattr_failure;
nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
nhp->rtnh_flags = 0;
nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
- nhp->rtnh_ifindex = net->ipv6.vif6_table[ct].dev->ifindex;
+ nhp->rtnh_ifindex = mrt->vif6_table[ct].dev->ifindex;
nhp->rtnh_len = sizeof(*nhp);
}
}
@@ -1733,11 +2050,16 @@ int ip6mr_get_route(struct net *net,
struct sk_buff *skb, struct rtmsg *rtm, int nowait)
{
int err;
+ struct mr6_table *mrt;
struct mfc6_cache *cache;
struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
+ mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
+ if (mrt == NULL)
+ return -ENOENT;
+
read_lock(&mrt_lock);
- cache = ip6mr_cache_find(net, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
+ cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
if (!cache) {
struct sk_buff *skb2;
@@ -1751,7 +2073,7 @@ int ip6mr_get_route(struct net *net,
}
dev = skb->dev;
- if (dev == NULL || (vif = ip6mr_find_vif(dev)) < 0) {
+ if (dev == NULL || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
read_unlock(&mrt_lock);
return -ENODEV;
}
@@ -1780,7 +2102,7 @@ int ip6mr_get_route(struct net *net,
ipv6_addr_copy(&iph->saddr, &rt->rt6i_src.addr);
ipv6_addr_copy(&iph->daddr, &rt->rt6i_dst.addr);
- err = ip6mr_cache_unresolved(net, vif, skb2);
+ err = ip6mr_cache_unresolved(mrt, vif, skb2);
read_unlock(&mrt_lock);
return err;
@@ -1789,8 +2111,88 @@ int ip6mr_get_route(struct net *net,
if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
cache->mfc_flags |= MFC_NOTIFY;
- err = ip6mr_fill_mroute(skb, cache, rtm);
+ err = __ip6mr_fill_mroute(mrt, skb, cache, rtm);
read_unlock(&mrt_lock);
return err;
}
+static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
+ u32 pid, u32 seq, struct mfc6_cache *c)
+{
+ struct nlmsghdr *nlh;
+ struct rtmsg *rtm;
+
+ nlh = nlmsg_put(skb, pid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI);
+ if (nlh == NULL)
+ return -EMSGSIZE;
+
+ rtm = nlmsg_data(nlh);
+ rtm->rtm_family = RTNL_FAMILY_IP6MR;
+ rtm->rtm_dst_len = 128;
+ rtm->rtm_src_len = 128;
+ rtm->rtm_tos = 0;
+ rtm->rtm_table = mrt->id;
+ NLA_PUT_U32(skb, RTA_TABLE, mrt->id);
+ rtm->rtm_scope = RT_SCOPE_UNIVERSE;
+ rtm->rtm_protocol = RTPROT_UNSPEC;
+ rtm->rtm_flags = 0;
+
+ NLA_PUT(skb, RTA_SRC, 16, &c->mf6c_origin);
+ NLA_PUT(skb, RTA_DST, 16, &c->mf6c_mcastgrp);
+
+ if (__ip6mr_fill_mroute(mrt, skb, c, rtm) < 0)
+ goto nla_put_failure;
+
+ return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+}
+
+static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(skb->sk);
+ struct mr6_table *mrt;
+ struct mfc6_cache *mfc;
+ unsigned int t = 0, s_t;
+ unsigned int h = 0, s_h;
+ unsigned int e = 0, s_e;
+
+ s_t = cb->args[0];
+ s_h = cb->args[1];
+ s_e = cb->args[2];
+
+ read_lock(&mrt_lock);
+ ip6mr_for_each_table(mrt, net) {
+ if (t < s_t)
+ goto next_table;
+ if (t > s_t)
+ s_h = 0;
+ for (h = s_h; h < MFC6_LINES; h++) {
+ list_for_each_entry(mfc, &mrt->mfc6_cache_array[h], list) {
+ if (e < s_e)
+ goto next_entry;
+ if (ip6mr_fill_mroute(mrt, skb,
+ NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq,
+ mfc) < 0)
+ goto done;
+next_entry:
+ e++;
+ }
+ e = s_e = 0;
+ }
+ s_h = 0;
+next_table:
+ t++;
+ }
+done:
+ read_unlock(&mrt_lock);
+
+ cb->args[2] = e;
+ cb->args[1] = h;
+ cb->args[0] = t;
+
+ return skb->len;
+}
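The ip6mr hunks above end with the new RTM_GETROUTE dump, where cb->args[] carries the (table, hash bucket, entry) resume point across netlink recvmsg calls. On the configuration side, the new MRT6_TABLE option lets a routing daemon bind its control socket to a specific mr6_table before MRT6_INIT. A minimal userspace sketch of that sequence follows — assumed usage, not part of the patch; it requires a kernel built with CONFIG_IPV6_MROUTE_MULTIPLE_TABLES and a <linux/mroute6.h> that defines MRT6_TABLE, and the table id 42 is arbitrary:

/* Hedged sketch, not from the patch: select an ip6mr table, then
 * initialize multicast routing on a raw ICMPv6 socket. */
#include <stdint.h>
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/mroute6.h>

static int open_mrt6_socket(uint32_t table)
{
	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
	int one = 1;

	if (fd < 0)
		return -1;
	/* Must precede MRT6_INIT: the kernel refuses to retarget an
	 * already-initialized mroute6 socket (the -EBUSY check above). */
	if (setsockopt(fd, IPPROTO_IPV6, MRT6_TABLE,
		       &table, sizeof(table)) < 0)
		perror("MRT6_TABLE (kernel without multiple tables?)");
	if (setsockopt(fd, IPPROTO_IPV6, MRT6_INIT, &one, sizeof(one)) < 0) {
		perror("MRT6_INIT");
		return -1;
	}
	return fd;	/* caller issues MRT6_ADD_MIF / MRT6_ADD_MFC next */
}

Selecting the table first matters because ip6mr_sk_init() pins the socket to its table; once pinned, MRT6_TABLE is rejected for that socket.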
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 33f60fca7aa7..bd43f0152c21 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -114,9 +114,9 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
}
opt = xchg(&inet6_sk(sk)->opt, opt);
} else {
- write_lock(&sk->sk_dst_lock);
+ spin_lock(&sk->sk_dst_lock);
opt = xchg(&inet6_sk(sk)->opt, opt);
- write_unlock(&sk->sk_dst_lock);
+ spin_unlock(&sk->sk_dst_lock);
}
sk_dst_reset(sk);
@@ -337,6 +337,13 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
retv = 0;
break;
+ case IPV6_RECVPATHMTU:
+ if (optlen < sizeof(int))
+ goto e_inval;
+ np->rxopt.bits.rxpmtu = valbool;
+ retv = 0;
+ break;
+
case IPV6_HOPOPTS:
case IPV6_RTHDRDSTOPTS:
case IPV6_RTHDR:
@@ -451,7 +458,8 @@ sticky_done:
msg.msg_controllen = optlen;
msg.msg_control = (void*)(opt+1);
- retv = datagram_send_ctl(net, &msg, &fl, opt, &junk, &junk);
+ retv = datagram_send_ctl(net, &msg, &fl, opt, &junk, &junk,
+ &junk);
if (retv)
goto done;
update:
@@ -767,6 +775,17 @@ pref_skip_coa:
break;
}
+ case IPV6_MINHOPCOUNT:
+ if (optlen < sizeof(int))
+ goto e_inval;
+ if (val < 0 || val > 255)
+ goto e_inval;
+ np->min_hopcount = val;
+ break;
+ case IPV6_DONTFRAG:
+ np->dontfrag = valbool;
+ retv = 0;
+ break;
}
release_sock(sk);
@@ -971,14 +990,13 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
case IPV6_MTU:
{
struct dst_entry *dst;
+
val = 0;
- lock_sock(sk);
- dst = sk_dst_get(sk);
- if (dst) {
+ rcu_read_lock();
+ dst = __sk_dst_get(sk);
+ if (dst)
val = dst_mtu(dst);
- dst_release(dst);
- }
- release_sock(sk);
+ rcu_read_unlock();
if (!val)
return -ENOTCONN;
break;
@@ -1056,6 +1074,38 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
val = np->rxopt.bits.rxflow;
break;
+ case IPV6_RECVPATHMTU:
+ val = np->rxopt.bits.rxpmtu;
+ break;
+
+ case IPV6_PATHMTU:
+ {
+ struct dst_entry *dst;
+ struct ip6_mtuinfo mtuinfo;
+
+ if (len < sizeof(mtuinfo))
+ return -EINVAL;
+
+ len = sizeof(mtuinfo);
+ memset(&mtuinfo, 0, sizeof(mtuinfo));
+
+ rcu_read_lock();
+ dst = __sk_dst_get(sk);
+ if (dst)
+ mtuinfo.ip6m_mtu = dst_mtu(dst);
+ rcu_read_unlock();
+ if (!mtuinfo.ip6m_mtu)
+ return -ENOTCONN;
+
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &mtuinfo, len))
+ return -EFAULT;
+
+ return 0;
+ }
+
case IPV6_UNICAST_HOPS:
case IPV6_MULTICAST_HOPS:
{
@@ -1066,12 +1116,14 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
else
val = np->mcast_hops;
- dst = sk_dst_get(sk);
- if (dst) {
- if (val < 0)
+ if (val < 0) {
+ rcu_read_lock();
+ dst = __sk_dst_get(sk);
+ if (dst)
val = ip6_dst_hoplimit(dst);
- dst_release(dst);
+ rcu_read_unlock();
}
+
if (val < 0)
val = sock_net(sk)->ipv6.devconf_all->hop_limit;
break;
@@ -1115,6 +1167,14 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
val |= IPV6_PREFER_SRC_HOME;
break;
+ case IPV6_MINHOPCOUNT:
+ val = np->min_hopcount;
+ break;
+
+ case IPV6_DONTFRAG:
+ val = np->dontfrag;
+ break;
+
default:
return -ENOPROTOOPT;
}
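The options added to ipv6_sockglue.c above are mostly the kernel half of RFC 3542's path-MTU discovery interface (IPV6_RECVPATHMTU, IPV6_PATHMTU, IPV6_DONTFRAG); IPV6_MINHOPCOUNT serves hop-limit filtering in the spirit of RFC 5082. A hedged userspace sketch of the simplest of these, reading the cached path MTU back as a struct ip6_mtuinfo:

/* Hedged sketch, assumed usage: query the cached path MTU added above.
 * Fails with ENOTCONN until the socket has a cached destination. */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

static int print_path_mtu(int fd)
{
	struct ip6_mtuinfo mtuinfo;
	socklen_t len = sizeof(mtuinfo);

	if (getsockopt(fd, IPPROTO_IPV6, IPV6_PATHMTU, &mtuinfo, &len) < 0) {
		perror("IPV6_PATHMTU");
		return -1;
	}
	printf("path MTU: %u bytes\n", (unsigned int)mtuinfo.ip6m_mtu);
	return 0;
}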
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index c483ab9fd67b..59f1881968c7 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -44,6 +44,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <net/mld.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
@@ -71,54 +72,11 @@
#define MDBG(x)
#endif
-/*
- * These header formats should be in a separate include file, but icmpv6.h
- * doesn't have in6_addr defined in all cases, there is no __u128, and no
- * other files reference these.
- *
- * +-DLS 4/14/03
- */
-
-/* Multicast Listener Discovery version 2 headers */
-
-struct mld2_grec {
- __u8 grec_type;
- __u8 grec_auxwords;
- __be16 grec_nsrcs;
- struct in6_addr grec_mca;
- struct in6_addr grec_src[0];
-};
-
-struct mld2_report {
- __u8 type;
- __u8 resv1;
- __sum16 csum;
- __be16 resv2;
- __be16 ngrec;
- struct mld2_grec grec[0];
-};
-
-struct mld2_query {
- __u8 type;
- __u8 code;
- __sum16 csum;
- __be16 mrc;
- __be16 resv1;
- struct in6_addr mca;
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u8 qrv:3,
- suppress:1,
- resv2:4;
-#elif defined(__BIG_ENDIAN_BITFIELD)
- __u8 resv2:4,
- suppress:1,
- qrv:3;
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
- __u8 qqic;
- __be16 nsrcs;
- struct in6_addr srcs[0];
+/* Ensure that we have struct in6_addr aligned on 32bit word. */
+static void *__mld2_query_bugs[] __attribute__((__unused__)) = {
+ BUILD_BUG_ON_NULL(offsetof(struct mld2_query, mld2q_srcs) % 4),
+ BUILD_BUG_ON_NULL(offsetof(struct mld2_report, mld2r_grec) % 4),
+ BUILD_BUG_ON_NULL(offsetof(struct mld2_grec, grec_mca) % 4)
};
static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT;
@@ -157,14 +115,6 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
((idev)->mc_v1_seen && \
time_before(jiffies, (idev)->mc_v1_seen)))
-#define MLDV2_MASK(value, nb) ((nb)>=32 ? (value) : ((1<<(nb))-1) & (value))
-#define MLDV2_EXP(thresh, nbmant, nbexp, value) \
- ((value) < (thresh) ? (value) : \
- ((MLDV2_MASK(value, nbmant) | (1<<(nbmant))) << \
- (MLDV2_MASK((value) >> (nbmant), nbexp) + (nbexp))))
-
-#define MLDV2_MRC(value) MLDV2_EXP(0x8000, 12, 3, value)
-
#define IPV6_MLD_MAX_MSF 64
int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;
@@ -715,7 +665,7 @@ static void igmp6_group_added(struct ifmcaddr6 *mc)
if (!(mc->mca_flags&MAF_LOADED)) {
mc->mca_flags |= MAF_LOADED;
if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
- dev_mc_add(dev, buf, dev->addr_len, 0);
+ dev_mc_add(dev, buf);
}
spin_unlock_bh(&mc->mca_lock);
@@ -741,7 +691,7 @@ static void igmp6_group_dropped(struct ifmcaddr6 *mc)
if (mc->mca_flags&MAF_LOADED) {
mc->mca_flags &= ~MAF_LOADED;
if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
- dev_mc_delete(dev, buf, dev->addr_len, 0);
+ dev_mc_del(dev, buf);
}
if (mc->mca_flags & MAF_NOREPORT)
@@ -1161,7 +1111,7 @@ int igmp6_event_query(struct sk_buff *skb)
struct in6_addr *group;
unsigned long max_delay;
struct inet6_dev *idev;
- struct icmp6hdr *hdr;
+ struct mld_msg *mld;
int group_type;
int mark = 0;
int len;
@@ -1182,8 +1132,8 @@ int igmp6_event_query(struct sk_buff *skb)
if (idev == NULL)
return 0;
- hdr = icmp6_hdr(skb);
- group = (struct in6_addr *) (hdr + 1);
+ mld = (struct mld_msg *)icmp6_hdr(skb);
+ group = &mld->mld_mca;
group_type = ipv6_addr_type(group);
if (group_type != IPV6_ADDR_ANY &&
@@ -1197,7 +1147,7 @@ int igmp6_event_query(struct sk_buff *skb)
/* MLDv1 router present */
/* Translate milliseconds to jiffies */
- max_delay = (ntohs(hdr->icmp6_maxdelay)*HZ)/1000;
+ max_delay = (ntohs(mld->mld_maxdelay)*HZ)/1000;
switchback = (idev->mc_qrv + 1) * max_delay;
idev->mc_v1_seen = jiffies + switchback;
@@ -1216,14 +1166,14 @@ int igmp6_event_query(struct sk_buff *skb)
return -EINVAL;
}
mlh2 = (struct mld2_query *)skb_transport_header(skb);
- max_delay = (MLDV2_MRC(ntohs(mlh2->mrc))*HZ)/1000;
+ max_delay = (MLDV2_MRC(ntohs(mlh2->mld2q_mrc))*HZ)/1000;
if (!max_delay)
max_delay = 1;
idev->mc_maxdelay = max_delay;
- if (mlh2->qrv)
- idev->mc_qrv = mlh2->qrv;
+ if (mlh2->mld2q_qrv)
+ idev->mc_qrv = mlh2->mld2q_qrv;
if (group_type == IPV6_ADDR_ANY) { /* general query */
- if (mlh2->nsrcs) {
+ if (mlh2->mld2q_nsrcs) {
in6_dev_put(idev);
return -EINVAL; /* no sources allowed */
}
@@ -1232,9 +1182,9 @@ int igmp6_event_query(struct sk_buff *skb)
return 0;
}
/* mark sources to include, if group & source-specific */
- if (mlh2->nsrcs != 0) {
+ if (mlh2->mld2q_nsrcs != 0) {
if (!pskb_may_pull(skb, srcs_offset +
- ntohs(mlh2->nsrcs) * sizeof(struct in6_addr))) {
+ ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr))) {
in6_dev_put(idev);
return -EINVAL;
}
@@ -1270,7 +1220,7 @@ int igmp6_event_query(struct sk_buff *skb)
ma->mca_flags &= ~MAF_GSQUERY;
}
if (!(ma->mca_flags & MAF_GSQUERY) ||
- mld_marksources(ma, ntohs(mlh2->nsrcs), mlh2->srcs))
+ mld_marksources(ma, ntohs(mlh2->mld2q_nsrcs), mlh2->mld2q_srcs))
igmp6_group_queried(ma, max_delay);
spin_unlock_bh(&ma->mca_lock);
break;
@@ -1286,9 +1236,8 @@ int igmp6_event_query(struct sk_buff *skb)
int igmp6_event_report(struct sk_buff *skb)
{
struct ifmcaddr6 *ma;
- struct in6_addr *addrp;
struct inet6_dev *idev;
- struct icmp6hdr *hdr;
+ struct mld_msg *mld;
int addr_type;
/* Our own report looped back. Ignore it. */
@@ -1300,10 +1249,10 @@ int igmp6_event_report(struct sk_buff *skb)
skb->pkt_type != PACKET_BROADCAST)
return 0;
- if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
+ if (!pskb_may_pull(skb, sizeof(*mld) - sizeof(struct icmp6hdr)))
return -EINVAL;
- hdr = icmp6_hdr(skb);
+ mld = (struct mld_msg *)icmp6_hdr(skb);
/* Drop reports with not link local source */
addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr);
@@ -1311,8 +1260,6 @@ int igmp6_event_report(struct sk_buff *skb)
!(addr_type&IPV6_ADDR_LINKLOCAL))
return -EINVAL;
- addrp = (struct in6_addr *) (hdr + 1);
-
idev = in6_dev_get(skb->dev);
if (idev == NULL)
return -ENODEV;
@@ -1323,7 +1270,7 @@ int igmp6_event_report(struct sk_buff *skb)
read_lock_bh(&idev->lock);
for (ma = idev->mc_list; ma; ma=ma->next) {
- if (ipv6_addr_equal(&ma->mca_addr, addrp)) {
+ if (ipv6_addr_equal(&ma->mca_addr, &mld->mld_mca)) {
spin_lock(&ma->mca_lock);
if (del_timer(&ma->mca_timer))
atomic_dec(&ma->mca_refcnt);
@@ -1432,11 +1379,11 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size)
skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data);
skb_put(skb, sizeof(*pmr));
pmr = (struct mld2_report *)skb_transport_header(skb);
- pmr->type = ICMPV6_MLD2_REPORT;
- pmr->resv1 = 0;
- pmr->csum = 0;
- pmr->resv2 = 0;
- pmr->ngrec = 0;
+ pmr->mld2r_type = ICMPV6_MLD2_REPORT;
+ pmr->mld2r_resv1 = 0;
+ pmr->mld2r_cksum = 0;
+ pmr->mld2r_resv2 = 0;
+ pmr->mld2r_ngrec = 0;
return skb;
}
@@ -1458,9 +1405,10 @@ static void mld_sendpack(struct sk_buff *skb)
mldlen = skb->tail - skb->transport_header;
pip6->payload_len = htons(payload_len);
- pmr->csum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen,
- IPPROTO_ICMPV6, csum_partial(skb_transport_header(skb),
- mldlen, 0));
+ pmr->mld2r_cksum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen,
+ IPPROTO_ICMPV6,
+ csum_partial(skb_transport_header(skb),
+ mldlen, 0));
dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr);
@@ -1480,7 +1428,7 @@ static void mld_sendpack(struct sk_buff *skb)
payload_len = skb->len;
- err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
+ err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
dst_output);
out:
if (!err) {
@@ -1521,7 +1469,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
pgr->grec_nsrcs = 0;
pgr->grec_mca = pmc->mca_addr; /* structure copy */
pmr = (struct mld2_report *)skb_transport_header(skb);
- pmr->ngrec = htons(ntohs(pmr->ngrec)+1);
+ pmr->mld2r_ngrec = htons(ntohs(pmr->mld2r_ngrec)+1);
*ppgr = pgr;
return skb;
}
@@ -1557,7 +1505,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
/* EX and TO_EX get a fresh packet, if needed */
if (truncate) {
- if (pmr && pmr->ngrec &&
+ if (pmr && pmr->mld2r_ngrec &&
AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
if (skb)
mld_sendpack(skb);
@@ -1770,9 +1718,8 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
struct sock *sk = net->ipv6.igmp_sk;
struct inet6_dev *idev;
struct sk_buff *skb;
- struct icmp6hdr *hdr;
+ struct mld_msg *hdr;
const struct in6_addr *snd_addr, *saddr;
- struct in6_addr *addrp;
struct in6_addr addr_buf;
int err, len, payload_len, full_len;
u8 ra[8] = { IPPROTO_ICMPV6, 0,
@@ -1820,16 +1767,14 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra));
- hdr = (struct icmp6hdr *) skb_put(skb, sizeof(struct icmp6hdr));
- memset(hdr, 0, sizeof(struct icmp6hdr));
- hdr->icmp6_type = type;
+ hdr = (struct mld_msg *) skb_put(skb, sizeof(struct mld_msg));
+ memset(hdr, 0, sizeof(struct mld_msg));
+ hdr->mld_type = type;
+ ipv6_addr_copy(&hdr->mld_mca, addr);
- addrp = (struct in6_addr *) skb_put(skb, sizeof(struct in6_addr));
- ipv6_addr_copy(addrp, addr);
-
- hdr->icmp6_cksum = csum_ipv6_magic(saddr, snd_addr, len,
- IPPROTO_ICMPV6,
- csum_partial(hdr, len, 0));
+ hdr->mld_cksum = csum_ipv6_magic(saddr, snd_addr, len,
+ IPPROTO_ICMPV6,
+ csum_partial(hdr, len, 0));
idev = in6_dev_get(skb->dev);
@@ -1848,7 +1793,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
goto err_out;
skb_dst_set(skb, dst);
- err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
+ err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
dst_output);
out:
if (!err) {
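The MLD structure definitions and the MLDV2_MRC() decoding macros deleted from mcast.c above are relocated to <net/mld.h> rather than removed (note the new include at the top of the file). For readers of the max_delay computation, an equivalent plain-C rendering of the Maximum Response Code decoding from RFC 3810, section 5.1.3 — literal below 32768, 3-bit-exponent/12-bit-mantissa floating point above:

/* Reference rendering of the removed MLDV2_MRC() macro (behavior
 * preserved; the kernel keeps the macro form in <net/mld.h>). */
static inline unsigned long mldv2_mrc(unsigned int mrc)
{
	if (mrc < 0x8000)		/* small values are literal */
		return mrc;
	/* |1|exp(3)|mant(12)| -> (mant | 0x1000) << (exp + 3) */
	return ((mrc & 0x0fff) | 0x1000) << (((mrc >> 12) & 0x7) + 3);
}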
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index da0a4d2adc69..0abdc242ddb7 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -536,7 +536,7 @@ void ndisc_send_skb(struct sk_buff *skb,
idev = in6_dev_get(dst->dev);
IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
- err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev,
+ err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev,
dst_output);
if (!err) {
ICMP6MSGOUT_INC_STATS(net, idev, type);
@@ -890,8 +890,6 @@ out:
in6_ifa_put(ifp);
else
in6_dev_put(idev);
-
- return;
}
static void ndisc_recv_na(struct sk_buff *skb)
@@ -1618,7 +1616,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
skb_dst_set(buff, dst);
idev = in6_dev_get(dst->dev);
IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
- err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, buff, NULL, dst->dev,
+ err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, buff, NULL, dst->dev,
dst_output);
if (!err) {
ICMP6MSGOUT_INC_STATS(net, idev, NDISC_REDIRECT);
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index d5ed92b14346..a74951c039b6 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -25,20 +25,6 @@ int ip6_route_me_harder(struct sk_buff *skb)
};
dst = ip6_route_output(net, skb->sk, &fl);
-
-#ifdef CONFIG_XFRM
- if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
- xfrm_decode_session(skb, &fl, AF_INET6) == 0) {
- struct dst_entry *dst2 = skb_dst(skb);
-
- if (xfrm_lookup(net, &dst2, &fl, skb->sk, 0)) {
- skb_dst_set(skb, NULL);
- return -1;
- }
- skb_dst_set(skb, dst2);
- }
-#endif
-
if (dst->error) {
IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
LIMIT_NETDEBUG(KERN_DEBUG "ip6_route_me_harder: No more route.\n");
@@ -50,6 +36,17 @@ int ip6_route_me_harder(struct sk_buff *skb)
skb_dst_drop(skb);
skb_dst_set(skb, dst);
+
+#ifdef CONFIG_XFRM
+ if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
+ xfrm_decode_session(skb, &fl, AF_INET6) == 0) {
+ skb_dst_set(skb, NULL);
+ if (xfrm_lookup(net, &dst, &fl, skb->sk, 0))
+ return -1;
+ skb_dst_set(skb, dst);
+ }
+#endif
+
return 0;
}
EXPORT_SYMBOL(ip6_route_me_harder);
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index 6a68a74d14a3..8c201743d96d 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -162,8 +162,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
break;
case IPQ_COPY_PACKET:
- if ((entry->skb->ip_summed == CHECKSUM_PARTIAL ||
- entry->skb->ip_summed == CHECKSUM_COMPLETE) &&
+ if (entry->skb->ip_summed == CHECKSUM_PARTIAL &&
(*errp = skb_checksum_help(entry->skb))) {
read_unlock_bh(&queue_lock);
return NULL;
@@ -463,7 +462,6 @@ __ipq_rcv_skb(struct sk_buff *skb)
if (flags & NLM_F_ACK)
netlink_ack(skb, nlh, 0);
- return;
}
static void
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 9210e312edf1..6f517bd83692 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -40,24 +40,19 @@ MODULE_DESCRIPTION("IPv6 packet filter");
/*#define DEBUG_IP_FIREWALL_USER*/
#ifdef DEBUG_IP_FIREWALL
-#define dprintf(format, args...) printk(format , ## args)
+#define dprintf(format, args...) pr_info(format , ## args)
#else
#define dprintf(format, args...)
#endif
#ifdef DEBUG_IP_FIREWALL_USER
-#define duprintf(format, args...) printk(format , ## args)
+#define duprintf(format, args...) pr_info(format , ## args)
#else
#define duprintf(format, args...)
#endif
#ifdef CONFIG_NETFILTER_DEBUG
-#define IP_NF_ASSERT(x) \
-do { \
- if (!(x)) \
- printk("IP_NF_ASSERT: %s:%s:%u\n", \
- __func__, __FILE__, __LINE__); \
-} while(0)
+#define IP_NF_ASSERT(x) WARN_ON(!(x))
#else
#define IP_NF_ASSERT(x)
#endif
@@ -197,30 +192,14 @@ ip6_checkentry(const struct ip6t_ip6 *ipv6)
}
static unsigned int
-ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
+ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
{
if (net_ratelimit())
- printk("ip6_tables: error: `%s'\n",
- (const char *)par->targinfo);
+ pr_info("error: `%s'\n", (const char *)par->targinfo);
return NF_DROP;
}
-/* Performance critical - called for every packet */
-static inline bool
-do_match(const struct ip6t_entry_match *m, const struct sk_buff *skb,
- struct xt_match_param *par)
-{
- par->match = m->u.kernel.match;
- par->matchinfo = m->data;
-
- /* Stop iteration if it doesn't match */
- if (!m->u.kernel.match->match(skb, par))
- return true;
- else
- return false;
-}
-
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
@@ -352,18 +331,15 @@ ip6t_do_table(struct sk_buff *skb,
const struct net_device *out,
struct xt_table *table)
{
-#define tb_comefrom ((struct ip6t_entry *)table_base)->comefrom
-
static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
- bool hotdrop = false;
/* Initializing verdict to NF_DROP keeps gcc happy. */
unsigned int verdict = NF_DROP;
const char *indev, *outdev;
const void *table_base;
- struct ip6t_entry *e, *back;
+ struct ip6t_entry *e, **jumpstack;
+ unsigned int *stackptr, origptr, cpu;
const struct xt_table_info *private;
- struct xt_match_param mtpar;
- struct xt_target_param tgpar;
+ struct xt_action_param acpar;
/* Initialization */
indev = in ? in->name : nulldevname;
@@ -374,39 +350,42 @@ ip6t_do_table(struct sk_buff *skb,
* things we don't know, ie. tcp syn flag or ports). If the
* rule is also a fragment-specific rule, non-fragments won't
* match it. */
- mtpar.hotdrop = &hotdrop;
- mtpar.in = tgpar.in = in;
- mtpar.out = tgpar.out = out;
- mtpar.family = tgpar.family = NFPROTO_IPV6;
- mtpar.hooknum = tgpar.hooknum = hook;
+ acpar.hotdrop = false;
+ acpar.in = in;
+ acpar.out = out;
+ acpar.family = NFPROTO_IPV6;
+ acpar.hooknum = hook;
IP_NF_ASSERT(table->valid_hooks & (1 << hook));
xt_info_rdlock_bh();
private = table->private;
- table_base = private->entries[smp_processor_id()];
+ cpu = smp_processor_id();
+ table_base = private->entries[cpu];
+ jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
+ stackptr = &private->stackptr[cpu];
+ origptr = *stackptr;
e = get_entry(table_base, private->hook_entry[hook]);
- /* For return from builtin chain */
- back = get_entry(table_base, private->underflow[hook]);
-
do {
const struct ip6t_entry_target *t;
const struct xt_entry_match *ematch;
IP_NF_ASSERT(e);
- IP_NF_ASSERT(back);
if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
- &mtpar.thoff, &mtpar.fragoff, &hotdrop)) {
+ &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
no_match:
e = ip6t_next_entry(e);
continue;
}
- xt_ematch_foreach(ematch, e)
- if (do_match(ematch, skb, &mtpar) != 0)
+ xt_ematch_foreach(ematch, e) {
+ acpar.match = ematch->u.kernel.match;
+ acpar.matchinfo = ematch->data;
+ if (!acpar.match->match(skb, &acpar))
goto no_match;
+ }
ADD_COUNTER(e->counters,
ntohs(ipv6_hdr(skb)->payload_len) +
@@ -433,62 +412,47 @@ ip6t_do_table(struct sk_buff *skb,
verdict = (unsigned)(-v) - 1;
break;
}
- e = back;
- back = get_entry(table_base, back->comefrom);
+ if (*stackptr == 0)
+ e = get_entry(table_base,
+ private->underflow[hook]);
+ else
+ e = ip6t_next_entry(jumpstack[--*stackptr]);
continue;
}
if (table_base + v != ip6t_next_entry(e) &&
!(e->ipv6.flags & IP6T_F_GOTO)) {
- /* Save old back ptr in next entry */
- struct ip6t_entry *next = ip6t_next_entry(e);
- next->comefrom = (void *)back - table_base;
- /* set back pointer to next entry */
- back = next;
+ if (*stackptr >= private->stacksize) {
+ verdict = NF_DROP;
+ break;
+ }
+ jumpstack[(*stackptr)++] = e;
}
e = get_entry(table_base, v);
continue;
}
- /* Targets which reenter must return
- abs. verdicts */
- tgpar.target = t->u.kernel.target;
- tgpar.targinfo = t->data;
-
-#ifdef CONFIG_NETFILTER_DEBUG
- tb_comefrom = 0xeeeeeeec;
-#endif
- verdict = t->u.kernel.target->target(skb, &tgpar);
+ acpar.target = t->u.kernel.target;
+ acpar.targinfo = t->data;
-#ifdef CONFIG_NETFILTER_DEBUG
- if (tb_comefrom != 0xeeeeeeec && verdict == IP6T_CONTINUE) {
- printk("Target %s reentered!\n",
- t->u.kernel.target->name);
- verdict = NF_DROP;
- }
- tb_comefrom = 0x57acc001;
-#endif
+ verdict = t->u.kernel.target->target(skb, &acpar);
if (verdict == IP6T_CONTINUE)
e = ip6t_next_entry(e);
else
/* Verdict */
break;
- } while (!hotdrop);
+ } while (!acpar.hotdrop);
-#ifdef CONFIG_NETFILTER_DEBUG
- tb_comefrom = NETFILTER_LINK_POISON;
-#endif
xt_info_rdunlock_bh();
+ *stackptr = origptr;
#ifdef DEBUG_ALLOW_ALL
return NF_ACCEPT;
#else
- if (hotdrop)
+ if (acpar.hotdrop)
return NF_DROP;
else return verdict;
#endif
-
-#undef tb_comefrom
}
/* Figures out from what hook each rule can be called: returns 0 if
@@ -517,7 +481,7 @@ mark_source_chains(const struct xt_table_info *newinfo,
int visited = e->comefrom & (1 << hook);
if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
- printk("iptables: loop hook %u pos %u %08X.\n",
+ pr_err("iptables: loop hook %u pos %u %08X.\n",
hook, pos, e->comefrom);
return 0;
}
@@ -661,12 +625,11 @@ find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par)
struct xt_match *match;
int ret;
- match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
- m->u.user.revision),
- "ip6t_%s", m->u.user.name);
- if (IS_ERR(match) || !match) {
+ match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
+ m->u.user.revision);
+ if (IS_ERR(match)) {
duprintf("find_check_match: `%s' not found\n", m->u.user.name);
- return match ? PTR_ERR(match) : -ENOENT;
+ return PTR_ERR(match);
}
m->u.kernel.match = match;
@@ -734,13 +697,11 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
}
t = ip6t_get_target(e);
- target = try_then_request_module(xt_find_target(AF_INET6,
- t->u.user.name,
- t->u.user.revision),
- "ip6t_%s", t->u.user.name);
- if (IS_ERR(target) || !target) {
+ target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
+ t->u.user.revision);
+ if (IS_ERR(target)) {
duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
- ret = target ? PTR_ERR(target) : -ENOENT;
+ ret = PTR_ERR(target);
goto cleanup_matches;
}
t->u.kernel.target = target;
@@ -873,6 +834,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
if (ret != 0)
return ret;
++i;
+ if (strcmp(ip6t_get_target(iter)->u.user.name,
+ XT_ERROR_TARGET) == 0)
+ ++newinfo->stacksize;
}
if (i != repl->num_entries) {
@@ -1509,13 +1473,12 @@ compat_find_calc_match(struct ip6t_entry_match *m,
{
struct xt_match *match;
- match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
- m->u.user.revision),
- "ip6t_%s", m->u.user.name);
- if (IS_ERR(match) || !match) {
+ match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
+ m->u.user.revision);
+ if (IS_ERR(match)) {
duprintf("compat_check_calc_match: `%s' not found\n",
m->u.user.name);
- return match ? PTR_ERR(match) : -ENOENT;
+ return PTR_ERR(match);
}
m->u.kernel.match = match;
*size += xt_compat_match_offset(match);
@@ -1582,14 +1545,12 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
}
t = compat_ip6t_get_target(e);
- target = try_then_request_module(xt_find_target(AF_INET6,
- t->u.user.name,
- t->u.user.revision),
- "ip6t_%s", t->u.user.name);
- if (IS_ERR(target) || !target) {
+ target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
+ t->u.user.revision);
+ if (IS_ERR(target)) {
duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
t->u.user.name);
- ret = target ? PTR_ERR(target) : -ENOENT;
+ ret = PTR_ERR(target);
goto release_matches;
}
t->u.kernel.target = target;
@@ -2127,8 +2088,7 @@ struct xt_table *ip6t_register_table(struct net *net,
{
int ret;
struct xt_table_info *newinfo;
- struct xt_table_info bootstrap
- = { 0, 0, 0, { 0 }, { 0 }, { } };
+ struct xt_table_info bootstrap = {0};
void *loc_cpu_entry;
struct xt_table *new_table;
@@ -2188,7 +2148,7 @@ icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
}
static bool
-icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
+icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct icmp6hdr *ic;
struct icmp6hdr _icmph;
@@ -2204,7 +2164,7 @@ icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
* can't. Hence, no choice but to drop.
*/
duprintf("Dropping evil ICMP tinygram.\n");
- *par->hotdrop = true;
+ par->hotdrop = true;
return false;
}
@@ -2216,31 +2176,32 @@ icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
}
/* Called when user tries to insert an entry of this type. */
-static bool icmp6_checkentry(const struct xt_mtchk_param *par)
+static int icmp6_checkentry(const struct xt_mtchk_param *par)
{
const struct ip6t_icmp *icmpinfo = par->matchinfo;
/* Must specify no unknown invflags */
- return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
+ return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
}
/* The built-in targets: standard (NULL) and error. */
-static struct xt_target ip6t_standard_target __read_mostly = {
- .name = IP6T_STANDARD_TARGET,
- .targetsize = sizeof(int),
- .family = NFPROTO_IPV6,
+static struct xt_target ip6t_builtin_tg[] __read_mostly = {
+ {
+ .name = IP6T_STANDARD_TARGET,
+ .targetsize = sizeof(int),
+ .family = NFPROTO_IPV6,
#ifdef CONFIG_COMPAT
- .compatsize = sizeof(compat_int_t),
- .compat_from_user = compat_standard_from_user,
- .compat_to_user = compat_standard_to_user,
+ .compatsize = sizeof(compat_int_t),
+ .compat_from_user = compat_standard_from_user,
+ .compat_to_user = compat_standard_to_user,
#endif
-};
-
-static struct xt_target ip6t_error_target __read_mostly = {
- .name = IP6T_ERROR_TARGET,
- .target = ip6t_error,
- .targetsize = IP6T_FUNCTION_MAXNAMELEN,
- .family = NFPROTO_IPV6,
+ },
+ {
+ .name = IP6T_ERROR_TARGET,
+ .target = ip6t_error,
+ .targetsize = IP6T_FUNCTION_MAXNAMELEN,
+ .family = NFPROTO_IPV6,
+ },
};
static struct nf_sockopt_ops ip6t_sockopts = {
@@ -2260,13 +2221,15 @@ static struct nf_sockopt_ops ip6t_sockopts = {
.owner = THIS_MODULE,
};
-static struct xt_match icmp6_matchstruct __read_mostly = {
- .name = "icmp6",
- .match = icmp6_match,
- .matchsize = sizeof(struct ip6t_icmp),
- .checkentry = icmp6_checkentry,
- .proto = IPPROTO_ICMPV6,
- .family = NFPROTO_IPV6,
+static struct xt_match ip6t_builtin_mt[] __read_mostly = {
+ {
+ .name = "icmp6",
+ .match = icmp6_match,
+ .matchsize = sizeof(struct ip6t_icmp),
+ .checkentry = icmp6_checkentry,
+ .proto = IPPROTO_ICMPV6,
+ .family = NFPROTO_IPV6,
+ },
};
static int __net_init ip6_tables_net_init(struct net *net)
@@ -2293,13 +2256,10 @@ static int __init ip6_tables_init(void)
goto err1;
/* No one else will be downing sem now, so we won't sleep */
- ret = xt_register_target(&ip6t_standard_target);
+ ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
if (ret < 0)
goto err2;
- ret = xt_register_target(&ip6t_error_target);
- if (ret < 0)
- goto err3;
- ret = xt_register_match(&icmp6_matchstruct);
+ ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
if (ret < 0)
goto err4;
@@ -2308,15 +2268,13 @@ static int __init ip6_tables_init(void)
if (ret < 0)
goto err5;
- printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
+ pr_info("(C) 2000-2006 Netfilter Core Team\n");
return 0;
err5:
- xt_unregister_match(&icmp6_matchstruct);
+ xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
err4:
- xt_unregister_target(&ip6t_error_target);
-err3:
- xt_unregister_target(&ip6t_standard_target);
+ xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
err2:
unregister_pernet_subsys(&ip6_tables_net_ops);
err1:
@@ -2327,10 +2285,8 @@ static void __exit ip6_tables_fini(void)
{
nf_unregister_sockopt(&ip6t_sockopts);
- xt_unregister_match(&icmp6_matchstruct);
- xt_unregister_target(&ip6t_error_target);
- xt_unregister_target(&ip6t_standard_target);
-
+ xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
+ xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
unregister_pernet_subsys(&ip6_tables_net_ops);
}
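The core of the ip6_tables.c rework replaces the old back-pointer scheme — return addresses threaded through e->comefrom, which also forced reentrant targets to return absolute verdicts — with a per-CPU jump stack whose size is counted at translate_table() time, one slot per XT_ERROR_TARGET chain head. A simplified model of the traversal discipline, fixed-size rules assumed here for illustration; the real code steps with ip6t_next_entry() over variable-size entries:

/* Simplified sketch of the jumpstack traversal, not the kernel code. */
struct rule { int verdict; };	/* stand-in; real entries are variable-size */

struct traversal {
	struct rule **jumpstack;	/* per-CPU saved positions */
	unsigned int stackptr;		/* current depth */
	unsigned int stacksize;		/* bounded at table load time */
};

/* Jump into a user-defined chain: push the current rule so RETURN can
 * resume after it.  Overflow maps to an NF_DROP verdict in ip6t_do_table. */
static struct rule *jump_to(struct traversal *t, struct rule *cur,
			    struct rule *target)
{
	if (t->stackptr >= t->stacksize)
		return NULL;			/* caller drops the packet */
	t->jumpstack[t->stackptr++] = cur;
	return target;
}

/* RETURN from a chain: pop, or fall back to the hook's underflow rule
 * when the stack is empty (return from the builtin chain). */
static struct rule *chain_return(struct traversal *t, struct rule *underflow)
{
	if (t->stackptr == 0)
		return underflow;
	return t->jumpstack[--t->stackptr] + 1;	/* rule after the jump */
}

This is also why the REJECT target's old "must return absolute verdicts" warning can be dropped below: target reentry no longer scribbles over saved traversal state.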
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
index b285fdf19050..af4ee11f2066 100644
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ b/net/ipv6/netfilter/ip6t_LOG.c
@@ -9,9 +9,8 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
@@ -378,7 +377,7 @@ static struct nf_loginfo default_loginfo = {
.type = NF_LOG_TYPE_LOG,
.u = {
.log = {
- .level = 0,
+ .level = 5,
.logflags = NF_LOG_MASK,
},
},
@@ -437,7 +436,7 @@ ip6t_log_packet(u_int8_t pf,
}
static unsigned int
-log_tg6(struct sk_buff *skb, const struct xt_target_param *par)
+log_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ip6t_log_info *loginfo = par->targinfo;
struct nf_loginfo li;
@@ -452,20 +451,19 @@ log_tg6(struct sk_buff *skb, const struct xt_target_param *par)
}
-static bool log_tg6_check(const struct xt_tgchk_param *par)
+static int log_tg6_check(const struct xt_tgchk_param *par)
{
const struct ip6t_log_info *loginfo = par->targinfo;
if (loginfo->level >= 8) {
- pr_debug("LOG: level %u >= 8\n", loginfo->level);
- return false;
+ pr_debug("level %u >= 8\n", loginfo->level);
+ return -EINVAL;
}
if (loginfo->prefix[sizeof(loginfo->prefix)-1] != '\0') {
- pr_debug("LOG: prefix term %i\n",
- loginfo->prefix[sizeof(loginfo->prefix)-1]);
- return false;
+ pr_debug("prefix not null-terminated\n");
+ return -EINVAL;
}
- return true;
+ return 0;
}
static struct xt_target log_tg6_reg __read_mostly = {
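A pattern worth noting before the remaining match/target files: every checkentry hook in this series turns from bool into int, returning 0 or a negative errno so xtables can report why a rule was refused instead of a bare failure. Generic shape of the conversion — a sketch, not any specific kernel hook:

/* Hedged sketch of the new xtables check convention. */
#include <errno.h>

struct my_info { unsigned int invflags; };
#define MY_INV_MASK 0x03

static int my_mt_check(const struct my_info *info)
{
	if (info->invflags & ~MY_INV_MASK)
		return -EINVAL;	/* was: return false */
	return 0;		/* was: return true  */
}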
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 39b50c3768e8..47d227713758 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -14,6 +14,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/gfp.h>
#include <linux/module.h>
@@ -50,7 +51,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
(!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) {
- pr_debug("ip6t_REJECT: addr is not unicast.\n");
+ pr_debug("addr is not unicast.\n");
return;
}
@@ -58,7 +59,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto);
if ((tcphoff < 0) || (tcphoff > oldskb->len)) {
- pr_debug("ip6t_REJECT: Can't get TCP header.\n");
+ pr_debug("Cannot get TCP header.\n");
return;
}
@@ -66,7 +67,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
/* IP header checks: fragment, too short. */
if (proto != IPPROTO_TCP || otcplen < sizeof(struct tcphdr)) {
- pr_debug("ip6t_REJECT: proto(%d) != IPPROTO_TCP, "
+ pr_debug("proto(%d) != IPPROTO_TCP, "
"or too short. otcplen = %d\n",
proto, otcplen);
return;
@@ -77,14 +78,14 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
/* No RST for RST. */
if (otcph.rst) {
- pr_debug("ip6t_REJECT: RST is set\n");
+ pr_debug("RST is set\n");
return;
}
/* Check checksum. */
if (csum_ipv6_magic(&oip6h->saddr, &oip6h->daddr, otcplen, IPPROTO_TCP,
skb_checksum(oldskb, tcphoff, otcplen, 0))) {
- pr_debug("ip6t_REJECT: TCP checksum is invalid\n");
+ pr_debug("TCP checksum is invalid\n");
return;
}
@@ -108,7 +109,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
if (!nskb) {
if (net_ratelimit())
- printk("ip6t_REJECT: Can't alloc skb\n");
+ pr_debug("cannot alloc skb\n");
dst_release(dst);
return;
}
@@ -174,15 +175,12 @@ send_unreach(struct net *net, struct sk_buff *skb_in, unsigned char code,
}
static unsigned int
-reject_tg6(struct sk_buff *skb, const struct xt_target_param *par)
+reject_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ip6t_reject_info *reject = par->targinfo;
struct net *net = dev_net((par->in != NULL) ? par->in : par->out);
pr_debug("%s: medium point\n", __func__);
- /* WARNING: This code causes reentry within ip6tables.
- This means that the ip6tables jump stack is now crap. We
- must return an absolute verdict. --RR */
switch (reject->with) {
case IP6T_ICMP6_NO_ROUTE:
send_unreach(net, skb, ICMPV6_NOROUTE, par->hooknum);
@@ -207,30 +205,30 @@ reject_tg6(struct sk_buff *skb, const struct xt_target_param *par)
break;
default:
if (net_ratelimit())
- printk(KERN_WARNING "ip6t_REJECT: case %u not handled yet\n", reject->with);
+ pr_info("case %u not handled yet\n", reject->with);
break;
}
return NF_DROP;
}
-static bool reject_tg6_check(const struct xt_tgchk_param *par)
+static int reject_tg6_check(const struct xt_tgchk_param *par)
{
const struct ip6t_reject_info *rejinfo = par->targinfo;
const struct ip6t_entry *e = par->entryinfo;
if (rejinfo->with == IP6T_ICMP6_ECHOREPLY) {
- printk("ip6t_REJECT: ECHOREPLY is not supported.\n");
- return false;
+ pr_info("ECHOREPLY is not supported.\n");
+ return -EINVAL;
} else if (rejinfo->with == IP6T_TCP_RESET) {
/* Must specify that it's a TCP packet */
if (e->ipv6.proto != IPPROTO_TCP ||
(e->ipv6.invflags & XT_INV_PROTO)) {
- printk("ip6t_REJECT: TCP_RESET illegal for non-tcp\n");
- return false;
+ pr_info("TCP_RESET illegal for non-tcp\n");
+ return -EINVAL;
}
}
- return true;
+ return 0;
}
static struct xt_target reject_tg6_reg __read_mostly = {
diff --git a/net/ipv6/netfilter/ip6t_ah.c b/net/ipv6/netfilter/ip6t_ah.c
index ac0b7c629d78..89cccc5a9c92 100644
--- a/net/ipv6/netfilter/ip6t_ah.c
+++ b/net/ipv6/netfilter/ip6t_ah.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
@@ -29,14 +29,14 @@ spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, bool invert)
{
bool r;
- pr_debug("ah spi_match:%c 0x%x <= 0x%x <= 0x%x",
+ pr_debug("spi_match:%c 0x%x <= 0x%x <= 0x%x\n",
invert ? '!' : ' ', min, spi, max);
r = (spi >= min && spi <= max) ^ invert;
pr_debug(" result %s\n", r ? "PASS" : "FAILED");
return r;
}
-static bool ah_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
+static bool ah_mt6(const struct sk_buff *skb, struct xt_action_param *par)
{
struct ip_auth_hdr _ah;
const struct ip_auth_hdr *ah;
@@ -48,13 +48,13 @@ static bool ah_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
err = ipv6_find_hdr(skb, &ptr, NEXTHDR_AUTH, NULL);
if (err < 0) {
if (err != -ENOENT)
- *par->hotdrop = true;
+ par->hotdrop = true;
return false;
}
ah = skb_header_pointer(skb, ptr, sizeof(_ah), &_ah);
if (ah == NULL) {
- *par->hotdrop = true;
+ par->hotdrop = true;
return false;
}
@@ -87,15 +87,15 @@ static bool ah_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
!(ahinfo->hdrres && ah->reserved);
}
-static bool ah_mt6_check(const struct xt_mtchk_param *par)
+static int ah_mt6_check(const struct xt_mtchk_param *par)
{
const struct ip6t_ah *ahinfo = par->matchinfo;
if (ahinfo->invflags & ~IP6T_AH_INV_MASK) {
- pr_debug("ip6t_ah: unknown flags %X\n", ahinfo->invflags);
- return false;
+ pr_debug("unknown flags %X\n", ahinfo->invflags);
+ return -EINVAL;
}
- return true;
+ return 0;
}
static struct xt_match ah_mt6_reg __read_mostly = {
diff --git a/net/ipv6/netfilter/ip6t_eui64.c b/net/ipv6/netfilter/ip6t_eui64.c
index ca287f6d2bce..aab0706908c5 100644
--- a/net/ipv6/netfilter/ip6t_eui64.c
+++ b/net/ipv6/netfilter/ip6t_eui64.c
@@ -20,14 +20,14 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
static bool
-eui64_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
+eui64_mt6(const struct sk_buff *skb, struct xt_action_param *par)
{
unsigned char eui64[8];
if (!(skb_mac_header(skb) >= skb->head &&
skb_mac_header(skb) + ETH_HLEN <= skb->data) &&
par->fragoff != 0) {
- *par->hotdrop = true;
+ par->hotdrop = true;
return false;
}
diff --git a/net/ipv6/netfilter/ip6t_frag.c b/net/ipv6/netfilter/ip6t_frag.c
index 7b91c2598ed5..eda898fda6ca 100644
--- a/net/ipv6/netfilter/ip6t_frag.c
+++ b/net/ipv6/netfilter/ip6t_frag.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ipv6.h>
@@ -27,7 +27,7 @@ static inline bool
id_match(u_int32_t min, u_int32_t max, u_int32_t id, bool invert)
{
bool r;
- pr_debug("frag id_match:%c 0x%x <= 0x%x <= 0x%x", invert ? '!' : ' ',
+ pr_debug("id_match:%c 0x%x <= 0x%x <= 0x%x\n", invert ? '!' : ' ',
min, id, max);
r = (id >= min && id <= max) ^ invert;
pr_debug(" result %s\n", r ? "PASS" : "FAILED");
@@ -35,7 +35,7 @@ id_match(u_int32_t min, u_int32_t max, u_int32_t id, bool invert)
}
static bool
-frag_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
+frag_mt6(const struct sk_buff *skb, struct xt_action_param *par)
{
struct frag_hdr _frag;
const struct frag_hdr *fh;
@@ -46,13 +46,13 @@ frag_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
err = ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL);
if (err < 0) {
if (err != -ENOENT)
- *par->hotdrop = true;
+ par->hotdrop = true;
return false;
}
fh = skb_header_pointer(skb, ptr, sizeof(_frag), &_frag);
if (fh == NULL) {
- *par->hotdrop = true;
+ par->hotdrop = true;
return false;
}
@@ -102,15 +102,15 @@ frag_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
(ntohs(fh->frag_off) & IP6_MF));
}
-static bool frag_mt6_check(const struct xt_mtchk_param *par)
+static int frag_mt6_check(const struct xt_mtchk_param *par)
{
const struct ip6t_frag *fraginfo = par->matchinfo;
if (fraginfo->invflags & ~IP6T_FRAG_INV_MASK) {
- pr_debug("ip6t_frag: unknown flags %X\n", fraginfo->invflags);
- return false;
+ pr_debug("unknown flags %X\n", fraginfo->invflags);
+ return -EINVAL;
}
- return true;
+ return 0;
}
static struct xt_match frag_mt6_reg __read_mostly = {
diff --git a/net/ipv6/netfilter/ip6t_hbh.c b/net/ipv6/netfilter/ip6t_hbh.c
index cbe8dec9744b..59df051eaef6 100644
--- a/net/ipv6/netfilter/ip6t_hbh.c
+++ b/net/ipv6/netfilter/ip6t_hbh.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ipv6.h>
@@ -41,8 +41,10 @@ MODULE_ALIAS("ip6t_dst");
* 5 -> RTALERT 2 x x
*/
+static struct xt_match hbh_mt6_reg[] __read_mostly;
+
static bool
-hbh_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
+hbh_mt6(const struct sk_buff *skb, struct xt_action_param *par)
{
struct ipv6_opt_hdr _optsh;
const struct ipv6_opt_hdr *oh;
@@ -58,16 +60,18 @@ hbh_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
unsigned int optlen;
int err;
- err = ipv6_find_hdr(skb, &ptr, par->match->data, NULL);
+ err = ipv6_find_hdr(skb, &ptr,
+ (par->match == &hbh_mt6_reg[0]) ?
+ NEXTHDR_HOP : NEXTHDR_DEST, NULL);
if (err < 0) {
if (err != -ENOENT)
- *par->hotdrop = true;
+ par->hotdrop = true;
return false;
}
oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh);
if (oh == NULL) {
- *par->hotdrop = true;
+ par->hotdrop = true;
return false;
}
@@ -141,11 +145,11 @@ hbh_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
}
/* Step to the next */
- pr_debug("len%04X \n", optlen);
+ pr_debug("len%04X\n", optlen);
if ((ptr > skb->len - optlen || hdrlen < optlen) &&
temp < optinfo->optsnr - 1) {
- pr_debug("new pointer is too large! \n");
+ pr_debug("new pointer is too large!\n");
break;
}
ptr += optlen;
@@ -160,32 +164,32 @@ hbh_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
return false;
}
-static bool hbh_mt6_check(const struct xt_mtchk_param *par)
+static int hbh_mt6_check(const struct xt_mtchk_param *par)
{
const struct ip6t_opts *optsinfo = par->matchinfo;
if (optsinfo->invflags & ~IP6T_OPTS_INV_MASK) {
- pr_debug("ip6t_opts: unknown flags %X\n", optsinfo->invflags);
- return false;
+ pr_debug("unknown flags %X\n", optsinfo->invflags);
+ return -EINVAL;
}
if (optsinfo->flags & IP6T_OPTS_NSTRICT) {
- pr_debug("ip6t_opts: Not strict - not implemented");
- return false;
+ pr_debug("Not strict - not implemented");
+ return -EINVAL;
}
- return true;
+ return 0;
}
static struct xt_match hbh_mt6_reg[] __read_mostly = {
{
+ /* Note, hbh_mt6 relies on the order of hbh_mt6_reg */
.name = "hbh",
.family = NFPROTO_IPV6,
.match = hbh_mt6,
.matchsize = sizeof(struct ip6t_opts),
.checkentry = hbh_mt6_check,
.me = THIS_MODULE,
- .data = NEXTHDR_HOP,
},
{
.name = "dst",
@@ -194,7 +198,6 @@ static struct xt_match hbh_mt6_reg[] __read_mostly = {
.matchsize = sizeof(struct ip6t_opts),
.checkentry = hbh_mt6_check,
.me = THIS_MODULE,
- .data = NEXTHDR_DEST,
},
};
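A note on the ordering dependency flagged in the comment above
(illustration, not part of the patch): with the .data field dropped
from struct xt_match, hbh_mt6() infers the header type from which
slot of hbh_mt6_reg[] it was invoked for, so "hbh" must stay first.
The dispatch could equally live in a small helper:

    /* sketch; assumes the hbh_mt6_reg[] layout above ("hbh" then "dst") */
    static u8 hbh_hdr_type(const struct xt_match *match)
    {
            return (match == &hbh_mt6_reg[0]) ? NEXTHDR_HOP : NEXTHDR_DEST;
    }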
diff --git a/net/ipv6/netfilter/ip6t_ipv6header.c b/net/ipv6/netfilter/ip6t_ipv6header.c
index 91490ad9302c..54bd9790603f 100644
--- a/net/ipv6/netfilter/ip6t_ipv6header.c
+++ b/net/ipv6/netfilter/ip6t_ipv6header.c
@@ -27,7 +27,7 @@ MODULE_DESCRIPTION("Xtables: IPv6 header types match");
MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
static bool
-ipv6header_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
+ipv6header_mt6(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ip6t_ipv6header_info *info = par->matchinfo;
unsigned int temp;
@@ -118,16 +118,16 @@ ipv6header_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
}
}
-static bool ipv6header_mt6_check(const struct xt_mtchk_param *par)
+static int ipv6header_mt6_check(const struct xt_mtchk_param *par)
{
const struct ip6t_ipv6header_info *info = par->matchinfo;
/* invflags is 0 or 0xff in hard mode */
if ((!info->modeflag) && info->invflags != 0x00 &&
info->invflags != 0xFF)
- return false;
+ return -EINVAL;
- return true;
+ return 0;
}
static struct xt_match ipv6header_mt6_reg __read_mostly = {
diff --git a/net/ipv6/netfilter/ip6t_mh.c b/net/ipv6/netfilter/ip6t_mh.c
index aafe4e66577b..0c90c66b1992 100644
--- a/net/ipv6/netfilter/ip6t_mh.c
+++ b/net/ipv6/netfilter/ip6t_mh.c
@@ -11,6 +11,7 @@
* Based on net/netfilter/xt_tcpudp.c
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/module.h>
#include <net/ip.h>
@@ -24,12 +25,6 @@
MODULE_DESCRIPTION("Xtables: IPv6 Mobility Header match");
MODULE_LICENSE("GPL");
-#ifdef DEBUG_IP_FIREWALL_USER
-#define duprintf(format, args...) printk(format , ## args)
-#else
-#define duprintf(format, args...)
-#endif
-
/* Returns 1 if the type is matched by the range, 0 otherwise */
static inline bool
type_match(u_int8_t min, u_int8_t max, u_int8_t type, bool invert)
@@ -37,7 +32,7 @@ type_match(u_int8_t min, u_int8_t max, u_int8_t type, bool invert)
return (type >= min && type <= max) ^ invert;
}
-static bool mh_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
+static bool mh_mt6(const struct sk_buff *skb, struct xt_action_param *par)
{
struct ip6_mh _mh;
const struct ip6_mh *mh;
@@ -51,15 +46,15 @@ static bool mh_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
if (mh == NULL) {
/* We've been asked to examine this packet, and we
can't. Hence, no choice but to drop. */
- duprintf("Dropping evil MH tinygram.\n");
- *par->hotdrop = true;
+ pr_debug("Dropping evil MH tinygram.\n");
+ par->hotdrop = true;
return false;
}
if (mh->ip6mh_proto != IPPROTO_NONE) {
- duprintf("Dropping invalid MH Payload Proto: %u\n",
+ pr_debug("Dropping invalid MH Payload Proto: %u\n",
mh->ip6mh_proto);
- *par->hotdrop = true;
+ par->hotdrop = true;
return false;
}
@@ -67,12 +62,12 @@ static bool mh_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
!!(mhinfo->invflags & IP6T_MH_INV_TYPE));
}
-static bool mh_mt6_check(const struct xt_mtchk_param *par)
+static int mh_mt6_check(const struct xt_mtchk_param *par)
{
const struct ip6t_mh *mhinfo = par->matchinfo;
/* Must specify no unknown invflags */
- return !(mhinfo->invflags & ~IP6T_MH_INV_MASK);
+ return (mhinfo->invflags & ~IP6T_MH_INV_MASK) ? -EINVAL : 0;
}
static struct xt_match mh_mt6_reg __read_mostly = {
diff --git a/net/ipv6/netfilter/ip6t_rt.c b/net/ipv6/netfilter/ip6t_rt.c
index b77307fc8743..d8488c50a8e0 100644
--- a/net/ipv6/netfilter/ip6t_rt.c
+++ b/net/ipv6/netfilter/ip6t_rt.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ipv6.h>
@@ -29,14 +29,14 @@ static inline bool
segsleft_match(u_int32_t min, u_int32_t max, u_int32_t id, bool invert)
{
bool r;
- pr_debug("rt segsleft_match:%c 0x%x <= 0x%x <= 0x%x",
+ pr_debug("segsleft_match:%c 0x%x <= 0x%x <= 0x%x\n",
invert ? '!' : ' ', min, id, max);
r = (id >= min && id <= max) ^ invert;
pr_debug(" result %s\n", r ? "PASS" : "FAILED");
return r;
}
-static bool rt_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
+static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
{
struct ipv6_rt_hdr _route;
const struct ipv6_rt_hdr *rh;
@@ -52,13 +52,13 @@ static bool rt_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
err = ipv6_find_hdr(skb, &ptr, NEXTHDR_ROUTING, NULL);
if (err < 0) {
if (err != -ENOENT)
- *par->hotdrop = true;
+ par->hotdrop = true;
return false;
}
rh = skb_header_pointer(skb, ptr, sizeof(_route), &_route);
if (rh == NULL) {
- *par->hotdrop = true;
+ par->hotdrop = true;
return false;
}
@@ -183,23 +183,23 @@ static bool rt_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
return false;
}
-static bool rt_mt6_check(const struct xt_mtchk_param *par)
+static int rt_mt6_check(const struct xt_mtchk_param *par)
{
const struct ip6t_rt *rtinfo = par->matchinfo;
if (rtinfo->invflags & ~IP6T_RT_INV_MASK) {
- pr_debug("ip6t_rt: unknown flags %X\n", rtinfo->invflags);
- return false;
+ pr_debug("unknown flags %X\n", rtinfo->invflags);
+ return -EINVAL;
}
if ((rtinfo->flags & (IP6T_RT_RES | IP6T_RT_FST_MASK)) &&
(!(rtinfo->flags & IP6T_RT_TYP) ||
(rtinfo->rt_type != 0) ||
(rtinfo->invflags & IP6T_RT_INV_TYP))) {
pr_debug("`--rt-type 0' required before `--rt-0-*'");
- return false;
+ return -EINVAL;
}
- return true;
+ return 0;
}
static struct xt_match rt_mt6_reg __read_mostly = {
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c
index d6fc9aff3163..c9e37c8fd62c 100644
--- a/net/ipv6/netfilter/ip6table_filter.c
+++ b/net/ipv6/netfilter/ip6table_filter.c
@@ -81,7 +81,7 @@ static int __init ip6table_filter_init(void)
int ret;
if (forward < 0 || forward > NF_MAX_VERDICT) {
- printk("iptables forward must be 0 or 1\n");
+ pr_err("iptables forward must be 0 or 1\n");
return -EINVAL;
}
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index 6a102b57f356..679a0a3b7b3c 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -43,7 +43,7 @@ ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out)
if (skb->len < sizeof(struct iphdr) ||
ip_hdrlen(skb) < sizeof(struct iphdr)) {
if (net_ratelimit())
- printk("ip6t_hook: happy cracking.\n");
+ pr_warning("ip6t_hook: happy cracking.\n");
return NF_ACCEPT;
}
#endif
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 996c3f41fecd..ff43461704be 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -280,7 +280,7 @@ static unsigned int ipv6_conntrack_local(unsigned int hooknum,
/* root is playing with raw sockets. */
if (skb->len < sizeof(struct ipv6hdr)) {
if (net_ratelimit())
- printk("ipv6_conntrack_local: packet too short\n");
+ pr_notice("ipv6_conntrack_local: packet too short\n");
return NF_ACCEPT;
}
return __ipv6_conntrack_in(dev_net(out), hooknum, skb, okfn);
@@ -406,37 +406,37 @@ static int __init nf_conntrack_l3proto_ipv6_init(void)
ret = nf_ct_frag6_init();
if (ret < 0) {
- printk("nf_conntrack_ipv6: can't initialize frag6.\n");
+ pr_err("nf_conntrack_ipv6: can't initialize frag6.\n");
return ret;
}
ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_tcp6);
if (ret < 0) {
- printk("nf_conntrack_ipv6: can't register tcp.\n");
+ pr_err("nf_conntrack_ipv6: can't register tcp.\n");
goto cleanup_frag6;
}
ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_udp6);
if (ret < 0) {
- printk("nf_conntrack_ipv6: can't register udp.\n");
+ pr_err("nf_conntrack_ipv6: can't register udp.\n");
goto cleanup_tcp;
}
ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_icmpv6);
if (ret < 0) {
- printk("nf_conntrack_ipv6: can't register icmpv6.\n");
+ pr_err("nf_conntrack_ipv6: can't register icmpv6.\n");
goto cleanup_udp;
}
ret = nf_conntrack_l3proto_register(&nf_conntrack_l3proto_ipv6);
if (ret < 0) {
- printk("nf_conntrack_ipv6: can't register ipv6\n");
+ pr_err("nf_conntrack_ipv6: can't register ipv6\n");
goto cleanup_icmpv6;
}
ret = nf_register_hooks(ipv6_conntrack_ops,
ARRAY_SIZE(ipv6_conntrack_ops));
if (ret < 0) {
- printk("nf_conntrack_ipv6: can't register pre-routing defrag "
+ pr_err("nf_conntrack_ipv6: can't register pre-routing defrag "
"hook.\n");
goto cleanup_ipv6;
}
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index dd5b9bd61c62..6fb890187de0 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -644,7 +644,7 @@ void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
s2 = s->next;
s->next = NULL;
- NF_HOOK_THRESH(PF_INET6, hooknum, s, in, out, okfn,
+ NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, s, in, out, okfn,
NF_IP6_PRI_CONNTRACK_DEFRAG + 1);
s = s2;
}
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 58344c0fbd13..566798d69f37 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -97,6 +97,7 @@ static const struct snmp_mib snmp6_icmp6_list[] = {
SNMP_MIB_ITEM("Icmp6InMsgs", ICMP6_MIB_INMSGS),
SNMP_MIB_ITEM("Icmp6InErrors", ICMP6_MIB_INERRORS),
SNMP_MIB_ITEM("Icmp6OutMsgs", ICMP6_MIB_OUTMSGS),
+ SNMP_MIB_ITEM("Icmp6OutErrors", ICMP6_MIB_OUTERRORS),
SNMP_MIB_SENTINEL
};
@@ -167,7 +168,6 @@ static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void __percpu **mib)
i & 0x100 ? "Out" : "In", i & 0xff);
seq_printf(seq, "%-32s\t%lu\n", name, val);
}
- return;
}
static void snmp6_seq_show_item(struct seq_file *seq, void __percpu **mib,
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 8763b1a0814a..4a4dcbe4f8b2 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -381,7 +381,7 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
}
/* Charge it to the socket. */
- if (sock_queue_rcv_skb(sk, skb) < 0) {
+ if (ip_queue_rcv_skb(sk, skb) < 0) {
kfree_skb(skb);
return NET_RX_DROP;
}
@@ -461,6 +461,9 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
if (flags & MSG_ERRQUEUE)
return ipv6_recv_error(sk, msg, len);
+ if (np->rxpmtu && np->rxopt.bits.rxpmtu)
+ return ipv6_recv_rxpmtu(sk, msg, len);
+
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
goto out;
@@ -637,8 +640,8 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
goto error_fault;
IP6_UPD_PO_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
- err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
- dst_output);
+ err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
+ rt->u.dst.dev, dst_output);
if (err > 0)
err = net_xmit_errno(err);
if (err)
@@ -733,6 +736,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
int addr_len = msg->msg_namelen;
int hlimit = -1;
int tclass = -1;
+ int dontfrag = -1;
u16 proto;
int err;
@@ -811,7 +815,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
memset(opt, 0, sizeof(struct ipv6_txoptions));
opt->tot_len = sizeof(struct ipv6_txoptions);
- err = datagram_send_ctl(sock_net(sk), msg, &fl, opt, &hlimit, &tclass);
+ err = datagram_send_ctl(sock_net(sk), msg, &fl, opt, &hlimit,
+ &tclass, &dontfrag);
if (err < 0) {
fl6_sock_release(flowlabel);
return err;
@@ -880,6 +885,9 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
if (tclass < 0)
tclass = np->tclass;
+ if (dontfrag < 0)
+ dontfrag = np->dontfrag;
+
if (msg->msg_flags&MSG_CONFIRM)
goto do_confirm;
@@ -890,7 +898,7 @@ back_from_confirm:
lock_sock(sk);
err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov,
len, 0, hlimit, tclass, opt, &fl, (struct rt6_info*)dst,
- msg->msg_flags);
+ msg->msg_flags, dontfrag);
if (err)
ip6_flush_pending_frames(sk);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 05ebd7833043..294cbe8b0725 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -316,7 +316,6 @@ static void rt6_probe(struct rt6_info *rt)
#else
static inline void rt6_probe(struct rt6_info *rt)
{
- return;
}
#endif
@@ -1553,7 +1552,6 @@ void rt6_redirect(struct in6_addr *dest, struct in6_addr *src,
out:
dst_release(&rt->u.dst);
- return;
}
/*
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 5abae10cd884..e51e650ea80b 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -566,11 +566,9 @@ static int ipip6_rcv(struct sk_buff *skb)
kfree_skb(skb);
return 0;
}
- tunnel->dev->stats.rx_packets++;
- tunnel->dev->stats.rx_bytes += skb->len;
- skb->dev = tunnel->dev;
- skb_dst_drop(skb);
- nf_reset(skb);
+
+ skb_tunnel_rx(skb, tunnel->dev);
+
ipip6_ecn_decapsulate(iph, skb);
netif_rx(skb);
rcu_read_unlock();
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 075f540ec197..2b7c3a100e2c 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -75,6 +75,9 @@ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
struct request_sock *req);
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
+static void __tcp_v6_send_check(struct sk_buff *skb,
+ struct in6_addr *saddr,
+ struct in6_addr *daddr);
static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
@@ -350,6 +353,11 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (sk->sk_state == TCP_CLOSE)
goto out;
+ if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
+ NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+ goto out;
+ }
+
tp = tcp_sk(sk);
seq = ntohl(th->seq);
if (sk->sk_state != TCP_LISTEN &&
@@ -503,14 +511,10 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
skb = tcp_make_synack(sk, dst, req, rvp);
if (skb) {
- struct tcphdr *th = tcp_hdr(skb);
-
- th->check = tcp_v6_check(skb->len,
- &treq->loc_addr, &treq->rmt_addr,
- csum_partial(th, skb->len, skb->csum));
+ __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
- err = ip6_xmit(sk, skb, &fl, opt, 0);
+ err = ip6_xmit(sk, skb, &fl, opt);
err = net_xmit_eval(err);
}
@@ -600,7 +604,7 @@ static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
kfree(newkey);
return -ENOMEM;
}
- sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
+ sk_nocaps_add(sk, NETIF_F_GSO_MASK);
}
if (tcp_alloc_md5sig_pool(sk) == NULL) {
kfree(newkey);
@@ -737,7 +741,7 @@ static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
return -ENOMEM;
tp->md5sig_info = p;
- sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
+ sk_nocaps_add(sk, NETIF_F_GSO_MASK);
}
newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
@@ -918,22 +922,29 @@ static struct timewait_sock_ops tcp6_timewait_sock_ops = {
.twsk_destructor= tcp_twsk_destructor,
};
-static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
+static void __tcp_v6_send_check(struct sk_buff *skb,
+ struct in6_addr *saddr, struct in6_addr *daddr)
{
- struct ipv6_pinfo *np = inet6_sk(sk);
struct tcphdr *th = tcp_hdr(skb);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
+ th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
skb->csum_start = skb_transport_header(skb) - skb->head;
skb->csum_offset = offsetof(struct tcphdr, check);
} else {
- th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
- csum_partial(th, th->doff<<2,
- skb->csum));
+ th->check = tcp_v6_check(skb->len, saddr, daddr,
+ csum_partial(th, th->doff << 2,
+ skb->csum));
}
}
+static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
+{
+ struct ipv6_pinfo *np = inet6_sk(sk);
+
+ __tcp_v6_send_check(skb, &np->saddr, &np->daddr);
+}
+
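A sketch of what the CHECKSUM_PARTIAL state set up by
__tcp_v6_send_check() means (illustration, not part of the patch):
only the pseudo-header sum is stored in th->check, and the payload
sum is folded in later by the NIC or in software, roughly as below.
This mirrors what skb_checksum_help() does; the helper name here is
hypothetical.

    static void example_complete_partial_csum(struct sk_buff *skb)
    {
            /* csum_start records where summing begins, relative to skb->head */
            unsigned int off = skb->csum_start - skb_headroom(skb);
            __wsum sum = csum_partial(skb->data + off, skb->len - off, 0);

            /* csum_offset says where the folded result is stored */
            *(__sum16 *)(skb->data + off + skb->csum_offset) = csum_fold(sum);
    }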
static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
struct ipv6hdr *ipv6h;
@@ -946,11 +957,8 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb)
th = tcp_hdr(skb);
th->check = 0;
- th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
- IPPROTO_TCP, 0);
- skb->csum_start = skb_transport_header(skb) - skb->head;
- skb->csum_offset = offsetof(struct tcphdr, check);
skb->ip_summed = CHECKSUM_PARTIAL;
+ __tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
return 0;
}
@@ -1047,15 +1055,14 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
}
#endif
- buff->csum = csum_partial(t1, tot_len, 0);
-
memset(&fl, 0, sizeof(fl));
ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);
- t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
- tot_len, IPPROTO_TCP,
- buff->csum);
+ buff->ip_summed = CHECKSUM_PARTIAL;
+ buff->csum = 0;
+
+ __tcp_v6_send_check(buff, &fl.fl6_src, &fl.fl6_dst);
fl.proto = IPPROTO_TCP;
fl.oif = inet6_iif(skb);
@@ -1070,7 +1077,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) {
if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) {
skb_dst_set(buff, dst);
- ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
+ ip6_xmit(ctl_sk, buff, &fl, NULL);
TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
if (rst)
TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
@@ -1233,12 +1240,12 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
goto drop_and_free;
/* Secret recipe starts with IP addresses */
- d = &ipv6_hdr(skb)->daddr.s6_addr32[0];
+ d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
*mess++ ^= *d++;
*mess++ ^= *d++;
*mess++ ^= *d++;
*mess++ ^= *d++;
- d = &ipv6_hdr(skb)->saddr.s6_addr32[0];
+ d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
*mess++ ^= *d++;
*mess++ ^= *d++;
*mess++ ^= *d++;
@@ -1676,6 +1683,7 @@ ipv6_pktoptions:
static int tcp_v6_rcv(struct sk_buff *skb)
{
struct tcphdr *th;
+ struct ipv6hdr *hdr;
struct sock *sk;
int ret;
struct net *net = dev_net(skb->dev);
@@ -1702,12 +1710,13 @@ static int tcp_v6_rcv(struct sk_buff *skb)
goto bad_packet;
th = tcp_hdr(skb);
+ hdr = ipv6_hdr(skb);
TCP_SKB_CB(skb)->seq = ntohl(th->seq);
TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
skb->len - th->doff*4);
TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
TCP_SKB_CB(skb)->when = 0;
- TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb));
+ TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(hdr);
TCP_SKB_CB(skb)->sacked = 0;
sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
@@ -1718,6 +1727,11 @@ process:
if (sk->sk_state == TCP_TIME_WAIT)
goto do_time_wait;
+ if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
+ NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+ goto discard_and_relse;
+ }
+
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_and_relse;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 90824852f598..3d7a2c0b836a 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -91,9 +91,9 @@ static unsigned int udp6_portaddr_hash(struct net *net,
if (ipv6_addr_any(addr6))
hash = jhash_1word(0, mix);
else if (ipv6_addr_v4mapped(addr6))
- hash = jhash_1word(addr6->s6_addr32[3], mix);
+ hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix);
else
- hash = jhash2(addr6->s6_addr32, 4, mix);
+ hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix);
return hash ^ port;
}
@@ -335,6 +335,9 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
if (flags & MSG_ERRQUEUE)
return ipv6_recv_error(sk, msg, len);
+ if (np->rxpmtu && np->rxopt.bits.rxpmtu)
+ return ipv6_recv_rxpmtu(sk, msg, len);
+
try_again:
skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
&peeked, &err);
@@ -421,7 +424,7 @@ out:
return err;
csum_copy_err:
- lock_sock(sk);
+ lock_sock_bh(sk);
if (!skb_kill_datagram(sk, skb, flags)) {
if (is_udp4)
UDP_INC_STATS_USER(sock_net(sk),
@@ -430,7 +433,7 @@ csum_copy_err:
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS, is_udplite);
}
- release_sock(sk);
+ unlock_sock_bh(sk);
if (flags & MSG_DONTWAIT)
return -EAGAIN;
@@ -511,7 +514,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
goto drop;
}
- if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) {
+ if ((rc = ip_queue_rcv_skb(sk, skb)) < 0) {
/* Note that an ENOMEM error is charged twice */
if (rc == -ENOMEM)
UDP6_INC_STATS_BH(sock_net(sk),
@@ -581,6 +584,10 @@ static void flush_stack(struct sock **stack, unsigned int count,
sk = stack[i];
if (skb1) {
+ if (sk_rcvqueues_full(sk, skb)) {
+ kfree_skb(skb1);
+ goto drop;
+ }
bh_lock_sock(sk);
if (!sock_owned_by_user(sk))
udpv6_queue_rcv_skb(sk, skb1);
@@ -692,7 +699,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
u32 ulen = 0;
if (!pskb_may_pull(skb, sizeof(struct udphdr)))
- goto short_packet;
+ goto discard;
saddr = &ipv6_hdr(skb)->saddr;
daddr = &ipv6_hdr(skb)->daddr;
@@ -756,6 +763,10 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
/* deliver */
+ if (sk_rcvqueues_full(sk, skb)) {
+ sock_put(sk);
+ goto discard;
+ }
bh_lock_sock(sk);
if (!sock_owned_by_user(sk))
udpv6_queue_rcv_skb(sk, skb);
@@ -770,9 +781,14 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
return 0;
short_packet:
- LIMIT_NETDEBUG(KERN_DEBUG "UDP%sv6: short packet: %d/%u\n",
+ LIMIT_NETDEBUG(KERN_DEBUG "UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
proto == IPPROTO_UDPLITE ? "-Lite" : "",
- ulen, skb->len);
+ saddr,
+ ntohs(uh->source),
+ ulen,
+ skb->len,
+ daddr,
+ ntohs(uh->dest));
discard:
UDP6_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
@@ -919,6 +935,7 @@ int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
int ulen = len;
int hlimit = -1;
int tclass = -1;
+ int dontfrag = -1;
int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
int err;
int connected = 0;
@@ -1049,7 +1066,8 @@ do_udp_sendmsg:
memset(opt, 0, sizeof(struct ipv6_txoptions));
opt->tot_len = sizeof(*opt);
- err = datagram_send_ctl(sock_net(sk), msg, &fl, opt, &hlimit, &tclass);
+ err = datagram_send_ctl(sock_net(sk), msg, &fl, opt, &hlimit,
+ &tclass, &dontfrag);
if (err < 0) {
fl6_sock_release(flowlabel);
return err;
@@ -1120,6 +1138,9 @@ do_udp_sendmsg:
if (tclass < 0)
tclass = np->tclass;
+ if (dontfrag < 0)
+ dontfrag = np->dontfrag;
+
if (msg->msg_flags&MSG_CONFIRM)
goto do_confirm;
back_from_confirm:
@@ -1143,7 +1164,7 @@ do_append_data:
err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen,
sizeof(struct udphdr), hlimit, tclass, opt, &fl,
(struct rt6_info*)dst,
- corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
+ corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, dontfrag);
if (err)
udp_v6_flush_pending_frames(sk);
else if (!corkreq)
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index 2bc98ede1235..f8c3cf842f53 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -42,7 +42,7 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
ipv6_hdr(skb)->payload_len = htons(skb->len);
__skb_push(skb, skb->data - skb_network_header(skb));
- NF_HOOK(PF_INET6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
+ NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
ip6_rcv_finish);
return -1;
}
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 0c92112dcba3..6434bd5ce088 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -90,6 +90,6 @@ static int xfrm6_output_finish(struct sk_buff *skb)
int xfrm6_output(struct sk_buff *skb)
{
- return NF_HOOK(PF_INET6, NF_INET_POST_ROUTING, skb, NULL, skb_dst(skb)->dev,
- xfrm6_output_finish);
+ return NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL,
+ skb_dst(skb)->dev, xfrm6_output_finish);
}
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 00bf7c962b7e..4a0e77e14468 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -67,36 +67,6 @@ static int xfrm6_get_saddr(struct net *net,
return 0;
}
-static struct dst_entry *
-__xfrm6_find_bundle(struct flowi *fl, struct xfrm_policy *policy)
-{
- struct dst_entry *dst;
-
- /* Still not clear if we should set fl->fl6_{src,dst}... */
- read_lock_bh(&policy->lock);
- for (dst = policy->bundles; dst; dst = dst->next) {
- struct xfrm_dst *xdst = (struct xfrm_dst*)dst;
- struct in6_addr fl_dst_prefix, fl_src_prefix;
-
- ipv6_addr_prefix(&fl_dst_prefix,
- &fl->fl6_dst,
- xdst->u.rt6.rt6i_dst.plen);
- ipv6_addr_prefix(&fl_src_prefix,
- &fl->fl6_src,
- xdst->u.rt6.rt6i_src.plen);
- if (ipv6_addr_equal(&xdst->u.rt6.rt6i_dst.addr, &fl_dst_prefix) &&
- ipv6_addr_equal(&xdst->u.rt6.rt6i_src.addr, &fl_src_prefix) &&
- xfrm_bundle_ok(policy, xdst, fl, AF_INET6,
- (xdst->u.rt6.rt6i_dst.plen != 128 ||
- xdst->u.rt6.rt6i_src.plen != 128))) {
- dst_clone(dst);
- break;
- }
- }
- read_unlock_bh(&policy->lock);
- return dst;
-}
-
static int xfrm6_get_tos(struct flowi *fl)
{
return 0;
@@ -291,7 +261,6 @@ static struct xfrm_policy_afinfo xfrm6_policy_afinfo = {
.dst_ops = &xfrm6_dst_ops,
.dst_lookup = xfrm6_dst_lookup,
.get_saddr = xfrm6_get_saddr,
- .find_bundle = __xfrm6_find_bundle,
.decode_session = _decode_session6,
.get_tos = xfrm6_get_tos,
.init_path = xfrm6_init_path,
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 2a4efcea3423..79986a674f6e 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -347,7 +347,7 @@ static void irda_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
self->tx_flow = flow;
IRDA_DEBUG(1, "%s(), IrTTP wants us to start again\n",
__func__);
- wake_up_interruptible(sk->sk_sleep);
+ wake_up_interruptible(sk_sleep(sk));
break;
default:
IRDA_DEBUG(0, "%s(), Unknown flow command!\n", __func__);
@@ -900,7 +900,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
if (flags & O_NONBLOCK)
goto out;
- err = wait_event_interruptible(*(sk->sk_sleep),
+ err = wait_event_interruptible(*(sk_sleep(sk)),
skb_peek(&sk->sk_receive_queue));
if (err)
goto out;
@@ -1066,7 +1066,7 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
goto out;
err = -ERESTARTSYS;
- if (wait_event_interruptible(*(sk->sk_sleep),
+ if (wait_event_interruptible(*(sk_sleep(sk)),
(sk->sk_state != TCP_SYN_SENT)))
goto out;
@@ -1318,7 +1318,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
/* Check if IrTTP wants us to slow down */
- if (wait_event_interruptible(*(sk->sk_sleep),
+ if (wait_event_interruptible(*(sk_sleep(sk)),
(self->tx_flow != FLOW_STOP || sk->sk_state != TCP_ESTABLISHED))) {
err = -ERESTARTSYS;
goto out;
@@ -1477,7 +1477,7 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
if (copied >= target)
break;
- prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
/*
* POSIX 1003.1g mandates this order.
@@ -1497,7 +1497,7 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
/* Wait process until data arrives */
schedule();
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
if (err)
goto out;
@@ -1787,7 +1787,7 @@ static unsigned int irda_poll(struct file * file, struct socket *sock,
IRDA_DEBUG(4, "%s()\n", __func__);
lock_kernel();
- poll_wait(file, sk->sk_sleep, wait);
+ poll_wait(file, sk_sleep(sk), wait);
mask = 0;
/* Exceptional events? */
diff --git a/net/irda/ircomm/ircomm_param.c b/net/irda/ircomm/ircomm_param.c
index e2e893b474e9..8b915f3ac3b9 100644
--- a/net/irda/ircomm/ircomm_param.c
+++ b/net/irda/ircomm/ircomm_param.c
@@ -475,7 +475,7 @@ static int ircomm_param_dce(void *instance, irda_param_t *param, int get)
/* Check if any of the settings have changed */
if (dce & 0x0f) {
if (dce & IRCOMM_DELTA_CTS) {
- IRDA_DEBUG(2, "%s(), CTS \n", __func__ );
+ IRDA_DEBUG(2, "%s(), CTS\n", __func__ );
}
}
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index 79a1e5a23e10..fce364c6c71a 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -685,8 +685,6 @@ static void iriap_getvaluebyclass_indication(struct iriap_cb *self,
/* We have a match; send the value. */
iriap_getvaluebyclass_response(self, obj->id, IAS_SUCCESS,
attrib->value);
-
- return;
}
/*
diff --git a/net/irda/irnet/irnet_irda.c b/net/irda/irnet/irnet_irda.c
index df18ab4b6c5e..e98e40d76f4f 100644
--- a/net/irda/irnet/irnet_irda.c
+++ b/net/irda/irnet/irnet_irda.c
@@ -678,7 +678,6 @@ irda_irnet_destroy(irnet_socket * self)
self->stsap_sel = 0;
DEXIT(IRDA_SOCK_TRACE, "\n");
- return;
}
@@ -928,7 +927,6 @@ irnet_disconnect_server(irnet_socket * self,
irttp_listen(self->tsap);
DEXIT(IRDA_SERV_TRACE, "\n");
- return;
}
/*------------------------------------------------------------------*/
@@ -1013,7 +1011,6 @@ irnet_destroy_server(void)
irda_irnet_destroy(&irnet_server.s);
DEXIT(IRDA_SERV_TRACE, "\n");
- return;
}
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index c18286a2167b..c8b4599a752e 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -59,7 +59,7 @@ do { \
DEFINE_WAIT(__wait); \
long __timeo = timeo; \
ret = 0; \
- prepare_to_wait(sk->sk_sleep, &__wait, TASK_INTERRUPTIBLE); \
+ prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE); \
while (!(condition)) { \
if (!__timeo) { \
ret = -EAGAIN; \
@@ -76,7 +76,7 @@ do { \
if (ret) \
break; \
} \
- finish_wait(sk->sk_sleep, &__wait); \
+ finish_wait(sk_sleep(sk), &__wait); \
} while (0)
#define iucv_sock_wait(sk, condition, timeo) \
@@ -136,7 +136,6 @@ static void afiucv_pm_complete(struct device *dev)
#ifdef CONFIG_PM_DEBUG
printk(KERN_WARNING "afiucv_pm_complete\n");
#endif
- return;
}
/**
@@ -305,11 +304,14 @@ static inline int iucv_below_msglim(struct sock *sk)
*/
static void iucv_sock_wake_msglim(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
- if (sk_has_sleeper(sk))
- wake_up_interruptible_all(sk->sk_sleep);
+ struct socket_wq *wq;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_all(&wq->wait);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
- read_unlock(&sk->sk_callback_lock);
+ rcu_read_unlock();
}
/* Timers */
@@ -795,7 +797,7 @@ static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
/* Wait for an incoming connection */
- add_wait_queue_exclusive(sk->sk_sleep, &wait);
+ add_wait_queue_exclusive(sk_sleep(sk), &wait);
while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
if (!timeo) {
@@ -819,7 +821,7 @@ static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
}
set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sk_sleep, &wait);
+ remove_wait_queue(sk_sleep(sk), &wait);
if (err)
goto done;
@@ -1269,7 +1271,7 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
struct sock *sk = sock->sk;
unsigned int mask = 0;
- sock_poll_wait(file, sk->sk_sleep, wait);
+ sock_poll_wait(file, sk_sleep(sk), wait);
if (sk->sk_state == IUCV_LISTEN)
return iucv_accept_poll(sk);
diff --git a/net/key/af_key.c b/net/key/af_key.c
index ba9a3fcc2fed..43040e97c474 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -99,7 +99,7 @@ static void pfkey_sock_destruct(struct sock *sk)
skb_queue_purge(&sk->sk_receive_queue);
if (!sock_flag(sk, SOCK_DEAD)) {
- printk("Attempt to release alive pfkey socket: %p\n", sk);
+ pr_err("Attempt to release alive pfkey socket: %p\n", sk);
return;
}
@@ -1402,7 +1402,7 @@ static inline int event2poltype(int event)
case XFRM_MSG_POLEXPIRE:
// return SADB_X_SPDEXPIRE;
default:
- printk("pfkey: Unknown policy event %d\n", event);
+ pr_err("pfkey: Unknown policy event %d\n", event);
break;
}
@@ -1421,7 +1421,7 @@ static inline int event2keytype(int event)
case XFRM_MSG_EXPIRE:
return SADB_EXPIRE;
default:
- printk("pfkey: Unknown SA event %d\n", event);
+ pr_err("pfkey: Unknown SA event %d\n", event);
break;
}
@@ -2969,7 +2969,7 @@ static int pfkey_send_notify(struct xfrm_state *x, struct km_event *c)
case XFRM_MSG_NEWAE: /* not yet supported */
break;
default:
- printk("pfkey: Unknown SA event %d\n", c->event);
+ pr_err("pfkey: Unknown SA event %d\n", c->event);
break;
}
@@ -2993,7 +2993,7 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, struct km_e
break;
return key_notify_policy_flush(c);
default:
- printk("pfkey: Unknown policy event %d\n", c->event);
+ pr_err("pfkey: Unknown policy event %d\n", c->event);
break;
}
diff --git a/net/l2tp/Kconfig b/net/l2tp/Kconfig
new file mode 100644
index 000000000000..4b1e71751e10
--- /dev/null
+++ b/net/l2tp/Kconfig
@@ -0,0 +1,107 @@
+#
+# Layer Two Tunneling Protocol (L2TP)
+#
+
+menuconfig L2TP
+ tristate "Layer Two Tunneling Protocol (L2TP)"
+ depends on INET
+ ---help---
+ Layer Two Tunneling Protocol
+
+ From RFC 2661 <http://www.ietf.org/rfc/rfc2661.txt>.
+
+ L2TP facilitates the tunneling of packets across an
+ intervening network in a way that is as transparent as
+ possible to both end-users and applications.
+
+ L2TP is often used to tunnel PPP traffic over IP
+ tunnels. One IP tunnel may carry thousands of individual PPP
+ connections. L2TP is also used as a VPN protocol, popular
+ with home workers to connect to their offices.
+
+ L2TPv3 allows other protocols as well as PPP to be carried
+ over L2TP tunnels. L2TPv3 is defined in RFC 3931
+ <http://www.ietf.org/rfc/rfc3931.txt>.
+
+ The kernel component handles only L2TP data packets: a
+ userland daemon handles the L2TP control protocol (tunnel
+ and session setup). One such daemon is OpenL2TP
+ (http://openl2tp.org/).
+
+ If you don't need L2TP, say N. To compile all L2TP code as
+ modules, choose M here.
+
+config L2TP_DEBUGFS
+ tristate "L2TP debugfs support"
+ depends on L2TP && DEBUG_FS
+ help
+ Support for an l2tp directory in the debugfs filesystem. This
+ may be used to dump internal state of the l2tp drivers for
+ problem analysis.
+
+ If unsure, say 'Y'.
+
+ To compile this driver as a module, choose M here. The module
+ will be called l2tp_debugfs.
+
+config L2TP_V3
+ bool "L2TPv3 support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && L2TP
+ help
+ Layer Two Tunneling Protocol Version 3
+
+ From RFC 3931 <http://www.ietf.org/rfc/rfc3931.txt>.
+
+ The Layer Two Tunneling Protocol (L2TP) provides a dynamic
+ mechanism for tunneling Layer 2 (L2) "circuits" across a
+ packet-oriented data network (e.g., over IP). L2TP, as
+ originally defined in RFC 2661, is a standard method for
+ tunneling Point-to-Point Protocol (PPP) [RFC1661] sessions.
+ L2TP has since been adopted for tunneling a number of other
+ L2 protocols, including ATM, Frame Relay, HDLC and even raw
+ ethernet frames.
+
+ If you are connecting to L2TPv3 equipment, or you want to
+ tunnel raw ethernet frames using L2TP, say Y here. If
+ unsure, say N.
+
+config L2TP_IP
+ tristate "L2TP IP encapsulation for L2TPv3"
+ depends on L2TP_V3
+ help
+ Support for L2TP-over-IP socket family.
+
+ The L2TPv3 protocol defines two possible encapsulations for
+ L2TP frames, namely UDP and plain IP (without UDP). This
+ driver provides a new L2TPIP socket family with which
+ userspace L2TPv3 daemons may create L2TP/IP tunnel sockets
+ when UDP encapsulation is not required. When L2TP is carried
+ in IP packets, it uses IP protocol number 115, so this
+ protocol must be enabled in firewalls.
+
+ To compile this driver as a module, choose M here. The module
+ will be called l2tp_ip.
+
+config L2TP_ETH
+ tristate "L2TP ethernet pseudowire support for L2TPv3"
+ depends on L2TP_V3
+ help
+ Support for carrying raw ethernet frames over L2TPv3.
+
+ From RFC 4719 <http://www.ietf.org/rfc/rfc4719.txt>.
+
+ The Layer 2 Tunneling Protocol, Version 3 (L2TPv3) can be
+ used as a control protocol and for data encapsulation to set
+ up Pseudowires for transporting layer 2 Packet Data Units
+ across an IP network [RFC3931].
+
+ This driver provides an ethernet virtual interface for each
+ L2TP ethernet pseudowire instance. Standard Linux tools may
+ be used to assign an IP address to the local virtual
+ interface, or add the interface to a bridge.
+
+ If you are using L2TPv3, you will almost certainly want to
+ enable this option.
+
+ To compile this driver as a module, choose M here. The module
+ will be called l2tp_eth.
diff --git a/net/l2tp/Makefile b/net/l2tp/Makefile
new file mode 100644
index 000000000000..110e7bc2de5e
--- /dev/null
+++ b/net/l2tp/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for the L2TP subsystem.
+#
+
+obj-$(CONFIG_L2TP) += l2tp_core.o
+
+# Build l2tp as modules if L2TP is M
+obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_PPPOL2TP)) += l2tp_ppp.o
+obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_IP)) += l2tp_ip.o
+obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_V3)) += l2tp_netlink.o
+obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_ETH)) += l2tp_eth.o
+obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_DEBUGFS)) += l2tp_debugfs.o
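# Illustration of the $(subst) idiom above (not part of the patch):
# it demotes each sub-option to a module build whenever the L2TP core
# itself is modular. E.g. with CONFIG_L2TP=m and CONFIG_L2TP_IP=y:
#
#   $(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_IP))  ->  m
#
# so l2tp_ip.o joins obj-m instead of being built into the kernel,
# where it could not link against a modular l2tp_core.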
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
new file mode 100644
index 000000000000..1712af1c7b3f
--- /dev/null
+++ b/net/l2tp/l2tp_core.c
@@ -0,0 +1,1666 @@
+/*
+ * L2TP core.
+ *
+ * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
+ *
+ * This file contains some code of the original L2TPv2 pppol2tp
+ * driver, which has the following copyright:
+ *
+ * Authors: Martijn van Oosterhout <kleptog@svana.org>
+ * James Chapman (jchapman@katalix.com)
+ * Contributors:
+ * Michal Ostrowski <mostrows@speakeasy.net>
+ * Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
+ * David S. Miller (davem@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/rculist.h>
+#include <linux/uaccess.h>
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/jiffies.h>
+
+#include <linux/netdevice.h>
+#include <linux/net.h>
+#include <linux/inetdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/l2tp.h>
+#include <linux/hash.h>
+#include <linux/sort.h>
+#include <linux/file.h>
+#include <linux/nsproxy.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include <net/dst.h>
+#include <net/ip.h>
+#include <net/udp.h>
+#include <net/inet_common.h>
+#include <net/xfrm.h>
+#include <net/protocol.h>
+
+#include <asm/byteorder.h>
+#include <asm/atomic.h>
+
+#include "l2tp_core.h"
+
+#define L2TP_DRV_VERSION "V2.0"
+
+/* L2TP header constants */
+#define L2TP_HDRFLAG_T 0x8000
+#define L2TP_HDRFLAG_L 0x4000
+#define L2TP_HDRFLAG_S 0x0800
+#define L2TP_HDRFLAG_O 0x0200
+#define L2TP_HDRFLAG_P 0x0100
+
+#define L2TP_HDR_VER_MASK 0x000F
+#define L2TP_HDR_VER_2 0x0002
+#define L2TP_HDR_VER_3 0x0003
+
+/* L2TPv3 default L2-specific sublayer */
+#define L2TP_SLFLAG_S 0x40000000
+#define L2TP_SL_SEQ_MASK 0x00ffffff
+
+#define L2TP_HDR_SIZE_SEQ 10
+#define L2TP_HDR_SIZE_NOSEQ 6
+
+/* Default trace flags */
+#define L2TP_DEFAULT_DEBUG_FLAGS 0
+
+#define PRINTK(_mask, _type, _lvl, _fmt, args...) \
+ do { \
+ if ((_mask) & (_type)) \
+ printk(_lvl "L2TP: " _fmt, ##args); \
+ } while (0)
+
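A usage illustration for the PRINTK() macro above (not part of the
patch): each message is gated on a per-object debug bitmask, so a
call such as

    PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
           "%s: ns=%u\n", session->name, ns);

prints only when L2TP_MSG_SEQ is set in session->debug.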
+/* Private data stored for received packets in the skb.
+ */
+struct l2tp_skb_cb {
+ u32 ns;
+ u16 has_seq;
+ u16 length;
+ unsigned long expires;
+};
+
+#define L2TP_SKB_CB(skb) ((struct l2tp_skb_cb *) &(skb)->cb[sizeof(struct inet_skb_parm)])
+
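A sketch of the cb[] layout the macro above assumes (illustration,
not part of the patch): struct inet_skb_parm occupies the front of
the 48-byte skb->cb[] scratch area, and l2tp_skb_cb sits immediately
after it, so IP-layer and L2TP per-packet state can coexist:

    skb->cb[]: [ struct inet_skb_parm ][ struct l2tp_skb_cb ][ unused ]

A compile-time guard one might add (hypothetical, not in the patch):

    BUILD_BUG_ON(sizeof(struct inet_skb_parm) + sizeof(struct l2tp_skb_cb) >
                 sizeof(((struct sk_buff *)0)->cb));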
+static atomic_t l2tp_tunnel_count;
+static atomic_t l2tp_session_count;
+
+/* per-net private data for this module */
+static unsigned int l2tp_net_id;
+struct l2tp_net {
+ struct list_head l2tp_tunnel_list;
+ spinlock_t l2tp_tunnel_list_lock;
+ struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2];
+ spinlock_t l2tp_session_hlist_lock;
+};
+
+static inline struct l2tp_net *l2tp_pernet(struct net *net)
+{
+ BUG_ON(!net);
+
+ return net_generic(net, l2tp_net_id);
+}
+
+/* Session hash global list for L2TPv3.
+ * The session_id SHOULD be random according to RFC3931, but several
+ * L2TP implementations use incrementing session_ids. So we do a real
+ * hash on the session_id, rather than a simple bitmask.
+ */
+static inline struct hlist_head *
+l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
+{
+ return &pn->l2tp_session_hlist[hash_32(session_id, L2TP_HASH_BITS_2)];
+}
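An illustration of the point above (not part of the patch): a plain
bitmask keeps incrementing session IDs in adjacent buckets and repeats
once the ID space wraps the table size, whereas hash_32() multiplies
by a golden-ratio-derived constant and keeps the top bits, scattering
sequential IDs across the table:

    /* both map a session id to a bucket index; names are illustrative */
    u32 masked = session_id & ((1 << L2TP_HASH_BITS_2) - 1); /* 1,2,3... stay adjacent */
    u32 hashed = hash_32(session_id, L2TP_HASH_BITS_2);      /* 1,2,3... scatter */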
+
+/* Lookup a session by id in the global session list
+ */
+static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id)
+{
+ struct l2tp_net *pn = l2tp_pernet(net);
+ struct hlist_head *session_list =
+ l2tp_session_id_hash_2(pn, session_id);
+ struct l2tp_session *session;
+ struct hlist_node *walk;
+
+ rcu_read_lock_bh();
+ hlist_for_each_entry_rcu(session, walk, session_list, global_hlist) {
+ if (session->session_id == session_id) {
+ rcu_read_unlock_bh();
+ return session;
+ }
+ }
+ rcu_read_unlock_bh();
+
+ return NULL;
+}
+
+/* Session hash list.
+ * The session_id SHOULD be random according to RFC2661, but several
+ * L2TP implementations (Cisco and Microsoft) use incrementing
+ * session_ids. So we do a real hash on the session_id, rather than a
+ * simple bitmask.
+ */
+static inline struct hlist_head *
+l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
+{
+ return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
+}
+
+/* Lookup a session by id
+ */
+struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id)
+{
+ struct hlist_head *session_list;
+ struct l2tp_session *session;
+ struct hlist_node *walk;
+
+ /* In L2TPv3, session_ids are unique over all tunnels and we
+ * sometimes need to look them up before we know the
+ * tunnel.
+ */
+ if (tunnel == NULL)
+ return l2tp_session_find_2(net, session_id);
+
+ session_list = l2tp_session_id_hash(tunnel, session_id);
+ read_lock_bh(&tunnel->hlist_lock);
+ hlist_for_each_entry(session, walk, session_list, hlist) {
+ if (session->session_id == session_id) {
+ read_unlock_bh(&tunnel->hlist_lock);
+ return session;
+ }
+ }
+ read_unlock_bh(&tunnel->hlist_lock);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(l2tp_session_find);
+
+struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
+{
+ int hash;
+ struct hlist_node *walk;
+ struct l2tp_session *session;
+ int count = 0;
+
+ read_lock_bh(&tunnel->hlist_lock);
+ for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
+ hlist_for_each_entry(session, walk, &tunnel->session_hlist[hash], hlist) {
+ if (++count > nth) {
+ read_unlock_bh(&tunnel->hlist_lock);
+ return session;
+ }
+ }
+ }
+
+ read_unlock_bh(&tunnel->hlist_lock);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(l2tp_session_find_nth);
+
+/* Lookup a session by interface name.
+ * This is very inefficient but is only used by management interfaces.
+ */
+struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
+{
+ struct l2tp_net *pn = l2tp_pernet(net);
+ int hash;
+ struct hlist_node *walk;
+ struct l2tp_session *session;
+
+ rcu_read_lock_bh();
+ for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
+ hlist_for_each_entry_rcu(session, walk, &pn->l2tp_session_hlist[hash], global_hlist) {
+ if (!strcmp(session->ifname, ifname)) {
+ rcu_read_unlock_bh();
+ return session;
+ }
+ }
+ }
+
+ rcu_read_unlock_bh();
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(l2tp_session_find_by_ifname);
+
+/* Lookup a tunnel by id
+ */
+struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id)
+{
+ struct l2tp_tunnel *tunnel;
+ struct l2tp_net *pn = l2tp_pernet(net);
+
+ rcu_read_lock_bh();
+ list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
+ if (tunnel->tunnel_id == tunnel_id) {
+ rcu_read_unlock_bh();
+ return tunnel;
+ }
+ }
+ rcu_read_unlock_bh();
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_find);
+
+struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth)
+{
+ struct l2tp_net *pn = l2tp_pernet(net);
+ struct l2tp_tunnel *tunnel;
+ int count = 0;
+
+ rcu_read_lock_bh();
+ list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
+ if (++count > nth) {
+ rcu_read_unlock_bh();
+ return tunnel;
+ }
+ }
+
+ rcu_read_unlock_bh();
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_find_nth);
+
+/*****************************************************************************
+ * Receive data handling
+ *****************************************************************************/
+
+/* Queue a skb in order. We come here only if the skb has an L2TP sequence
+ * number.
+ */
+static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb)
+{
+ struct sk_buff *skbp;
+ struct sk_buff *tmp;
+ u32 ns = L2TP_SKB_CB(skb)->ns;
+
+ spin_lock_bh(&session->reorder_q.lock);
+ skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
+ if (L2TP_SKB_CB(skbp)->ns > ns) {
+ __skb_queue_before(&session->reorder_q, skbp, skb);
+ PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
+ "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
+ session->name, ns, L2TP_SKB_CB(skbp)->ns,
+ skb_queue_len(&session->reorder_q));
+ session->stats.rx_oos_packets++;
+ goto out;
+ }
+ }
+
+ __skb_queue_tail(&session->reorder_q, skb);
+
+out:
+ spin_unlock_bh(&session->reorder_q.lock);
+}
+
+/* Dequeue a single skb.
+ */
+static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb)
+{
+ struct l2tp_tunnel *tunnel = session->tunnel;
+ int length = L2TP_SKB_CB(skb)->length;
+
+ /* We're about to requeue the skb, so return resources
+ * to its current owner (a socket receive buffer).
+ */
+ skb_orphan(skb);
+
+ tunnel->stats.rx_packets++;
+ tunnel->stats.rx_bytes += length;
+ session->stats.rx_packets++;
+ session->stats.rx_bytes += length;
+
+ if (L2TP_SKB_CB(skb)->has_seq) {
+ /* Bump our Nr */
+ session->nr++;
+ if (tunnel->version == L2TP_HDR_VER_2)
+ session->nr &= 0xffff;
+ else
+ session->nr &= 0xffffff;
+
+ PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
+ "%s: updated nr to %hu\n", session->name, session->nr);
+ }
+
+ /* call private receive handler */
+ if (session->recv_skb != NULL)
+ (*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length);
+ else
+ kfree_skb(skb);
+
+ if (session->deref)
+ (*session->deref)(session);
+}
+
+/* Dequeue skbs from the session's reorder_q, subject to packet order.
+ * Skbs that have been in the queue for too long are simply discarded.
+ */
+static void l2tp_recv_dequeue(struct l2tp_session *session)
+{
+ struct sk_buff *skb;
+ struct sk_buff *tmp;
+
+ /* If the pkt at the head of the queue has the nr that we
+ * expect to send up next, dequeue it and any other
+ * in-sequence packets behind it.
+ */
+ spin_lock_bh(&session->reorder_q.lock);
+ skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
+ if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) {
+ session->stats.rx_seq_discards++;
+ session->stats.rx_errors++;
+ PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
+ "%s: oos pkt %u len %d discarded (too old), "
+ "waiting for %u, reorder_q_len=%d\n",
+ session->name, L2TP_SKB_CB(skb)->ns,
+ L2TP_SKB_CB(skb)->length, session->nr,
+ skb_queue_len(&session->reorder_q));
+ __skb_unlink(skb, &session->reorder_q);
+ kfree_skb(skb);
+ if (session->deref)
+ (*session->deref)(session);
+ continue;
+ }
+
+ if (L2TP_SKB_CB(skb)->has_seq) {
+ if (L2TP_SKB_CB(skb)->ns != session->nr) {
+ PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
+ "%s: holding oos pkt %u len %d, "
+ "waiting for %u, reorder_q_len=%d\n",
+ session->name, L2TP_SKB_CB(skb)->ns,
+ L2TP_SKB_CB(skb)->length, session->nr,
+ skb_queue_len(&session->reorder_q));
+ goto out;
+ }
+ }
+ __skb_unlink(skb, &session->reorder_q);
+
+ /* Process the skb. We release the queue lock while we
+ * do so to let other contexts process the queue.
+ */
+ spin_unlock_bh(&session->reorder_q.lock);
+ l2tp_recv_dequeue_skb(session, skb);
+ spin_lock_bh(&session->reorder_q.lock);
+ }
+
+out:
+ spin_unlock_bh(&session->reorder_q.lock);
+}
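A worked example of the dequeue policy above (illustration, not part
of the patch): with session->nr == 5 and reorder_q holding packets
with ns = {5, 6, 8}, the walk delivers 5 and 6 (l2tp_recv_dequeue_skb()
bumps nr to 6, then 7) and then parks on 8, since 8 != 7. If 7 never
arrives, 8 sits in the queue until its 'expires' stamp passes
(reorder_timeout, or one second when not reordering) and the staleness
check at the top of the walk discards it.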
+
+static inline int l2tp_verify_udp_checksum(struct sock *sk,
+ struct sk_buff *skb)
+{
+ struct udphdr *uh = udp_hdr(skb);
+ u16 ulen = ntohs(uh->len);
+ struct inet_sock *inet;
+ __wsum psum;
+
+ if (sk->sk_no_check || skb_csum_unnecessary(skb) || !uh->check)
+ return 0;
+
+ inet = inet_sk(sk);
+ psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr, ulen,
+ IPPROTO_UDP, 0);
+
+ if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
+ !csum_fold(csum_add(psum, skb->csum)))
+ return 0;
+
+ skb->csum = psum;
+
+ return __skb_checksum_complete(skb);
+}
+
+/* Do receive processing of L2TP data frames. We handle both L2TPv2
+ * and L2TPv3 data frames here.
+ *
+ * L2TPv2 Data Message Header
+ *
+ *  0                   1                   2                   3
+ *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |T|L|x|x|S|x|O|P|x|x|x|x|  Ver  |          Length (opt)         |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |           Tunnel ID           |           Session ID          |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |             Ns (opt)          |             Nr (opt)          |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |      Offset Size (opt)        |    Offset pad... (opt)
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Data frames are marked by T=0. All other fields are the same as
+ * those in L2TP control frames.
+ *
+ * L2TPv3 Data Message Header
+ *
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                      L2TP Session Header                      |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                     L2-Specific Sublayer                      |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                        Tunnel Payload                      ...
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * L2TPv3 Session Header Over IP
+ *
+ *  0                   1                   2                   3
+ *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                           Session ID                          |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |               Cookie (optional, maximum 64 bits)...
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *                                                                 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * L2TPv3 L2-Specific Sublayer Format
+ *
+ *  0                   1                   2                   3
+ *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |x|S|x|x|x|x|x|x|                Sequence Number                |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Cookie value, sublayer format and offset (pad) are negotiated with
+ * the peer when the session is set up. Unlike L2TPv2, we do not need
+ * to parse the packet header to determine if optional fields are
+ * present.
+ *
+ * Caller must already have parsed the frame and determined that it is
+ * a data (not control) frame before coming here. Fields up to the
+ * session-id have already been parsed and ptr points to the data
+ * after the session-id.
+ */
+void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
+ unsigned char *ptr, unsigned char *optr, u16 hdrflags,
+ int length, int (*payload_hook)(struct sk_buff *skb))
+{
+ struct l2tp_tunnel *tunnel = session->tunnel;
+ int offset;
+ u32 ns, nr;
+
+ /* The ref count is increased since we now hold a pointer to
+ * the session. Take care to decrement the refcnt when exiting
+ * this function from now on...
+ */
+ l2tp_session_inc_refcount(session);
+ if (session->ref)
+ (*session->ref)(session);
+
+ /* Parse and check optional cookie */
+ if (session->peer_cookie_len > 0) {
+ if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
+ PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
+ "%s: cookie mismatch (%u/%u). Discarding.\n",
+ tunnel->name, tunnel->tunnel_id, session->session_id);
+ session->stats.rx_cookie_discards++;
+ goto discard;
+ }
+ ptr += session->peer_cookie_len;
+ }
+
+ /* Handle the optional sequence numbers. Sequence numbers are
+ * in different places for L2TPv2 and L2TPv3.
+ *
+ * If we are the LAC, enable/disable sequence numbers under
+ * the control of the LNS. If no sequence numbers present but
+ * we were expecting them, discard frame.
+ */
+ ns = nr = 0;
+ L2TP_SKB_CB(skb)->has_seq = 0;
+ if (tunnel->version == L2TP_HDR_VER_2) {
+ if (hdrflags & L2TP_HDRFLAG_S) {
+ ns = ntohs(*(__be16 *) ptr);
+ ptr += 2;
+ nr = ntohs(*(__be16 *) ptr);
+ ptr += 2;
+
+ /* Store L2TP info in the skb */
+ L2TP_SKB_CB(skb)->ns = ns;
+ L2TP_SKB_CB(skb)->has_seq = 1;
+
+ PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
+ "%s: recv data ns=%u, nr=%u, session nr=%u\n",
+ session->name, ns, nr, session->nr);
+ }
+ } else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
+ u32 l2h = ntohl(*(__be32 *) ptr);
+
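+ /* Bit 0x40000000 is the S bit; Ns occupies the low 24 bits of the
+ * default L2-specific sublayer (see the header diagram above).
+ */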
+ if (l2h & 0x40000000) {
+ ns = l2h & 0x00ffffff;
+
+ /* Store L2TP info in the skb */
+ L2TP_SKB_CB(skb)->ns = ns;
+ L2TP_SKB_CB(skb)->has_seq = 1;
+
+ PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
+ "%s: recv data ns=%u, session nr=%u\n",
+ session->name, ns, session->nr);
+ }
+ }
+
+ /* Advance past L2-specific header, if present */
+ ptr += session->l2specific_len;
+
+ if (L2TP_SKB_CB(skb)->has_seq) {
+ /* Received a packet with sequence numbers. If we're the LAC
+ * and not yet sending sequence numbers, the LNS has requested
+ * that we enable them, so do so.
+ */
+ if ((!session->lns_mode) && (!session->send_seq)) {
+ PRINTK(session->debug, L2TP_MSG_SEQ, KERN_INFO,
+ "%s: requested to enable seq numbers by LNS\n",
+ session->name);
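+ /* send_seq is a 1-bit field, so assigning -1 sets it to 1 */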
+ session->send_seq = -1;
+ l2tp_session_set_header_len(session, tunnel->version);
+ }
+ } else {
+ /* No sequence numbers.
+ * If user has configured mandatory sequence numbers, discard.
+ */
+ if (session->recv_seq) {
+ PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING,
+ "%s: recv data has no seq numbers when required. "
+ "Discarding\n", session->name);
+ session->stats.rx_seq_discards++;
+ goto discard;
+ }
+
+ /* If we're the LAC and we're sending sequence numbers, the
+ * LNS has requested that we no longer send sequence numbers.
+ * If we're the LNS and we're sending sequence numbers, the
+ * LAC is broken. Discard the frame.
+ */
+ if ((!session->lns_mode) && (session->send_seq)) {
+ PRINTK(session->debug, L2TP_MSG_SEQ, KERN_INFO,
+ "%s: requested to disable seq numbers by LNS\n",
+ session->name);
+ session->send_seq = 0;
+ l2tp_session_set_header_len(session, tunnel->version);
+ } else if (session->send_seq) {
+ PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING,
+ "%s: recv data has no seq numbers when required. "
+ "Discarding\n", session->name);
+ session->stats.rx_seq_discards++;
+ goto discard;
+ }
+ }
+
+ /* Session data offset is handled differently for L2TPv2 and
+ * L2TPv3. For L2TPv2, there is an optional 16-bit value in
+ * the header. For L2TPv3, the offset is negotiated using AVPs
+ * in the session setup control protocol.
+ */
+ if (tunnel->version == L2TP_HDR_VER_2) {
+ /* If offset bit set, skip it. */
+ if (hdrflags & L2TP_HDRFLAG_O) {
+ offset = ntohs(*(__be16 *)ptr);
+ ptr += 2 + offset;
+ }
+ } else
+ ptr += session->offset;
+
+ offset = ptr - optr;
+ if (!pskb_may_pull(skb, offset))
+ goto discard;
+
+ __skb_pull(skb, offset);
+
+ /* If caller wants to process the payload before we queue the
+ * packet, do so now.
+ */
+ if (payload_hook)
+ if ((*payload_hook)(skb))
+ goto discard;
+
+ /* Prepare skb for adding to the session's reorder_q. Hold
+ * packets for max reorder_timeout or 1 second if not
+ * reordering.
+ */
+ L2TP_SKB_CB(skb)->length = length;
+ L2TP_SKB_CB(skb)->expires = jiffies +
+ (session->reorder_timeout ? session->reorder_timeout : HZ);
+
+ /* Add packet to the session's receive queue. Reordering is done here, if
+ * enabled. Saved L2TP protocol info is stored in skb->cb[].
+ */
+ if (L2TP_SKB_CB(skb)->has_seq) {
+ if (session->reorder_timeout != 0) {
+ /* Packet reordering enabled. Add skb to session's
+ * reorder queue, in order of ns.
+ */
+ l2tp_recv_queue_skb(session, skb);
+ } else {
+ /* Packet reordering disabled. Discard out-of-sequence
+ * packets
+ */
+ if (L2TP_SKB_CB(skb)->ns != session->nr) {
+ session->stats.rx_seq_discards++;
+ PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
+ "%s: oos pkt %u len %d discarded, "
+ "waiting for %u, reorder_q_len=%d\n",
+ session->name, L2TP_SKB_CB(skb)->ns,
+ L2TP_SKB_CB(skb)->length, session->nr,
+ skb_queue_len(&session->reorder_q));
+ goto discard;
+ }
+ skb_queue_tail(&session->reorder_q, skb);
+ }
+ } else {
+ /* No sequence numbers. Add the skb to the tail of the
+ * reorder queue. This ensures that it will be
+ * delivered after all previous sequenced skbs.
+ */
+ skb_queue_tail(&session->reorder_q, skb);
+ }
+
+ /* Try to dequeue as many skbs from reorder_q as we can. */
+ l2tp_recv_dequeue(session);
+
+ l2tp_session_dec_refcount(session);
+
+ return;
+
+discard:
+ session->stats.rx_errors++;
+ kfree_skb(skb);
+
+ if (session->deref)
+ (*session->deref)(session);
+
+ l2tp_session_dec_refcount(session);
+}
+EXPORT_SYMBOL(l2tp_recv_common);
+
+/* Internal UDP receive frame. Do the real work of receiving an L2TP data frame
+ * here. The skb is not on a list when we get here.
+ * Returns 0 if the packet was a data packet and was successfully passed on.
+ * Returns 1 if the packet was not a good data packet and could not be
+ * forwarded. All such packets are passed up to userspace to deal with.
+ */
+int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
+ int (*payload_hook)(struct sk_buff *skb))
+{
+ struct l2tp_session *session = NULL;
+ unsigned char *ptr, *optr;
+ u16 hdrflags;
+ u32 tunnel_id, session_id;
+ int offset;
+ u16 version;
+ int length;
+
+ if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb))
+ goto discard_bad_csum;
+
+ /* UDP always verifies the packet length. */
+ __skb_pull(skb, sizeof(struct udphdr));
+
+ /* Short packet? */
+ if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
+ PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
+ "%s: recv short packet (len=%d)\n", tunnel->name, skb->len);
+ goto error;
+ }
+
+ /* Point to L2TP header */
+ optr = ptr = skb->data;
+
+ /* Trace packet contents, if enabled */
+ if (tunnel->debug & L2TP_MSG_DATA) {
+ length = min(32u, skb->len);
+ if (!pskb_may_pull(skb, length))
+ goto error;
+
+ printk(KERN_DEBUG "%s: recv: ", tunnel->name);
+
+ offset = 0;
+ do {
+ printk(" %02X", ptr[offset]);
+ } while (++offset < length);
+
+ printk("\n");
+ }
+
+ /* Get L2TP header flags */
+ hdrflags = ntohs(*(__be16 *) ptr);
+
+ /* Check protocol version */
+ version = hdrflags & L2TP_HDR_VER_MASK;
+ if (version != tunnel->version) {
+ PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
+ "%s: recv protocol version mismatch: got %d expected %d\n",
+ tunnel->name, version, tunnel->version);
+ goto error;
+ }
+
+ /* Get length of L2TP packet */
+ length = skb->len;
+
+ /* If type is control packet, it is handled by userspace. */
+ if (hdrflags & L2TP_HDRFLAG_T) {
+ PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_DEBUG,
+ "%s: recv control packet, len=%d\n", tunnel->name, length);
+ goto error;
+ }
+
+ /* Skip flags */
+ ptr += 2;
+
+ if (tunnel->version == L2TP_HDR_VER_2) {
+ /* If length is present, skip it */
+ if (hdrflags & L2TP_HDRFLAG_L)
+ ptr += 2;
+
+ /* Extract tunnel and session ID */
+ tunnel_id = ntohs(*(__be16 *) ptr);
+ ptr += 2;
+ session_id = ntohs(*(__be16 *) ptr);
+ ptr += 2;
+ } else {
+ ptr += 2; /* skip reserved bits */
+ tunnel_id = tunnel->tunnel_id;
+ session_id = ntohl(*(__be32 *) ptr);
+ ptr += 4;
+ }
+
+ /* Find the session context */
+ session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id);
+ if (!session || !session->recv_skb) {
+ /* Not found? Pass to userspace to deal with */
+ PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
+ "%s: no session found (%u/%u). Passing up.\n",
+ tunnel->name, tunnel_id, session_id);
+ goto error;
+ }
+
+ l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook);
+
+ return 0;
+
+discard_bad_csum:
+ LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name);
+ UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0);
+ tunnel->stats.rx_errors++;
+ kfree_skb(skb);
+
+ return 0;
+
+error:
+ /* Put UDP header back */
+ __skb_push(skb, sizeof(struct udphdr));
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(l2tp_udp_recv_core);
+
+/* UDP encapsulation receive handler. See net/ipv4/udp.c.
+ * Return codes:
+ * 0 : success.
+ * <0: error
+ * >0: skb should be passed up to userspace as UDP.
+ */
+int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+{
+ struct l2tp_tunnel *tunnel;
+
+ tunnel = l2tp_sock_to_tunnel(sk);
+ if (tunnel == NULL)
+ goto pass_up;
+
+ PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_DEBUG,
+ "%s: received %d bytes\n", tunnel->name, skb->len);
+
+ if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook))
+ goto pass_up_put;
+
+ sock_put(sk);
+ return 0;
+
+pass_up_put:
+ sock_put(sk);
+pass_up:
+ return 1;
+}
+EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv);
+
+/************************************************************************
+ * Transmit handling
+ ***********************************************************************/
+
+/* Build an L2TP header for the session into the buffer provided.
+ */
+static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf)
+{
+ struct l2tp_tunnel *tunnel = session->tunnel;
+ __be16 *bufp = buf;
+ __be16 *optr = buf;
+ u16 flags = L2TP_HDR_VER_2;
+ u32 tunnel_id = tunnel->peer_tunnel_id;
+ u32 session_id = session->peer_session_id;
+
+ if (session->send_seq)
+ flags |= L2TP_HDRFLAG_S;
+
+ /* Setup L2TP header. */
+ *bufp++ = htons(flags);
+ *bufp++ = htons(tunnel_id);
+ *bufp++ = htons(session_id);
+ if (session->send_seq) {
+ *bufp++ = htons(session->ns);
+ *bufp++ = 0;
+ session->ns++;
+ session->ns &= 0xffff;
+ PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
+ "%s: updated ns to %u\n", session->name, session->ns);
+ }
+
+ return bufp - optr;
+}
+
+static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
+{
+ struct l2tp_tunnel *tunnel = session->tunnel;
+ char *bufp = buf;
+ char *optr = bufp;
+
+ /* Setup L2TP header. The header differs slightly for UDP and
+ * IP encapsulations. For UDP, there is 4 bytes of flags.
+ */
+ if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
+ u16 flags = L2TP_HDR_VER_3;
+ *((__be16 *) bufp) = htons(flags);
+ bufp += 2;
+ *((__be16 *) bufp) = 0;
+ bufp += 2;
+ }
+
+ *((__be32 *) bufp) = htonl(session->peer_session_id);
+ bufp += 4;
+ if (session->cookie_len) {
+ memcpy(bufp, &session->cookie[0], session->cookie_len);
+ bufp += session->cookie_len;
+ }
+ if (session->l2specific_len) {
+ if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
+ u32 l2h = 0;
+ if (session->send_seq) {
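+ /* Set the S bit and carry Ns in the low 24 bits of the sublayer word */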
+ l2h = 0x40000000 | session->ns;
+ session->ns++;
+ session->ns &= 0xffffff;
+ PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
+ "%s: updated ns to %u\n", session->name, session->ns);
+ }
+
+ *((__be32 *) bufp) = htonl(l2h);
+ }
+ bufp += session->l2specific_len;
+ }
+ if (session->offset)
+ bufp += session->offset;
+
+ return bufp - optr;
+}
+
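+/* Push a fully built L2TP frame to IP for transmission on the tunnel
+ * socket. The caller has already added the L2TP (and any UDP) headers.
+ */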
+int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, size_t data_len)
+{
+ struct l2tp_tunnel *tunnel = session->tunnel;
+ unsigned int len = skb->len;
+ int error;
+
+ /* Debug */
+ if (session->send_seq)
+ PRINTK(session->debug, L2TP_MSG_DATA, KERN_DEBUG,
+ "%s: send %Zd bytes, ns=%u\n", session->name,
+ data_len, session->ns - 1);
+ else
+ PRINTK(session->debug, L2TP_MSG_DATA, KERN_DEBUG,
+ "%s: send %Zd bytes\n", session->name, data_len);
+
+ if (session->debug & L2TP_MSG_DATA) {
+ int i;
+ int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
+ unsigned char *datap = skb->data + uhlen;
+
+ printk(KERN_DEBUG "%s: xmit:", session->name);
+ for (i = 0; i < (len - uhlen); i++) {
+ printk(" %02X", *datap++);
+ if (i == 31) {
+ printk(" ...");
+ break;
+ }
+ }
+ printk("\n");
+ }
+
+ /* Queue the packet to IP for output */
+ skb->local_df = 1;
+ error = ip_queue_xmit(skb);
+
+ /* Update stats */
+ if (error >= 0) {
+ tunnel->stats.tx_packets++;
+ tunnel->stats.tx_bytes += len;
+ session->stats.tx_packets++;
+ session->stats.tx_bytes += len;
+ } else {
+ tunnel->stats.tx_errors++;
+ session->stats.tx_errors++;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(l2tp_xmit_core);
+
+/* Automatically called when the skb is freed.
+ */
+static void l2tp_sock_wfree(struct sk_buff *skb)
+{
+ sock_put(skb->sk);
+}
+
+/* For data skbs that we transmit, we associate with the tunnel socket
+ * but don't do accounting.
+ */
+static inline void l2tp_skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
+{
+ sock_hold(sk);
+ skb->sk = sk;
+ skb->destructor = l2tp_sock_wfree;
+}
+
+/* If caller requires the skb to have a ppp header, the header must be
+ * inserted in the skb data before calling this function.
+ */
+int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len)
+{
+ int data_len = skb->len;
+ struct l2tp_tunnel *tunnel = session->tunnel;
+ struct sock *sk = tunnel->sock;
+ struct udphdr *uh;
+ struct inet_sock *inet;
+ __wsum csum;
+ int old_headroom;
+ int new_headroom;
+ int headroom;
+ int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
+ int udp_len;
+
+ /* Check that there's enough headroom in the skb to insert IP,
+ * UDP and L2TP headers. If not enough, expand it to
+ * make room. Adjust truesize.
+ */
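+ /* e.g. L2TPv3 over UDP with no cookie: NET_SKB_PAD + 20 (IP) +
+ * 8 (UDP) + 12 (L2TP) bytes of headroom.
+ */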
+ headroom = NET_SKB_PAD + sizeof(struct iphdr) +
+ uhlen + hdr_len;
+ old_headroom = skb_headroom(skb);
+ if (skb_cow_head(skb, headroom))
+ goto abort;
+
+ new_headroom = skb_headroom(skb);
+ skb_orphan(skb);
+ skb->truesize += new_headroom - old_headroom;
+
+ /* Setup L2TP header */
+ session->build_header(session, __skb_push(skb, hdr_len));
+
+ /* Reset skb netfilter state */
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+ IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
+ IPSKB_REROUTED);
+ nf_reset(skb);
+
+ /* Get routing info from the tunnel socket */
+ skb_dst_drop(skb);
+ skb_dst_set(skb, dst_clone(__sk_dst_get(sk)));
+
+ switch (tunnel->encap) {
+ case L2TP_ENCAPTYPE_UDP:
+ /* Setup UDP header */
+ inet = inet_sk(sk);
+ __skb_push(skb, sizeof(*uh));
+ skb_reset_transport_header(skb);
+ uh = udp_hdr(skb);
+ uh->source = inet->inet_sport;
+ uh->dest = inet->inet_dport;
+ udp_len = uhlen + hdr_len + data_len;
+ uh->len = htons(udp_len);
+ uh->check = 0;
+
+ /* Calculate UDP checksum if configured to do so */
+ if (sk->sk_no_check == UDP_CSUM_NOXMIT)
+ skb->ip_summed = CHECKSUM_NONE;
+ else if ((skb_dst(skb) && skb_dst(skb)->dev) &&
+ (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) {
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ csum = skb_checksum(skb, 0, udp_len, 0);
+ uh->check = csum_tcpudp_magic(inet->inet_saddr,
+ inet->inet_daddr,
+ udp_len, IPPROTO_UDP, csum);
+ if (uh->check == 0)
+ uh->check = CSUM_MANGLED_0;
+ } else {
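+ /* Offload the checksum: seed uh->check with the pseudo-header
+ * and let the device finish it.
+ */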
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->csum_start = skb_transport_header(skb) - skb->head;
+ skb->csum_offset = offsetof(struct udphdr, check);
+ uh->check = ~csum_tcpudp_magic(inet->inet_saddr,
+ inet->inet_daddr,
+ udp_len, IPPROTO_UDP, 0);
+ }
+ break;
+
+ case L2TP_ENCAPTYPE_IP:
+ break;
+ }
+
+ l2tp_skb_set_owner_w(skb, sk);
+
+ l2tp_xmit_core(session, skb, data_len);
+
+abort:
+ return 0;
+}
+EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
+
+/*****************************************************************************
+ * Tunnel and session create/destroy.
+ *****************************************************************************/
+
+/* Tunnel socket destruct hook.
+ * The tunnel context is deleted only when all session sockets have been
+ * closed.
+ */
+void l2tp_tunnel_destruct(struct sock *sk)
+{
+ struct l2tp_tunnel *tunnel;
+
+ tunnel = sk->sk_user_data;
+ if (tunnel == NULL)
+ goto end;
+
+ PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO,
+ "%s: closing...\n", tunnel->name);
+
+ /* Close all sessions */
+ l2tp_tunnel_closeall(tunnel);
+
+ switch (tunnel->encap) {
+ case L2TP_ENCAPTYPE_UDP:
+ /* No longer an encapsulation socket. See net/ipv4/udp.c */
+ (udp_sk(sk))->encap_type = 0;
+ (udp_sk(sk))->encap_rcv = NULL;
+ break;
+ case L2TP_ENCAPTYPE_IP:
+ break;
+ }
+
+ /* Remove hooks into tunnel socket */
+ tunnel->sock = NULL;
+ sk->sk_destruct = tunnel->old_sk_destruct;
+ sk->sk_user_data = NULL;
+
+ /* Call the original destructor */
+ if (sk->sk_destruct)
+ (*sk->sk_destruct)(sk);
+
+ /* We're finished with the socket */
+ l2tp_tunnel_dec_refcount(tunnel);
+
+end:
+ return;
+}
+EXPORT_SYMBOL(l2tp_tunnel_destruct);
+
+/* When the tunnel is closed, all the attached sessions need to go too.
+ */
+void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
+{
+ int hash;
+ struct hlist_node *walk;
+ struct hlist_node *tmp;
+ struct l2tp_session *session;
+
+ BUG_ON(tunnel == NULL);
+
+ PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO,
+ "%s: closing all sessions...\n", tunnel->name);
+
+ write_lock_bh(&tunnel->hlist_lock);
+ for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
+again:
+ hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
+ session = hlist_entry(walk, struct l2tp_session, hlist);
+
+ PRINTK(session->debug, L2TP_MSG_CONTROL, KERN_INFO,
+ "%s: closing session\n", session->name);
+
+ hlist_del_init(&session->hlist);
+
+ /* Since we should hold the sock lock while
+ * doing any unbinding, we need to release the
+ * lock we're holding before taking that lock.
+ * Hold a reference to the sock so it doesn't
+ * disappear as we're jumping between locks.
+ */
+ if (session->ref != NULL)
+ (*session->ref)(session);
+
+ write_unlock_bh(&tunnel->hlist_lock);
+
+ if (tunnel->version != L2TP_HDR_VER_2) {
+ struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
+
+ spin_lock_bh(&pn->l2tp_session_hlist_lock);
+ hlist_del_init_rcu(&session->global_hlist);
+ spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+ synchronize_rcu();
+ }
+
+ if (session->session_close != NULL)
+ (*session->session_close)(session);
+
+ if (session->deref != NULL)
+ (*session->deref)(session);
+
+ write_lock_bh(&tunnel->hlist_lock);
+
+ /* Now restart from the beginning of this hash
+ * chain. We always remove a session from the
+ * list so we are guaranteed to make forward
+ * progress.
+ */
+ goto again;
+ }
+ }
+ write_unlock_bh(&tunnel->hlist_lock);
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall);
+
+/* Really kill the tunnel.
+ * Come here only when all sessions have been cleared from the tunnel.
+ */
+void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
+{
+ struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
+
+ BUG_ON(atomic_read(&tunnel->ref_count) != 0);
+ BUG_ON(tunnel->sock != NULL);
+
+ PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO,
+ "%s: free...\n", tunnel->name);
+
+ /* Remove from tunnel list */
+ spin_lock_bh(&pn->l2tp_tunnel_list_lock);
+ list_del_rcu(&tunnel->list);
+ spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
+ synchronize_rcu();
+
+ atomic_dec(&l2tp_tunnel_count);
+ kfree(tunnel);
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_free);
+
+/* Create a socket for the tunnel, if one isn't set up by
+ * userspace. This is used for static tunnels where there is no
+ * managing L2TP daemon.
+ */
+static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct socket **sockp)
+{
+ int err = -EINVAL;
+ struct sockaddr_in udp_addr;
+ struct sockaddr_l2tpip ip_addr;
+ struct socket *sock = NULL;
+
+ switch (cfg->encap) {
+ case L2TP_ENCAPTYPE_UDP:
+ err = sock_create(AF_INET, SOCK_DGRAM, 0, sockp);
+ if (err < 0)
+ goto out;
+
+ sock = *sockp;
+
+ memset(&udp_addr, 0, sizeof(udp_addr));
+ udp_addr.sin_family = AF_INET;
+ udp_addr.sin_addr = cfg->local_ip;
+ udp_addr.sin_port = htons(cfg->local_udp_port);
+ err = kernel_bind(sock, (struct sockaddr *) &udp_addr, sizeof(udp_addr));
+ if (err < 0)
+ goto out;
+
+ udp_addr.sin_family = AF_INET;
+ udp_addr.sin_addr = cfg->peer_ip;
+ udp_addr.sin_port = htons(cfg->peer_udp_port);
+ err = kernel_connect(sock, (struct sockaddr *) &udp_addr, sizeof(udp_addr), 0);
+ if (err < 0)
+ goto out;
+
+ if (!cfg->use_udp_checksums)
+ sock->sk->sk_no_check = UDP_CSUM_NOXMIT;
+
+ break;
+
+ case L2TP_ENCAPTYPE_IP:
+ err = sock_create(AF_INET, SOCK_DGRAM, IPPROTO_L2TP, sockp);
+ if (err < 0)
+ goto out;
+
+ sock = *sockp;
+
+ memset(&ip_addr, 0, sizeof(ip_addr));
+ ip_addr.l2tp_family = AF_INET;
+ ip_addr.l2tp_addr = cfg->local_ip;
+ ip_addr.l2tp_conn_id = tunnel_id;
+ err = kernel_bind(sock, (struct sockaddr *) &ip_addr, sizeof(ip_addr));
+ if (err < 0)
+ goto out;
+
+ ip_addr.l2tp_family = AF_INET;
+ ip_addr.l2tp_addr = cfg->peer_ip;
+ ip_addr.l2tp_conn_id = peer_tunnel_id;
+ err = kernel_connect(sock, (struct sockaddr *) &ip_addr, sizeof(ip_addr), 0);
+ if (err < 0)
+ goto out;
+
+ break;
+
+ default:
+ goto out;
+ }
+
+out:
+ if ((err < 0) && sock) {
+ sock_release(sock);
+ *sockp = NULL;
+ }
+
+ return err;
+}
+
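+/* Create a tunnel context. If fd is negative, create a kernel UDP or
+ * L2TP/IP socket from the addresses in cfg (unmanaged tunnels);
+ * otherwise fd must refer to a suitable socket already opened by the
+ * userspace L2TP daemon.
+ */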
+int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
+{
+ struct l2tp_tunnel *tunnel = NULL;
+ int err;
+ struct socket *sock = NULL;
+ struct sock *sk = NULL;
+ struct l2tp_net *pn;
+ enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP;
+
+ /* Get the tunnel socket from the fd, which was opened by
+ * the userspace L2TP daemon. If not specified, create a
+ * kernel socket.
+ */
+ if (fd < 0) {
+ err = l2tp_tunnel_sock_create(tunnel_id, peer_tunnel_id, cfg, &sock);
+ if (err < 0)
+ goto err;
+ } else {
+ err = -EBADF;
+ sock = sockfd_lookup(fd, &err);
+ if (!sock) {
+ printk(KERN_ERR "tunl %hu: sockfd_lookup(fd=%d) returned %d\n",
+ tunnel_id, fd, err);
+ goto err;
+ }
+ }
+
+ sk = sock->sk;
+
+ if (cfg != NULL)
+ encap = cfg->encap;
+
+ /* Quick sanity checks */
+ switch (encap) {
+ case L2TP_ENCAPTYPE_UDP:
+ err = -EPROTONOSUPPORT;
+ if (sk->sk_protocol != IPPROTO_UDP) {
+ printk(KERN_ERR "tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
+ tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
+ goto err;
+ }
+ break;
+ case L2TP_ENCAPTYPE_IP:
+ err = -EPROTONOSUPPORT;
+ if (sk->sk_protocol != IPPROTO_L2TP) {
+ printk(KERN_ERR "tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
+ tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP);
+ goto err;
+ }
+ break;
+ }
+
+ /* Check if this socket has already been prepped */
+ tunnel = (struct l2tp_tunnel *)sk->sk_user_data;
+ if (tunnel != NULL) {
+ /* This socket has already been prepped */
+ err = -EBUSY;
+ goto err;
+ }
+
+ tunnel = kzalloc(sizeof(struct l2tp_tunnel), GFP_KERNEL);
+ if (tunnel == NULL) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ tunnel->version = version;
+ tunnel->tunnel_id = tunnel_id;
+ tunnel->peer_tunnel_id = peer_tunnel_id;
+ tunnel->debug = L2TP_DEFAULT_DEBUG_FLAGS;
+
+ tunnel->magic = L2TP_TUNNEL_MAGIC;
+ sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
+ rwlock_init(&tunnel->hlist_lock);
+
+ /* The net we belong to */
+ tunnel->l2tp_net = net;
+ pn = l2tp_pernet(net);
+
+ if (cfg != NULL)
+ tunnel->debug = cfg->debug;
+
+ /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
+ tunnel->encap = encap;
+ if (encap == L2TP_ENCAPTYPE_UDP) {
+ /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
+ udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
+ udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
+ }
+
+ sk->sk_user_data = tunnel;
+
+ /* Hook on the tunnel socket destructor so that we can cleanup
+ * if the tunnel socket goes away.
+ */
+ tunnel->old_sk_destruct = sk->sk_destruct;
+ sk->sk_destruct = &l2tp_tunnel_destruct;
+ tunnel->sock = sk;
+ sk->sk_allocation = GFP_ATOMIC;
+
+ /* Add tunnel to our list */
+ INIT_LIST_HEAD(&tunnel->list);
+ spin_lock_bh(&pn->l2tp_tunnel_list_lock);
+ list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
+ spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
+ synchronize_rcu();
+ atomic_inc(&l2tp_tunnel_count);
+
+ /* Bump the reference count. The tunnel context is deleted
+ * only when this drops to zero.
+ */
+ l2tp_tunnel_inc_refcount(tunnel);
+
+ err = 0;
+err:
+ if (tunnelp)
+ *tunnelp = tunnel;
+
+ /* If tunnel's socket was created by the kernel, it doesn't
+ * have a file.
+ */
+ if (sock && sock->file)
+ sockfd_put(sock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
+
+/* This function is used by the netlink TUNNEL_DELETE command.
+ */
+int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
+{
+ int err = 0;
+ struct socket *sock = tunnel->sock ? tunnel->sock->sk_socket : NULL;
+
+ /* Force the tunnel socket to close. This will eventually
+ * cause the tunnel to be deleted via the normal socket close
+ * mechanisms when userspace closes the tunnel socket.
+ */
+ if (sock != NULL) {
+ err = inet_shutdown(sock, 2);
+
+ /* If the tunnel's socket was created by the kernel,
+ * close the socket here since the socket was not
+ * created by userspace.
+ */
+ if (sock->file == NULL)
+ err = inet_release(sock);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
+
+/* Really kill the session.
+ */
+void l2tp_session_free(struct l2tp_session *session)
+{
+ struct l2tp_tunnel *tunnel;
+
+ BUG_ON(atomic_read(&session->ref_count) != 0);
+
+ tunnel = session->tunnel;
+ if (tunnel != NULL) {
+ BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
+
+ /* Delete the session from the hash */
+ write_lock_bh(&tunnel->hlist_lock);
+ hlist_del_init(&session->hlist);
+ write_unlock_bh(&tunnel->hlist_lock);
+
+ /* Unlink from the global hash if not L2TPv2 */
+ if (tunnel->version != L2TP_HDR_VER_2) {
+ struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
+
+ spin_lock_bh(&pn->l2tp_session_hlist_lock);
+ hlist_del_init_rcu(&session->global_hlist);
+ spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+ synchronize_rcu();
+ }
+
+ if (session->session_id != 0)
+ atomic_dec(&l2tp_session_count);
+
+ sock_put(tunnel->sock);
+
+ /* This will delete the tunnel context if this
+ * is the last session on the tunnel.
+ */
+ session->tunnel = NULL;
+ l2tp_tunnel_dec_refcount(tunnel);
+ }
+
+ kfree(session);
+
+ return;
+}
+EXPORT_SYMBOL_GPL(l2tp_session_free);
+
+/* This function is used by the netlink SESSION_DELETE command and by
+ * pseudowire modules.
+ */
+int l2tp_session_delete(struct l2tp_session *session)
+{
+ if (session->session_close != NULL)
+ (*session->session_close)(session);
+
+ l2tp_session_dec_refcount(session);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(l2tp_session_delete);
+
+
+/* We come here whenever a session's send_seq, cookie_len or
+ * l2specific_len parameters are set.
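+ * For example, an L2TPv3 session over UDP with a 4-byte cookie and the
+ * default L2-specific sublayer needs 4 (flags) + 4 (session id) +
+ * 4 (cookie) + 4 (sublayer) = 16 bytes.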
+ */
+void l2tp_session_set_header_len(struct l2tp_session *session, int version)
+{
+ if (version == L2TP_HDR_VER_2) {
+ session->hdr_len = 6;
+ if (session->send_seq)
+ session->hdr_len += 4;
+ } else {
+ session->hdr_len = 4 + session->cookie_len + session->l2specific_len + session->offset;
+ if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
+ session->hdr_len += 4;
+ }
+
+}
+EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
+
+struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
+{
+ struct l2tp_session *session;
+
+ session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL);
+ if (session != NULL) {
+ session->magic = L2TP_SESSION_MAGIC;
+ session->tunnel = tunnel;
+
+ session->session_id = session_id;
+ session->peer_session_id = peer_session_id;
+ session->nr = 1;
+
+ sprintf(&session->name[0], "sess %u/%u",
+ tunnel->tunnel_id, session->session_id);
+
+ skb_queue_head_init(&session->reorder_q);
+
+ INIT_HLIST_NODE(&session->hlist);
+ INIT_HLIST_NODE(&session->global_hlist);
+
+ /* Inherit debug options from tunnel */
+ session->debug = tunnel->debug;
+
+ if (cfg) {
+ session->pwtype = cfg->pw_type;
+ session->debug = cfg->debug;
+ session->mtu = cfg->mtu;
+ session->mru = cfg->mru;
+ session->send_seq = cfg->send_seq;
+ session->recv_seq = cfg->recv_seq;
+ session->lns_mode = cfg->lns_mode;
+ session->reorder_timeout = cfg->reorder_timeout;
+ session->offset = cfg->offset;
+ session->l2specific_type = cfg->l2specific_type;
+ session->l2specific_len = cfg->l2specific_len;
+ session->cookie_len = cfg->cookie_len;
+ memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len);
+ session->peer_cookie_len = cfg->peer_cookie_len;
+ memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len);
+ }
+
+ if (tunnel->version == L2TP_HDR_VER_2)
+ session->build_header = l2tp_build_l2tpv2_header;
+ else
+ session->build_header = l2tp_build_l2tpv3_header;
+
+ l2tp_session_set_header_len(session, tunnel->version);
+
+ /* Bump the reference count. The session context is deleted
+ * only when this drops to zero.
+ */
+ l2tp_session_inc_refcount(session);
+ l2tp_tunnel_inc_refcount(tunnel);
+
+ /* Ensure tunnel socket isn't deleted */
+ sock_hold(tunnel->sock);
+
+ /* Add session to the tunnel's hash list */
+ write_lock_bh(&tunnel->hlist_lock);
+ hlist_add_head(&session->hlist,
+ l2tp_session_id_hash(tunnel, session_id));
+ write_unlock_bh(&tunnel->hlist_lock);
+
+ /* And to the global session list if L2TPv3 */
+ if (tunnel->version != L2TP_HDR_VER_2) {
+ struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
+
+ spin_lock_bh(&pn->l2tp_session_hlist_lock);
+ hlist_add_head_rcu(&session->global_hlist,
+ l2tp_session_id_hash_2(pn, session_id));
+ spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+ synchronize_rcu();
+ }
+
+ /* Ignore management session in session count value */
+ if (session->session_id != 0)
+ atomic_inc(&l2tp_session_count);
+ }
+
+ return session;
+}
+EXPORT_SYMBOL_GPL(l2tp_session_create);
+
+/*****************************************************************************
+ * Init and cleanup
+ *****************************************************************************/
+
+static __net_init int l2tp_init_net(struct net *net)
+{
+ struct l2tp_net *pn = net_generic(net, l2tp_net_id);
+ int hash;
+
+ INIT_LIST_HEAD(&pn->l2tp_tunnel_list);
+ spin_lock_init(&pn->l2tp_tunnel_list_lock);
+
+ for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
+ INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]);
+
+ spin_lock_init(&pn->l2tp_session_hlist_lock);
+
+ return 0;
+}
+
+static struct pernet_operations l2tp_net_ops = {
+ .init = l2tp_init_net,
+ .id = &l2tp_net_id,
+ .size = sizeof(struct l2tp_net),
+};
+
+static int __init l2tp_init(void)
+{
+ int rc = 0;
+
+ rc = register_pernet_device(&l2tp_net_ops);
+ if (rc)
+ goto out;
+
+ printk(KERN_INFO "L2TP core driver, %s\n", L2TP_DRV_VERSION);
+
+out:
+ return rc;
+}
+
+static void __exit l2tp_exit(void)
+{
+ unregister_pernet_device(&l2tp_net_ops);
+}
+
+module_init(l2tp_init);
+module_exit(l2tp_exit);
+
+MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
+MODULE_DESCRIPTION("L2TP core");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(L2TP_DRV_VERSION);
+
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
new file mode 100644
index 000000000000..f0f318edd3f1
--- /dev/null
+++ b/net/l2tp/l2tp_core.h
@@ -0,0 +1,304 @@
+/*
+ * L2TP internal definitions.
+ *
+ * Copyright (c) 2008,2009 Katalix Systems Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _L2TP_CORE_H_
+#define _L2TP_CORE_H_
+
+/* Just some random numbers */
+#define L2TP_TUNNEL_MAGIC 0x42114DDA
+#define L2TP_SESSION_MAGIC 0x0C04EB7D
+
+/* Per tunnel, session hash table size */
+#define L2TP_HASH_BITS 4
+#define L2TP_HASH_SIZE (1 << L2TP_HASH_BITS)
+
+/* System-wide, session hash table size */
+#define L2TP_HASH_BITS_2 8
+#define L2TP_HASH_SIZE_2 (1 << L2TP_HASH_BITS_2)
+
+/* Debug message categories for the DEBUG socket option */
+enum {
+ L2TP_MSG_DEBUG = (1 << 0), /* verbose debug (if
+ * compiled in) */
+ L2TP_MSG_CONTROL = (1 << 1), /* userspace - kernel
+ * interface */
+ L2TP_MSG_SEQ = (1 << 2), /* sequence numbers */
+ L2TP_MSG_DATA = (1 << 3), /* data packets */
+};
+
+struct sk_buff;
+
+struct l2tp_stats {
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_errors;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_seq_discards;
+ u64 rx_oos_packets;
+ u64 rx_errors;
+ u64 rx_cookie_discards;
+};
+
+struct l2tp_tunnel;
+
+/* Describes a session. Contains information to determine incoming
+ * packets and transmit outgoing ones.
+ */
+struct l2tp_session_cfg {
+ enum l2tp_pwtype pw_type;
+ unsigned data_seq:2; /* data sequencing level
+ * 0 => none, 1 => IP only,
+ * 2 => all
+ */
+ unsigned recv_seq:1; /* expect receive packets with
+ * sequence numbers? */
+ unsigned send_seq:1; /* send packets with sequence
+ * numbers? */
+ unsigned lns_mode:1; /* behave as LNS? LAC enables
+ * sequence numbers under
+ * control of LNS. */
+ int debug; /* bitmask of debug message
+ * categories */
+ u16 vlan_id; /* VLAN pseudowire only */
+ u16 offset; /* offset to payload */
+ u16 l2specific_len; /* Layer 2 specific length */
+ u16 l2specific_type; /* Layer 2 specific type */
+ u8 cookie[8]; /* optional cookie */
+ int cookie_len; /* 0, 4 or 8 bytes */
+ u8 peer_cookie[8]; /* peer's cookie */
+ int peer_cookie_len; /* 0, 4 or 8 bytes */
+ int reorder_timeout; /* configured reorder timeout
+ * (in jiffies) */
+ int mtu;
+ int mru;
+ char *ifname;
+};
+
+struct l2tp_session {
+ int magic; /* should be
+ * L2TP_SESSION_MAGIC */
+
+ struct l2tp_tunnel *tunnel; /* back pointer to tunnel
+ * context */
+ u32 session_id;
+ u32 peer_session_id;
+ u8 cookie[8];
+ int cookie_len;
+ u8 peer_cookie[8];
+ int peer_cookie_len;
+ u16 offset; /* offset from end of L2TP header
+ to beginning of data */
+ u16 l2specific_len;
+ u16 l2specific_type;
+ u16 hdr_len;
+ u32 nr; /* session NR state (receive) */
+ u32 ns; /* session NS state (send) */
+ struct sk_buff_head reorder_q; /* receive reorder queue */
+ struct hlist_node hlist; /* Hash list node */
+ atomic_t ref_count;
+
+ char name[32]; /* for logging */
+ char ifname[IFNAMSIZ];
+ unsigned data_seq:2; /* data sequencing level
+ * 0 => none, 1 => IP only,
+ * 2 => all
+ */
+ unsigned recv_seq:1; /* expect receive packets with
+ * sequence numbers? */
+ unsigned send_seq:1; /* send packets with sequence
+ * numbers? */
+ unsigned lns_mode:1; /* behave as LNS? LAC enables
+ * sequence numbers under
+ * control of LNS. */
+ int debug; /* bitmask of debug message
+ * categories */
+ int reorder_timeout; /* configured reorder timeout
+ * (in jiffies) */
+ int mtu;
+ int mru;
+ enum l2tp_pwtype pwtype;
+ struct l2tp_stats stats;
+ struct hlist_node global_hlist; /* Global hash list node */
+
+ int (*build_header)(struct l2tp_session *session, void *buf);
+ void (*recv_skb)(struct l2tp_session *session, struct sk_buff *skb, int data_len);
+ void (*session_close)(struct l2tp_session *session);
+ void (*ref)(struct l2tp_session *session);
+ void (*deref)(struct l2tp_session *session);
+#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
+ void (*show)(struct seq_file *m, void *priv);
+#endif
+ uint8_t priv[0]; /* private data */
+};
+
+/* Describes the tunnel. It contains info to track all the associated
+ * sessions so incoming packets can be sorted out
+ */
+struct l2tp_tunnel_cfg {
+ int debug; /* bitmask of debug message
+ * categories */
+ enum l2tp_encap_type encap;
+
+ /* Used only for kernel-created sockets */
+ struct in_addr local_ip;
+ struct in_addr peer_ip;
+ u16 local_udp_port;
+ u16 peer_udp_port;
+ unsigned int use_udp_checksums:1;
+};
+
+struct l2tp_tunnel {
+ int magic; /* Should be L2TP_TUNNEL_MAGIC */
+ rwlock_t hlist_lock; /* protect session_hlist */
+ struct hlist_head session_hlist[L2TP_HASH_SIZE];
+ /* hashed list of sessions,
+ * hashed by id */
+ u32 tunnel_id;
+ u32 peer_tunnel_id;
+ int version; /* 2=>L2TPv2, 3=>L2TPv3 */
+
+ char name[20]; /* for logging */
+ int debug; /* bitmask of debug message
+ * categories */
+ enum l2tp_encap_type encap;
+ struct l2tp_stats stats;
+
+ struct list_head list; /* Keep a list of all tunnels */
+ struct net *l2tp_net; /* the net we belong to */
+
+ atomic_t ref_count;
+#ifdef CONFIG_DEBUG_FS
+ void (*show)(struct seq_file *m, void *arg);
+#endif
+ int (*recv_payload_hook)(struct sk_buff *skb);
+ void (*old_sk_destruct)(struct sock *);
+ struct sock *sock; /* Parent socket */
+ int fd;
+
+ uint8_t priv[0]; /* private data */
+};
+
+struct l2tp_nl_cmd_ops {
+ int (*session_create)(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg);
+ int (*session_delete)(struct l2tp_session *session);
+};
+
+static inline void *l2tp_tunnel_priv(struct l2tp_tunnel *tunnel)
+{
+ return &tunnel->priv[0];
+}
+
+static inline void *l2tp_session_priv(struct l2tp_session *session)
+{
+ return &session->priv[0];
+}
+
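+/* Map a socket to its tunnel context. A reference is taken on the
+ * socket; the caller must sock_put() it when done with the tunnel.
+ */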
+static inline struct l2tp_tunnel *l2tp_sock_to_tunnel(struct sock *sk)
+{
+ struct l2tp_tunnel *tunnel;
+
+ if (sk == NULL)
+ return NULL;
+
+ sock_hold(sk);
+ tunnel = (struct l2tp_tunnel *)(sk->sk_user_data);
+ if (tunnel == NULL) {
+ sock_put(sk);
+ goto out;
+ }
+
+ BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
+
+out:
+ return tunnel;
+}
+
+extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id);
+extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
+extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
+extern struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
+extern struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
+
+extern int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp);
+extern int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
+extern struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg);
+extern int l2tp_session_delete(struct l2tp_session *session);
+extern void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
+extern void l2tp_session_free(struct l2tp_session *session);
+extern void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb));
+extern int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, int (*payload_hook)(struct sk_buff *skb));
+extern int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);
+
+extern int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, size_t data_len);
+extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len);
+extern void l2tp_tunnel_destruct(struct sock *sk);
+extern void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
+extern void l2tp_session_set_header_len(struct l2tp_session *session, int version);
+
+extern int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops);
+extern void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
+
+/* Tunnel reference counts. Incremented per session that is added to
+ * the tunnel.
+ */
+static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel)
+{
+ atomic_inc(&tunnel->ref_count);
+}
+
+static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
+{
+ if (atomic_dec_and_test(&tunnel->ref_count))
+ l2tp_tunnel_free(tunnel);
+}
+#ifdef L2TP_REFCNT_DEBUG
+#define l2tp_tunnel_inc_refcount(_t) do { \
+ printk(KERN_DEBUG "l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
+ l2tp_tunnel_inc_refcount_1(_t); \
+ } while (0)
+#define l2tp_tunnel_dec_refcount(_t) do { \
+ printk(KERN_DEBUG "l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
+ l2tp_tunnel_dec_refcount_1(_t); \
+ } while (0)
+#else
+#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
+#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
+#endif
+
+/* Session reference counts. Incremented when code obtains a reference
+ * to a session.
+ */
+static inline void l2tp_session_inc_refcount_1(struct l2tp_session *session)
+{
+ atomic_inc(&session->ref_count);
+}
+
+static inline void l2tp_session_dec_refcount_1(struct l2tp_session *session)
+{
+ if (atomic_dec_and_test(&session->ref_count))
+ l2tp_session_free(session);
+}
+
+#ifdef L2TP_REFCNT_DEBUG
+#define l2tp_session_inc_refcount(_s) do { \
+ printk(KERN_DEBUG "l2tp_session_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_s)->name, atomic_read(&_s->ref_count)); \
+ l2tp_session_inc_refcount_1(_s); \
+ } while (0)
+#define l2tp_session_dec_refcount(_s) do { \
+ printk(KERN_DEBUG "l2tp_session_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_s)->name, atomic_read(&_s->ref_count)); \
+ l2tp_session_dec_refcount_1(_s); \
+ } while (0)
+#else
+#define l2tp_session_inc_refcount(s) l2tp_session_inc_refcount_1(s)
+#define l2tp_session_dec_refcount(s) l2tp_session_dec_refcount_1(s)
+#endif
+
+#endif /* _L2TP_CORE_H_ */
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
new file mode 100644
index 000000000000..104ec3b283d4
--- /dev/null
+++ b/net/l2tp/l2tp_debugfs.c
@@ -0,0 +1,341 @@
+/*
+ * L2TP subsystem debugfs
+ *
+ * Copyright (c) 2010 Katalix Systems Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/hash.h>
+#include <linux/l2tp.h>
+#include <linux/in.h>
+#include <linux/etherdevice.h>
+#include <linux/spinlock.h>
+#include <linux/debugfs.h>
+#include <net/sock.h>
+#include <net/ip.h>
+#include <net/icmp.h>
+#include <net/udp.h>
+#include <net/inet_common.h>
+#include <net/inet_hashtables.h>
+#include <net/tcp_states.h>
+#include <net/protocol.h>
+#include <net/xfrm.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+
+#include "l2tp_core.h"
+
+static struct dentry *rootdir;
+static struct dentry *tunnels;
+
+struct l2tp_dfs_seq_data {
+ struct net *net;
+ int tunnel_idx; /* current tunnel */
+ int session_idx; /* index of session within current tunnel */
+ struct l2tp_tunnel *tunnel;
+ struct l2tp_session *session; /* NULL means get next tunnel */
+};
+
+static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd)
+{
+ pd->tunnel = l2tp_tunnel_find_nth(pd->net, pd->tunnel_idx);
+ pd->tunnel_idx++;
+}
+
+static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd)
+{
+ pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
+ pd->session_idx++;
+
+ if (pd->session == NULL) {
+ pd->session_idx = 0;
+ l2tp_dfs_next_tunnel(pd);
+ }
+
+}
+
+static void *l2tp_dfs_seq_start(struct seq_file *m, loff_t *offs)
+{
+ struct l2tp_dfs_seq_data *pd = SEQ_START_TOKEN;
+ loff_t pos = *offs;
+
+ if (!pos)
+ goto out;
+
+ BUG_ON(m->private == NULL);
+ pd = m->private;
+
+ if (pd->tunnel == NULL)
+ l2tp_dfs_next_tunnel(pd);
+ else
+ l2tp_dfs_next_session(pd);
+
+ /* NULL tunnel and session indicates end of list */
+ if ((pd->tunnel == NULL) && (pd->session == NULL))
+ pd = NULL;
+
+out:
+ return pd;
+}
+
+
+static void *l2tp_dfs_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return NULL;
+}
+
+static void l2tp_dfs_seq_stop(struct seq_file *p, void *v)
+{
+ /* nothing to do */
+}
+
+static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)
+{
+ struct l2tp_tunnel *tunnel = v;
+ int session_count = 0;
+ int hash;
+ struct hlist_node *walk;
+ struct hlist_node *tmp;
+
+ read_lock_bh(&tunnel->hlist_lock);
+ for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
+ hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
+ struct l2tp_session *session;
+
+ session = hlist_entry(walk, struct l2tp_session, hlist);
+ if (session->session_id == 0)
+ continue;
+
+ session_count++;
+ }
+ }
+ read_unlock_bh(&tunnel->hlist_lock);
+
+ seq_printf(m, "\nTUNNEL %u peer %u", tunnel->tunnel_id, tunnel->peer_tunnel_id);
+ if (tunnel->sock) {
+ struct inet_sock *inet = inet_sk(tunnel->sock);
+ seq_printf(m, " from %pI4 to %pI4\n",
+ &inet->inet_saddr, &inet->inet_daddr);
+ if (tunnel->encap == L2TP_ENCAPTYPE_UDP)
+ seq_printf(m, " source port %hu, dest port %hu\n",
+ ntohs(inet->inet_sport), ntohs(inet->inet_dport));
+ }
+ seq_printf(m, " L2TPv%d, %s\n", tunnel->version,
+ tunnel->encap == L2TP_ENCAPTYPE_UDP ? "UDP" :
+ tunnel->encap == L2TP_ENCAPTYPE_IP ? "IP" :
+ "");
+ seq_printf(m, " %d sessions, refcnt %d/%d\n", session_count,
+ tunnel->sock ? atomic_read(&tunnel->sock->sk_refcnt) : 0,
+ atomic_read(&tunnel->ref_count));
+
+ seq_printf(m, " %08x rx %llu/%llu/%llu rx %llu/%llu/%llu\n",
+ tunnel->debug,
+ (unsigned long long)tunnel->stats.tx_packets,
+ (unsigned long long)tunnel->stats.tx_bytes,
+ (unsigned long long)tunnel->stats.tx_errors,
+ (unsigned long long)tunnel->stats.rx_packets,
+ (unsigned long long)tunnel->stats.rx_bytes,
+ (unsigned long long)tunnel->stats.rx_errors);
+
+ if (tunnel->show != NULL)
+ tunnel->show(m, tunnel);
+}
+
+static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v)
+{
+ struct l2tp_session *session = v;
+
+ seq_printf(m, " SESSION %u, peer %u, %s\n", session->session_id,
+ session->peer_session_id,
+ session->pwtype == L2TP_PWTYPE_ETH ? "ETH" :
+ session->pwtype == L2TP_PWTYPE_PPP ? "PPP" :
+ "");
+ if (session->send_seq || session->recv_seq)
+ seq_printf(m, " nr %hu, ns %hu\n", session->nr, session->ns);
+ seq_printf(m, " refcnt %d\n", atomic_read(&session->ref_count));
+ seq_printf(m, " config %d/%d/%c/%c/%s/%s %08x %u\n",
+ session->mtu, session->mru,
+ session->recv_seq ? 'R' : '-',
+ session->send_seq ? 'S' : '-',
+ session->data_seq == 1 ? "IPSEQ" :
+ session->data_seq == 2 ? "DATASEQ" : "-",
+ session->lns_mode ? "LNS" : "LAC",
+ session->debug,
+ jiffies_to_msecs(session->reorder_timeout));
+ seq_printf(m, " offset %hu l2specific %hu/%hu\n",
+ session->offset, session->l2specific_type, session->l2specific_len);
+ if (session->cookie_len) {
+ seq_printf(m, " cookie %02x%02x%02x%02x",
+ session->cookie[0], session->cookie[1],
+ session->cookie[2], session->cookie[3]);
+ if (session->cookie_len == 8)
+ seq_printf(m, "%02x%02x%02x%02x",
+ session->cookie[4], session->cookie[5],
+ session->cookie[6], session->cookie[7]);
+ seq_printf(m, "\n");
+ }
+ if (session->peer_cookie_len) {
+ seq_printf(m, " peer cookie %02x%02x%02x%02x",
+ session->peer_cookie[0], session->peer_cookie[1],
+ session->peer_cookie[2], session->peer_cookie[3]);
+ if (session->peer_cookie_len == 8)
+ seq_printf(m, "%02x%02x%02x%02x",
+ session->peer_cookie[4], session->peer_cookie[5],
+ session->peer_cookie[6], session->peer_cookie[7]);
+ seq_printf(m, "\n");
+ }
+
+ seq_printf(m, " %hu/%hu tx %llu/%llu/%llu rx %llu/%llu/%llu\n",
+ session->nr, session->ns,
+ (unsigned long long)session->stats.tx_packets,
+ (unsigned long long)session->stats.tx_bytes,
+ (unsigned long long)session->stats.tx_errors,
+ (unsigned long long)session->stats.rx_packets,
+ (unsigned long long)session->stats.rx_bytes,
+ (unsigned long long)session->stats.rx_errors);
+
+ if (session->show != NULL)
+ session->show(m, session);
+}
+
+static int l2tp_dfs_seq_show(struct seq_file *m, void *v)
+{
+ struct l2tp_dfs_seq_data *pd = v;
+
+ /* display header on line 1 */
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(m, "TUNNEL ID, peer ID from IP to IP\n");
+ seq_puts(m, " L2TPv2/L2TPv3, UDP/IP\n");
+ seq_puts(m, " sessions session-count, refcnt refcnt/sk->refcnt\n");
+ seq_puts(m, " debug tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
+ seq_puts(m, " SESSION ID, peer ID, PWTYPE\n");
+ seq_puts(m, " refcnt cnt\n");
+ seq_puts(m, " offset OFFSET l2specific TYPE/LEN\n");
+ seq_puts(m, " [ cookie ]\n");
+ seq_puts(m, " [ peer cookie ]\n");
+ seq_puts(m, " config mtu/mru/rcvseq/sendseq/dataseq/lns debug reorderto\n");
+ seq_puts(m, " nr/ns tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
+ goto out;
+ }
+
+ /* Show the tunnel or session context */
+ if (pd->session == NULL)
+ l2tp_dfs_seq_tunnel_show(m, pd->tunnel);
+ else
+ l2tp_dfs_seq_session_show(m, pd->session);
+
+out:
+ return 0;
+}
+
+static const struct seq_operations l2tp_dfs_seq_ops = {
+ .start = l2tp_dfs_seq_start,
+ .next = l2tp_dfs_seq_next,
+ .stop = l2tp_dfs_seq_stop,
+ .show = l2tp_dfs_seq_show,
+};
+
+static int l2tp_dfs_seq_open(struct inode *inode, struct file *file)
+{
+ struct l2tp_dfs_seq_data *pd;
+ struct seq_file *seq;
+ int rc = -ENOMEM;
+
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ if (pd == NULL)
+ goto out;
+
+ /* Derive the network namespace from the pid opening the
+ * file.
+ */
+ pd->net = get_net_ns_by_pid(current->pid);
+ if (IS_ERR(pd->net)) {
+ rc = PTR_ERR(pd->net);
+ goto err_free_pd;
+ }
+
+ rc = seq_open(file, &l2tp_dfs_seq_ops);
+ if (rc)
+ goto err_free_net;
+
+ seq = file->private_data;
+ seq->private = pd;
+
+out:
+ return rc;
+
+err_free_net:
+ put_net(pd->net);
+err_free_pd:
+ kfree(pd);
+ goto out;
+}
+
+static int l2tp_dfs_seq_release(struct inode *inode, struct file *file)
+{
+ struct l2tp_dfs_seq_data *pd;
+ struct seq_file *seq;
+
+ seq = file->private_data;
+ pd = seq->private;
+ if (pd->net)
+ put_net(pd->net);
+ kfree(pd);
+ seq_release(inode, file);
+
+ return 0;
+}
+
+static const struct file_operations l2tp_dfs_fops = {
+ .owner = THIS_MODULE,
+ .open = l2tp_dfs_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = l2tp_dfs_seq_release,
+};
+
+static int __init l2tp_debugfs_init(void)
+{
+ int rc = 0;
+
+ rootdir = debugfs_create_dir("l2tp", NULL);
+ if (IS_ERR(rootdir)) {
+ rc = PTR_ERR(rootdir);
+ rootdir = NULL;
+ goto out;
+ }
+
+ tunnels = debugfs_create_file("tunnels", 0600, rootdir, NULL, &l2tp_dfs_fops);
+ if (tunnels == NULL)
+ rc = -EIO;
+
+ printk(KERN_INFO "L2TP debugfs support\n");
+
+out:
+ if (rc)
+ printk(KERN_WARNING "l2tp debugfs: unable to init\n");
+
+ return rc;
+}
+
+static void __exit l2tp_debugfs_exit(void)
+{
+ debugfs_remove(tunnels);
+ debugfs_remove(rootdir);
+}
+
+module_init(l2tp_debugfs_init);
+module_exit(l2tp_debugfs_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
+MODULE_DESCRIPTION("L2TP debugfs driver");
+MODULE_VERSION("1.0");
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
new file mode 100644
index 000000000000..58c6c4cda73b
--- /dev/null
+++ b/net/l2tp/l2tp_eth.c
@@ -0,0 +1,334 @@
+/*
+ * L2TPv3 ethernet pseudowire driver
+ *
+ * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/hash.h>
+#include <linux/l2tp.h>
+#include <linux/in.h>
+#include <linux/etherdevice.h>
+#include <linux/spinlock.h>
+#include <net/sock.h>
+#include <net/ip.h>
+#include <net/icmp.h>
+#include <net/udp.h>
+#include <net/inet_common.h>
+#include <net/inet_hashtables.h>
+#include <net/tcp_states.h>
+#include <net/protocol.h>
+#include <net/xfrm.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+
+#include "l2tp_core.h"
+
+/* Default device name. May be overridden by name specified by user */
+#define L2TP_ETH_DEV_NAME "l2tpeth%d"
+
+/* via netdev_priv() */
+struct l2tp_eth {
+ struct net_device *dev;
+ struct sock *tunnel_sock;
+ struct l2tp_session *session;
+ struct list_head list;
+};
+
+/* via l2tp_session_priv() */
+struct l2tp_eth_sess {
+ struct net_device *dev;
+};
+
+/* per-net private data for this module */
+static unsigned int l2tp_eth_net_id;
+struct l2tp_eth_net {
+ struct list_head l2tp_eth_dev_list;
+ spinlock_t l2tp_eth_lock;
+};
+
+static inline struct l2tp_eth_net *l2tp_eth_pernet(struct net *net)
+{
+ return net_generic(net, l2tp_eth_net_id);
+}
+
+static int l2tp_eth_dev_init(struct net_device *dev)
+{
+ struct l2tp_eth *priv = netdev_priv(dev);
+
+ priv->dev = dev;
+ random_ether_addr(dev->dev_addr);
+ memset(&dev->broadcast[0], 0xff, 6);
+
+ return 0;
+}
+
+static void l2tp_eth_dev_uninit(struct net_device *dev)
+{
+ struct l2tp_eth *priv = netdev_priv(dev);
+ struct l2tp_eth_net *pn = l2tp_eth_pernet(dev_net(dev));
+
+ spin_lock(&pn->l2tp_eth_lock);
+ list_del_init(&priv->list);
+ spin_unlock(&pn->l2tp_eth_lock);
+ dev_put(dev);
+}
+
+static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct l2tp_eth *priv = netdev_priv(dev);
+ struct l2tp_session *session = priv->session;
+
+ l2tp_xmit_skb(session, skb, session->hdr_len);
+
+ dev->stats.tx_bytes += skb->len;
+ dev->stats.tx_packets++;
+
+ return 0;
+}
+
+static struct net_device_ops l2tp_eth_netdev_ops = {
+ .ndo_init = l2tp_eth_dev_init,
+ .ndo_uninit = l2tp_eth_dev_uninit,
+ .ndo_start_xmit = l2tp_eth_dev_xmit,
+};
+
+static void l2tp_eth_dev_setup(struct net_device *dev)
+{
+ ether_setup(dev);
+
+ dev->netdev_ops = &l2tp_eth_netdev_ops;
+ dev->destructor = free_netdev;
+}
+
+static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
+{
+ struct l2tp_eth_sess *spriv = l2tp_session_priv(session);
+ struct net_device *dev = spriv->dev;
+
+ if (session->debug & L2TP_MSG_DATA) {
+ unsigned int length;
+ int offset;
+ u8 *ptr = skb->data;
+
+ length = min(32u, skb->len);
+ if (!pskb_may_pull(skb, length))
+ goto error;
+
+ printk(KERN_DEBUG "%s: eth recv: ", session->name);
+
+ offset = 0;
+ do {
+ printk(" %02X", ptr[offset]);
+ } while (++offset < length);
+
+ printk("\n");
+ }
+
+ if (data_len < ETH_HLEN)
+ goto error;
+
+ secpath_reset(skb);
+
+ /* checksums verified by L2TP */
+ skb->ip_summed = CHECKSUM_NONE;
+
+ skb_dst_drop(skb);
+ nf_reset(skb);
+
+ if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
+ dev->last_rx = jiffies;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += data_len;
+ } else
+ dev->stats.rx_errors++;
+
+ return;
+
+error:
+ dev->stats.rx_errors++;
+ kfree_skb(skb);
+}
+
+static void l2tp_eth_delete(struct l2tp_session *session)
+{
+ struct l2tp_eth_sess *spriv;
+ struct net_device *dev;
+
+ if (session) {
+ spriv = l2tp_session_priv(session);
+ dev = spriv->dev;
+ if (dev) {
+ unregister_netdev(dev);
+ spriv->dev = NULL;
+ }
+ }
+}
+
+#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
+static void l2tp_eth_show(struct seq_file *m, void *arg)
+{
+ struct l2tp_session *session = arg;
+ struct l2tp_eth_sess *spriv = l2tp_session_priv(session);
+ struct net_device *dev = spriv->dev;
+
+ seq_printf(m, " interface %s\n", dev->name);
+}
+#endif
+
+static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
+{
+ struct net_device *dev;
+ char name[IFNAMSIZ];
+ struct l2tp_tunnel *tunnel;
+ struct l2tp_session *session;
+ struct l2tp_eth *priv;
+ struct l2tp_eth_sess *spriv;
+ int rc;
+ struct l2tp_eth_net *pn;
+
+ tunnel = l2tp_tunnel_find(net, tunnel_id);
+ if (!tunnel) {
+ rc = -ENODEV;
+ goto out;
+ }
+
+ session = l2tp_session_find(net, tunnel, session_id);
+ if (session) {
+ rc = -EEXIST;
+ goto out;
+ }
+
+ if (cfg->ifname) {
+ dev = dev_get_by_name(net, cfg->ifname);
+ if (dev) {
+ dev_put(dev);
+ rc = -EEXIST;
+ goto out;
+ }
+ strlcpy(name, cfg->ifname, IFNAMSIZ);
+ } else
+ strcpy(name, L2TP_ETH_DEV_NAME);
+
+ session = l2tp_session_create(sizeof(*spriv), tunnel, session_id,
+ peer_session_id, cfg);
+ if (!session) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ dev = alloc_netdev(sizeof(*priv), name, l2tp_eth_dev_setup);
+ if (!dev) {
+ rc = -ENOMEM;
+ goto out_del_session;
+ }
+
+ dev_net_set(dev, net);
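+ /* If no MTU was configured, derive one from the device default
+ * less the L2TP header overhead.
+ */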
+ if (session->mtu == 0)
+ session->mtu = dev->mtu - session->hdr_len;
+ dev->mtu = session->mtu;
+ dev->needed_headroom += session->hdr_len;
+
+ priv = netdev_priv(dev);
+ priv->dev = dev;
+ priv->session = session;
+ INIT_LIST_HEAD(&priv->list);
+
+ priv->tunnel_sock = tunnel->sock;
+ session->recv_skb = l2tp_eth_dev_recv;
+ session->session_close = l2tp_eth_delete;
+#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
+ session->show = l2tp_eth_show;
+#endif
+
+ spriv = l2tp_session_priv(session);
+ spriv->dev = dev;
+
+ rc = register_netdev(dev);
+ if (rc < 0)
+ goto out_del_dev;
+
+ /* Must be done after register_netdev() */
+ strlcpy(session->ifname, dev->name, IFNAMSIZ);
+
+ dev_hold(dev);
+ pn = l2tp_eth_pernet(dev_net(dev));
+ spin_lock(&pn->l2tp_eth_lock);
+ list_add(&priv->list, &pn->l2tp_eth_dev_list);
+ spin_unlock(&pn->l2tp_eth_lock);
+
+ return 0;
+
+out_del_dev:
+ free_netdev(dev);
+out_del_session:
+ l2tp_session_delete(session);
+out:
+ return rc;
+}
+
+static __net_init int l2tp_eth_init_net(struct net *net)
+{
+ struct l2tp_eth_net *pn = net_generic(net, l2tp_eth_net_id);
+
+ INIT_LIST_HEAD(&pn->l2tp_eth_dev_list);
+ spin_lock_init(&pn->l2tp_eth_lock);
+
+ return 0;
+}
+
+static __net_initdata struct pernet_operations l2tp_eth_net_ops = {
+ .init = l2tp_eth_init_net,
+ .id = &l2tp_eth_net_id,
+ .size = sizeof(struct l2tp_eth_net),
+};
+
+
+static const struct l2tp_nl_cmd_ops l2tp_eth_nl_cmd_ops = {
+ .session_create = l2tp_eth_create,
+ .session_delete = l2tp_session_delete,
+};
+
+
+static int __init l2tp_eth_init(void)
+{
+ int err = 0;
+
+ err = l2tp_nl_register_ops(L2TP_PWTYPE_ETH, &l2tp_eth_nl_cmd_ops);
+ if (err)
+ goto out;
+
+ err = register_pernet_device(&l2tp_eth_net_ops);
+ if (err)
+ goto out_unreg;
+
+ printk(KERN_INFO "L2TP ethernet pseudowire support (L2TPv3)\n");
+
+ return 0;
+
+out_unreg:
+ l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH);
+out:
+ return err;
+}
+
+static void __exit l2tp_eth_exit(void)
+{
+ unregister_pernet_device(&l2tp_eth_net_ops);
+ l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH);
+}
+
+module_init(l2tp_eth_init);
+module_exit(l2tp_eth_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
+MODULE_DESCRIPTION("L2TP ethernet pseudowire driver");
+MODULE_VERSION("1.0");
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
new file mode 100644
index 000000000000..0852512d392c
--- /dev/null
+++ b/net/l2tp/l2tp_ip.c
@@ -0,0 +1,679 @@
+/*
+ * L2TPv3 IP encapsulation support
+ *
+ * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/icmp.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/random.h>
+#include <linux/socket.h>
+#include <linux/l2tp.h>
+#include <linux/in.h>
+#include <net/sock.h>
+#include <net/ip.h>
+#include <net/icmp.h>
+#include <net/udp.h>
+#include <net/inet_common.h>
+#include <net/inet_hashtables.h>
+#include <net/tcp_states.h>
+#include <net/protocol.h>
+#include <net/xfrm.h>
+
+#include "l2tp_core.h"
+
+struct l2tp_ip_sock {
+ /* inet_sock has to be the first member of l2tp_ip_sock */
+ struct inet_sock inet;
+
+ __u32 conn_id;
+ __u32 peer_conn_id;
+
+ __u64 tx_packets;
+ __u64 tx_bytes;
+ __u64 tx_errors;
+ __u64 rx_packets;
+ __u64 rx_bytes;
+ __u64 rx_errors;
+};
+
+static DEFINE_RWLOCK(l2tp_ip_lock);
+static struct hlist_head l2tp_ip_table;
+static struct hlist_head l2tp_ip_bind_table;
+
+static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
+{
+ return (struct l2tp_ip_sock *)sk;
+}
+
+static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
+{
+ struct hlist_node *node;
+ struct sock *sk;
+
+ sk_for_each_bound(sk, node, &l2tp_ip_bind_table) {
+ struct inet_sock *inet = inet_sk(sk);
+ struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk);
+
+ if (l2tp == NULL)
+ continue;
+
+ if ((l2tp->conn_id == tunnel_id) &&
+#ifdef CONFIG_NET_NS
+ (sk->sk_net == net) &&
+#endif
+ !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
+ !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
+ goto found;
+ }
+
+ sk = NULL;
+found:
+ return sk;
+}
+
+static inline struct sock *l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
+{
+ struct sock *sk = __l2tp_ip_bind_lookup(net, laddr, dif, tunnel_id);
+ if (sk)
+ sock_hold(sk);
+
+ return sk;
+}
+
+/* When processing receive frames, there are two cases to
+ * consider. Data frames consist of a non-zero session-id and an
+ * optional cookie. Control frames consist of a regular L2TP header
+ * preceded by 32-bits of zeros.
+ *
+ * L2TPv3 Session Header Over IP
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Session ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Cookie (optional, maximum 64 bits)...
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * L2TPv3 Control Message Header Over IP
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | (32 bits of zeros) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |T|L|x|x|S|x|x|x|x|x|x|x| Ver | Length |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Control Connection ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Ns | Nr |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * All control frames are passed to userspace.
+ */
+static int l2tp_ip_recv(struct sk_buff *skb)
+{
+ struct sock *sk;
+ u32 session_id;
+ u32 tunnel_id;
+ unsigned char *ptr, *optr;
+ struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel = NULL;
+ int length;
+ int offset;
+
+ /* Point to L2TP header */
+ optr = ptr = skb->data;
+
+ if (!pskb_may_pull(skb, 4))
+ goto discard;
+
+ session_id = ntohl(*((__be32 *) ptr));
+ ptr += 4;
+
+ /* RFC3931: L2TP/IP packets have the first 4 bytes containing
+ * the session_id. If it is 0, the packet is an L2TP control
+ * frame and the session_id value can be discarded.
+ */
+ if (session_id == 0) {
+ __skb_pull(skb, 4);
+ goto pass_up;
+ }
+
+ /* Ok, this is a data packet. Lookup the session. */
+ session = l2tp_session_find(&init_net, NULL, session_id);
+ if (session == NULL)
+ goto discard;
+
+ tunnel = session->tunnel;
+ if (tunnel == NULL)
+ goto discard;
+
+ /* Trace packet contents, if enabled */
+ if (tunnel->debug & L2TP_MSG_DATA) {
+ length = min(32u, skb->len);
+ if (!pskb_may_pull(skb, length))
+ goto discard;
+
+ printk(KERN_DEBUG "%s: ip recv: ", tunnel->name);
+
+ offset = 0;
+ do {
+ printk(" %02X", ptr[offset]);
+ } while (++offset < length);
+
+ printk("\n");
+ }
+
+ l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);
+
+ return 0;
+
+pass_up:
+ /* Get the tunnel_id from the L2TP header */
+ if (!pskb_may_pull(skb, 12))
+ goto discard;
+
+ if ((skb->data[0] & 0xc0) != 0xc0)
+ goto discard;
+
+ tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
+ tunnel = l2tp_tunnel_find(&init_net, tunnel_id);
+ if (tunnel != NULL)
+ sk = tunnel->sock;
+ else {
+ struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
+
+ read_lock_bh(&l2tp_ip_lock);
+ sk = __l2tp_ip_bind_lookup(&init_net, iph->daddr, 0, tunnel_id);
+ read_unlock_bh(&l2tp_ip_lock);
+ }
+
+ if (sk == NULL)
+ goto discard;
+
+ sock_hold(sk);
+
+ if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
+ goto discard_put;
+
+ nf_reset(skb);
+
+ return sk_receive_skb(sk, skb, 1);
+
+discard_put:
+ sock_put(sk);
+
+discard:
+ kfree_skb(skb);
+ return 0;
+}
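+
+/* For illustration only (not built here): a minimal userspace sketch of
+ * a management daemon receiving the control frames that l2tp_ip_recv()
+ * passes up. It assumes the sockaddr_l2tpip layout used in this file;
+ * tunnel_id, buf and len are hypothetical and error handling is
+ * omitted.
+ *
+ *	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_L2TP);
+ *	struct sockaddr_l2tpip sax;
+ *
+ *	memset(&sax, 0, sizeof(sax));
+ *	sax.l2tp_family = AF_INET;
+ *	sax.l2tp_addr.s_addr = htonl(INADDR_ANY);
+ *	sax.l2tp_conn_id = tunnel_id;
+ *	bind(fd, (struct sockaddr *) &sax, sizeof(sax));
+ *
+ *	// control frames arrive with the leading zero session ID
+ *	// already pulled off (see __skb_pull(skb, 4) above)
+ *	len = recv(fd, buf, sizeof(buf), 0);
+ */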
+
+static int l2tp_ip_open(struct sock *sk)
+{
+ /* Prevent autobind. We don't have ports. */
+ inet_sk(sk)->inet_num = IPPROTO_L2TP;
+
+ write_lock_bh(&l2tp_ip_lock);
+ sk_add_node(sk, &l2tp_ip_table);
+ write_unlock_bh(&l2tp_ip_lock);
+
+ return 0;
+}
+
+static void l2tp_ip_close(struct sock *sk, long timeout)
+{
+ write_lock_bh(&l2tp_ip_lock);
+ hlist_del_init(&sk->sk_bind_node);
+ hlist_del_init(&sk->sk_node);
+ write_unlock_bh(&l2tp_ip_lock);
+ sk_common_release(sk);
+}
+
+static void l2tp_ip_destroy_sock(struct sock *sk)
+{
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
+ kfree_skb(skb);
+
+ sk_refcnt_debug_dec(sk);
+}
+
+static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+{
+ struct inet_sock *inet = inet_sk(sk);
+ struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr;
+ int ret = -EINVAL;
+ int chk_addr_ret;
+
+ ret = -EADDRINUSE;
+ read_lock_bh(&l2tp_ip_lock);
+ if (__l2tp_ip_bind_lookup(&init_net, addr->l2tp_addr.s_addr, sk->sk_bound_dev_if, addr->l2tp_conn_id))
+ goto out_in_use;
+
+ read_unlock_bh(&l2tp_ip_lock);
+
+ lock_sock(sk);
+ if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip))
+ goto out;
+
+ chk_addr_ret = inet_addr_type(&init_net, addr->l2tp_addr.s_addr);
+ ret = -EADDRNOTAVAIL;
+ if (addr->l2tp_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
+ chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
+ goto out;
+
+ inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr;
+ if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
+ inet->inet_saddr = 0; /* Use device */
+ sk_dst_reset(sk);
+
+ l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id;
+
+ write_lock_bh(&l2tp_ip_lock);
+ sk_add_bind_node(sk, &l2tp_ip_bind_table);
+ sk_del_node_init(sk);
+ write_unlock_bh(&l2tp_ip_lock);
+ ret = 0;
+out:
+ release_sock(sk);
+
+ return ret;
+
+out_in_use:
+ read_unlock_bh(&l2tp_ip_lock);
+
+ return ret;
+}
+
+static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+{
+ int rc;
+ struct inet_sock *inet = inet_sk(sk);
+ struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr;
+ struct rtable *rt;
+ __be32 saddr;
+ int oif;
+
+ rc = -EINVAL;
+ if (addr_len < sizeof(*lsa))
+ goto out;
+
+ rc = -EAFNOSUPPORT;
+ if (lsa->l2tp_family != AF_INET)
+ goto out;
+
+ sk_dst_reset(sk);
+
+ oif = sk->sk_bound_dev_if;
+ saddr = inet->inet_saddr;
+
+ rc = -EINVAL;
+ if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
+ goto out;
+
+ rc = ip_route_connect(&rt, lsa->l2tp_addr.s_addr, saddr,
+ RT_CONN_FLAGS(sk), oif,
+ IPPROTO_L2TP,
+ 0, 0, sk, 1);
+ if (rc) {
+ if (rc == -ENETUNREACH)
+ IP_INC_STATS_BH(&init_net, IPSTATS_MIB_OUTNOROUTES);
+ goto out;
+ }
+
+ rc = -ENETUNREACH;
+ if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
+ ip_rt_put(rt);
+ goto out;
+ }
+
+ l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
+
+ if (!inet->inet_saddr)
+ inet->inet_saddr = rt->rt_src;
+ if (!inet->inet_rcv_saddr)
+ inet->inet_rcv_saddr = rt->rt_src;
+ inet->inet_daddr = rt->rt_dst;
+ sk->sk_state = TCP_ESTABLISHED;
+ inet->inet_id = jiffies;
+
+ sk_dst_set(sk, &rt->u.dst);
+
+ write_lock_bh(&l2tp_ip_lock);
+ hlist_del_init(&sk->sk_bind_node);
+ sk_add_bind_node(sk, &l2tp_ip_bind_table);
+ write_unlock_bh(&l2tp_ip_lock);
+
+ rc = 0;
+out:
+ return rc;
+}
+
+static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
+ int *uaddr_len, int peer)
+{
+ struct sock *sk = sock->sk;
+ struct inet_sock *inet = inet_sk(sk);
+ struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk);
+ struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr;
+
+ memset(lsa, 0, sizeof(*lsa));
+ lsa->l2tp_family = AF_INET;
+ if (peer) {
+ if (!inet->inet_dport)
+ return -ENOTCONN;
+ lsa->l2tp_conn_id = lsk->peer_conn_id;
+ lsa->l2tp_addr.s_addr = inet->inet_daddr;
+ } else {
+ __be32 addr = inet->inet_rcv_saddr;
+ if (!addr)
+ addr = inet->inet_saddr;
+ lsa->l2tp_conn_id = lsk->conn_id;
+ lsa->l2tp_addr.s_addr = addr;
+ }
+ *uaddr_len = sizeof(*lsa);
+ return 0;
+}
+
+static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
+{
+ int rc;
+
+ if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
+ goto drop;
+
+ nf_reset(skb);
+
+ /* Charge it to the socket, dropping if the queue is full. */
+ rc = sock_queue_rcv_skb(sk, skb);
+ if (rc < 0)
+ goto drop;
+
+ return 0;
+
+drop:
+ IP_INC_STATS(&init_net, IPSTATS_MIB_INDISCARDS);
+ kfree_skb(skb);
+ return -1;
+}
+
+/* Userspace will call sendmsg() on the tunnel socket to send L2TP
+ * control frames.
+ */
+static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len)
+{
+ struct sk_buff *skb;
+ int rc;
+ struct l2tp_ip_sock *lsa = l2tp_ip_sk(sk);
+ struct inet_sock *inet = inet_sk(sk);
+ struct ip_options *opt = inet->opt;
+ struct rtable *rt = NULL;
+ int connected = 0;
+ __be32 daddr;
+
+ if (sock_flag(sk, SOCK_DEAD))
+ return -ENOTCONN;
+
+ /* Get and verify the address. */
+ if (msg->msg_name) {
+ struct sockaddr_l2tpip *lip = (struct sockaddr_l2tpip *) msg->msg_name;
+ if (msg->msg_namelen < sizeof(*lip))
+ return -EINVAL;
+
+ if (lip->l2tp_family != AF_INET) {
+ if (lip->l2tp_family != AF_UNSPEC)
+ return -EAFNOSUPPORT;
+ }
+
+ daddr = lip->l2tp_addr.s_addr;
+ } else {
+ if (sk->sk_state != TCP_ESTABLISHED)
+ return -EDESTADDRREQ;
+
+ daddr = inet->inet_daddr;
+ connected = 1;
+ }
+
+ /* Allocate a socket buffer */
+ rc = -ENOMEM;
+ skb = sock_wmalloc(sk, 2 + NET_SKB_PAD + sizeof(struct iphdr) +
+ 4 + len, 0, GFP_KERNEL);
+ if (!skb)
+ goto error;
+
+ /* Reserve space for headers, putting IP header on 4-byte boundary. */
+ skb_reserve(skb, 2 + NET_SKB_PAD);
+ skb_reset_network_header(skb);
+ skb_reserve(skb, sizeof(struct iphdr));
+ skb_reset_transport_header(skb);
+
+ /* Insert 0 session_id */
+ *((__be32 *) skb_put(skb, 4)) = 0;
+
+ /* Copy user data into skb */
+ rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
+ if (rc < 0) {
+ kfree_skb(skb);
+ goto error;
+ }
+
+ if (connected)
+ rt = (struct rtable *) __sk_dst_check(sk, 0);
+
+ if (rt == NULL) {
+ /* Use correct destination address if we have options. */
+ if (opt && opt->srr)
+ daddr = opt->faddr;
+
+ {
+ struct flowi fl = { .oif = sk->sk_bound_dev_if,
+ .nl_u = { .ip4_u = {
+ .daddr = daddr,
+ .saddr = inet->inet_saddr,
+ .tos = RT_CONN_FLAGS(sk) } },
+ .proto = sk->sk_protocol,
+ .flags = inet_sk_flowi_flags(sk),
+ .uli_u = { .ports = {
+ .sport = inet->inet_sport,
+ .dport = inet->inet_dport } } };
+
+			/* If this fails, the retransmit mechanism of the
+			 * transport layer will keep trying until a route
+			 * appears or the connection times itself out.
+ */
+ security_sk_classify_flow(sk, &fl);
+ if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0))
+ goto no_route;
+ }
+ sk_setup_caps(sk, &rt->u.dst);
+ }
+ skb_dst_set(skb, dst_clone(&rt->u.dst));
+
+ /* Queue the packet to IP for output */
+ rc = ip_queue_xmit(skb);
+
+error:
+ /* Update stats */
+ if (rc >= 0) {
+ lsa->tx_packets++;
+ lsa->tx_bytes += len;
+ rc = len;
+ } else {
+ lsa->tx_errors++;
+ }
+
+ return rc;
+
+no_route:
+ IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
+ kfree_skb(skb);
+ return -EHOSTUNREACH;
+}
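+
+/* For illustration only: a sketch of the matching userspace transmit
+ * side. The zero session ID is inserted by l2tp_ip_sendmsg() above, so
+ * the daemon sends just the raw L2TP control message on its
+ * IPPROTO_L2TP socket. fd, peer_ip, peer_tunnel_id, buf and len are
+ * hypothetical.
+ *
+ *	struct sockaddr_l2tpip dst;
+ *
+ *	memset(&dst, 0, sizeof(dst));
+ *	dst.l2tp_family = AF_INET;
+ *	dst.l2tp_addr.s_addr = peer_ip;
+ *	dst.l2tp_conn_id = peer_tunnel_id;
+ *	connect(fd, (struct sockaddr *) &dst, sizeof(dst));
+ *	send(fd, buf, len, 0);
+ */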
+
+static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ size_t len, int noblock, int flags, int *addr_len)
+{
+ struct inet_sock *inet = inet_sk(sk);
+ struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk);
+ size_t copied = 0;
+ int err = -EOPNOTSUPP;
+ struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
+ struct sk_buff *skb;
+
+ if (flags & MSG_OOB)
+ goto out;
+
+ if (addr_len)
+ *addr_len = sizeof(*sin);
+
+ skb = skb_recv_datagram(sk, flags, noblock, &err);
+ if (!skb)
+ goto out;
+
+ copied = skb->len;
+ if (len < copied) {
+ msg->msg_flags |= MSG_TRUNC;
+ copied = len;
+ }
+
+ err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ if (err)
+ goto done;
+
+ sock_recv_timestamp(msg, sk, skb);
+
+ /* Copy the address. */
+ if (sin) {
+ sin->sin_family = AF_INET;
+ sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
+ sin->sin_port = 0;
+ memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
+ }
+ if (inet->cmsg_flags)
+ ip_cmsg_recv(msg, skb);
+ if (flags & MSG_TRUNC)
+ copied = skb->len;
+done:
+ skb_free_datagram(sk, skb);
+out:
+ if (err) {
+ lsk->rx_errors++;
+ return err;
+ }
+
+ lsk->rx_packets++;
+ lsk->rx_bytes += copied;
+
+ return copied;
+}
+
+struct proto l2tp_ip_prot = {
+ .name = "L2TP/IP",
+ .owner = THIS_MODULE,
+ .init = l2tp_ip_open,
+ .close = l2tp_ip_close,
+ .bind = l2tp_ip_bind,
+ .connect = l2tp_ip_connect,
+ .disconnect = udp_disconnect,
+ .ioctl = udp_ioctl,
+ .destroy = l2tp_ip_destroy_sock,
+ .setsockopt = ip_setsockopt,
+ .getsockopt = ip_getsockopt,
+ .sendmsg = l2tp_ip_sendmsg,
+ .recvmsg = l2tp_ip_recvmsg,
+ .backlog_rcv = l2tp_ip_backlog_recv,
+ .hash = inet_hash,
+ .unhash = inet_unhash,
+ .obj_size = sizeof(struct l2tp_ip_sock),
+#ifdef CONFIG_COMPAT
+ .compat_setsockopt = compat_ip_setsockopt,
+ .compat_getsockopt = compat_ip_getsockopt,
+#endif
+};
+
+static const struct proto_ops l2tp_ip_ops = {
+ .family = PF_INET,
+ .owner = THIS_MODULE,
+ .release = inet_release,
+ .bind = inet_bind,
+ .connect = inet_dgram_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .getname = l2tp_ip_getname,
+ .poll = datagram_poll,
+ .ioctl = inet_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = inet_shutdown,
+ .setsockopt = sock_common_setsockopt,
+ .getsockopt = sock_common_getsockopt,
+ .sendmsg = inet_sendmsg,
+ .recvmsg = sock_common_recvmsg,
+ .mmap = sock_no_mmap,
+ .sendpage = sock_no_sendpage,
+#ifdef CONFIG_COMPAT
+ .compat_setsockopt = compat_sock_common_setsockopt,
+ .compat_getsockopt = compat_sock_common_getsockopt,
+#endif
+};
+
+static struct inet_protosw l2tp_ip_protosw = {
+ .type = SOCK_DGRAM,
+ .protocol = IPPROTO_L2TP,
+ .prot = &l2tp_ip_prot,
+ .ops = &l2tp_ip_ops,
+ .no_check = 0,
+};
+
+static struct net_protocol l2tp_ip_protocol __read_mostly = {
+ .handler = l2tp_ip_recv,
+};
+
+static int __init l2tp_ip_init(void)
+{
+ int err;
+
+ printk(KERN_INFO "L2TP IP encapsulation support (L2TPv3)\n");
+
+ err = proto_register(&l2tp_ip_prot, 1);
+ if (err != 0)
+ goto out;
+
+ err = inet_add_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
+ if (err)
+ goto out1;
+
+ inet_register_protosw(&l2tp_ip_protosw);
+ return 0;
+
+out1:
+ proto_unregister(&l2tp_ip_prot);
+out:
+ return err;
+}
+
+static void __exit l2tp_ip_exit(void)
+{
+ inet_unregister_protosw(&l2tp_ip_protosw);
+ inet_del_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
+ proto_unregister(&l2tp_ip_prot);
+}
+
+module_init(l2tp_ip_init);
+module_exit(l2tp_ip_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
+MODULE_DESCRIPTION("L2TP over IP");
+MODULE_VERSION("1.0");
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, SOCK_DGRAM, IPPROTO_L2TP);
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
new file mode 100644
index 000000000000..4c1e540732d7
--- /dev/null
+++ b/net/l2tp/l2tp_netlink.c
@@ -0,0 +1,840 @@
+/*
+ * L2TP netlink layer, for management
+ *
+ * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
+ *
+ * Partly based on the IrDA netlink implementation
+ * (see net/irda/irnetlink.c) which is:
+ * Copyright (c) 2007 Samuel Ortiz <samuel@sortiz.org>
+ * which is in turn partly based on the wireless netlink code:
+ * Copyright 2006 Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <net/sock.h>
+#include <net/genetlink.h>
+#include <net/udp.h>
+#include <linux/in.h>
+#include <linux/udp.h>
+#include <linux/socket.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <net/net_namespace.h>
+
+#include <linux/l2tp.h>
+
+#include "l2tp_core.h"
+
+
+static struct genl_family l2tp_nl_family = {
+ .id = GENL_ID_GENERATE,
+ .name = L2TP_GENL_NAME,
+ .version = L2TP_GENL_VERSION,
+ .hdrsize = 0,
+ .maxattr = L2TP_ATTR_MAX,
+};
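+
+/* Userspace reaches this family by resolving L2TP_GENL_NAME through the
+ * generic netlink controller. A sketch using libnl (function names as
+ * in libnl-3, an assumption; error handling omitted):
+ *
+ *	struct nl_sock *nlsk = nl_socket_alloc();
+ *	genl_connect(nlsk);
+ *	int family = genl_ctrl_resolve(nlsk, L2TP_GENL_NAME);
+ */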
+
+/* Accessed under genl lock */
+static const struct l2tp_nl_cmd_ops *l2tp_nl_cmd_ops[__L2TP_PWTYPE_MAX];
+
+static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info)
+{
+ u32 tunnel_id;
+ u32 session_id;
+ char *ifname;
+ struct l2tp_tunnel *tunnel;
+ struct l2tp_session *session = NULL;
+ struct net *net = genl_info_net(info);
+
+ if (info->attrs[L2TP_ATTR_IFNAME]) {
+ ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]);
+ session = l2tp_session_find_by_ifname(net, ifname);
+ } else if ((info->attrs[L2TP_ATTR_SESSION_ID]) &&
+ (info->attrs[L2TP_ATTR_CONN_ID])) {
+ tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
+ session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
+ tunnel = l2tp_tunnel_find(net, tunnel_id);
+ if (tunnel)
+ session = l2tp_session_find(net, tunnel, session_id);
+ }
+
+ return session;
+}
+
+static int l2tp_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
+{
+ struct sk_buff *msg;
+ void *hdr;
+ int ret = -ENOBUFS;
+
+ msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
+ &l2tp_nl_family, 0, L2TP_CMD_NOOP);
+ if (IS_ERR(hdr)) {
+ ret = PTR_ERR(hdr);
+ goto err_out;
+ }
+
+ genlmsg_end(msg, hdr);
+
+ return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);
+
+err_out:
+ nlmsg_free(msg);
+
+out:
+ return ret;
+}
+
+static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info)
+{
+ u32 tunnel_id;
+ u32 peer_tunnel_id;
+ int proto_version;
+ int fd;
+ int ret = 0;
+ struct l2tp_tunnel_cfg cfg = { 0, };
+ struct l2tp_tunnel *tunnel;
+ struct net *net = genl_info_net(info);
+
+ if (!info->attrs[L2TP_ATTR_CONN_ID]) {
+ ret = -EINVAL;
+ goto out;
+ }
+ tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
+
+ if (!info->attrs[L2TP_ATTR_PEER_CONN_ID]) {
+ ret = -EINVAL;
+ goto out;
+ }
+ peer_tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_CONN_ID]);
+
+ if (!info->attrs[L2TP_ATTR_PROTO_VERSION]) {
+ ret = -EINVAL;
+ goto out;
+ }
+ proto_version = nla_get_u8(info->attrs[L2TP_ATTR_PROTO_VERSION]);
+
+ if (!info->attrs[L2TP_ATTR_ENCAP_TYPE]) {
+ ret = -EINVAL;
+ goto out;
+ }
+ cfg.encap = nla_get_u16(info->attrs[L2TP_ATTR_ENCAP_TYPE]);
+
+ fd = -1;
+ if (info->attrs[L2TP_ATTR_FD]) {
+ fd = nla_get_u32(info->attrs[L2TP_ATTR_FD]);
+ } else {
+ if (info->attrs[L2TP_ATTR_IP_SADDR])
+ cfg.local_ip.s_addr = nla_get_be32(info->attrs[L2TP_ATTR_IP_SADDR]);
+ if (info->attrs[L2TP_ATTR_IP_DADDR])
+ cfg.peer_ip.s_addr = nla_get_be32(info->attrs[L2TP_ATTR_IP_DADDR]);
+ if (info->attrs[L2TP_ATTR_UDP_SPORT])
+ cfg.local_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_SPORT]);
+ if (info->attrs[L2TP_ATTR_UDP_DPORT])
+ cfg.peer_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_DPORT]);
+ if (info->attrs[L2TP_ATTR_UDP_CSUM])
+ cfg.use_udp_checksums = nla_get_flag(info->attrs[L2TP_ATTR_UDP_CSUM]);
+ }
+
+ if (info->attrs[L2TP_ATTR_DEBUG])
+ cfg.debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);
+
+ tunnel = l2tp_tunnel_find(net, tunnel_id);
+ if (tunnel != NULL) {
+ ret = -EEXIST;
+ goto out;
+ }
+
+ ret = -EINVAL;
+ switch (cfg.encap) {
+ case L2TP_ENCAPTYPE_UDP:
+ case L2TP_ENCAPTYPE_IP:
+ ret = l2tp_tunnel_create(net, fd, proto_version, tunnel_id,
+ peer_tunnel_id, &cfg, &tunnel);
+ break;
+ }
+
+out:
+ return ret;
+}
+
+static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info)
+{
+ struct l2tp_tunnel *tunnel;
+ u32 tunnel_id;
+ int ret = 0;
+ struct net *net = genl_info_net(info);
+
+ if (!info->attrs[L2TP_ATTR_CONN_ID]) {
+ ret = -EINVAL;
+ goto out;
+ }
+ tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
+
+ tunnel = l2tp_tunnel_find(net, tunnel_id);
+ if (tunnel == NULL) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ (void) l2tp_tunnel_delete(tunnel);
+
+out:
+ return ret;
+}
+
+static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info)
+{
+ struct l2tp_tunnel *tunnel;
+ u32 tunnel_id;
+ int ret = 0;
+ struct net *net = genl_info_net(info);
+
+ if (!info->attrs[L2TP_ATTR_CONN_ID]) {
+ ret = -EINVAL;
+ goto out;
+ }
+ tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
+
+ tunnel = l2tp_tunnel_find(net, tunnel_id);
+ if (tunnel == NULL) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (info->attrs[L2TP_ATTR_DEBUG])
+ tunnel->debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);
+
+out:
+ return ret;
+}
+
+static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags,
+ struct l2tp_tunnel *tunnel)
+{
+ void *hdr;
+ struct nlattr *nest;
+ struct sock *sk = NULL;
+ struct inet_sock *inet;
+
+ hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags,
+ L2TP_CMD_TUNNEL_GET);
+ if (IS_ERR(hdr))
+ return PTR_ERR(hdr);
+
+ NLA_PUT_U8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version);
+ NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id);
+ NLA_PUT_U32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id);
+ NLA_PUT_U32(skb, L2TP_ATTR_DEBUG, tunnel->debug);
+ NLA_PUT_U16(skb, L2TP_ATTR_ENCAP_TYPE, tunnel->encap);
+
+ nest = nla_nest_start(skb, L2TP_ATTR_STATS);
+ if (nest == NULL)
+ goto nla_put_failure;
+
+ NLA_PUT_U64(skb, L2TP_ATTR_TX_PACKETS, tunnel->stats.tx_packets);
+ NLA_PUT_U64(skb, L2TP_ATTR_TX_BYTES, tunnel->stats.tx_bytes);
+ NLA_PUT_U64(skb, L2TP_ATTR_TX_ERRORS, tunnel->stats.tx_errors);
+ NLA_PUT_U64(skb, L2TP_ATTR_RX_PACKETS, tunnel->stats.rx_packets);
+ NLA_PUT_U64(skb, L2TP_ATTR_RX_BYTES, tunnel->stats.rx_bytes);
+ NLA_PUT_U64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, tunnel->stats.rx_seq_discards);
+ NLA_PUT_U64(skb, L2TP_ATTR_RX_OOS_PACKETS, tunnel->stats.rx_oos_packets);
+ NLA_PUT_U64(skb, L2TP_ATTR_RX_ERRORS, tunnel->stats.rx_errors);
+ nla_nest_end(skb, nest);
+
+ sk = tunnel->sock;
+ if (!sk)
+ goto out;
+
+ inet = inet_sk(sk);
+
+ switch (tunnel->encap) {
+ case L2TP_ENCAPTYPE_UDP:
+ NLA_PUT_U16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport));
+ NLA_PUT_U16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport));
+ NLA_PUT_U8(skb, L2TP_ATTR_UDP_CSUM, (sk->sk_no_check != UDP_CSUM_NOXMIT));
+ /* NOBREAK */
+ case L2TP_ENCAPTYPE_IP:
+ NLA_PUT_BE32(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr);
+ NLA_PUT_BE32(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr);
+ break;
+ }
+
+out:
+ return genlmsg_end(skb, hdr);
+
+nla_put_failure:
+ genlmsg_cancel(skb, hdr);
+ return -1;
+}
+
+static int l2tp_nl_cmd_tunnel_get(struct sk_buff *skb, struct genl_info *info)
+{
+ struct l2tp_tunnel *tunnel;
+ struct sk_buff *msg;
+ u32 tunnel_id;
+ int ret = -ENOBUFS;
+ struct net *net = genl_info_net(info);
+
+ if (!info->attrs[L2TP_ATTR_CONN_ID]) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
+
+ tunnel = l2tp_tunnel_find(net, tunnel_id);
+ if (tunnel == NULL) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = l2tp_nl_tunnel_send(msg, info->snd_pid, info->snd_seq,
+ NLM_F_ACK, tunnel);
+ if (ret < 0)
+ goto err_out;
+
+ return genlmsg_unicast(net, msg, info->snd_pid);
+
+err_out:
+ nlmsg_free(msg);
+
+out:
+ return ret;
+}
+
+static int l2tp_nl_cmd_tunnel_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ int ti = cb->args[0];
+ struct l2tp_tunnel *tunnel;
+ struct net *net = sock_net(skb->sk);
+
+ for (;;) {
+ tunnel = l2tp_tunnel_find_nth(net, ti);
+ if (tunnel == NULL)
+ goto out;
+
+ if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ tunnel) <= 0)
+ goto out;
+
+ ti++;
+ }
+
+out:
+ cb->args[0] = ti;
+
+ return skb->len;
+}
+
+static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *info)
+{
+ u32 tunnel_id = 0;
+ u32 session_id;
+ u32 peer_session_id;
+ int ret = 0;
+ struct l2tp_tunnel *tunnel;
+ struct l2tp_session *session;
+ struct l2tp_session_cfg cfg = { 0, };
+ struct net *net = genl_info_net(info);
+
+ if (!info->attrs[L2TP_ATTR_CONN_ID]) {
+ ret = -EINVAL;
+ goto out;
+ }
+ tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
+ tunnel = l2tp_tunnel_find(net, tunnel_id);
+ if (!tunnel) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (!info->attrs[L2TP_ATTR_SESSION_ID]) {
+ ret = -EINVAL;
+ goto out;
+ }
+ session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
+ session = l2tp_session_find(net, tunnel, session_id);
+ if (session) {
+ ret = -EEXIST;
+ goto out;
+ }
+
+ if (!info->attrs[L2TP_ATTR_PEER_SESSION_ID]) {
+ ret = -EINVAL;
+ goto out;
+ }
+ peer_session_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_SESSION_ID]);
+
+ if (!info->attrs[L2TP_ATTR_PW_TYPE]) {
+ ret = -EINVAL;
+ goto out;
+ }
+ cfg.pw_type = nla_get_u16(info->attrs[L2TP_ATTR_PW_TYPE]);
+ if (cfg.pw_type >= __L2TP_PWTYPE_MAX) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (tunnel->version > 2) {
+ if (info->attrs[L2TP_ATTR_OFFSET])
+ cfg.offset = nla_get_u16(info->attrs[L2TP_ATTR_OFFSET]);
+
+ if (info->attrs[L2TP_ATTR_DATA_SEQ])
+ cfg.data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]);
+
+ cfg.l2specific_type = L2TP_L2SPECTYPE_DEFAULT;
+ if (info->attrs[L2TP_ATTR_L2SPEC_TYPE])
+ cfg.l2specific_type = nla_get_u8(info->attrs[L2TP_ATTR_L2SPEC_TYPE]);
+
+ cfg.l2specific_len = 4;
+ if (info->attrs[L2TP_ATTR_L2SPEC_LEN])
+ cfg.l2specific_len = nla_get_u8(info->attrs[L2TP_ATTR_L2SPEC_LEN]);
+
+ if (info->attrs[L2TP_ATTR_COOKIE]) {
+ u16 len = nla_len(info->attrs[L2TP_ATTR_COOKIE]);
+ if (len > 8) {
+ ret = -EINVAL;
+ goto out;
+ }
+ cfg.cookie_len = len;
+ memcpy(&cfg.cookie[0], nla_data(info->attrs[L2TP_ATTR_COOKIE]), len);
+ }
+ if (info->attrs[L2TP_ATTR_PEER_COOKIE]) {
+ u16 len = nla_len(info->attrs[L2TP_ATTR_PEER_COOKIE]);
+ if (len > 8) {
+ ret = -EINVAL;
+ goto out;
+ }
+ cfg.peer_cookie_len = len;
+ memcpy(&cfg.peer_cookie[0], nla_data(info->attrs[L2TP_ATTR_PEER_COOKIE]), len);
+ }
+ if (info->attrs[L2TP_ATTR_IFNAME])
+ cfg.ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]);
+
+ if (info->attrs[L2TP_ATTR_VLAN_ID])
+ cfg.vlan_id = nla_get_u16(info->attrs[L2TP_ATTR_VLAN_ID]);
+ }
+
+ if (info->attrs[L2TP_ATTR_DEBUG])
+ cfg.debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);
+
+ if (info->attrs[L2TP_ATTR_RECV_SEQ])
+ cfg.recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]);
+
+ if (info->attrs[L2TP_ATTR_SEND_SEQ])
+ cfg.send_seq = nla_get_u8(info->attrs[L2TP_ATTR_SEND_SEQ]);
+
+ if (info->attrs[L2TP_ATTR_LNS_MODE])
+ cfg.lns_mode = nla_get_u8(info->attrs[L2TP_ATTR_LNS_MODE]);
+
+ if (info->attrs[L2TP_ATTR_RECV_TIMEOUT])
+ cfg.reorder_timeout = nla_get_msecs(info->attrs[L2TP_ATTR_RECV_TIMEOUT]);
+
+ if (info->attrs[L2TP_ATTR_MTU])
+ cfg.mtu = nla_get_u16(info->attrs[L2TP_ATTR_MTU]);
+
+ if (info->attrs[L2TP_ATTR_MRU])
+ cfg.mru = nla_get_u16(info->attrs[L2TP_ATTR_MRU]);
+
+ if ((l2tp_nl_cmd_ops[cfg.pw_type] == NULL) ||
+ (l2tp_nl_cmd_ops[cfg.pw_type]->session_create == NULL)) {
+ ret = -EPROTONOSUPPORT;
+ goto out;
+ }
+
+ /* Check that pseudowire-specific params are present */
+ switch (cfg.pw_type) {
+ case L2TP_PWTYPE_NONE:
+ break;
+ case L2TP_PWTYPE_ETH_VLAN:
+ if (!info->attrs[L2TP_ATTR_VLAN_ID]) {
+ ret = -EINVAL;
+ goto out;
+ }
+ break;
+ case L2TP_PWTYPE_ETH:
+ break;
+ case L2TP_PWTYPE_PPP:
+ case L2TP_PWTYPE_PPP_AC:
+ break;
+ case L2TP_PWTYPE_IP:
+ default:
+ ret = -EPROTONOSUPPORT;
+ break;
+ }
+
+ ret = -EPROTONOSUPPORT;
+ if (l2tp_nl_cmd_ops[cfg.pw_type]->session_create)
+ ret = (*l2tp_nl_cmd_ops[cfg.pw_type]->session_create)(net, tunnel_id,
+ session_id, peer_session_id, &cfg);
+
+out:
+ return ret;
+}
+
+static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *info)
+{
+ int ret = 0;
+ struct l2tp_session *session;
+ u16 pw_type;
+
+ session = l2tp_nl_session_find(info);
+ if (session == NULL) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ pw_type = session->pwtype;
+ if (pw_type < __L2TP_PWTYPE_MAX)
+ if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete)
+ ret = (*l2tp_nl_cmd_ops[pw_type]->session_delete)(session);
+
+out:
+ return ret;
+}
+
+static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *info)
+{
+ int ret = 0;
+ struct l2tp_session *session;
+
+ session = l2tp_nl_session_find(info);
+ if (session == NULL) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (info->attrs[L2TP_ATTR_DEBUG])
+ session->debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);
+
+ if (info->attrs[L2TP_ATTR_DATA_SEQ])
+ session->data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]);
+
+ if (info->attrs[L2TP_ATTR_RECV_SEQ])
+ session->recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]);
+
+ if (info->attrs[L2TP_ATTR_SEND_SEQ])
+ session->send_seq = nla_get_u8(info->attrs[L2TP_ATTR_SEND_SEQ]);
+
+ if (info->attrs[L2TP_ATTR_LNS_MODE])
+ session->lns_mode = nla_get_u8(info->attrs[L2TP_ATTR_LNS_MODE]);
+
+ if (info->attrs[L2TP_ATTR_RECV_TIMEOUT])
+ session->reorder_timeout = nla_get_msecs(info->attrs[L2TP_ATTR_RECV_TIMEOUT]);
+
+ if (info->attrs[L2TP_ATTR_MTU])
+ session->mtu = nla_get_u16(info->attrs[L2TP_ATTR_MTU]);
+
+ if (info->attrs[L2TP_ATTR_MRU])
+ session->mru = nla_get_u16(info->attrs[L2TP_ATTR_MRU]);
+
+out:
+ return ret;
+}
+
+static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags,
+ struct l2tp_session *session)
+{
+ void *hdr;
+ struct nlattr *nest;
+ struct l2tp_tunnel *tunnel = session->tunnel;
+ struct sock *sk = NULL;
+
+ sk = tunnel->sock;
+
+ hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, L2TP_CMD_SESSION_GET);
+ if (IS_ERR(hdr))
+ return PTR_ERR(hdr);
+
+ NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id);
+ NLA_PUT_U32(skb, L2TP_ATTR_SESSION_ID, session->session_id);
+ NLA_PUT_U32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id);
+ NLA_PUT_U32(skb, L2TP_ATTR_PEER_SESSION_ID, session->peer_session_id);
+ NLA_PUT_U32(skb, L2TP_ATTR_DEBUG, session->debug);
+ NLA_PUT_U16(skb, L2TP_ATTR_PW_TYPE, session->pwtype);
+ NLA_PUT_U16(skb, L2TP_ATTR_MTU, session->mtu);
+ if (session->mru)
+ NLA_PUT_U16(skb, L2TP_ATTR_MRU, session->mru);
+
+ if (session->ifname && session->ifname[0])
+ NLA_PUT_STRING(skb, L2TP_ATTR_IFNAME, session->ifname);
+ if (session->cookie_len)
+ NLA_PUT(skb, L2TP_ATTR_COOKIE, session->cookie_len, &session->cookie[0]);
+ if (session->peer_cookie_len)
+ NLA_PUT(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len, &session->peer_cookie[0]);
+ NLA_PUT_U8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq);
+ NLA_PUT_U8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq);
+ NLA_PUT_U8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode);
+#ifdef CONFIG_XFRM
+ if ((sk) && (sk->sk_policy[0] || sk->sk_policy[1]))
+ NLA_PUT_U8(skb, L2TP_ATTR_USING_IPSEC, 1);
+#endif
+ if (session->reorder_timeout)
+ NLA_PUT_MSECS(skb, L2TP_ATTR_RECV_TIMEOUT, session->reorder_timeout);
+
+ nest = nla_nest_start(skb, L2TP_ATTR_STATS);
+ if (nest == NULL)
+ goto nla_put_failure;
+ NLA_PUT_U64(skb, L2TP_ATTR_TX_PACKETS, session->stats.tx_packets);
+ NLA_PUT_U64(skb, L2TP_ATTR_TX_BYTES, session->stats.tx_bytes);
+ NLA_PUT_U64(skb, L2TP_ATTR_TX_ERRORS, session->stats.tx_errors);
+ NLA_PUT_U64(skb, L2TP_ATTR_RX_PACKETS, session->stats.rx_packets);
+ NLA_PUT_U64(skb, L2TP_ATTR_RX_BYTES, session->stats.rx_bytes);
+ NLA_PUT_U64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, session->stats.rx_seq_discards);
+ NLA_PUT_U64(skb, L2TP_ATTR_RX_OOS_PACKETS, session->stats.rx_oos_packets);
+ NLA_PUT_U64(skb, L2TP_ATTR_RX_ERRORS, session->stats.rx_errors);
+ nla_nest_end(skb, nest);
+
+ return genlmsg_end(skb, hdr);
+
+ nla_put_failure:
+ genlmsg_cancel(skb, hdr);
+ return -1;
+}
+
+static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info)
+{
+ struct l2tp_session *session;
+ struct sk_buff *msg;
+ int ret;
+
+ session = l2tp_nl_session_find(info);
+ if (session == NULL) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = l2tp_nl_session_send(msg, info->snd_pid, info->snd_seq,
+ 0, session);
+ if (ret < 0)
+ goto err_out;
+
+ return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);
+
+err_out:
+ nlmsg_free(msg);
+
+out:
+ return ret;
+}
+
+static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(skb->sk);
+ struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel = NULL;
+ int ti = cb->args[0];
+ int si = cb->args[1];
+
+ for (;;) {
+ if (tunnel == NULL) {
+ tunnel = l2tp_tunnel_find_nth(net, ti);
+ if (tunnel == NULL)
+ goto out;
+ }
+
+ session = l2tp_session_find_nth(tunnel, si);
+ if (session == NULL) {
+ ti++;
+ tunnel = NULL;
+ si = 0;
+ continue;
+ }
+
+ if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ session) <= 0)
+ break;
+
+ si++;
+ }
+
+out:
+ cb->args[0] = ti;
+ cb->args[1] = si;
+
+ return skb->len;
+}
+
+static struct nla_policy l2tp_nl_policy[L2TP_ATTR_MAX + 1] = {
+ [L2TP_ATTR_NONE] = { .type = NLA_UNSPEC, },
+ [L2TP_ATTR_PW_TYPE] = { .type = NLA_U16, },
+ [L2TP_ATTR_ENCAP_TYPE] = { .type = NLA_U16, },
+ [L2TP_ATTR_OFFSET] = { .type = NLA_U16, },
+ [L2TP_ATTR_DATA_SEQ] = { .type = NLA_U8, },
+ [L2TP_ATTR_L2SPEC_TYPE] = { .type = NLA_U8, },
+ [L2TP_ATTR_L2SPEC_LEN] = { .type = NLA_U8, },
+ [L2TP_ATTR_PROTO_VERSION] = { .type = NLA_U8, },
+ [L2TP_ATTR_CONN_ID] = { .type = NLA_U32, },
+ [L2TP_ATTR_PEER_CONN_ID] = { .type = NLA_U32, },
+ [L2TP_ATTR_SESSION_ID] = { .type = NLA_U32, },
+ [L2TP_ATTR_PEER_SESSION_ID] = { .type = NLA_U32, },
+ [L2TP_ATTR_UDP_CSUM] = { .type = NLA_U8, },
+ [L2TP_ATTR_VLAN_ID] = { .type = NLA_U16, },
+ [L2TP_ATTR_DEBUG] = { .type = NLA_U32, },
+ [L2TP_ATTR_RECV_SEQ] = { .type = NLA_U8, },
+ [L2TP_ATTR_SEND_SEQ] = { .type = NLA_U8, },
+ [L2TP_ATTR_LNS_MODE] = { .type = NLA_U8, },
+ [L2TP_ATTR_USING_IPSEC] = { .type = NLA_U8, },
+ [L2TP_ATTR_RECV_TIMEOUT] = { .type = NLA_MSECS, },
+ [L2TP_ATTR_FD] = { .type = NLA_U32, },
+ [L2TP_ATTR_IP_SADDR] = { .type = NLA_U32, },
+ [L2TP_ATTR_IP_DADDR] = { .type = NLA_U32, },
+ [L2TP_ATTR_UDP_SPORT] = { .type = NLA_U16, },
+ [L2TP_ATTR_UDP_DPORT] = { .type = NLA_U16, },
+ [L2TP_ATTR_MTU] = { .type = NLA_U16, },
+ [L2TP_ATTR_MRU] = { .type = NLA_U16, },
+ [L2TP_ATTR_STATS] = { .type = NLA_NESTED, },
+ [L2TP_ATTR_IFNAME] = {
+ .type = NLA_NUL_STRING,
+ .len = IFNAMSIZ - 1,
+ },
+ [L2TP_ATTR_COOKIE] = {
+ .type = NLA_BINARY,
+ .len = 8,
+ },
+ [L2TP_ATTR_PEER_COOKIE] = {
+ .type = NLA_BINARY,
+ .len = 8,
+ },
+};
+
+static struct genl_ops l2tp_nl_ops[] = {
+ {
+ .cmd = L2TP_CMD_NOOP,
+ .doit = l2tp_nl_cmd_noop,
+ .policy = l2tp_nl_policy,
+ /* can be retrieved by unprivileged users */
+ },
+ {
+ .cmd = L2TP_CMD_TUNNEL_CREATE,
+ .doit = l2tp_nl_cmd_tunnel_create,
+ .policy = l2tp_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = L2TP_CMD_TUNNEL_DELETE,
+ .doit = l2tp_nl_cmd_tunnel_delete,
+ .policy = l2tp_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = L2TP_CMD_TUNNEL_MODIFY,
+ .doit = l2tp_nl_cmd_tunnel_modify,
+ .policy = l2tp_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = L2TP_CMD_TUNNEL_GET,
+ .doit = l2tp_nl_cmd_tunnel_get,
+ .dumpit = l2tp_nl_cmd_tunnel_dump,
+ .policy = l2tp_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = L2TP_CMD_SESSION_CREATE,
+ .doit = l2tp_nl_cmd_session_create,
+ .policy = l2tp_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = L2TP_CMD_SESSION_DELETE,
+ .doit = l2tp_nl_cmd_session_delete,
+ .policy = l2tp_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = L2TP_CMD_SESSION_MODIFY,
+ .doit = l2tp_nl_cmd_session_modify,
+ .policy = l2tp_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = L2TP_CMD_SESSION_GET,
+ .doit = l2tp_nl_cmd_session_get,
+ .dumpit = l2tp_nl_cmd_session_dump,
+ .policy = l2tp_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+};
+
+int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops)
+{
+ int ret;
+
+ ret = -EINVAL;
+ if (pw_type >= __L2TP_PWTYPE_MAX)
+ goto err;
+
+ genl_lock();
+ ret = -EBUSY;
+ if (l2tp_nl_cmd_ops[pw_type])
+ goto out;
+
+	l2tp_nl_cmd_ops[pw_type] = ops;
+	ret = 0;
+
+out:
+	genl_unlock();
+err:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(l2tp_nl_register_ops);
+
+void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type)
+{
+ if (pw_type < __L2TP_PWTYPE_MAX) {
+ genl_lock();
+ l2tp_nl_cmd_ops[pw_type] = NULL;
+ genl_unlock();
+ }
+}
+EXPORT_SYMBOL_GPL(l2tp_nl_unregister_ops);
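+
+/* Typical usage from a pseudowire driver (cf. l2tp_eth.c in this
+ * patch): register an l2tp_nl_cmd_ops for the PW type at module init
+ * and unregister it on exit. The "foo" names are placeholders.
+ *
+ *	static const struct l2tp_nl_cmd_ops foo_nl_cmd_ops = {
+ *		.session_create	= foo_session_create,
+ *		.session_delete	= l2tp_session_delete,
+ *	};
+ *
+ *	err = l2tp_nl_register_ops(L2TP_PWTYPE_ETH, &foo_nl_cmd_ops);
+ *	...
+ *	l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH);
+ */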
+
+static int l2tp_nl_init(void)
+{
+ int err;
+
+ printk(KERN_INFO "L2TP netlink interface\n");
+ err = genl_register_family_with_ops(&l2tp_nl_family, l2tp_nl_ops,
+ ARRAY_SIZE(l2tp_nl_ops));
+
+ return err;
+}
+
+static void l2tp_nl_cleanup(void)
+{
+ genl_unregister_family(&l2tp_nl_family);
+}
+
+module_init(l2tp_nl_init);
+module_exit(l2tp_nl_cleanup);
+
+MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
+MODULE_DESCRIPTION("L2TP netlink");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("net-pf-" __stringify(PF_NETLINK) "-proto-" \
+ __stringify(NETLINK_GENERIC) "-type-" "l2tp");
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
new file mode 100644
index 000000000000..90d82b3f2889
--- /dev/null
+++ b/net/l2tp/l2tp_ppp.c
@@ -0,0 +1,1837 @@
+/*****************************************************************************
+ * Linux PPP over L2TP (PPPoX/PPPoL2TP) Sockets
+ *
+ * PPPoX --- Generic PPP encapsulation socket family
+ * PPPoL2TP --- PPP over L2TP (RFC 2661)
+ *
+ * Version: 2.0.0
+ *
+ * Authors: James Chapman (jchapman@katalix.com)
+ *
+ * Based on original work by Martijn van Oosterhout <kleptog@svana.org>
+ *
+ * License:
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+/* This driver handles only L2TP data frames; control frames are handled by a
+ * userspace application.
+ *
+ * To send data in an L2TP session, userspace opens a PPPoL2TP socket and
+ * attaches it to a bound UDP socket with local tunnel_id / session_id and
+ * peer tunnel_id / session_id set. Data can then be sent or received using
+ * regular socket sendmsg() / recvmsg() calls. Kernel parameters of the socket
+ * can be read or modified using ioctl() or [gs]etsockopt() calls.
+ *
+ * When a PPPoL2TP socket is connected with local and peer session_id values
+ * zero, the socket is treated as a special tunnel management socket.
+ *
+ * Here's example userspace code to create a socket for sending/receiving data
+ * over an L2TP session:-
+ *
+ * struct sockaddr_pppol2tp sax;
+ * int fd;
+ * int session_fd;
+ *
+ * fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);
+ *
+ * sax.sa_family = AF_PPPOX;
+ * sax.sa_protocol = PX_PROTO_OL2TP;
+ * sax.pppol2tp.fd = tunnel_fd; // bound UDP socket
+ * sax.pppol2tp.addr.sin_addr.s_addr = addr->sin_addr.s_addr;
+ * sax.pppol2tp.addr.sin_port = addr->sin_port;
+ * sax.pppol2tp.addr.sin_family = AF_INET;
+ * sax.pppol2tp.s_tunnel = tunnel_id;
+ * sax.pppol2tp.s_session = session_id;
+ * sax.pppol2tp.d_tunnel = peer_tunnel_id;
+ * sax.pppol2tp.d_session = peer_session_id;
+ *
+ * session_fd = connect(fd, (struct sockaddr *)&sax, sizeof(sax));
+ *
+ * A pppd plugin that allows PPP traffic to be carried over L2TP using
+ * this driver is available from the OpenL2TP project at
+ * http://openl2tp.sourceforge.net.
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/uaccess.h>
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/jiffies.h>
+
+#include <linux/netdevice.h>
+#include <linux/net.h>
+#include <linux/inetdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/if_pppox.h>
+#include <linux/if_pppol2tp.h>
+#include <net/sock.h>
+#include <linux/ppp_channel.h>
+#include <linux/ppp_defs.h>
+#include <linux/if_ppp.h>
+#include <linux/file.h>
+#include <linux/hash.h>
+#include <linux/sort.h>
+#include <linux/proc_fs.h>
+#include <linux/l2tp.h>
+#include <linux/nsproxy.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include <net/dst.h>
+#include <net/ip.h>
+#include <net/udp.h>
+#include <net/xfrm.h>
+
+#include <asm/byteorder.h>
+#include <asm/atomic.h>
+
+#include "l2tp_core.h"
+
+#define PPPOL2TP_DRV_VERSION "V2.0"
+
+/* Space for IP, UDP, L2TP and PPP headers (20 + 8 + 10 + 2 = 40) */
+#define PPPOL2TP_HEADER_OVERHEAD 40
+
+#define PRINTK(_mask, _type, _lvl, _fmt, args...) \
+ do { \
+ if ((_mask) & (_type)) \
+ printk(_lvl "PPPOL2TP: " _fmt, ##args); \
+ } while (0)
+
+/* Number of bytes needed to build transmit L2TP headers.
+ * Unfortunately the size differs depending on whether sequence numbers
+ * are enabled: flags/version (2) + tunnel ID (2) + session ID (2)
+ * gives 6 bytes; adding Ns (2) and Nr (2) when sequence numbers are
+ * in use gives 10.
+ */
+#define PPPOL2TP_L2TP_HDR_SIZE_SEQ 10
+#define PPPOL2TP_L2TP_HDR_SIZE_NOSEQ 6
+
+/* Private data of each session. This data lives at the end of struct
+ * l2tp_session, referenced via session->priv[].
+ */
+struct pppol2tp_session {
+ int owner; /* pid that opened the socket */
+
+ struct sock *sock; /* Pointer to the session
+ * PPPoX socket */
+ struct sock *tunnel_sock; /* Pointer to the tunnel UDP
+ * socket */
+ int flags; /* accessed by PPPIOCGFLAGS.
+ * Unused. */
+};
+
+static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb);
+
+static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit, NULL };
+static const struct proto_ops pppol2tp_ops;
+
+/* Helpers to obtain tunnel/session contexts from sockets.
+ */
+static inline struct l2tp_session *pppol2tp_sock_to_session(struct sock *sk)
+{
+ struct l2tp_session *session;
+
+ if (sk == NULL)
+ return NULL;
+
+ sock_hold(sk);
+ session = (struct l2tp_session *)(sk->sk_user_data);
+ if (session == NULL) {
+ sock_put(sk);
+ goto out;
+ }
+
+ BUG_ON(session->magic != L2TP_SESSION_MAGIC);
+
+out:
+ return session;
+}
+
+/*****************************************************************************
+ * Receive data handling
+ *****************************************************************************/
+
+static int pppol2tp_recv_payload_hook(struct sk_buff *skb)
+{
+ /* Skip PPP header, if present. In testing, Microsoft L2TP clients
+ * don't send the PPP header (PPP header compression enabled), but
+ * other clients can include the header. So we cope with both cases
+ * here. The PPP header is always FF03 when using L2TP.
+ *
+ * Note that skb->data[] isn't dereferenced from a u16 ptr here since
+ * the field may be unaligned.
+ */
+ if (!pskb_may_pull(skb, 2))
+ return 1;
+
+ if ((skb->data[0] == 0xff) && (skb->data[1] == 0x03))
+ skb_pull(skb, 2);
+
+ return 0;
+}
+
+/* Receive message. This is the recvmsg for the PPPoL2TP socket.
+ */
+static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t len,
+ int flags)
+{
+ int err;
+ struct sk_buff *skb;
+ struct sock *sk = sock->sk;
+
+ err = -EIO;
+ if (sk->sk_state & PPPOX_BOUND)
+ goto end;
+
+ msg->msg_namelen = 0;
+
+ err = 0;
+ skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
+ flags & MSG_DONTWAIT, &err);
+ if (!skb)
+ goto end;
+
+ if (len > skb->len)
+ len = skb->len;
+ else if (len < skb->len)
+ msg->msg_flags |= MSG_TRUNC;
+
+ err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len);
+ if (likely(err == 0))
+ err = len;
+
+ kfree_skb(skb);
+end:
+ return err;
+}
+
+static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
+{
+ struct pppol2tp_session *ps = l2tp_session_priv(session);
+ struct sock *sk = NULL;
+
+ /* If the socket is bound, send it in to PPP's input queue. Otherwise
+ * queue it on the session socket.
+ */
+ sk = ps->sock;
+ if (sk == NULL)
+ goto no_sock;
+
+ if (sk->sk_state & PPPOX_BOUND) {
+ struct pppox_sock *po;
+ PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
+ "%s: recv %d byte data frame, passing to ppp\n",
+ session->name, data_len);
+
+ /* We need to forget all info related to the L2TP packet
+ * gathered in the skb as we are going to reuse the same
+ * skb for the inner packet.
+ * Namely we need to:
+ * - reset xfrm (IPSec) information as it applies to
+ * the outer L2TP packet and not to the inner one
+ * - release the dst to force a route lookup on the inner
+ * IP packet since skb->dst currently points to the dst
+ * of the UDP tunnel
+ * - reset netfilter information as it doesn't apply
+ * to the inner packet either
+ */
+ secpath_reset(skb);
+ skb_dst_drop(skb);
+ nf_reset(skb);
+
+ po = pppox_sk(sk);
+ ppp_input(&po->chan, skb);
+ } else {
+ PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
+ "%s: socket not bound\n", session->name);
+
+ /* Not bound. Nothing we can do, so discard. */
+ session->stats.rx_errors++;
+ kfree_skb(skb);
+ }
+
+ return;
+
+no_sock:
+ PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
+ "%s: no socket\n", session->name);
+ kfree_skb(skb);
+}
+
+static void pppol2tp_session_sock_hold(struct l2tp_session *session)
+{
+ struct pppol2tp_session *ps = l2tp_session_priv(session);
+
+ if (ps->sock)
+ sock_hold(ps->sock);
+}
+
+static void pppol2tp_session_sock_put(struct l2tp_session *session)
+{
+ struct pppol2tp_session *ps = l2tp_session_priv(session);
+
+ if (ps->sock)
+ sock_put(ps->sock);
+}
+
+/************************************************************************
+ * Transmit handling
+ ***********************************************************************/
+
+/* This is the sendmsg for the PPPoL2TP pppol2tp_session socket. We come here
+ * when a user application does a sendmsg() on the session socket. L2TP and
+ * PPP headers must be inserted into the user's data.
+ */
+static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
+ size_t total_len)
+{
+ static const unsigned char ppph[2] = { 0xff, 0x03 };
+ struct sock *sk = sock->sk;
+ struct sk_buff *skb;
+ int error;
+ struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel;
+ struct pppol2tp_session *ps;
+ int uhlen;
+
+ error = -ENOTCONN;
+ if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
+ goto error;
+
+ /* Get session and tunnel contexts */
+ error = -EBADF;
+ session = pppol2tp_sock_to_session(sk);
+ if (session == NULL)
+ goto error;
+
+ ps = l2tp_session_priv(session);
+ tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock);
+ if (tunnel == NULL)
+ goto error_put_sess;
+
+ uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
+
+ /* Allocate a socket buffer */
+ error = -ENOMEM;
+ skb = sock_wmalloc(sk, NET_SKB_PAD + sizeof(struct iphdr) +
+ uhlen + session->hdr_len +
+ sizeof(ppph) + total_len,
+ 0, GFP_KERNEL);
+ if (!skb)
+ goto error_put_sess_tun;
+
+ /* Reserve space for headers. */
+ skb_reserve(skb, NET_SKB_PAD);
+ skb_reset_network_header(skb);
+ skb_reserve(skb, sizeof(struct iphdr));
+ skb_reset_transport_header(skb);
+ skb_reserve(skb, uhlen);
+
+ /* Add PPP header */
+ skb->data[0] = ppph[0];
+ skb->data[1] = ppph[1];
+ skb_put(skb, 2);
+
+ /* Copy user data into skb */
+ error = memcpy_fromiovec(skb->data, m->msg_iov, total_len);
+ if (error < 0) {
+ kfree_skb(skb);
+ goto error_put_sess_tun;
+ }
+ skb_put(skb, total_len);
+
+ l2tp_xmit_skb(session, skb, session->hdr_len);
+
+	sock_put(ps->tunnel_sock);
+	sock_put(sk);
+
+	return total_len;
+
+error_put_sess_tun:
+ sock_put(ps->tunnel_sock);
+error_put_sess:
+ sock_put(sk);
+error:
+ return error;
+}
+
+/* Transmit function called by generic PPP driver. Sends PPP frame
+ * over PPPoL2TP socket.
+ *
+ * This is almost the same as pppol2tp_sendmsg(), but rather than
+ * being called with a msghdr from userspace, it is called with a skb
+ * from the kernel.
+ *
+ * The supplied skb from ppp doesn't have enough headroom for the
+ * insertion of L2TP, UDP and IP headers so we need to allocate more
+ * headroom in the skb. This will create a cloned skb. But we must be
+ * careful in the error case because the caller will expect to free
+ * the skb it supplied, not our cloned skb. So we take care to always
+ * leave the original skb unfreed if we return an error.
+ */
+static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+{
+ static const u8 ppph[2] = { 0xff, 0x03 };
+ struct sock *sk = (struct sock *) chan->private;
+ struct sock *sk_tun;
+ struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel;
+ struct pppol2tp_session *ps;
+ int old_headroom;
+ int new_headroom;
+
+ if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
+ goto abort;
+
+ /* Get session and tunnel contexts from the socket */
+ session = pppol2tp_sock_to_session(sk);
+ if (session == NULL)
+ goto abort;
+
+ ps = l2tp_session_priv(session);
+ sk_tun = ps->tunnel_sock;
+ if (sk_tun == NULL)
+ goto abort_put_sess;
+ tunnel = l2tp_sock_to_tunnel(sk_tun);
+ if (tunnel == NULL)
+ goto abort_put_sess;
+
+ old_headroom = skb_headroom(skb);
+ if (skb_cow_head(skb, sizeof(ppph)))
+ goto abort_put_sess_tun;
+
+ new_headroom = skb_headroom(skb);
+ skb->truesize += new_headroom - old_headroom;
+
+ /* Setup PPP header */
+ __skb_push(skb, sizeof(ppph));
+ skb->data[0] = ppph[0];
+ skb->data[1] = ppph[1];
+
+ l2tp_xmit_skb(session, skb, session->hdr_len);
+
+ sock_put(sk_tun);
+ sock_put(sk);
+ return 1;
+
+abort_put_sess_tun:
+ sock_put(sk_tun);
+abort_put_sess:
+ sock_put(sk);
+abort:
+ /* Free the original skb */
+ kfree_skb(skb);
+ return 1;
+}
+
+/*****************************************************************************
+ * Session (and tunnel control) socket create/destroy.
+ *****************************************************************************/
+
+/* Called by l2tp_core when a session socket is being closed.
+ */
+static void pppol2tp_session_close(struct l2tp_session *session)
+{
+ struct pppol2tp_session *ps = l2tp_session_priv(session);
+ struct sock *sk = ps->sock;
+ struct sk_buff *skb;
+
+ BUG_ON(session->magic != L2TP_SESSION_MAGIC);
+
+ if (session->session_id == 0)
+ goto out;
+
+ if (sk != NULL) {
+ lock_sock(sk);
+
+ if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
+ pppox_unbind_sock(sk);
+ sk->sk_state = PPPOX_DEAD;
+ sk->sk_state_change(sk);
+ }
+
+ /* Purge any queued data */
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_write_queue);
+ while ((skb = skb_dequeue(&session->reorder_q))) {
+ kfree_skb(skb);
+ sock_put(sk);
+ }
+
+ release_sock(sk);
+ }
+
+out:
+ return;
+}
+
+/* Really kill the session socket. (Called from sock_put() if
+ * refcnt == 0.)
+ */
+static void pppol2tp_session_destruct(struct sock *sk)
+{
+ struct l2tp_session *session;
+
+ if (sk->sk_user_data != NULL) {
+ session = sk->sk_user_data;
+ if (session == NULL)
+ goto out;
+
+ sk->sk_user_data = NULL;
+ BUG_ON(session->magic != L2TP_SESSION_MAGIC);
+ l2tp_session_dec_refcount(session);
+ }
+
+out:
+ return;
+}
+
+/* Called when the PPPoX socket (session) is closed.
+ */
+static int pppol2tp_release(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+ struct l2tp_session *session;
+ int error;
+
+ if (!sk)
+ return 0;
+
+ error = -EBADF;
+ lock_sock(sk);
+ if (sock_flag(sk, SOCK_DEAD) != 0)
+ goto error;
+
+ pppox_unbind_sock(sk);
+
+ /* Signal the death of the socket. */
+ sk->sk_state = PPPOX_DEAD;
+ sock_orphan(sk);
+ sock->sk = NULL;
+
+ session = pppol2tp_sock_to_session(sk);
+
+ /* Purge any queued data */
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_write_queue);
+ if (session != NULL) {
+ struct sk_buff *skb;
+ while ((skb = skb_dequeue(&session->reorder_q))) {
+ kfree_skb(skb);
+ sock_put(sk);
+ }
+ sock_put(sk);
+ }
+
+ release_sock(sk);
+
+ /* This will delete the session context via
+ * pppol2tp_session_destruct() if the socket's refcnt drops to
+ * zero.
+ */
+ sock_put(sk);
+
+ return 0;
+
+error:
+ release_sock(sk);
+ return error;
+}
+
+static struct proto pppol2tp_sk_proto = {
+ .name = "PPPOL2TP",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct pppox_sock),
+};
+
+static int pppol2tp_backlog_recv(struct sock *sk, struct sk_buff *skb)
+{
+ int rc;
+
+ rc = l2tp_udp_encap_recv(sk, skb);
+ if (rc)
+ kfree_skb(skb);
+
+ return NET_RX_SUCCESS;
+}
+
+/* socket() handler. Initialize a new struct sock.
+ */
+static int pppol2tp_create(struct net *net, struct socket *sock)
+{
+ int error = -ENOMEM;
+ struct sock *sk;
+
+ sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto);
+ if (!sk)
+ goto out;
+
+ sock_init_data(sock, sk);
+
+ sock->state = SS_UNCONNECTED;
+ sock->ops = &pppol2tp_ops;
+
+ sk->sk_backlog_rcv = pppol2tp_backlog_recv;
+ sk->sk_protocol = PX_PROTO_OL2TP;
+ sk->sk_family = PF_PPPOX;
+ sk->sk_state = PPPOX_NONE;
+ sk->sk_type = SOCK_STREAM;
+ sk->sk_destruct = pppol2tp_session_destruct;
+
+ error = 0;
+
+out:
+ return error;
+}
+
+#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
+static void pppol2tp_show(struct seq_file *m, void *arg)
+{
+ struct l2tp_session *session = arg;
+ struct pppol2tp_session *ps = l2tp_session_priv(session);
+
+ if (ps) {
+ struct pppox_sock *po = pppox_sk(ps->sock);
+ if (po)
+ seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan));
+ }
+}
+#endif
+
+/* connect() handler. Attach a PPPoX socket to a tunnel UDP socket
+ */
+static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ int sockaddr_len, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct sockaddr_pppol2tp *sp = (struct sockaddr_pppol2tp *) uservaddr;
+ struct sockaddr_pppol2tpv3 *sp3 = (struct sockaddr_pppol2tpv3 *) uservaddr;
+ struct pppox_sock *po = pppox_sk(sk);
+ struct l2tp_session *session = NULL;
+ struct l2tp_tunnel *tunnel;
+ struct pppol2tp_session *ps;
+ struct dst_entry *dst;
+ struct l2tp_session_cfg cfg = { 0, };
+ int error = 0;
+ u32 tunnel_id, peer_tunnel_id;
+ u32 session_id, peer_session_id;
+ int ver = 2;
+ int fd;
+
+ lock_sock(sk);
+
+ error = -EINVAL;
+ if (sp->sa_protocol != PX_PROTO_OL2TP)
+ goto end;
+
+ /* Check for already bound sockets */
+ error = -EBUSY;
+ if (sk->sk_state & PPPOX_CONNECTED)
+ goto end;
+
+	/* We don't support rebinding anyway */
+ error = -EALREADY;
+ if (sk->sk_user_data)
+ goto end; /* socket is already attached */
+
+ /* Get params from socket address. Handle L2TPv2 and L2TPv3 */
+ if (sockaddr_len == sizeof(struct sockaddr_pppol2tp)) {
+ fd = sp->pppol2tp.fd;
+ tunnel_id = sp->pppol2tp.s_tunnel;
+ peer_tunnel_id = sp->pppol2tp.d_tunnel;
+ session_id = sp->pppol2tp.s_session;
+ peer_session_id = sp->pppol2tp.d_session;
+ } else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpv3)) {
+ ver = 3;
+ fd = sp3->pppol2tp.fd;
+ tunnel_id = sp3->pppol2tp.s_tunnel;
+ peer_tunnel_id = sp3->pppol2tp.d_tunnel;
+ session_id = sp3->pppol2tp.s_session;
+ peer_session_id = sp3->pppol2tp.d_session;
+ } else {
+ error = -EINVAL;
+ goto end; /* bad socket address */
+ }
+
+ /* Don't bind if tunnel_id is 0 */
+ error = -EINVAL;
+ if (tunnel_id == 0)
+ goto end;
+
+ tunnel = l2tp_tunnel_find(sock_net(sk), tunnel_id);
+
+	/* Special case: create a tunnel context if both session_id and
+	 * peer_session_id are 0. Otherwise look up the tunnel using the
+	 * supplied tunnel id.
+	 */
+ if ((session_id == 0) && (peer_session_id == 0)) {
+ if (tunnel == NULL) {
+ struct l2tp_tunnel_cfg tcfg = {
+ .encap = L2TP_ENCAPTYPE_UDP,
+ .debug = 0,
+ };
+			error = l2tp_tunnel_create(sock_net(sk), fd, ver, tunnel_id,
+						   peer_tunnel_id, &tcfg, &tunnel);
+ if (error < 0)
+ goto end;
+ }
+ } else {
+ /* Error if we can't find the tunnel */
+ error = -ENOENT;
+ if (tunnel == NULL)
+ goto end;
+
+ /* Error if socket is not prepped */
+ if (tunnel->sock == NULL)
+ goto end;
+ }
+
+ if (tunnel->recv_payload_hook == NULL)
+ tunnel->recv_payload_hook = pppol2tp_recv_payload_hook;
+
+ if (tunnel->peer_tunnel_id == 0) {
+ if (ver == 2)
+ tunnel->peer_tunnel_id = sp->pppol2tp.d_tunnel;
+ else
+ tunnel->peer_tunnel_id = sp3->pppol2tp.d_tunnel;
+ }
+
+	/* Create the session if it doesn't already exist. We handle the
+	 * case where a session was previously created by the netlink
+	 * interface by checking that the session doesn't already have a
+	 * socket and that its tunnel socket is what we expect. If either
+	 * check fails, return EEXIST to the caller.
+	 */
+ session = l2tp_session_find(sock_net(sk), tunnel, session_id);
+ if (session == NULL) {
+ /* Default MTU must allow space for UDP/L2TP/PPP
+ * headers.
+ */
+ cfg.mtu = cfg.mru = 1500 - PPPOL2TP_HEADER_OVERHEAD;
+
+ /* Allocate and initialize a new session context. */
+ session = l2tp_session_create(sizeof(struct pppol2tp_session),
+ tunnel, session_id,
+ peer_session_id, &cfg);
+ if (session == NULL) {
+ error = -ENOMEM;
+ goto end;
+ }
+ } else {
+ ps = l2tp_session_priv(session);
+ error = -EEXIST;
+ if (ps->sock != NULL)
+ goto end;
+
+ /* consistency checks */
+ if (ps->tunnel_sock != tunnel->sock)
+ goto end;
+ }
+
+ /* Associate session with its PPPoL2TP socket */
+ ps = l2tp_session_priv(session);
+ ps->owner = current->pid;
+ ps->sock = sk;
+ ps->tunnel_sock = tunnel->sock;
+
+ session->recv_skb = pppol2tp_recv;
+ session->session_close = pppol2tp_session_close;
+#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
+ session->show = pppol2tp_show;
+#endif
+
+ /* We need to know each time a skb is dropped from the reorder
+ * queue.
+ */
+ session->ref = pppol2tp_session_sock_hold;
+ session->deref = pppol2tp_session_sock_put;
+
+ /* If PMTU discovery was enabled, use the MTU that was discovered */
+ dst = sk_dst_get(sk);
+ if (dst != NULL) {
+ u32 pmtu = dst_mtu(__sk_dst_get(sk));
+ if (pmtu != 0)
+ session->mtu = session->mru = pmtu -
+ PPPOL2TP_HEADER_OVERHEAD;
+ dst_release(dst);
+ }
+
+ /* Special case: if source & dest session_id == 0x0000, this
+ * socket is being created to manage the tunnel. Just set up
+ * the internal context for use by ioctl() and sockopt()
+ * handlers.
+ */
+ if ((session->session_id == 0) &&
+ (session->peer_session_id == 0)) {
+ error = 0;
+ goto out_no_ppp;
+ }
+
+ /* The only header we need to worry about is the L2TP
+ * header. This size is different depending on whether
+ * sequence numbers are enabled for the data channel.
+ */
+ po->chan.hdrlen = PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
+
+ po->chan.private = sk;
+ po->chan.ops = &pppol2tp_chan_ops;
+ po->chan.mtu = session->mtu;
+
+ error = ppp_register_net_channel(sock_net(sk), &po->chan);
+ if (error)
+ goto end;
+
+out_no_ppp:
+ /* This is how we get the session context from the socket. */
+ sk->sk_user_data = session;
+ sk->sk_state = PPPOX_CONNECTED;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: created\n", session->name);
+
+end:
+ release_sock(sk);
+
+ return error;
+}
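
The L2TPv2 path through this handler maps directly onto a small amount of userspace code. A sketch, assuming a tunnel UDP socket already connected to the peer on tunnel_fd; the helper name and all ids are illustrative:

    #include <string.h>
    #include <sys/socket.h>
    #include <linux/if_pppox.h>
    #include <linux/if_pppol2tp.h>

    /* Attach a PPPoX socket (pppox_fd) to an L2TPv2 session carried by
     * the tunnel UDP socket tunnel_fd. Tunnel/session ids are examples. */
    int attach_session(int pppox_fd, int tunnel_fd)
    {
            struct sockaddr_pppol2tp sax;

            memset(&sax, 0, sizeof(sax));
            sax.sa_family = AF_PPPOX;
            sax.sa_protocol = PX_PROTO_OL2TP;
            sax.pppol2tp.fd = tunnel_fd;    /* tunnel UDP socket */
            sax.pppol2tp.s_tunnel = 1;      /* local tunnel id */
            sax.pppol2tp.d_tunnel = 2;      /* peer tunnel id */
            sax.pppol2tp.s_session = 1;     /* local session id */
            sax.pppol2tp.d_session = 2;     /* peer session id */

            return connect(pppox_fd, (struct sockaddr *)&sax, sizeof(sax));
    }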
+
+#ifdef CONFIG_L2TP_V3
+
+/* Called when creating sessions via the netlink interface.
+ */
+static int pppol2tp_session_create(struct net *net, u32 tunnel_id,
+				   u32 session_id, u32 peer_session_id,
+				   struct l2tp_session_cfg *cfg)
+{
+ int error;
+ struct l2tp_tunnel *tunnel;
+ struct l2tp_session *session;
+ struct pppol2tp_session *ps;
+
+ tunnel = l2tp_tunnel_find(net, tunnel_id);
+
+ /* Error if we can't find the tunnel */
+ error = -ENOENT;
+ if (tunnel == NULL)
+ goto out;
+
+ /* Error if tunnel socket is not prepped */
+ if (tunnel->sock == NULL)
+ goto out;
+
+ /* Check that this session doesn't already exist */
+ error = -EEXIST;
+ session = l2tp_session_find(net, tunnel, session_id);
+ if (session != NULL)
+ goto out;
+
+ /* Default MTU values. */
+ if (cfg->mtu == 0)
+ cfg->mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD;
+ if (cfg->mru == 0)
+ cfg->mru = cfg->mtu;
+
+ /* Allocate and initialize a new session context. */
+ error = -ENOMEM;
+ session = l2tp_session_create(sizeof(struct pppol2tp_session),
+ tunnel, session_id,
+ peer_session_id, cfg);
+ if (session == NULL)
+ goto out;
+
+ ps = l2tp_session_priv(session);
+ ps->tunnel_sock = tunnel->sock;
+
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: created\n", session->name);
+
+ error = 0;
+
+out:
+ return error;
+}
+
+/* Called when deleting sessions via the netlink interface.
+ */
+static int pppol2tp_session_delete(struct l2tp_session *session)
+{
+ struct pppol2tp_session *ps = l2tp_session_priv(session);
+
+ if (ps->sock == NULL)
+ l2tp_session_dec_refcount(session);
+
+ return 0;
+}
+
+#endif /* CONFIG_L2TP_V3 */
+
+/* getname() support.
+ */
+static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
+ int *usockaddr_len, int peer)
+{
+ int len = 0;
+ int error = 0;
+ struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel;
+ struct sock *sk = sock->sk;
+ struct inet_sock *inet;
+ struct pppol2tp_session *pls;
+
+ error = -ENOTCONN;
+ if (sk == NULL)
+ goto end;
+ if (sk->sk_state != PPPOX_CONNECTED)
+ goto end;
+
+ error = -EBADF;
+ session = pppol2tp_sock_to_session(sk);
+ if (session == NULL)
+ goto end;
+
+ pls = l2tp_session_priv(session);
+ tunnel = l2tp_sock_to_tunnel(pls->tunnel_sock);
+ if (tunnel == NULL) {
+ error = -EBADF;
+ goto end_put_sess;
+ }
+
+ inet = inet_sk(sk);
+ if (tunnel->version == 2) {
+ struct sockaddr_pppol2tp sp;
+ len = sizeof(sp);
+ memset(&sp, 0, len);
+ sp.sa_family = AF_PPPOX;
+ sp.sa_protocol = PX_PROTO_OL2TP;
+ sp.pppol2tp.fd = tunnel->fd;
+ sp.pppol2tp.pid = pls->owner;
+ sp.pppol2tp.s_tunnel = tunnel->tunnel_id;
+ sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id;
+ sp.pppol2tp.s_session = session->session_id;
+ sp.pppol2tp.d_session = session->peer_session_id;
+ sp.pppol2tp.addr.sin_family = AF_INET;
+ sp.pppol2tp.addr.sin_port = inet->inet_dport;
+ sp.pppol2tp.addr.sin_addr.s_addr = inet->inet_daddr;
+ memcpy(uaddr, &sp, len);
+ } else if (tunnel->version == 3) {
+ struct sockaddr_pppol2tpv3 sp;
+ len = sizeof(sp);
+ memset(&sp, 0, len);
+ sp.sa_family = AF_PPPOX;
+ sp.sa_protocol = PX_PROTO_OL2TP;
+ sp.pppol2tp.fd = tunnel->fd;
+ sp.pppol2tp.pid = pls->owner;
+ sp.pppol2tp.s_tunnel = tunnel->tunnel_id;
+ sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id;
+ sp.pppol2tp.s_session = session->session_id;
+ sp.pppol2tp.d_session = session->peer_session_id;
+ sp.pppol2tp.addr.sin_family = AF_INET;
+ sp.pppol2tp.addr.sin_port = inet->inet_dport;
+ sp.pppol2tp.addr.sin_addr.s_addr = inet->inet_daddr;
+ memcpy(uaddr, &sp, len);
+ }
+
+	*usockaddr_len = len;
+	error = 0;
+
+	sock_put(pls->tunnel_sock);
+end_put_sess:
+	sock_put(sk);
+
+end:
+ return error;
+}
+
+/****************************************************************************
+ * ioctl() handlers.
+ *
+ * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
+ * sockets. However, in order to control kernel tunnel features, we allow
+ * userspace to create a special "tunnel" PPPoX socket which is used for
+ * control only. Tunnel PPPoX sockets have session_id == 0 and simply allow
+ * the user application to issue L2TP setsockopt(), getsockopt() and ioctl()
+ * calls.
+ ****************************************************************************/
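
Concretely, such a control socket is created like a session socket but with both session ids left at zero. A sketch under the same assumptions as the connect() example above (names illustrative):

    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/if_pppox.h>
    #include <linux/if_pppol2tp.h>

    /* Create the special tunnel-management socket: s_session and
     * d_session stay 0 (memset), so connect() only sets up the internal
     * context used by ioctl()/[gs]etsockopt(). Returns the fd or -1. */
    int open_tunnel_mgmt_socket(int tunnel_fd, int our_tid, int peer_tid)
    {
            struct sockaddr_pppol2tp sax;
            int fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);

            if (fd < 0)
                    return -1;

            memset(&sax, 0, sizeof(sax));
            sax.sa_family = AF_PPPOX;
            sax.sa_protocol = PX_PROTO_OL2TP;
            sax.pppol2tp.fd = tunnel_fd;
            sax.pppol2tp.s_tunnel = our_tid;
            sax.pppol2tp.d_tunnel = peer_tid;

            if (connect(fd, (struct sockaddr *)&sax, sizeof(sax)) < 0) {
                    close(fd);
                    return -1;
            }
            return fd;
    }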
+
+static void pppol2tp_copy_stats(struct pppol2tp_ioc_stats *dest,
+ struct l2tp_stats *stats)
+{
+ dest->tx_packets = stats->tx_packets;
+ dest->tx_bytes = stats->tx_bytes;
+ dest->tx_errors = stats->tx_errors;
+ dest->rx_packets = stats->rx_packets;
+ dest->rx_bytes = stats->rx_bytes;
+ dest->rx_seq_discards = stats->rx_seq_discards;
+ dest->rx_oos_packets = stats->rx_oos_packets;
+ dest->rx_errors = stats->rx_errors;
+}
+
+/* Session ioctl helper.
+ */
+static int pppol2tp_session_ioctl(struct l2tp_session *session,
+ unsigned int cmd, unsigned long arg)
+{
+ struct ifreq ifr;
+ int err = 0;
+ struct sock *sk;
+ int val = (int) arg;
+ struct pppol2tp_session *ps = l2tp_session_priv(session);
+ struct l2tp_tunnel *tunnel = session->tunnel;
+ struct pppol2tp_ioc_stats stats;
+
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
+ "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n",
+ session->name, cmd, arg);
+
+ sk = ps->sock;
+ sock_hold(sk);
+
+ switch (cmd) {
+ case SIOCGIFMTU:
+ err = -ENXIO;
+ if (!(sk->sk_state & PPPOX_CONNECTED))
+ break;
+
+ err = -EFAULT;
+ if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
+ break;
+ ifr.ifr_mtu = session->mtu;
+ if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq)))
+ break;
+
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get mtu=%d\n", session->name, session->mtu);
+ err = 0;
+ break;
+
+ case SIOCSIFMTU:
+ err = -ENXIO;
+ if (!(sk->sk_state & PPPOX_CONNECTED))
+ break;
+
+ err = -EFAULT;
+ if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
+ break;
+
+ session->mtu = ifr.ifr_mtu;
+
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: set mtu=%d\n", session->name, session->mtu);
+ err = 0;
+ break;
+
+ case PPPIOCGMRU:
+ err = -ENXIO;
+ if (!(sk->sk_state & PPPOX_CONNECTED))
+ break;
+
+ err = -EFAULT;
+ if (put_user(session->mru, (int __user *) arg))
+ break;
+
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get mru=%d\n", session->name, session->mru);
+ err = 0;
+ break;
+
+ case PPPIOCSMRU:
+ err = -ENXIO;
+ if (!(sk->sk_state & PPPOX_CONNECTED))
+ break;
+
+ err = -EFAULT;
+ if (get_user(val, (int __user *) arg))
+ break;
+
+ session->mru = val;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: set mru=%d\n", session->name, session->mru);
+ err = 0;
+ break;
+
+ case PPPIOCGFLAGS:
+ err = -EFAULT;
+ if (put_user(ps->flags, (int __user *) arg))
+ break;
+
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get flags=%d\n", session->name, ps->flags);
+ err = 0;
+ break;
+
+ case PPPIOCSFLAGS:
+ err = -EFAULT;
+ if (get_user(val, (int __user *) arg))
+ break;
+ ps->flags = val;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: set flags=%d\n", session->name, ps->flags);
+ err = 0;
+ break;
+
+ case PPPIOCGL2TPSTATS:
+ err = -ENXIO;
+ if (!(sk->sk_state & PPPOX_CONNECTED))
+ break;
+
+ memset(&stats, 0, sizeof(stats));
+ stats.tunnel_id = tunnel->tunnel_id;
+ stats.session_id = session->session_id;
+ pppol2tp_copy_stats(&stats, &session->stats);
+ if (copy_to_user((void __user *) arg, &stats,
+ sizeof(stats)))
+ break;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get L2TP stats\n", session->name);
+ err = 0;
+ break;
+
+ default:
+ err = -ENOSYS;
+ break;
+ }
+
+ sock_put(sk);
+
+ return err;
+}
+
+/* Tunnel ioctl helper.
+ *
+ * Note the special handling for PPPIOCGL2TPSTATS below. If the ioctl data
+ * specifies a session_id, the session ioctl handler is called. This allows an
+ * application to retrieve session stats via a tunnel socket.
+ */
+static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
+ unsigned int cmd, unsigned long arg)
+{
+ int err = 0;
+ struct sock *sk;
+ struct pppol2tp_ioc_stats stats;
+
+ PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
+ "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n",
+ tunnel->name, cmd, arg);
+
+ sk = tunnel->sock;
+ sock_hold(sk);
+
+ switch (cmd) {
+ case PPPIOCGL2TPSTATS:
+ err = -ENXIO;
+ if (!(sk->sk_state & PPPOX_CONNECTED))
+ break;
+
+ if (copy_from_user(&stats, (void __user *) arg,
+ sizeof(stats))) {
+ err = -EFAULT;
+ break;
+ }
+ if (stats.session_id != 0) {
+ /* resend to session ioctl handler */
+ struct l2tp_session *session =
+ l2tp_session_find(sock_net(sk), tunnel, stats.session_id);
+ if (session != NULL)
+ err = pppol2tp_session_ioctl(session, cmd, arg);
+ else
+ err = -EBADR;
+ break;
+ }
+#ifdef CONFIG_XFRM
+ stats.using_ipsec = (sk->sk_policy[0] || sk->sk_policy[1]) ? 1 : 0;
+#endif
+ pppol2tp_copy_stats(&stats, &tunnel->stats);
+ if (copy_to_user((void __user *) arg, &stats, sizeof(stats))) {
+ err = -EFAULT;
+ break;
+ }
+ PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get L2TP stats\n", tunnel->name);
+ err = 0;
+ break;
+
+ default:
+ err = -ENOSYS;
+ break;
+ }
+
+ sock_put(sk);
+
+ return err;
+}
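
The session_id redirection above means per-session statistics can be read without holding a session socket. A sketch, assuming a connected tunnel-management socket on tunnel_fd; PPPIOCGL2TPSTATS historically comes from <linux/if_ppp.h> and the stats struct from <linux/if_pppol2tp.h>:

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/if_ppp.h>
    #include <linux/if_pppol2tp.h>

    /* Fetch stats for one session through the tunnel control socket.
     * A non-zero session_id makes the tunnel ioctl handler re-route
     * the request to pppol2tp_session_ioctl(). */
    int print_session_stats(int tunnel_fd, __u16 session_id)
    {
            struct pppol2tp_ioc_stats stats;

            memset(&stats, 0, sizeof(stats));
            stats.session_id = session_id;

            if (ioctl(tunnel_fd, PPPIOCGL2TPSTATS, &stats) < 0)
                    return -1;

            printf("tx %llu rx %llu\n",
                   (unsigned long long)stats.tx_packets,
                   (unsigned long long)stats.rx_packets);
            return 0;
    }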
+
+/* Main ioctl() handler.
+ * Dispatch to tunnel or session helpers depending on the socket.
+ */
+static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd,
+ unsigned long arg)
+{
+ struct sock *sk = sock->sk;
+ struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel;
+ struct pppol2tp_session *ps;
+ int err;
+
+ if (!sk)
+ return 0;
+
+ err = -EBADF;
+ if (sock_flag(sk, SOCK_DEAD) != 0)
+ goto end;
+
+ err = -ENOTCONN;
+ if ((sk->sk_user_data == NULL) ||
+ (!(sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND))))
+ goto end;
+
+ /* Get session context from the socket */
+ err = -EBADF;
+ session = pppol2tp_sock_to_session(sk);
+ if (session == NULL)
+ goto end;
+
+	/* Special case: if the session's session_id is zero, treat the
+	 * ioctl as a tunnel ioctl.
+	 */
+ ps = l2tp_session_priv(session);
+ if ((session->session_id == 0) &&
+ (session->peer_session_id == 0)) {
+ err = -EBADF;
+ tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock);
+ if (tunnel == NULL)
+ goto end_put_sess;
+
+ err = pppol2tp_tunnel_ioctl(tunnel, cmd, arg);
+ sock_put(ps->tunnel_sock);
+ goto end_put_sess;
+ }
+
+ err = pppol2tp_session_ioctl(session, cmd, arg);
+
+end_put_sess:
+ sock_put(sk);
+end:
+ return err;
+}
+
+/*****************************************************************************
+ * setsockopt() / getsockopt() support.
+ *
+ * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
+ * sockets. In order to control kernel tunnel features, we allow userspace to
+ * create a special "tunnel" PPPoX socket which is used for control only.
+ * Tunnel PPPoX sockets have session_id == 0 and simply allow the user
+ * application to issue L2TP setsockopt(), getsockopt() and ioctl() calls.
+ *****************************************************************************/
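
For example, enabling data-channel sequence numbers from userspace is a single setsockopt() call. A sketch; SOL_PPPOL2TP and the PPPOL2TP_SO_* names come from <linux/if_pppol2tp.h>, the helper name is illustrative:

    #include <sys/socket.h>
    #include <linux/if_pppol2tp.h>

    /* Turn on transmit sequence numbers for a connected session socket;
     * this flips session->send_seq and the PPP header length below. */
    int enable_send_seq(int session_fd)
    {
            int one = 1;

            return setsockopt(session_fd, SOL_PPPOL2TP, PPPOL2TP_SO_SENDSEQ,
                              &one, sizeof(one));
    }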
+
+/* Tunnel setsockopt() helper.
+ */
+static int pppol2tp_tunnel_setsockopt(struct sock *sk,
+ struct l2tp_tunnel *tunnel,
+ int optname, int val)
+{
+ int err = 0;
+
+ switch (optname) {
+ case PPPOL2TP_SO_DEBUG:
+ tunnel->debug = val;
+ PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: set debug=%x\n", tunnel->name, tunnel->debug);
+ break;
+
+ default:
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+ return err;
+}
+
+/* Session setsockopt helper.
+ */
+static int pppol2tp_session_setsockopt(struct sock *sk,
+ struct l2tp_session *session,
+ int optname, int val)
+{
+ int err = 0;
+ struct pppol2tp_session *ps = l2tp_session_priv(session);
+
+ switch (optname) {
+ case PPPOL2TP_SO_RECVSEQ:
+ if ((val != 0) && (val != 1)) {
+ err = -EINVAL;
+ break;
+ }
+ session->recv_seq = val ? -1 : 0;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: set recv_seq=%d\n", session->name, session->recv_seq);
+ break;
+
+ case PPPOL2TP_SO_SENDSEQ:
+ if ((val != 0) && (val != 1)) {
+ err = -EINVAL;
+ break;
+ }
+ session->send_seq = val ? -1 : 0;
+ {
+ struct sock *ssk = ps->sock;
+ struct pppox_sock *po = pppox_sk(ssk);
+ po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ :
+ PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
+ }
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: set send_seq=%d\n", session->name, session->send_seq);
+ break;
+
+ case PPPOL2TP_SO_LNSMODE:
+ if ((val != 0) && (val != 1)) {
+ err = -EINVAL;
+ break;
+ }
+ session->lns_mode = val ? -1 : 0;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: set lns_mode=%d\n", session->name, session->lns_mode);
+ break;
+
+ case PPPOL2TP_SO_DEBUG:
+ session->debug = val;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: set debug=%x\n", session->name, session->debug);
+ break;
+
+ case PPPOL2TP_SO_REORDERTO:
+ session->reorder_timeout = msecs_to_jiffies(val);
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: set reorder_timeout=%d\n", session->name, session->reorder_timeout);
+ break;
+
+ default:
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+ return err;
+}
+
+/* Main setsockopt() entry point.
+ * Performs API checks, then calls either the tunnel or session
+ * setsockopt handler, according to whether the PPPoL2TP socket is for
+ * a regular session or the special tunnel type.
+ */
+static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, unsigned int optlen)
+{
+ struct sock *sk = sock->sk;
+ struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel;
+ struct pppol2tp_session *ps;
+ int val;
+ int err;
+
+ if (level != SOL_PPPOL2TP)
+ return udp_prot.setsockopt(sk, level, optname, optval, optlen);
+
+ if (optlen < sizeof(int))
+ return -EINVAL;
+
+ if (get_user(val, (int __user *)optval))
+ return -EFAULT;
+
+ err = -ENOTCONN;
+ if (sk->sk_user_data == NULL)
+ goto end;
+
+ /* Get session context from the socket */
+ err = -EBADF;
+ session = pppol2tp_sock_to_session(sk);
+ if (session == NULL)
+ goto end;
+
+ /* Special case: if session_id == 0x0000, treat as operation on tunnel
+ */
+ ps = l2tp_session_priv(session);
+ if ((session->session_id == 0) &&
+ (session->peer_session_id == 0)) {
+ err = -EBADF;
+ tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock);
+ if (tunnel == NULL)
+ goto end_put_sess;
+
+ err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val);
+ sock_put(ps->tunnel_sock);
+ } else
+ err = pppol2tp_session_setsockopt(sk, session, optname, val);
+
+end_put_sess:
+ sock_put(sk);
+end:
+ return err;
+}
+
+/* Tunnel getsockopt helper. Called with sock locked.
+ */
+static int pppol2tp_tunnel_getsockopt(struct sock *sk,
+ struct l2tp_tunnel *tunnel,
+ int optname, int *val)
+{
+ int err = 0;
+
+ switch (optname) {
+ case PPPOL2TP_SO_DEBUG:
+ *val = tunnel->debug;
+ PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get debug=%x\n", tunnel->name, tunnel->debug);
+ break;
+
+ default:
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+ return err;
+}
+
+/* Session getsockopt helper. Called with sock locked.
+ */
+static int pppol2tp_session_getsockopt(struct sock *sk,
+ struct l2tp_session *session,
+ int optname, int *val)
+{
+ int err = 0;
+
+ switch (optname) {
+ case PPPOL2TP_SO_RECVSEQ:
+ *val = session->recv_seq;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get recv_seq=%d\n", session->name, *val);
+ break;
+
+ case PPPOL2TP_SO_SENDSEQ:
+ *val = session->send_seq;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get send_seq=%d\n", session->name, *val);
+ break;
+
+ case PPPOL2TP_SO_LNSMODE:
+ *val = session->lns_mode;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get lns_mode=%d\n", session->name, *val);
+ break;
+
+ case PPPOL2TP_SO_DEBUG:
+ *val = session->debug;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get debug=%d\n", session->name, *val);
+ break;
+
+ case PPPOL2TP_SO_REORDERTO:
+ *val = (int) jiffies_to_msecs(session->reorder_timeout);
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get reorder_timeout=%d\n", session->name, *val);
+ break;
+
+ default:
+ err = -ENOPROTOOPT;
+ }
+
+ return err;
+}
+
+/* Main getsockopt() entry point.
+ * Performs API checks, then calls either the tunnel or session
+ * getsockopt handler, according to whether the PPPoX socket is for a
+ * regular session or the special tunnel type.
+ */
+static int pppol2tp_getsockopt(struct socket *sock, int level,
+ int optname, char __user *optval, int __user *optlen)
+{
+ struct sock *sk = sock->sk;
+ struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel;
+ int val, len;
+ int err;
+ struct pppol2tp_session *ps;
+
+ if (level != SOL_PPPOL2TP)
+ return udp_prot.getsockopt(sk, level, optname, optval, optlen);
+
+ if (get_user(len, (int __user *) optlen))
+ return -EFAULT;
+
+	if (len < 0)
+		return -EINVAL;
+
+	len = min_t(unsigned int, len, sizeof(int));
+
+ err = -ENOTCONN;
+ if (sk->sk_user_data == NULL)
+ goto end;
+
+ /* Get the session context */
+ err = -EBADF;
+ session = pppol2tp_sock_to_session(sk);
+ if (session == NULL)
+ goto end;
+
+ /* Special case: if session_id == 0x0000, treat as operation on tunnel */
+ ps = l2tp_session_priv(session);
+ if ((session->session_id == 0) &&
+ (session->peer_session_id == 0)) {
+ err = -EBADF;
+ tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock);
+ if (tunnel == NULL)
+ goto end_put_sess;
+
+ err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val);
+ sock_put(ps->tunnel_sock);
+	} else
+		err = pppol2tp_session_getsockopt(sk, session, optname, &val);
+	if (err)
+		goto end_put_sess;
+
+	err = -EFAULT;
+ if (put_user(len, (int __user *) optlen))
+ goto end_put_sess;
+
+ if (copy_to_user((void __user *) optval, &val, len))
+ goto end_put_sess;
+
+ err = 0;
+
+end_put_sess:
+ sock_put(sk);
+end:
+ return err;
+}
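
The matching read side is symmetric; a sketch for the reorder timeout, which the session helper above reports in milliseconds:

    #include <sys/socket.h>
    #include <linux/if_pppol2tp.h>

    /* Read back the session reorder timeout in milliseconds. */
    int get_reorder_timeout(int session_fd, int *ms)
    {
            socklen_t len = sizeof(*ms);

            return getsockopt(session_fd, SOL_PPPOL2TP, PPPOL2TP_SO_REORDERTO,
                              ms, &len);
    }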
+
+/*****************************************************************************
+ * /proc filesystem for debug
+ * Since the original pppol2tp driver provided /proc/net/pppol2tp for
+ * L2TPv2, we dump only L2TPv2 tunnels and sessions here.
+ *****************************************************************************/
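
The listing is plain text, so inspecting it from a program needs nothing beyond stdio; a sketch:

    #include <stdio.h>

    /* Dump the driver's L2TPv2 tunnel/session listing to stdout. */
    int dump_pppol2tp_proc(void)
    {
            FILE *f = fopen("/proc/net/pppol2tp", "r");
            char line[256];

            if (!f)
                    return -1;
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);
            fclose(f);
            return 0;
    }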
+
+static unsigned int pppol2tp_net_id;
+
+#ifdef CONFIG_PROC_FS
+
+struct pppol2tp_seq_data {
+ struct seq_net_private p;
+ int tunnel_idx; /* current tunnel */
+ int session_idx; /* index of session within current tunnel */
+ struct l2tp_tunnel *tunnel;
+ struct l2tp_session *session; /* NULL means get next tunnel */
+};
+
+static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd)
+{
+ for (;;) {
+ pd->tunnel = l2tp_tunnel_find_nth(net, pd->tunnel_idx);
+ pd->tunnel_idx++;
+
+ if (pd->tunnel == NULL)
+ break;
+
+ /* Ignore L2TPv3 tunnels */
+ if (pd->tunnel->version < 3)
+ break;
+ }
+}
+
+static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd)
+{
+ pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
+ pd->session_idx++;
+
+ if (pd->session == NULL) {
+ pd->session_idx = 0;
+ pppol2tp_next_tunnel(net, pd);
+ }
+}
+
+static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs)
+{
+ struct pppol2tp_seq_data *pd = SEQ_START_TOKEN;
+ loff_t pos = *offs;
+ struct net *net;
+
+ if (!pos)
+ goto out;
+
+ BUG_ON(m->private == NULL);
+ pd = m->private;
+ net = seq_file_net(m);
+
+ if (pd->tunnel == NULL)
+ pppol2tp_next_tunnel(net, pd);
+ else
+ pppol2tp_next_session(net, pd);
+
+	/* NULL tunnel and session indicate the end of the list */
+ if ((pd->tunnel == NULL) && (pd->session == NULL))
+ pd = NULL;
+
+out:
+ return pd;
+}
+
+static void *pppol2tp_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return NULL;
+}
+
+static void pppol2tp_seq_stop(struct seq_file *p, void *v)
+{
+ /* nothing to do */
+}
+
+static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)
+{
+ struct l2tp_tunnel *tunnel = v;
+
+ seq_printf(m, "\nTUNNEL '%s', %c %d\n",
+ tunnel->name,
+ (tunnel == tunnel->sock->sk_user_data) ? 'Y' : 'N',
+ atomic_read(&tunnel->ref_count) - 1);
+ seq_printf(m, " %08x %llu/%llu/%llu %llu/%llu/%llu\n",
+ tunnel->debug,
+ (unsigned long long)tunnel->stats.tx_packets,
+ (unsigned long long)tunnel->stats.tx_bytes,
+ (unsigned long long)tunnel->stats.tx_errors,
+ (unsigned long long)tunnel->stats.rx_packets,
+ (unsigned long long)tunnel->stats.rx_bytes,
+ (unsigned long long)tunnel->stats.rx_errors);
+}
+
+static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
+{
+ struct l2tp_session *session = v;
+ struct l2tp_tunnel *tunnel = session->tunnel;
+ struct pppol2tp_session *ps = l2tp_session_priv(session);
+ struct pppox_sock *po = pppox_sk(ps->sock);
+ u32 ip = 0;
+ u16 port = 0;
+
+ if (tunnel->sock) {
+ struct inet_sock *inet = inet_sk(tunnel->sock);
+ ip = ntohl(inet->inet_saddr);
+ port = ntohs(inet->inet_sport);
+ }
+
+ seq_printf(m, " SESSION '%s' %08X/%d %04X/%04X -> "
+ "%04X/%04X %d %c\n",
+ session->name, ip, port,
+ tunnel->tunnel_id,
+ session->session_id,
+ tunnel->peer_tunnel_id,
+ session->peer_session_id,
+ ps->sock->sk_state,
+ (session == ps->sock->sk_user_data) ?
+ 'Y' : 'N');
+ seq_printf(m, " %d/%d/%c/%c/%s %08x %u\n",
+ session->mtu, session->mru,
+ session->recv_seq ? 'R' : '-',
+ session->send_seq ? 'S' : '-',
+ session->lns_mode ? "LNS" : "LAC",
+ session->debug,
+ jiffies_to_msecs(session->reorder_timeout));
+ seq_printf(m, " %hu/%hu %llu/%llu/%llu %llu/%llu/%llu\n",
+ session->nr, session->ns,
+ (unsigned long long)session->stats.tx_packets,
+ (unsigned long long)session->stats.tx_bytes,
+ (unsigned long long)session->stats.tx_errors,
+ (unsigned long long)session->stats.rx_packets,
+ (unsigned long long)session->stats.rx_bytes,
+ (unsigned long long)session->stats.rx_errors);
+
+ if (po)
+ seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan));
+}
+
+static int pppol2tp_seq_show(struct seq_file *m, void *v)
+{
+ struct pppol2tp_seq_data *pd = v;
+
+ /* display header on line 1 */
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(m, "PPPoL2TP driver info, " PPPOL2TP_DRV_VERSION "\n");
+ seq_puts(m, "TUNNEL name, user-data-ok session-count\n");
+ seq_puts(m, " debug tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
+ seq_puts(m, " SESSION name, addr/port src-tid/sid "
+ "dest-tid/sid state user-data-ok\n");
+ seq_puts(m, " mtu/mru/rcvseq/sendseq/lns debug reorderto\n");
+ seq_puts(m, " nr/ns tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
+ goto out;
+ }
+
+ /* Show the tunnel or session context.
+ */
+ if (pd->session == NULL)
+ pppol2tp_seq_tunnel_show(m, pd->tunnel);
+ else
+ pppol2tp_seq_session_show(m, pd->session);
+
+out:
+ return 0;
+}
+
+static const struct seq_operations pppol2tp_seq_ops = {
+ .start = pppol2tp_seq_start,
+ .next = pppol2tp_seq_next,
+ .stop = pppol2tp_seq_stop,
+ .show = pppol2tp_seq_show,
+};
+
+/* Called when our /proc file is opened. We allocate data for use when
+ * iterating our tunnel / session contexts and store it in the private
+ * data of the seq_file.
+ */
+static int pppol2tp_proc_open(struct inode *inode, struct file *file)
+{
+ return seq_open_net(inode, file, &pppol2tp_seq_ops,
+ sizeof(struct pppol2tp_seq_data));
+}
+
+static const struct file_operations pppol2tp_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = pppol2tp_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_net,
+};
+
+#endif /* CONFIG_PROC_FS */
+
+/*****************************************************************************
+ * Network namespace
+ *****************************************************************************/
+
+static __net_init int pppol2tp_init_net(struct net *net)
+{
+ struct proc_dir_entry *pde;
+ int err = 0;
+
+ pde = proc_net_fops_create(net, "pppol2tp", S_IRUGO, &pppol2tp_proc_fops);
+ if (!pde) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+out:
+ return err;
+}
+
+static __net_exit void pppol2tp_exit_net(struct net *net)
+{
+ proc_net_remove(net, "pppol2tp");
+}
+
+static struct pernet_operations pppol2tp_net_ops = {
+ .init = pppol2tp_init_net,
+ .exit = pppol2tp_exit_net,
+ .id = &pppol2tp_net_id,
+};
+
+/*****************************************************************************
+ * Init and cleanup
+ *****************************************************************************/
+
+static const struct proto_ops pppol2tp_ops = {
+ .family = AF_PPPOX,
+ .owner = THIS_MODULE,
+ .release = pppol2tp_release,
+ .bind = sock_no_bind,
+ .connect = pppol2tp_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .getname = pppol2tp_getname,
+ .poll = datagram_poll,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .setsockopt = pppol2tp_setsockopt,
+ .getsockopt = pppol2tp_getsockopt,
+ .sendmsg = pppol2tp_sendmsg,
+ .recvmsg = pppol2tp_recvmsg,
+ .mmap = sock_no_mmap,
+ .ioctl = pppox_ioctl,
+};
+
+static struct pppox_proto pppol2tp_proto = {
+ .create = pppol2tp_create,
+ .ioctl = pppol2tp_ioctl
+};
+
+#ifdef CONFIG_L2TP_V3
+
+static const struct l2tp_nl_cmd_ops pppol2tp_nl_cmd_ops = {
+ .session_create = pppol2tp_session_create,
+ .session_delete = pppol2tp_session_delete,
+};
+
+#endif /* CONFIG_L2TP_V3 */
+
+static int __init pppol2tp_init(void)
+{
+ int err;
+
+ err = register_pernet_device(&pppol2tp_net_ops);
+ if (err)
+ goto out;
+
+ err = proto_register(&pppol2tp_sk_proto, 0);
+ if (err)
+ goto out_unregister_pppol2tp_pernet;
+
+ err = register_pppox_proto(PX_PROTO_OL2TP, &pppol2tp_proto);
+ if (err)
+ goto out_unregister_pppol2tp_proto;
+
+#ifdef CONFIG_L2TP_V3
+ err = l2tp_nl_register_ops(L2TP_PWTYPE_PPP, &pppol2tp_nl_cmd_ops);
+ if (err)
+ goto out_unregister_pppox;
+#endif
+
+ printk(KERN_INFO "PPPoL2TP kernel driver, %s\n",
+ PPPOL2TP_DRV_VERSION);
+
+out:
+ return err;
+
+#ifdef CONFIG_L2TP_V3
+out_unregister_pppox:
+ unregister_pppox_proto(PX_PROTO_OL2TP);
+#endif
+out_unregister_pppol2tp_proto:
+ proto_unregister(&pppol2tp_sk_proto);
+out_unregister_pppol2tp_pernet:
+ unregister_pernet_device(&pppol2tp_net_ops);
+ goto out;
+}
+
+static void __exit pppol2tp_exit(void)
+{
+#ifdef CONFIG_L2TP_V3
+ l2tp_nl_unregister_ops(L2TP_PWTYPE_PPP);
+#endif
+ unregister_pppox_proto(PX_PROTO_OL2TP);
+ proto_unregister(&pppol2tp_sk_proto);
+ unregister_pernet_device(&pppol2tp_net_ops);
+}
+
+module_init(pppol2tp_init);
+module_exit(pppol2tp_exit);
+
+MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
+MODULE_DESCRIPTION("PPP over L2TP over UDP");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(PPPOL2TP_DRV_VERSION);
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 2db6a9f75913..023ba820236f 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -536,7 +536,7 @@ static int llc_ui_wait_for_disc(struct sock *sk, long timeout)
int rc = 0;
while (1) {
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
if (sk_wait_event(sk, &timeout, sk->sk_state == TCP_CLOSE))
break;
rc = -ERESTARTSYS;
@@ -547,7 +547,7 @@ static int llc_ui_wait_for_disc(struct sock *sk, long timeout)
break;
rc = 0;
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
return rc;
}
@@ -556,13 +556,13 @@ static int llc_ui_wait_for_conn(struct sock *sk, long timeout)
DEFINE_WAIT(wait);
while (1) {
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
if (sk_wait_event(sk, &timeout, sk->sk_state != TCP_SYN_SENT))
break;
if (signal_pending(current) || !timeout)
break;
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
return timeout;
}
@@ -573,7 +573,7 @@ static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout)
int rc;
while (1) {
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
rc = 0;
if (sk_wait_event(sk, &timeout,
(sk->sk_shutdown & RCV_SHUTDOWN) ||
@@ -588,7 +588,7 @@ static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout)
if (!timeout)
break;
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
return rc;
}
diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c
index 78167e81dfeb..2bb0ddff8c0f 100644
--- a/net/llc/llc_core.c
+++ b/net/llc/llc_core.c
@@ -144,12 +144,6 @@ static struct packet_type llc_tr_packet_type __read_mostly = {
static int __init llc_init(void)
{
- struct net_device *dev;
-
- dev = first_net_device(&init_net);
- if (dev != NULL)
- dev = next_net_device(dev);
-
dev_add_pack(&llc_packet_type);
dev_add_pack(&llc_tr_packet_type);
return 0;
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index a432f0ec051c..94e7fca75b85 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -31,7 +31,7 @@ static int llc_mac_header_len(unsigned short devtype)
case ARPHRD_ETHER:
case ARPHRD_LOOPBACK:
return sizeof(struct ethhdr);
-#ifdef CONFIG_TR
+#if defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
case ARPHRD_IEEE802_TR:
return sizeof(struct trh_hdr);
#endif
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index a952b7f8c648..8a91f6c0bb18 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -15,8 +15,12 @@ comment "CFG80211 needs to be enabled for MAC80211"
if MAC80211 != n
+config MAC80211_HAS_RC
+ def_bool n
+
config MAC80211_RC_PID
bool "PID controller based rate control algorithm" if EMBEDDED
+ select MAC80211_HAS_RC
---help---
This option enables a TX rate control algorithm for
mac80211 that uses a PID controller to select the TX
@@ -24,12 +28,14 @@ config MAC80211_RC_PID
config MAC80211_RC_MINSTREL
bool "Minstrel" if EMBEDDED
+ select MAC80211_HAS_RC
default y
---help---
This option enables the 'minstrel' TX rate control algorithm
choice
prompt "Default rate control algorithm"
+ depends on MAC80211_HAS_RC
default MAC80211_RC_DEFAULT_MINSTREL
---help---
This option selects the default rate control algorithm
@@ -62,6 +68,9 @@ config MAC80211_RC_DEFAULT
endif
+comment "Some wireless drivers require a rate control algorithm"
+ depends on MAC80211_HAS_RC=n
+
config MAC80211_MESH
bool "Enable mac80211 mesh networking (pre-802.11s) support"
depends on MAC80211 && EXPERIMENTAL
@@ -212,8 +221,8 @@ config MAC80211_DRIVER_API_TRACER
depends on EVENT_TRACING
help
Say Y here to make mac80211 register with the ftrace
- framework for the driver API -- you can see which
- driver methods it is calling then by looking at the
- trace.
+ framework for the driver API -- you can then see which
+ driver methods it is calling and which API functions
+ drivers are calling by looking at the trace.
- If unsure, say N.
+ If unsure, say Y.
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 04420291e7ad..84b48ba8a77e 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -23,7 +23,8 @@ mac80211-y := \
key.o \
util.o \
wme.o \
- event.o
+ event.o \
+ chan.o
mac80211-$(CONFIG_MAC80211_LEDS) += led.o
mac80211-$(CONFIG_MAC80211_DEBUGFS) += \
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index f9516a27e233..6bb9a9a94960 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -19,23 +19,25 @@
#include "ieee80211_i.h"
#include "driver-ops.h"
-void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
- u16 initiator, u16 reason)
+static void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
+ u16 initiator, u16 reason,
+ bool from_timer)
{
struct ieee80211_local *local = sta->local;
+ struct tid_ampdu_rx *tid_rx;
int i;
- /* check if TID is in operational state */
spin_lock_bh(&sta->lock);
- if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL) {
+
+ /* check if TID is in operational state */
+ if (!sta->ampdu_mlme.tid_active_rx[tid]) {
spin_unlock_bh(&sta->lock);
return;
}
- sta->ampdu_mlme.tid_state_rx[tid] =
- HT_AGG_STATE_REQ_STOP_BA_MSK |
- (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
- spin_unlock_bh(&sta->lock);
+ sta->ampdu_mlme.tid_active_rx[tid] = false;
+
+ tid_rx = sta->ampdu_mlme.tid_rx[tid];
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n",
@@ -47,61 +49,42 @@ void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
printk(KERN_DEBUG "HW problem - can not stop rx "
"aggregation for tid %d\n", tid);
- /* shutdown timer has not expired */
- if (initiator != WLAN_BACK_TIMER)
- del_timer_sync(&sta->ampdu_mlme.tid_rx[tid]->session_timer);
-
/* check if this is a self generated aggregation halt */
- if (initiator == WLAN_BACK_RECIPIENT || initiator == WLAN_BACK_TIMER)
+ if (initiator == WLAN_BACK_RECIPIENT)
ieee80211_send_delba(sta->sdata, sta->sta.addr,
tid, 0, reason);
/* free the reordering buffer */
- for (i = 0; i < sta->ampdu_mlme.tid_rx[tid]->buf_size; i++) {
- if (sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]) {
+ for (i = 0; i < tid_rx->buf_size; i++) {
+ if (tid_rx->reorder_buf[i]) {
/* release the reordered frames */
- dev_kfree_skb(sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]);
- sta->ampdu_mlme.tid_rx[tid]->stored_mpdu_num--;
- sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i] = NULL;
+ dev_kfree_skb(tid_rx->reorder_buf[i]);
+ tid_rx->stored_mpdu_num--;
+ tid_rx->reorder_buf[i] = NULL;
}
}
- spin_lock_bh(&sta->lock);
/* free resources */
- kfree(sta->ampdu_mlme.tid_rx[tid]->reorder_buf);
- kfree(sta->ampdu_mlme.tid_rx[tid]->reorder_time);
-
- if (!sta->ampdu_mlme.tid_rx[tid]->shutdown) {
- kfree(sta->ampdu_mlme.tid_rx[tid]);
- sta->ampdu_mlme.tid_rx[tid] = NULL;
- }
+ kfree(tid_rx->reorder_buf);
+ kfree(tid_rx->reorder_time);
+ sta->ampdu_mlme.tid_rx[tid] = NULL;
- sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_IDLE;
spin_unlock_bh(&sta->lock);
+
+ if (!from_timer)
+ del_timer_sync(&tid_rx->session_timer);
+ kfree(tid_rx);
}
-void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid,
- u16 initiator, u16 reason)
+void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
+ u16 initiator, u16 reason)
{
- struct sta_info *sta;
-
- rcu_read_lock();
-
- sta = sta_info_get(sdata, ra);
- if (!sta) {
- rcu_read_unlock();
- return;
- }
-
- __ieee80211_stop_rx_ba_session(sta, tid, initiator, reason);
-
- rcu_read_unlock();
+ ___ieee80211_stop_rx_ba_session(sta, tid, initiator, reason, false);
}
/*
* After accepting the AddBA Request we activated a timer,
* resetting it after each frame that arrives from the originator.
- * if this timer expires ieee80211_sta_stop_rx_ba_session will be executed.
*/
static void sta_rx_agg_session_timer_expired(unsigned long data)
{
@@ -117,9 +100,8 @@ static void sta_rx_agg_session_timer_expired(unsigned long data)
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid);
#endif
- ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr,
- (u16)*ptid, WLAN_BACK_TIMER,
- WLAN_REASON_QSTA_TIMEOUT);
+ ___ieee80211_stop_rx_ba_session(sta, *ptid, WLAN_BACK_RECIPIENT,
+ WLAN_REASON_QSTA_TIMEOUT, true);
}
static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid,
@@ -194,7 +176,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
status = WLAN_STATUS_REQUEST_DECLINED;
- if (test_sta_flags(sta, WLAN_STA_SUSPEND)) {
+ if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Suspend in progress. "
"Denying ADDBA request\n");
@@ -232,7 +214,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
/* examine state machine */
spin_lock_bh(&sta->lock);
- if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_IDLE) {
+ if (sta->ampdu_mlme.tid_active_rx[tid]) {
#ifdef CONFIG_MAC80211_HT_DEBUG
if (net_ratelimit())
printk(KERN_DEBUG "unexpected AddBA Req from "
@@ -294,7 +276,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
}
/* change state and send addba resp */
- sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_OPERATIONAL;
+ sta->ampdu_mlme.tid_active_rx[tid] = true;
tid_agg_rx->dialog_token = dialog_token;
tid_agg_rx->ssn = start_seq_num;
tid_agg_rx->head_seq_num = start_seq_num;
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 87782a4bb541..c163d0a149f4 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -186,7 +186,7 @@ static void sta_addba_resp_timer_expired(unsigned long data)
spin_unlock_bh(&sta->lock);
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "timer expired on tid %d but we are not "
- "(or no longer) expecting addBA response there",
+ "(or no longer) expecting addBA response there\n",
tid);
#endif
return;
@@ -214,6 +214,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
int ret = 0;
u16 start_seq_num;
+ trace_api_start_tx_ba_session(pubsta, tid);
+
if (WARN_ON(!local->ops->ampdu_action))
return -EINVAL;
@@ -245,7 +247,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
return -EINVAL;
}
- if (test_sta_flags(sta, WLAN_STA_SUSPEND)) {
+ if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Suspend in progress. "
"Denying BA session request\n");
@@ -414,7 +416,7 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
struct sta_info *sta, u16 tid)
{
#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid);
+ printk(KERN_DEBUG "Aggregation is on for tid %d\n", tid);
#endif
spin_lock(&local->ampdu_lock);
@@ -440,6 +442,8 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
struct sta_info *sta;
u8 *state;
+ trace_api_start_tx_ba_cb(sdata, ra, tid);
+
if (tid >= STA_TID_NUM) {
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
@@ -541,6 +545,8 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct ieee80211_local *local = sdata->local;
+ trace_api_stop_tx_ba_session(pubsta, tid, initiator);
+
if (!local->ops->ampdu_action)
return -EINVAL;
@@ -558,6 +564,8 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
struct sta_info *sta;
u8 *state;
+ trace_api_stop_tx_ba_cb(sdata, ra, tid);
+
if (tid >= STA_TID_NUM) {
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
@@ -674,7 +682,7 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
del_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "switched off addBA timer for tid %d \n", tid);
+ printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index edc872e22c9b..c7000a6ca379 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -97,9 +97,6 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
params->mesh_id_len,
params->mesh_id);
- if (sdata->vif.type != NL80211_IFTYPE_MONITOR || !flags)
- return 0;
-
if (type == NL80211_IFTYPE_AP_VLAN &&
params && params->use_4addr == 0)
rcu_assign_pointer(sdata->u.vlan.sta, NULL);
@@ -107,7 +104,9 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
params && params->use_4addr >= 0)
sdata->u.mgd.use_4addr = params->use_4addr;
- sdata->u.mntr_flags = *flags;
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR && flags)
+ sdata->u.mntr_flags = *flags;
+
return 0;
}
@@ -411,6 +410,17 @@ static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
return ret;
}
+static int ieee80211_dump_survey(struct wiphy *wiphy, struct net_device *dev,
+ int idx, struct survey_info *survey)
+{
+ struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+
+ if (!local->ops->get_survey)
+ return -EOPNOTSUPP;
+
+ return drv_get_survey(local, idx, survey);
+}
+
static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev,
u8 *mac, struct station_info *sinfo)
{
@@ -1104,6 +1114,13 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
changed |= BSS_CHANGED_BASIC_RATES;
}
+ if (params->ap_isolate >= 0) {
+ if (params->ap_isolate)
+ sdata->flags |= IEEE80211_SDATA_DONT_BRIDGE_PACKETS;
+ else
+ sdata->flags &= ~IEEE80211_SDATA_DONT_BRIDGE_PACKETS;
+ }
+
ieee80211_bss_info_change_notify(sdata, changed);
return 0;
@@ -1137,19 +1154,47 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy,
return -EINVAL;
}
+ /* enable WMM or activate new settings */
+ local->hw.conf.flags |= IEEE80211_CONF_QOS;
+ drv_config(local, IEEE80211_CONF_CHANGE_QOS);
+
return 0;
}
static int ieee80211_set_channel(struct wiphy *wiphy,
+ struct net_device *netdev,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type)
{
struct ieee80211_local *local = wiphy_priv(wiphy);
+ struct ieee80211_sub_if_data *sdata = NULL;
+
+ if (netdev)
+ sdata = IEEE80211_DEV_TO_SUB_IF(netdev);
+
+ switch (ieee80211_get_channel_mode(local, NULL)) {
+ case CHAN_MODE_HOPPING:
+ return -EBUSY;
+ case CHAN_MODE_FIXED:
+ if (local->oper_channel != chan)
+ return -EBUSY;
+ if (!sdata && local->_oper_channel_type == channel_type)
+ return 0;
+ break;
+ case CHAN_MODE_UNDEFINED:
+ break;
+ }
local->oper_channel = chan;
- local->oper_channel_type = channel_type;
- return ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+ if (!ieee80211_set_channel_type(local, sdata, channel_type))
+ return -EBUSY;
+
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+ if (sdata && sdata->vif.type != NL80211_IFTYPE_MONITOR)
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT);
+
+ return 0;
}
#ifdef CONFIG_PM
@@ -1193,6 +1238,20 @@ static int ieee80211_auth(struct wiphy *wiphy, struct net_device *dev,
static int ieee80211_assoc(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_assoc_request *req)
{
+ struct ieee80211_local *local = wiphy_priv(wiphy);
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ switch (ieee80211_get_channel_mode(local, sdata)) {
+ case CHAN_MODE_HOPPING:
+ return -EBUSY;
+ case CHAN_MODE_FIXED:
+ if (local->oper_channel == req->bss->channel)
+ break;
+ return -EBUSY;
+ case CHAN_MODE_UNDEFINED:
+ break;
+ }
+
return ieee80211_mgd_assoc(IEEE80211_DEV_TO_SUB_IF(dev), req);
}
@@ -1215,8 +1274,22 @@ static int ieee80211_disassoc(struct wiphy *wiphy, struct net_device *dev,
static int ieee80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_ibss_params *params)
{
+ struct ieee80211_local *local = wiphy_priv(wiphy);
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ switch (ieee80211_get_channel_mode(local, sdata)) {
+ case CHAN_MODE_HOPPING:
+ return -EBUSY;
+ case CHAN_MODE_FIXED:
+ if (!params->channel_fixed)
+ return -EBUSY;
+ if (local->oper_channel == params->channel)
+ break;
+ return -EBUSY;
+ case CHAN_MODE_UNDEFINED:
+ break;
+ }
+
return ieee80211_ibss_join(sdata, params);
}
@@ -1345,7 +1418,7 @@ int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
* association, there's no need to send an action frame.
*/
if (!sdata->u.mgd.associated ||
- sdata->local->oper_channel_type == NL80211_CHAN_NO_HT) {
+ sdata->vif.bss_conf.channel_type == NL80211_CHAN_NO_HT) {
mutex_lock(&sdata->local->iflist_mtx);
ieee80211_recalc_smps(sdata->local, sdata);
mutex_unlock(&sdata->local->iflist_mtx);
@@ -1384,11 +1457,11 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
return -EOPNOTSUPP;
if (enabled == sdata->u.mgd.powersave &&
- timeout == conf->dynamic_ps_timeout)
+ timeout == conf->dynamic_ps_forced_timeout)
return 0;
sdata->u.mgd.powersave = enabled;
- conf->dynamic_ps_timeout = timeout;
+ conf->dynamic_ps_forced_timeout = timeout;
/* no change, but if automatic follow powersave */
mutex_lock(&sdata->u.mgd.mtx);
@@ -1403,6 +1476,35 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
return 0;
}
+static int ieee80211_set_cqm_rssi_config(struct wiphy *wiphy,
+ struct net_device *dev,
+ s32 rssi_thold, u32 rssi_hyst)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+ struct ieee80211_vif *vif = &sdata->vif;
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+
+ if (rssi_thold == bss_conf->cqm_rssi_thold &&
+ rssi_hyst == bss_conf->cqm_rssi_hyst)
+ return 0;
+
+ bss_conf->cqm_rssi_thold = rssi_thold;
+ bss_conf->cqm_rssi_hyst = rssi_hyst;
+
+ if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI)) {
+ if (sdata->vif.type != NL80211_IFTYPE_STATION)
+ return -EOPNOTSUPP;
+ return 0;
+ }
+
+ /* tell the driver upon association, unless already associated */
+ if (sdata->u.mgd.associated)
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_CQM);
+
+ return 0;
+}
+
static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
struct net_device *dev,
const u8 *addr,
@@ -1475,6 +1577,7 @@ struct cfg80211_ops mac80211_config_ops = {
.change_station = ieee80211_change_station,
.get_station = ieee80211_get_station,
.dump_station = ieee80211_dump_station,
+ .dump_survey = ieee80211_dump_survey,
#ifdef CONFIG_MAC80211_MESH
.add_mpath = ieee80211_add_mpath,
.del_mpath = ieee80211_del_mpath,
@@ -1507,4 +1610,5 @@ struct cfg80211_ops mac80211_config_ops = {
.remain_on_channel = ieee80211_remain_on_channel,
.cancel_remain_on_channel = ieee80211_cancel_remain_on_channel,
.action = ieee80211_action,
+ .set_cqm_rssi_config = ieee80211_set_cqm_rssi_config,
};
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
new file mode 100644
index 000000000000..5d218c530a4e
--- /dev/null
+++ b/net/mac80211/chan.c
@@ -0,0 +1,127 @@
+/*
+ * mac80211 - channel management
+ */
+
+#include <linux/nl80211.h>
+#include "ieee80211_i.h"
+
+enum ieee80211_chan_mode
+__ieee80211_get_channel_mode(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *ignore)
+{
+ struct ieee80211_sub_if_data *sdata;
+
+ WARN_ON(!mutex_is_locked(&local->iflist_mtx));
+
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (sdata == ignore)
+ continue;
+
+ if (!ieee80211_sdata_running(sdata))
+ continue;
+
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR)
+ continue;
+
+ if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+ !sdata->u.mgd.associated)
+ continue;
+
+ if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
+ if (!sdata->u.ibss.ssid_len)
+ continue;
+ if (!sdata->u.ibss.fixed_channel)
+ return CHAN_MODE_HOPPING;
+ }
+
+ if (sdata->vif.type == NL80211_IFTYPE_AP &&
+ !sdata->u.ap.beacon)
+ continue;
+
+ return CHAN_MODE_FIXED;
+ }
+
+ return CHAN_MODE_UNDEFINED;
+}
+
+enum ieee80211_chan_mode
+ieee80211_get_channel_mode(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *ignore)
+{
+ enum ieee80211_chan_mode mode;
+
+ mutex_lock(&local->iflist_mtx);
+ mode = __ieee80211_get_channel_mode(local, ignore);
+ mutex_unlock(&local->iflist_mtx);
+
+ return mode;
+}
+
+bool ieee80211_set_channel_type(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ enum nl80211_channel_type chantype)
+{
+ struct ieee80211_sub_if_data *tmp;
+ enum nl80211_channel_type superchan = NL80211_CHAN_NO_HT;
+ bool result;
+
+ mutex_lock(&local->iflist_mtx);
+
+ list_for_each_entry(tmp, &local->interfaces, list) {
+ if (tmp == sdata)
+ continue;
+
+ if (!ieee80211_sdata_running(tmp))
+ continue;
+
+ switch (tmp->vif.bss_conf.channel_type) {
+ case NL80211_CHAN_NO_HT:
+ case NL80211_CHAN_HT20:
+ superchan = tmp->vif.bss_conf.channel_type;
+ break;
+ case NL80211_CHAN_HT40PLUS:
+ WARN_ON(superchan == NL80211_CHAN_HT40MINUS);
+ superchan = NL80211_CHAN_HT40PLUS;
+ break;
+ case NL80211_CHAN_HT40MINUS:
+ WARN_ON(superchan == NL80211_CHAN_HT40PLUS);
+ superchan = NL80211_CHAN_HT40MINUS;
+ break;
+ }
+ }
+
+ switch (superchan) {
+ case NL80211_CHAN_NO_HT:
+ case NL80211_CHAN_HT20:
+ /*
+ * allow any change that doesn't go to no-HT
+ * (if it already is no-HT no change is needed)
+ */
+ if (chantype == NL80211_CHAN_NO_HT)
+ break;
+ superchan = chantype;
+ break;
+ case NL80211_CHAN_HT40PLUS:
+ case NL80211_CHAN_HT40MINUS:
+ /* allow smaller bandwidth and same */
+ if (chantype == NL80211_CHAN_NO_HT)
+ break;
+ if (chantype == NL80211_CHAN_HT20)
+ break;
+ if (superchan == chantype)
+ break;
+ result = false;
+ goto out;
+ }
+
+ local->_oper_channel_type = superchan;
+
+ if (sdata)
+ sdata->vif.bss_conf.channel_type = chantype;
+
+ result = true;
+ out:
+ mutex_unlock(&local->iflist_mtx);
+
+ return result;
+}
diff --git a/net/mac80211/debugfs.h b/net/mac80211/debugfs.h
index 68e6a2050f9a..09cc9be34796 100644
--- a/net/mac80211/debugfs.h
+++ b/net/mac80211/debugfs.h
@@ -7,7 +7,6 @@ extern int mac80211_open_file_generic(struct inode *inode, struct file *file);
#else
static inline void debugfs_hw_add(struct ieee80211_local *local)
{
- return;
}
#endif
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 83d4289d954b..20b2998fa0ed 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -100,6 +100,14 @@ static ssize_t ieee80211_if_fmt_##name( \
return scnprintf(buf, buflen, "%pM\n", sdata->field); \
}
+#define IEEE80211_IF_FMT_DEC_DIV_16(name, field) \
+static ssize_t ieee80211_if_fmt_##name( \
+ const struct ieee80211_sub_if_data *sdata, \
+ char *buf, int buflen) \
+{ \
+ return scnprintf(buf, buflen, "%d\n", sdata->field / 16); \
+}
+
#define __IEEE80211_IF_FILE(name, _write) \
static ssize_t ieee80211_if_read_##name(struct file *file, \
char __user *userbuf, \
@@ -140,6 +148,8 @@ IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[IEEE80211_BAND_5GHZ],
/* STA attributes */
IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC);
IEEE80211_IF_FILE(aid, u.mgd.aid, DEC);
+IEEE80211_IF_FILE(last_beacon, u.mgd.last_beacon_signal, DEC);
+IEEE80211_IF_FILE(ave_beacon, u.mgd.ave_beacon_signal, DEC_DIV_16);
static int ieee80211_set_smps(struct ieee80211_sub_if_data *sdata,
enum ieee80211_smps_mode smps_mode)
@@ -276,6 +286,8 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata)
DEBUGFS_ADD(bssid);
DEBUGFS_ADD(aid);
+ DEBUGFS_ADD(last_beacon);
+ DEBUGFS_ADD(ave_beacon);
DEBUGFS_ADD_MODE(smps, 0600);
}
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index d92800bb2d2f..e763f1529ddb 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -39,6 +39,13 @@ static const struct file_operations sta_ ##name## _ops = { \
.open = mac80211_open_file_generic, \
}
+#define STA_OPS_RW(name) \
+static const struct file_operations sta_ ##name## _ops = { \
+ .read = sta_##name##_read, \
+ .write = sta_##name##_write, \
+ .open = mac80211_open_file_generic, \
+}
+
#define STA_FILE(name, field, format) \
STA_READ_##format(name, field) \
STA_OPS(name)
@@ -57,7 +64,6 @@ STA_FILE(tx_filtered, tx_filtered_count, LU);
STA_FILE(tx_retry_failed, tx_retry_failed, LU);
STA_FILE(tx_retry_count, tx_retry_count, LU);
STA_FILE(last_signal, last_signal, D);
-STA_FILE(last_noise, last_noise, D);
STA_FILE(wep_weak_iv_count, wep_weak_iv_count, LU);
static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
@@ -120,7 +126,7 @@ STA_OPS(last_seq_ctrl);
static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
- char buf[64 + STA_TID_NUM * 40], *p = buf;
+ char buf[71 + STA_TID_NUM * 40], *p = buf;
int i;
struct sta_info *sta = file->private_data;
@@ -128,16 +134,16 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
sta->ampdu_mlme.dialog_token_allocator + 1);
p += scnprintf(p, sizeof(buf) + buf - p,
- "TID\t\tRX\tDTKN\tSSN\t\tTX\tDTKN\tSSN\tpending\n");
+ "TID\t\tRX active\tDTKN\tSSN\t\tTX\tDTKN\tSSN\tpending\n");
for (i = 0; i < STA_TID_NUM; i++) {
p += scnprintf(p, sizeof(buf) + buf - p, "%02d", i);
p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x",
- sta->ampdu_mlme.tid_state_rx[i]);
+ sta->ampdu_mlme.tid_active_rx[i]);
p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x",
- sta->ampdu_mlme.tid_state_rx[i] ?
+ sta->ampdu_mlme.tid_active_rx[i] ?
sta->ampdu_mlme.tid_rx[i]->dialog_token : 0);
p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x",
- sta->ampdu_mlme.tid_state_rx[i] ?
+ sta->ampdu_mlme.tid_active_rx[i] ?
sta->ampdu_mlme.tid_rx[i]->ssn : 0);
p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x",
@@ -157,7 +163,63 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
}
-STA_OPS(agg_status);
+
+static ssize_t sta_agg_status_write(struct file *file, const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ char _buf[12], *buf = _buf;
+ struct sta_info *sta = file->private_data;
+ bool start, tx;
+ unsigned long tid;
+ int ret;
+
+ if (count > sizeof(_buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, userbuf, count))
+ return -EFAULT;
+
+ buf[sizeof(_buf) - 1] = '\0';
+
+ if (strncmp(buf, "tx ", 3) == 0) {
+ buf += 3;
+ tx = true;
+ } else if (strncmp(buf, "rx ", 3) == 0) {
+ buf += 3;
+ tx = false;
+ } else
+ return -EINVAL;
+
+ if (strncmp(buf, "start ", 6) == 0) {
+ buf += 6;
+ start = true;
+ if (!tx)
+ return -EINVAL;
+ } else if (strncmp(buf, "stop ", 5) == 0) {
+ buf += 5;
+ start = false;
+ } else
+ return -EINVAL;
+
+ tid = simple_strtoul(buf, NULL, 0);
+
+ if (tid >= STA_TID_NUM)
+ return -EINVAL;
+
+ if (tx) {
+ if (start)
+ ret = ieee80211_start_tx_ba_session(&sta->sta, tid);
+ else
+ ret = ieee80211_stop_tx_ba_session(&sta->sta, tid,
+ WLAN_BACK_RECIPIENT);
+ } else {
+		/* reason 3 == WLAN_REASON_DEAUTH_LEAVING */
+		__ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT, 3);
+ ret = 0;
+ }
+
+ return ret ?: count;
+}
+STA_OPS_RW(agg_status);
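
For reference, the command grammar parsed above is "tx start <tid>", "tx stop <tid>" or "rx stop <tid>" ("rx start" is rejected since only the initiator may start a session). A minimal userspace sketch; the debugfs path and station MAC are hypothetical:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical phy/netdev/station path -- adjust to the running system */
	const char *path = "/sys/kernel/debug/ieee80211/phy0/netdev:wlan0/"
			   "stations/00:11:22:33:44:55/agg_status";
	const char *cmd = "tx start 0\n";	/* start a TX BA session on TID 0 */
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, cmd, strlen(cmd)) < 0) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
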
static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
@@ -177,7 +239,7 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
if (htc->ht_supported) {
p += scnprintf(p, sizeof(buf)+buf-p, "cap: %#.4x\n", htc->cap);
- PRINT_HT_CAP((htc->cap & BIT(0)), "RX LDCP");
+ PRINT_HT_CAP((htc->cap & BIT(0)), "RX LDPC");
PRINT_HT_CAP((htc->cap & BIT(1)), "HT20/HT40");
PRINT_HT_CAP(!(htc->cap & BIT(1)), "HT20");
@@ -289,7 +351,6 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
DEBUGFS_ADD(tx_retry_failed);
DEBUGFS_ADD(tx_retry_count);
DEBUGFS_ADD(last_signal);
- DEBUGFS_ADD(last_noise);
DEBUGFS_ADD(wep_weak_iv_count);
DEBUGFS_ADD(ht_capa);
}
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index c3d844093a2f..4f2271316650 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -84,16 +84,14 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local,
}
static inline u64 drv_prepare_multicast(struct ieee80211_local *local,
- int mc_count,
- struct dev_addr_list *mc_list)
+ struct netdev_hw_addr_list *mc_list)
{
u64 ret = 0;
if (local->ops->prepare_multicast)
- ret = local->ops->prepare_multicast(&local->hw, mc_count,
- mc_list);
+ ret = local->ops->prepare_multicast(&local->hw, mc_list);
- trace_drv_prepare_multicast(local, mc_count, ret);
+ trace_drv_prepare_multicast(local, mc_list->count, ret);
return ret;
}
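
On the driver side, prepare_multicast now receives the netdev_hw_addr_list directly instead of a count plus dev_addr_list chain, with the aggregate count available as mc_list->count. A hedged sketch of a consuming driver (the hash is illustrative, not any real driver's logic):

static u64 example_prepare_multicast(struct ieee80211_hw *hw,
				     struct netdev_hw_addr_list *mc_list)
{
	struct netdev_hw_addr *ha;
	u64 hash = 0;

	/* fold each multicast address into a 64-bit filter word */
	netdev_hw_addr_list_for_each(ha, mc_list)
		hash |= 1ULL << (ha->addr[5] & 0x3f);

	return hash;	/* handed back to configure_filter() as the multicast arg */
}
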
@@ -154,14 +152,15 @@ static inline void drv_update_tkip_key(struct ieee80211_local *local,
}
static inline int drv_hw_scan(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
struct cfg80211_scan_request *req)
{
int ret;
might_sleep();
- ret = local->ops->hw_scan(&local->hw, req);
- trace_drv_hw_scan(local, req, ret);
+ ret = local->ops->hw_scan(&local->hw, &sdata->vif, req);
+ trace_drv_hw_scan(local, sdata, req, ret);
return ret;
}
@@ -346,6 +345,15 @@ static inline int drv_ampdu_action(struct ieee80211_local *local,
return ret;
}
+static inline int drv_get_survey(struct ieee80211_local *local, int idx,
+ struct survey_info *survey)
+{
+ int ret = -EOPNOTSUPP;
+	if (local->ops->get_survey)
+ ret = local->ops->get_survey(&local->hw, idx, survey);
+ /* trace_drv_get_survey(local, idx, survey, ret); */
+ return ret;
+}
static inline void drv_rfkill_poll(struct ieee80211_local *local)
{
@@ -363,4 +371,15 @@ static inline void drv_flush(struct ieee80211_local *local, bool drop)
if (local->ops->flush)
local->ops->flush(&local->hw, drop);
}
+
+static inline void drv_channel_switch(struct ieee80211_local *local,
+ struct ieee80211_channel_switch *ch_switch)
+{
+ might_sleep();
+
+ local->ops->channel_switch(&local->hw, ch_switch);
+
+ trace_drv_channel_switch(local, ch_switch);
+}
+
#endif /* __MAC80211_DRIVER_OPS */
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index 41baf730a5c7..6a9b2342a9c2 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -32,6 +32,10 @@ static inline void trace_ ## name(proto) {}
#define VIF_PR_FMT " vif:%s(%d)"
#define VIF_PR_ARG __get_str(vif_name), __entry->vif_type
+/*
+ * Tracing for driver callbacks.
+ */
+
TRACE_EVENT(drv_start,
TP_PROTO(struct ieee80211_local *local, int ret),
@@ -359,23 +363,26 @@ TRACE_EVENT(drv_update_tkip_key,
TRACE_EVENT(drv_hw_scan,
TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
struct cfg80211_scan_request *req, int ret),
- TP_ARGS(local, req, ret),
+ TP_ARGS(local, sdata, req, ret),
TP_STRUCT__entry(
LOCAL_ENTRY
+ VIF_ENTRY
__field(int, ret)
),
TP_fast_assign(
LOCAL_ASSIGN;
+ VIF_ASSIGN;
__entry->ret = ret;
),
TP_printk(
- LOCAL_PR_FMT " ret:%d",
- LOCAL_PR_ARG, __entry->ret
+ LOCAL_PR_FMT VIF_PR_FMT " ret:%d",
+ LOCAL_PR_ARG,VIF_PR_ARG, __entry->ret
)
);
@@ -766,6 +773,326 @@ TRACE_EVENT(drv_flush,
LOCAL_PR_ARG, __entry->drop
)
);
+
+TRACE_EVENT(drv_channel_switch,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_channel_switch *ch_switch),
+
+ TP_ARGS(local, ch_switch),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(u64, timestamp)
+ __field(bool, block_tx)
+ __field(u16, freq)
+ __field(u8, count)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->timestamp = ch_switch->timestamp;
+ __entry->block_tx = ch_switch->block_tx;
+ __entry->freq = ch_switch->channel->center_freq;
+ __entry->count = ch_switch->count;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " new freq:%u count:%d",
+ LOCAL_PR_ARG, __entry->freq, __entry->count
+ )
+);
+
+/*
+ * Tracing for API calls that drivers call.
+ */
+
+TRACE_EVENT(api_start_tx_ba_session,
+ TP_PROTO(struct ieee80211_sta *sta, u16 tid),
+
+ TP_ARGS(sta, tid),
+
+ TP_STRUCT__entry(
+ STA_ENTRY
+ __field(u16, tid)
+ ),
+
+ TP_fast_assign(
+ STA_ASSIGN;
+ __entry->tid = tid;
+ ),
+
+ TP_printk(
+ STA_PR_FMT " tid:%d",
+ STA_PR_ARG, __entry->tid
+ )
+);
+
+TRACE_EVENT(api_start_tx_ba_cb,
+ TP_PROTO(struct ieee80211_sub_if_data *sdata, const u8 *ra, u16 tid),
+
+ TP_ARGS(sdata, ra, tid),
+
+ TP_STRUCT__entry(
+ VIF_ENTRY
+ __array(u8, ra, ETH_ALEN)
+ __field(u16, tid)
+ ),
+
+ TP_fast_assign(
+ VIF_ASSIGN;
+ memcpy(__entry->ra, ra, ETH_ALEN);
+ __entry->tid = tid;
+ ),
+
+ TP_printk(
+ VIF_PR_FMT " ra:%pM tid:%d",
+ VIF_PR_ARG, __entry->ra, __entry->tid
+ )
+);
+
+TRACE_EVENT(api_stop_tx_ba_session,
+ TP_PROTO(struct ieee80211_sta *sta, u16 tid, u16 initiator),
+
+ TP_ARGS(sta, tid, initiator),
+
+ TP_STRUCT__entry(
+ STA_ENTRY
+ __field(u16, tid)
+ __field(u16, initiator)
+ ),
+
+ TP_fast_assign(
+ STA_ASSIGN;
+ __entry->tid = tid;
+ __entry->initiator = initiator;
+ ),
+
+ TP_printk(
+ STA_PR_FMT " tid:%d initiator:%d",
+ STA_PR_ARG, __entry->tid, __entry->initiator
+ )
+);
+
+TRACE_EVENT(api_stop_tx_ba_cb,
+ TP_PROTO(struct ieee80211_sub_if_data *sdata, const u8 *ra, u16 tid),
+
+ TP_ARGS(sdata, ra, tid),
+
+ TP_STRUCT__entry(
+ VIF_ENTRY
+ __array(u8, ra, ETH_ALEN)
+ __field(u16, tid)
+ ),
+
+ TP_fast_assign(
+ VIF_ASSIGN;
+ memcpy(__entry->ra, ra, ETH_ALEN);
+ __entry->tid = tid;
+ ),
+
+ TP_printk(
+ VIF_PR_FMT " ra:%pM tid:%d",
+ VIF_PR_ARG, __entry->ra, __entry->tid
+ )
+);
+
+TRACE_EVENT(api_restart_hw,
+ TP_PROTO(struct ieee80211_local *local),
+
+ TP_ARGS(local),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT,
+ LOCAL_PR_ARG
+ )
+);
+
+TRACE_EVENT(api_beacon_loss,
+ TP_PROTO(struct ieee80211_sub_if_data *sdata),
+
+ TP_ARGS(sdata),
+
+ TP_STRUCT__entry(
+ VIF_ENTRY
+ ),
+
+ TP_fast_assign(
+ VIF_ASSIGN;
+ ),
+
+ TP_printk(
+ VIF_PR_FMT,
+ VIF_PR_ARG
+ )
+);
+
+TRACE_EVENT(api_connection_loss,
+ TP_PROTO(struct ieee80211_sub_if_data *sdata),
+
+ TP_ARGS(sdata),
+
+ TP_STRUCT__entry(
+ VIF_ENTRY
+ ),
+
+ TP_fast_assign(
+ VIF_ASSIGN;
+ ),
+
+ TP_printk(
+ VIF_PR_FMT,
+ VIF_PR_ARG
+ )
+);
+
+TRACE_EVENT(api_cqm_rssi_notify,
+ TP_PROTO(struct ieee80211_sub_if_data *sdata,
+ enum nl80211_cqm_rssi_threshold_event rssi_event),
+
+ TP_ARGS(sdata, rssi_event),
+
+ TP_STRUCT__entry(
+ VIF_ENTRY
+ __field(u32, rssi_event)
+ ),
+
+ TP_fast_assign(
+ VIF_ASSIGN;
+ __entry->rssi_event = rssi_event;
+ ),
+
+ TP_printk(
+ VIF_PR_FMT " event:%d",
+ VIF_PR_ARG, __entry->rssi_event
+ )
+);
+
+TRACE_EVENT(api_scan_completed,
+ TP_PROTO(struct ieee80211_local *local, bool aborted),
+
+ TP_ARGS(local, aborted),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(bool, aborted)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->aborted = aborted;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " aborted:%d",
+ LOCAL_PR_ARG, __entry->aborted
+ )
+);
+
+TRACE_EVENT(api_sta_block_awake,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sta *sta, bool block),
+
+ TP_ARGS(local, sta, block),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ STA_ENTRY
+ __field(bool, block)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ STA_ASSIGN;
+ __entry->block = block;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT STA_PR_FMT " block:%d",
+		LOCAL_PR_ARG, STA_PR_ARG, __entry->block
+ )
+);
+
+TRACE_EVENT(api_chswitch_done,
+ TP_PROTO(struct ieee80211_sub_if_data *sdata, bool success),
+
+ TP_ARGS(sdata, success),
+
+ TP_STRUCT__entry(
+ VIF_ENTRY
+ __field(bool, success)
+ ),
+
+ TP_fast_assign(
+ VIF_ASSIGN;
+ __entry->success = success;
+ ),
+
+ TP_printk(
+ VIF_PR_FMT " success=%d",
+ VIF_PR_ARG, __entry->success
+ )
+);
+
+/*
+ * Tracing for internal functions
+ * (which may also be called in response to driver calls)
+ */
+
+TRACE_EVENT(wake_queue,
+ TP_PROTO(struct ieee80211_local *local, u16 queue,
+ enum queue_stop_reason reason),
+
+ TP_ARGS(local, queue, reason),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(u16, queue)
+ __field(u32, reason)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->queue = queue;
+ __entry->reason = reason;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " queue:%d, reason:%d",
+ LOCAL_PR_ARG, __entry->queue, __entry->reason
+ )
+);
+
+TRACE_EVENT(stop_queue,
+ TP_PROTO(struct ieee80211_local *local, u16 queue,
+ enum queue_stop_reason reason),
+
+ TP_ARGS(local, queue, reason),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(u16, queue)
+ __field(u32, reason)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->queue = queue;
+ __entry->reason = reason;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " queue:%d, reason:%d",
+ LOCAL_PR_ARG, __entry->queue, __entry->reason
+ )
+);
#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */
#undef TRACE_INCLUDE_PATH
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index bb677a73b7c9..2ab106a0a491 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -175,8 +175,7 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
#endif /* CONFIG_MAC80211_HT_DEBUG */
if (initiator == WLAN_BACK_INITIATOR)
- ieee80211_sta_stop_rx_ba_session(sdata, sta->sta.addr, tid,
- WLAN_BACK_INITIATOR, 0);
+ __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_INITIATOR, 0);
else { /* WLAN_BACK_RECIPIENT */
spin_lock_bh(&sta->lock);
if (sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK)
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index e2976da4e0d9..b2cc1fda6cfd 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -92,12 +92,18 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
if (memcmp(ifibss->bssid, bssid, ETH_ALEN))
sta_info_flush(sdata->local, sdata);
+ /* if merging, indicate to driver that we leave the old IBSS */
+ if (sdata->vif.bss_conf.ibss_joined) {
+ sdata->vif.bss_conf.ibss_joined = false;
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IBSS);
+ }
+
memcpy(ifibss->bssid, bssid, ETH_ALEN);
sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0;
local->oper_channel = chan;
- local->oper_channel_type = NL80211_CHAN_NO_HT;
+ WARN_ON(!ieee80211_set_channel_type(local, sdata, NL80211_CHAN_NO_HT));
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
sband = local->hw.wiphy->bands[chan->band];
@@ -171,6 +177,8 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
bss_change |= BSS_CHANGED_BSSID;
bss_change |= BSS_CHANGED_BEACON;
bss_change |= BSS_CHANGED_BEACON_ENABLED;
+ bss_change |= BSS_CHANGED_IBSS;
+ sdata->vif.bss_conf.ibss_joined = true;
ieee80211_bss_info_change_notify(sdata, bss_change);
ieee80211_sta_def_wmm_params(sdata, sband->n_bitrates, supp_rates);
@@ -265,17 +273,16 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
sta->sta.supp_rates[band] = supp_rates |
ieee80211_mandatory_rates(local, band);
+ if (sta->sta.supp_rates[band] != prev_rates) {
#ifdef CONFIG_MAC80211_IBSS_DEBUG
- if (sta->sta.supp_rates[band] != prev_rates)
printk(KERN_DEBUG "%s: updated supp_rates set "
- "for %pM based on beacon info (0x%llx | "
- "0x%llx -> 0x%llx)\n",
- sdata->name,
- sta->sta.addr,
- (unsigned long long) prev_rates,
- (unsigned long long) supp_rates,
- (unsigned long long) sta->sta.supp_rates[band]);
+ "for %pM based on beacon/probe_response "
+ "(0x%x -> 0x%x)\n",
+ sdata->name, sta->sta.addr,
+ prev_rates, sta->sta.supp_rates[band]);
#endif
+ rate_control_rate_init(sta);
+ }
rcu_read_unlock();
} else {
rcu_read_unlock();
@@ -371,6 +378,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
sdata->name, mgmt->bssid);
#endif
ieee80211_sta_join_ibss(sdata, bss);
+ supp_rates = ieee80211_sta_get_rates(local, elems, band);
ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa,
supp_rates, GFP_KERNEL);
}
@@ -481,7 +489,9 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
printk(KERN_DEBUG "%s: No active IBSS STAs - trying to scan for other "
"IBSS networks with same SSID (merge)\n", sdata->name);
- ieee80211_request_internal_scan(sdata, ifibss->ssid, ifibss->ssid_len);
+ ieee80211_request_internal_scan(sdata,
+ ifibss->ssid, ifibss->ssid_len,
+ ifibss->fixed_channel ? ifibss->channel : NULL);
}
static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
@@ -588,8 +598,9 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to "
"join\n", sdata->name);
- ieee80211_request_internal_scan(sdata, ifibss->ssid,
- ifibss->ssid_len);
+ ieee80211_request_internal_scan(sdata,
+ ifibss->ssid, ifibss->ssid_len,
+ ifibss->fixed_channel ? ifibss->channel : NULL);
} else {
int interval = IEEE80211_SCAN_INTERVAL;
@@ -897,6 +908,13 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
sdata->u.ibss.channel = params->channel;
sdata->u.ibss.fixed_channel = params->channel_fixed;
+ /* fix ourselves to that channel now already */
+ if (params->channel_fixed) {
+ sdata->local->oper_channel = params->channel;
+ WARN_ON(!ieee80211_set_channel_type(sdata->local, sdata,
+ NL80211_CHAN_NO_HT));
+ }
+
if (params->ie) {
sdata->u.ibss.ie = kmemdup(params->ie, params->ie_len,
GFP_KERNEL);
@@ -951,7 +969,9 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
kfree(sdata->u.ibss.ie);
skb = sdata->u.ibss.presp;
rcu_assign_pointer(sdata->u.ibss.presp, NULL);
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
+ sdata->vif.bss_conf.ibss_joined = false;
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
+ BSS_CHANGED_IBSS);
synchronize_rcu();
kfree_skb(skb);
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 241533e1bc03..1a9e2da37a93 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -317,6 +317,7 @@ enum ieee80211_sta_flags {
IEEE80211_STA_MFP_ENABLED = BIT(6),
IEEE80211_STA_UAPSD_ENABLED = BIT(7),
IEEE80211_STA_NULLFUNC_ACKED = BIT(8),
+ IEEE80211_STA_RESET_SIGNAL_AVE = BIT(9),
};
struct ieee80211_if_managed {
@@ -327,7 +328,7 @@ struct ieee80211_if_managed {
struct work_struct work;
struct work_struct monitor_work;
struct work_struct chswitch_work;
- struct work_struct beacon_loss_work;
+ struct work_struct beacon_connection_loss_work;
unsigned long probe_timeout;
int probe_send_count;
@@ -359,6 +360,24 @@ struct ieee80211_if_managed {
int wmm_last_param_set;
u8 use_4addr;
+
+ /* Signal strength from the last Beacon frame in the current BSS. */
+ int last_beacon_signal;
+
+ /*
+ * Weighted average of the signal strength from Beacon frames in the
+ * current BSS. This is in units of 1/16 of the signal unit to maintain
+	 * accuracy and to speed up calculations, i.e., the value needs to be
+ * divided by 16 to get the actual value.
+ */
+ int ave_beacon_signal;
+
+ /*
+ * Last Beacon frame signal strength average (ave_beacon_signal / 16)
+ * that triggered a cqm event. 0 indicates that no event has been
+ * generated for the current association.
+ */
+ int last_cqm_event_signal;
};
enum ieee80211_ibss_request {
@@ -646,8 +665,7 @@ struct ieee80211_local {
struct work_struct recalc_smps;
/* aggregated multicast list */
- struct dev_addr_list *mc_list;
- int mc_count;
+ struct netdev_hw_addr_list mc_list;
bool tim_in_locked_section; /* see ieee80211_beacon_get() */
@@ -745,10 +763,11 @@ struct ieee80211_local {
int scan_channel_idx;
int scan_ies_len;
+ unsigned long leave_oper_channel_time;
enum mac80211_scan_state next_scan_state;
struct delayed_work scan_work;
struct ieee80211_sub_if_data *scan_sdata;
- enum nl80211_channel_type oper_channel_type;
+ enum nl80211_channel_type _oper_channel_type;
struct ieee80211_channel *oper_channel, *csa_channel;
/* Temporary remain-on-channel for off-channel operations */
@@ -979,7 +998,8 @@ int ieee80211_max_network_latency(struct notifier_block *nb,
unsigned long data, void *dummy);
void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
struct ieee80211_channel_sw_ie *sw_elem,
- struct ieee80211_bss *bss);
+ struct ieee80211_bss *bss,
+ u64 timestamp);
void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata);
void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata);
@@ -1000,7 +1020,8 @@ void ieee80211_ibss_restart(struct ieee80211_sub_if_data *sdata);
/* scan/BSS handling */
void ieee80211_scan_work(struct work_struct *work);
int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
- const u8 *ssid, u8 ssid_len);
+ const u8 *ssid, u8 ssid_len,
+ struct ieee80211_channel *chan);
int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
struct cfg80211_scan_request *req);
void ieee80211_scan_cancel(struct ieee80211_local *local);
@@ -1078,8 +1099,6 @@ int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
enum ieee80211_smps_mode smps, const u8 *da,
const u8 *bssid);
-void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *da,
- u16 tid, u16 initiator, u16 reason);
void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
u16 initiator, u16 reason);
void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta);
@@ -1155,7 +1174,7 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
int powersave);
void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
struct ieee80211_hdr *hdr);
-void ieee80211_beacon_loss_work(struct work_struct *work);
+void ieee80211_beacon_connection_loss_work(struct work_struct *work);
void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
enum queue_stop_reason reason);
@@ -1210,6 +1229,20 @@ int ieee80211_wk_remain_on_channel(struct ieee80211_sub_if_data *sdata,
int ieee80211_wk_cancel_remain_on_channel(
struct ieee80211_sub_if_data *sdata, u64 cookie);
+/* channel management */
+enum ieee80211_chan_mode {
+ CHAN_MODE_UNDEFINED,
+ CHAN_MODE_HOPPING,
+ CHAN_MODE_FIXED,
+};
+
+enum ieee80211_chan_mode
+ieee80211_get_channel_mode(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *ignore);
+bool ieee80211_set_channel_type(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ enum nl80211_channel_type chantype);
+
#ifdef CONFIG_MAC80211_NOINLINE
#define debug_noinline noinline
#else
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index e08fa8eda1b3..50deb017fd6e 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -413,8 +413,7 @@ static int ieee80211_stop(struct net_device *dev)
netif_addr_lock_bh(dev);
spin_lock_bh(&local->filter_lock);
- __dev_addr_unsync(&local->mc_list, &local->mc_count,
- &dev->mc_list, &dev->mc_count);
+ __hw_addr_unsync(&local->mc_list, &dev->mc, dev->addr_len);
spin_unlock_bh(&local->filter_lock);
netif_addr_unlock_bh(dev);
@@ -487,7 +486,7 @@ static int ieee80211_stop(struct net_device *dev)
cancel_work_sync(&sdata->u.mgd.work);
cancel_work_sync(&sdata->u.mgd.chswitch_work);
cancel_work_sync(&sdata->u.mgd.monitor_work);
- cancel_work_sync(&sdata->u.mgd.beacon_loss_work);
+ cancel_work_sync(&sdata->u.mgd.beacon_connection_loss_work);
/*
* When we get here, the interface is marked down.
@@ -597,8 +596,7 @@ static void ieee80211_set_multicast_list(struct net_device *dev)
sdata->flags ^= IEEE80211_SDATA_PROMISC;
}
spin_lock_bh(&local->filter_lock);
- __dev_addr_sync(&local->mc_list, &local->mc_count,
- &dev->mc_list, &dev->mc_count);
+ __hw_addr_sync(&local->mc_list, &dev->mc, dev->addr_len);
spin_unlock_bh(&local->filter_lock);
ieee80211_queue_work(&local->hw, &local->reconfig_filter);
}
@@ -816,6 +814,118 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
return 0;
}
+static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
+ struct net_device *dev,
+ enum nl80211_iftype type)
+{
+ struct ieee80211_sub_if_data *sdata;
+ u64 mask, start, addr, val, inc;
+ u8 *m;
+ u8 tmp_addr[ETH_ALEN];
+ int i;
+
+ /* default ... something at least */
+ memcpy(dev->perm_addr, local->hw.wiphy->perm_addr, ETH_ALEN);
+
+ if (is_zero_ether_addr(local->hw.wiphy->addr_mask) &&
+ local->hw.wiphy->n_addresses <= 1)
+ return;
+
+
+ mutex_lock(&local->iflist_mtx);
+
+ switch (type) {
+ case NL80211_IFTYPE_MONITOR:
+ /* doesn't matter */
+ break;
+ case NL80211_IFTYPE_WDS:
+ case NL80211_IFTYPE_AP_VLAN:
+ /* match up with an AP interface */
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (sdata->vif.type != NL80211_IFTYPE_AP)
+ continue;
+ memcpy(dev->perm_addr, sdata->vif.addr, ETH_ALEN);
+ break;
+ }
+ /* keep default if no AP interface present */
+ break;
+ default:
+ /* assign a new address if possible -- try n_addresses first */
+ for (i = 0; i < local->hw.wiphy->n_addresses; i++) {
+ bool used = false;
+
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (memcmp(local->hw.wiphy->addresses[i].addr,
+ sdata->vif.addr, ETH_ALEN) == 0) {
+ used = true;
+ break;
+ }
+ }
+
+ if (!used) {
+ memcpy(dev->perm_addr,
+ local->hw.wiphy->addresses[i].addr,
+ ETH_ALEN);
+ break;
+ }
+ }
+
+ /* try mask if available */
+ if (is_zero_ether_addr(local->hw.wiphy->addr_mask))
+ break;
+
+ m = local->hw.wiphy->addr_mask;
+ mask = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) |
+ ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) |
+ ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8);
+
+ if (__ffs64(mask) + hweight64(mask) != fls64(mask)) {
+ /* not a contiguous mask ... not handled now! */
+ printk(KERN_DEBUG "not contiguous\n");
+ break;
+ }
+
+ m = local->hw.wiphy->perm_addr;
+ start = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) |
+ ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) |
+ ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8);
+
+ inc = 1ULL<<__ffs64(mask);
+ val = (start & mask);
+ addr = (start & ~mask) | (val & mask);
+ do {
+ bool used = false;
+
+ tmp_addr[5] = addr >> 0*8;
+ tmp_addr[4] = addr >> 1*8;
+ tmp_addr[3] = addr >> 2*8;
+ tmp_addr[2] = addr >> 3*8;
+ tmp_addr[1] = addr >> 4*8;
+ tmp_addr[0] = addr >> 5*8;
+
+ val += inc;
+
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (memcmp(tmp_addr, sdata->vif.addr,
+ ETH_ALEN) == 0) {
+ used = true;
+ break;
+ }
+ }
+
+ if (!used) {
+ memcpy(dev->perm_addr, tmp_addr, ETH_ALEN);
+ break;
+ }
+ addr = (start & ~mask) | (val & mask);
+ } while (addr != start);
+
+ break;
+ }
+
+ mutex_unlock(&local->iflist_mtx);
+}
+
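
The contiguity test above uses the identity that a mask whose set bits form a single run satisfies lowest-set-bit index + popcount == one past the highest set bit. A standalone userspace sketch, with GCC builtins standing in for the kernel's __ffs64()/hweight64()/fls64():

#include <stdint.h>
#include <stdio.h>

/* mask must be non-zero, as the is_zero_ether_addr() check guarantees above */
static int mask_is_contiguous(uint64_t mask)
{
	int lowest = __builtin_ctzll(mask);	   /* __ffs64() */
	int weight = __builtin_popcountll(mask);   /* hweight64() */
	int highest = 64 - __builtin_clzll(mask);  /* fls64() */

	return lowest + weight == highest;
}

int main(void)
{
	printf("%d\n", mask_is_contiguous(0x0f)); /* 00:...:0f -> 1, contiguous */
	printf("%d\n", mask_is_contiguous(0x05)); /* bits 0 and 2 -> 0, has a hole */
	return 0;
}
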
int ieee80211_if_add(struct ieee80211_local *local, const char *name,
struct net_device **new_dev, enum nl80211_iftype type,
struct vif_params *params)
@@ -845,8 +955,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
if (ret < 0)
goto fail;
- memcpy(ndev->dev_addr, local->hw.wiphy->perm_addr, ETH_ALEN);
- memcpy(ndev->perm_addr, ndev->dev_addr, ETH_ALEN);
+ ieee80211_assign_perm_addr(local, ndev, type);
+ memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN);
SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy));
/* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index e8f6e3b252d8..8d4b41787dcf 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -140,6 +140,7 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
struct ieee80211_sub_if_data,
u.ap);
+ key->conf.ap_addr = sdata->dev->dev_addr;
ret = drv_set_key(key->local, SET_KEY, sdata, sta, &key->conf);
if (!ret) {
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index b887e484ae04..22a384dfab65 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -71,7 +71,7 @@ void ieee80211_configure_filter(struct ieee80211_local *local)
spin_lock_bh(&local->filter_lock);
changed_flags = local->filter_flags ^ new_flags;
- mc = drv_prepare_multicast(local, local->mc_count, local->mc_list);
+ mc = drv_prepare_multicast(local, &local->mc_list);
spin_unlock_bh(&local->filter_lock);
/* be a bit nasty */
@@ -111,7 +111,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
channel_type = local->tmp_channel_type;
} else {
chan = local->oper_channel;
- channel_type = local->oper_channel_type;
+ channel_type = local->_oper_channel_type;
}
if (chan != local->hw.conf.channel ||
@@ -309,6 +309,8 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw)
{
struct ieee80211_local *local = hw_to_local(hw);
+ trace_api_restart_hw(local);
+
/* use this reason, __ieee80211_resume will unblock it */
ieee80211_stop_queues_by_reason(hw,
IEEE80211_QUEUE_STOP_REASON_SUSPEND);
@@ -388,6 +390,9 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
local->uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN;
INIT_LIST_HEAD(&local->interfaces);
+
+ __hw_addr_init(&local->mc_list);
+
mutex_init(&local->iflist_mtx);
mutex_init(&local->scan_mtx);
@@ -437,7 +442,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
struct ieee80211_local *local = hw_to_local(hw);
int result;
enum ieee80211_band band;
- int channels, i, j, max_bitrates;
+ int channels, max_bitrates;
bool supp_ht;
static const u32 cipher_suites[] = {
WLAN_CIPHER_SUITE_WEP40,
@@ -567,6 +572,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
local->hw.conf.listen_interval = local->hw.max_listen_interval;
+ local->hw.conf.dynamic_ps_forced_timeout = -1;
+
result = sta_info_start(local);
if (result < 0)
goto fail_sta_info;
@@ -601,21 +608,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
ieee80211_led_init(local);
- /* alloc internal scan request */
- i = 0;
- local->int_scan_req->ssids = &local->scan_ssid;
- local->int_scan_req->n_ssids = 1;
- for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
- if (!hw->wiphy->bands[band])
- continue;
- for (j = 0; j < hw->wiphy->bands[band]->n_channels; j++) {
- local->int_scan_req->channels[i] =
- &hw->wiphy->bands[band]->channels[j];
- i++;
- }
- }
- local->int_scan_req->n_channels = i;
-
local->network_latency_notifier.notifier_call =
ieee80211_max_network_latency;
result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY,
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 859ee5f3d941..bde81031727a 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -287,8 +287,6 @@ void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
*pos++ |= sdata->u.mesh.accepting_plinks ?
MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00;
*pos++ = 0x00;
-
- return;
}
u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, struct mesh_table *tbl)
@@ -601,10 +599,10 @@ static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata,
struct ieee80211_rx_status *rx_status)
{
switch (mgmt->u.action.category) {
- case MESH_PLINK_CATEGORY:
+ case WLAN_CATEGORY_MESH_PLINK:
mesh_rx_plink_frame(sdata, mgmt, len, rx_status);
break;
- case MESH_PATH_SEL_CATEGORY:
+ case WLAN_CATEGORY_MESH_PATH_SEL:
mesh_rx_path_sel_frame(sdata, mgmt, len);
break;
}
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 85562c59d7d6..c88087f1cd0f 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -209,8 +209,6 @@ struct mesh_rmc {
#define MESH_MAX_MPATHS 1024
/* Pending ANA approval */
-#define MESH_PLINK_CATEGORY 30
-#define MESH_PATH_SEL_CATEGORY 32
#define MESH_PATH_SEL_ACTION 0
/* PERR reason codes */
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index fefc45c4b4e8..0705018d8d1e 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -132,7 +132,7 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
/* BSSID == SA */
memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
- mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
+ mgmt->u.action.category = WLAN_CATEGORY_MESH_PATH_SEL;
mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION;
switch (action) {
@@ -225,7 +225,7 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
memcpy(mgmt->da, ra, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
/* BSSID is left zeroed, wildcard value */
- mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
+ mgmt->u.action.category = WLAN_CATEGORY_MESH_PATH_SEL;
mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION;
ie_len = 15;
pos = skb_put(skb, 2 + ie_len);
@@ -624,7 +624,6 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
fail:
rcu_read_unlock();
sdata->u.mesh.mshstats.dropped_frames_no_route++;
- return;
}
static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 7b7080e2b49f..3cd5f7b5d693 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -172,7 +172,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
memcpy(mgmt->da, da, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
/* BSSID is left zeroed, wildcard value */
- mgmt->u.action.category = MESH_PLINK_CATEGORY;
+ mgmt->u.action.category = WLAN_CATEGORY_MESH_PLINK;
mgmt->u.action.u.plink_action.action_code = action;
if (action == PLINK_CLOSE)
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 4aefa6dc3091..0839c4e8fd2e 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -47,6 +47,13 @@
*/
#define IEEE80211_PROBE_WAIT (HZ / 2)
+/*
+ * Weight given to the latest Beacon frame when calculating average signal
+ * strength for Beacon frames received in the current BSS. This must be
+ * between 1 and 15.
+ */
+#define IEEE80211_SIGNAL_AVE_WEIGHT 3
+
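
The average tracked with this weight is an exponentially weighted moving average kept in 1/16 signal units; a sketch of the update applied in ieee80211_rx_mgmt_beacon() below, with one worked step:

/* ave is in 1/16 dBm units; signal is the raw per-beacon value */
static inline int signal_ave_update(int ave, int signal)
{
	return (IEEE80211_SIGNAL_AVE_WEIGHT * signal * 16 +
		(16 - IEEE80211_SIGNAL_AVE_WEIGHT) * ave) / 16;
}

/*
 * Example: ave = -960 (-60 dBm), new beacon at -52 dBm:
 * (3 * -52 * 16 + 13 * -960) / 16 = (-2496 - 12480) / 16 = -936,
 * i.e. -58.5 dBm -- the average moves 3/16 of the way to the sample.
 */
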
#define TMR_RUNNING_TIMER 0
#define TMR_RUNNING_CHANSW 1
@@ -130,11 +137,14 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta;
u32 changed = 0;
u16 ht_opmode;
- bool enable_ht = true, ht_changed;
+ bool enable_ht = true;
+ enum nl80211_channel_type prev_chantype;
enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+ prev_chantype = sdata->vif.bss_conf.channel_type;
+
/* HT is not supported */
if (!sband->ht_cap.ht_supported)
enable_ht = false;
@@ -165,38 +175,37 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
}
}
- ht_changed = conf_is_ht(&local->hw.conf) != enable_ht ||
- channel_type != local->hw.conf.channel_type;
-
if (local->tmp_channel)
local->tmp_channel_type = channel_type;
- local->oper_channel_type = channel_type;
- if (ht_changed) {
- /* channel_type change automatically detected */
- ieee80211_hw_config(local, 0);
+ if (!ieee80211_set_channel_type(local, sdata, channel_type)) {
+ /* can only fail due to HT40+/- mismatch */
+ channel_type = NL80211_CHAN_HT20;
+ WARN_ON(!ieee80211_set_channel_type(local, sdata, channel_type));
+ }
+
+ /* channel_type change automatically detected */
+ ieee80211_hw_config(local, 0);
+ if (prev_chantype != channel_type) {
rcu_read_lock();
sta = sta_info_get(sdata, bssid);
if (sta)
rate_control_rate_update(local, sband, sta,
IEEE80211_RC_HT_CHANGED,
- local->oper_channel_type);
+ channel_type);
rcu_read_unlock();
- }
-
- /* disable HT */
- if (!enable_ht)
- return 0;
+ }
ht_opmode = le16_to_cpu(hti->operation_mode);
/* if bss configuration changed store the new one */
- if (!sdata->ht_opmode_valid ||
- sdata->vif.bss_conf.ht_operation_mode != ht_opmode) {
+ if (sdata->ht_opmode_valid != enable_ht ||
+ sdata->vif.bss_conf.ht_operation_mode != ht_opmode ||
+ prev_chantype != channel_type) {
changed |= BSS_CHANGED_HT;
sdata->vif.bss_conf.ht_operation_mode = ht_opmode;
- sdata->ht_opmode_valid = true;
+ sdata->ht_opmode_valid = enable_ht;
}
return changed;
@@ -206,7 +215,7 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
const u8 *bssid, u16 stype, u16 reason,
- void *cookie)
+ void *cookie, bool send_frame)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -243,7 +252,11 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len);
if (!(ifmgd->flags & IEEE80211_STA_MFP_ENABLED))
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
- ieee80211_tx_skb(sdata, skb);
+
+ if (send_frame)
+ ieee80211_tx_skb(sdata, skb);
+ else
+ kfree_skb(skb);
}
void ieee80211_send_pspoll(struct ieee80211_local *local,
@@ -329,7 +342,11 @@ static void ieee80211_chswitch_work(struct work_struct *work)
goto out;
sdata->local->oper_channel = sdata->local->csa_channel;
- ieee80211_hw_config(sdata->local, IEEE80211_CONF_CHANGE_CHANNEL);
+ if (!sdata->local->ops->channel_switch) {
+ /* call "hw_config" only if doing sw channel switch */
+ ieee80211_hw_config(sdata->local,
+ IEEE80211_CONF_CHANGE_CHANNEL);
+ }
/* XXX: shouldn't really modify cfg80211-owned data! */
ifmgd->associated->channel = sdata->local->oper_channel;
@@ -341,6 +358,29 @@ static void ieee80211_chswitch_work(struct work_struct *work)
mutex_unlock(&ifmgd->mtx);
}
+void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success)
+{
+ struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_if_managed *ifmgd;
+
+ sdata = vif_to_sdata(vif);
+ ifmgd = &sdata->u.mgd;
+
+ trace_api_chswitch_done(sdata, success);
+ if (!success) {
+ /*
+ * If the channel switch was not successful, stay
+ * around on the old channel. We currently lack
+ * good handling of this situation, possibly we
+ * should just drop the association.
+ */
+ sdata->local->csa_channel = sdata->local->oper_channel;
+ }
+
+ ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work);
+}
+EXPORT_SYMBOL(ieee80211_chswitch_done);
+
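
Drivers implementing the channel_switch op report completion back through this export; a hedged driver-side sketch (the completion callback name is hypothetical):

/* hypothetical driver completion path, run after drv_channel_switch() */
static void example_driver_csa_complete(struct ieee80211_vif *vif,
					bool hw_switch_ok)
{
	/*
	 * success == false makes mac80211 stay on the old channel
	 * (csa_channel is reset to oper_channel above); either way
	 * chswitch_work is queued to finish the state update.
	 */
	ieee80211_chswitch_done(vif, hw_switch_ok);
}
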
static void ieee80211_chswitch_timer(unsigned long data)
{
struct ieee80211_sub_if_data *sdata =
@@ -357,7 +397,8 @@ static void ieee80211_chswitch_timer(unsigned long data)
void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
struct ieee80211_channel_sw_ie *sw_elem,
- struct ieee80211_bss *bss)
+ struct ieee80211_bss *bss,
+ u64 timestamp)
{
struct cfg80211_bss *cbss =
container_of((void *)bss, struct cfg80211_bss, priv);
@@ -385,10 +426,29 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
sdata->local->csa_channel = new_ch;
+ if (sdata->local->ops->channel_switch) {
+ /* use driver's channel switch callback */
+ struct ieee80211_channel_switch ch_switch;
+ memset(&ch_switch, 0, sizeof(ch_switch));
+ ch_switch.timestamp = timestamp;
+ if (sw_elem->mode) {
+ ch_switch.block_tx = true;
+ ieee80211_stop_queues_by_reason(&sdata->local->hw,
+ IEEE80211_QUEUE_STOP_REASON_CSA);
+ }
+ ch_switch.channel = new_ch;
+ ch_switch.count = sw_elem->count;
+ ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
+ drv_channel_switch(sdata->local, &ch_switch);
+ return;
+ }
+
+ /* channel switch handled in software */
if (sw_elem->count <= 1) {
ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work);
} else {
- ieee80211_stop_queues_by_reason(&sdata->local->hw,
+ if (sw_elem->mode)
+ ieee80211_stop_queues_by_reason(&sdata->local->hw,
IEEE80211_QUEUE_STOP_REASON_CSA);
ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
mod_timer(&ifmgd->chswitch_timer,
@@ -467,6 +527,7 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
{
struct ieee80211_sub_if_data *sdata, *found = NULL;
int count = 0;
+ int timeout;
if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS)) {
local->ps_sdata = NULL;
@@ -495,11 +556,31 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
s32 beaconint_us;
if (latency < 0)
- latency = pm_qos_requirement(PM_QOS_NETWORK_LATENCY);
+ latency = pm_qos_request(PM_QOS_NETWORK_LATENCY);
beaconint_us = ieee80211_tu_to_usec(
found->vif.bss_conf.beacon_int);
+ timeout = local->hw.conf.dynamic_ps_forced_timeout;
+ if (timeout < 0) {
+ /*
+ * The 2 second value is there for compatibility until
+ * the PM_QOS_NETWORK_LATENCY is configured with real
+ * values.
+ */
+ if (latency == 2000000000)
+ timeout = 100;
+ else if (latency <= 50000)
+ timeout = 300;
+ else if (latency <= 100000)
+ timeout = 100;
+ else if (latency <= 500000)
+ timeout = 50;
+ else
+ timeout = 0;
+ }
+ local->hw.conf.dynamic_ps_timeout = timeout;
+
if (beaconint_us > latency) {
local->ps_sdata = NULL;
} else {
@@ -592,6 +673,9 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
int count;
u8 *pos, uapsd_queues = 0;
+ if (!local->ops->conf_tx)
+ return;
+
if (local->hw.queues < 4)
return;
@@ -666,11 +750,15 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
params.aifs, params.cw_min, params.cw_max, params.txop,
params.uapsd);
#endif
- if (drv_conf_tx(local, queue, &params) && local->ops->conf_tx)
+ if (drv_conf_tx(local, queue, &params))
printk(KERN_DEBUG "%s: failed to set TX queue "
"parameters for queue %d\n",
wiphy_name(local->hw.wiphy), queue);
}
+
+ /* enable WMM or activate new settings */
+ local->hw.conf.flags |= IEEE80211_CONF_QOS;
+ drv_config(local, IEEE80211_CONF_CHANGE_QOS);
}
static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
@@ -731,6 +819,8 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
sdata->u.mgd.associated = cbss;
memcpy(sdata->u.mgd.bssid, cbss->bssid, ETH_ALEN);
+ sdata->u.mgd.flags |= IEEE80211_STA_RESET_SIGNAL_AVE;
+
/* just to be sure */
sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL |
IEEE80211_STA_BEACON_POLL);
@@ -756,6 +846,11 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
/* And the BSSID changed - we're associated now */
bss_info_changed |= BSS_CHANGED_BSSID;
+ /* Tell the driver to monitor connection quality (if supported) */
+ if ((local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI) &&
+ sdata->vif.bss_conf.cqm_rssi_thold)
+ bss_info_changed |= BSS_CHANGED_CQM;
+
ieee80211_bss_info_change_notify(sdata, bss_info_changed);
mutex_lock(&local->iflist_mtx);
@@ -767,7 +862,8 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
netif_carrier_on(sdata->dev);
}
-static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata)
+static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
+ bool remove_sta)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
@@ -819,7 +915,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata)
ieee80211_set_wmm_default(sdata);
/* channel(_type) changes are handled by ieee80211_hw_config */
- local->oper_channel_type = NL80211_CHAN_NO_HT;
+ WARN_ON(!ieee80211_set_channel_type(local, sdata, NL80211_CHAN_NO_HT));
/* on the next assoc, re-program HT parameters */
sdata->ht_opmode_valid = false;
@@ -836,11 +932,12 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata)
ieee80211_hw_config(local, config_changed);
- /* And the BSSID changed -- not very interesting here */
- changed |= BSS_CHANGED_BSSID;
+ /* The BSSID (not really interesting) and HT changed */
+ changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT;
ieee80211_bss_info_change_notify(sdata, changed);
- sta_info_destroy_addr(sdata, bssid);
+ if (remove_sta)
+ sta_info_destroy_addr(sdata, bssid);
}
void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
@@ -857,6 +954,9 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
if (is_multicast_ether_addr(hdr->addr1))
return;
+ if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
+ return;
+
mod_timer(&sdata->u.mgd.conn_mon_timer,
round_jiffies_up(jiffies + IEEE80211_CONNECTION_IDLE_TIME));
}
@@ -934,23 +1034,72 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
mutex_unlock(&ifmgd->mtx);
}
-void ieee80211_beacon_loss_work(struct work_struct *work)
+static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ struct ieee80211_local *local = sdata->local;
+ u8 bssid[ETH_ALEN];
+
+ mutex_lock(&ifmgd->mtx);
+ if (!ifmgd->associated) {
+ mutex_unlock(&ifmgd->mtx);
+ return;
+ }
+
+ memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
+
+ printk(KERN_DEBUG "Connection to AP %pM lost.\n", bssid);
+
+ ieee80211_set_disassoc(sdata, true);
+ ieee80211_recalc_idle(local);
+ mutex_unlock(&ifmgd->mtx);
+ /*
+ * must be outside lock due to cfg80211,
+ * but that's not a problem.
+ */
+ ieee80211_send_deauth_disassoc(sdata, bssid,
+ IEEE80211_STYPE_DEAUTH,
+ WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
+ NULL, true);
+}
+
+void ieee80211_beacon_connection_loss_work(struct work_struct *work)
{
struct ieee80211_sub_if_data *sdata =
container_of(work, struct ieee80211_sub_if_data,
- u.mgd.beacon_loss_work);
+ u.mgd.beacon_connection_loss_work);
- ieee80211_mgd_probe_ap(sdata, true);
+ if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
+ __ieee80211_connection_loss(sdata);
+ else
+ ieee80211_mgd_probe_ap(sdata, true);
}
void ieee80211_beacon_loss(struct ieee80211_vif *vif)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_hw *hw = &sdata->local->hw;
- ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.beacon_loss_work);
+ trace_api_beacon_loss(sdata);
+
+ WARN_ON(hw->flags & IEEE80211_HW_CONNECTION_MONITOR);
+ ieee80211_queue_work(hw, &sdata->u.mgd.beacon_connection_loss_work);
}
EXPORT_SYMBOL(ieee80211_beacon_loss);
+void ieee80211_connection_loss(struct ieee80211_vif *vif)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_hw *hw = &sdata->local->hw;
+
+ trace_api_connection_loss(sdata);
+
+ WARN_ON(!(hw->flags & IEEE80211_HW_CONNECTION_MONITOR));
+ ieee80211_queue_work(hw, &sdata->u.mgd.beacon_connection_loss_work);
+}
+EXPORT_SYMBOL(ieee80211_connection_loss);
+
+
static enum rx_mgmt_action __must_check
ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt, size_t len)
@@ -971,7 +1120,7 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
printk(KERN_DEBUG "%s: deauthenticated from %pM (Reason: %u)\n",
sdata->name, bssid, reason_code);
- ieee80211_set_disassoc(sdata);
+ ieee80211_set_disassoc(sdata, true);
ieee80211_recalc_idle(sdata->local);
return RX_MGMT_CFG80211_DEAUTH;
@@ -1001,7 +1150,7 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
printk(KERN_DEBUG "%s: disassociated from %pM (Reason: %u)\n",
sdata->name, mgmt->sa, reason_code);
- ieee80211_set_disassoc(sdata);
+ ieee80211_set_disassoc(sdata, true);
ieee80211_recalc_idle(sdata->local);
return RX_MGMT_CFG80211_DISASSOC;
}
@@ -1215,7 +1364,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
ETH_ALEN) == 0)) {
struct ieee80211_channel_sw_ie *sw_elem =
(struct ieee80211_channel_sw_ie *)elems->ch_switch_elem;
- ieee80211_sta_process_chanswitch(sdata, sw_elem, bss);
+ ieee80211_sta_process_chanswitch(sdata, sw_elem,
+ bss, rx_status->mactime);
}
}
@@ -1254,12 +1404,17 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
mutex_lock(&sdata->local->iflist_mtx);
ieee80211_recalc_ps(sdata->local, -1);
mutex_unlock(&sdata->local->iflist_mtx);
+
+ if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
+ return;
+
/*
* We've received a probe response, but are not sure whether
* we have or will be receiving any beacons or data, so let's
* schedule the timers again, just in case.
*/
mod_beacon_timer(sdata);
+
mod_timer(&ifmgd->conn_mon_timer,
round_jiffies_up(jiffies +
IEEE80211_CONNECTION_IDLE_TIME));
@@ -1293,6 +1448,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
struct ieee80211_rx_status *rx_status)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
size_t baselen;
struct ieee802_11_elems elems;
struct ieee80211_local *local = sdata->local;
@@ -1328,6 +1484,41 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
if (memcmp(bssid, mgmt->bssid, ETH_ALEN) != 0)
return;
+ /* Track average RSSI from the Beacon frames of the current AP */
+ ifmgd->last_beacon_signal = rx_status->signal;
+ if (ifmgd->flags & IEEE80211_STA_RESET_SIGNAL_AVE) {
+ ifmgd->flags &= ~IEEE80211_STA_RESET_SIGNAL_AVE;
+ ifmgd->ave_beacon_signal = rx_status->signal;
+ ifmgd->last_cqm_event_signal = 0;
+ } else {
+ ifmgd->ave_beacon_signal =
+ (IEEE80211_SIGNAL_AVE_WEIGHT * rx_status->signal * 16 +
+ (16 - IEEE80211_SIGNAL_AVE_WEIGHT) *
+ ifmgd->ave_beacon_signal) / 16;
+ }
+ if (bss_conf->cqm_rssi_thold &&
+ !(local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI)) {
+ int sig = ifmgd->ave_beacon_signal / 16;
+ int last_event = ifmgd->last_cqm_event_signal;
+ int thold = bss_conf->cqm_rssi_thold;
+ int hyst = bss_conf->cqm_rssi_hyst;
+ if (sig < thold &&
+ (last_event == 0 || sig < last_event - hyst)) {
+ ifmgd->last_cqm_event_signal = sig;
+ ieee80211_cqm_rssi_notify(
+ &sdata->vif,
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
+ GFP_KERNEL);
+ } else if (sig > thold &&
+ (last_event == 0 || sig > last_event + hyst)) {
+ ifmgd->last_cqm_event_signal = sig;
+ ieee80211_cqm_rssi_notify(
+ &sdata->vif,
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
+ GFP_KERNEL);
+ }
+ }
+
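
The rule above only re-fires once the average both crosses the threshold and moves at least hyst past the last reported level; a compact restatement with a worked run (helper name is illustrative):

/* returns -1 for a LOW event, +1 for HIGH, 0 for no event */
static int cqm_event(int sig, int last_event, int thold, int hyst)
{
	if (sig < thold && (last_event == 0 || sig < last_event - hyst))
		return -1;
	if (sig > thold && (last_event == 0 || sig > last_event + hyst))
		return 1;
	return 0;
}

/*
 * thold = -70, hyst = 3: a first average of -72 fires LOW; a later -71
 * stays quiet (-71 >= -72 - 3); recovery to -66 fires HIGH (-66 > -72 + 3).
 */
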
if (ifmgd->flags & IEEE80211_STA_BEACON_POLL) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
if (net_ratelimit()) {
@@ -1506,7 +1697,8 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
ieee80211_sta_process_chanswitch(sdata,
&mgmt->u.action.u.chan_switch.sw_elem,
- (void *)ifmgd->associated->priv);
+ (void *)ifmgd->associated->priv,
+ rx_status->mactime);
break;
}
mutex_unlock(&ifmgd->mtx);
@@ -1613,7 +1805,7 @@ static void ieee80211_sta_work(struct work_struct *work)
printk(KERN_DEBUG "No probe response from AP %pM"
" after %dms, disconnecting.\n",
bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ);
- ieee80211_set_disassoc(sdata);
+ ieee80211_set_disassoc(sdata, true);
ieee80211_recalc_idle(local);
mutex_unlock(&ifmgd->mtx);
/*
@@ -1623,7 +1815,7 @@ static void ieee80211_sta_work(struct work_struct *work)
ieee80211_send_deauth_disassoc(sdata, bssid,
IEEE80211_STYPE_DEAUTH,
WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
- NULL);
+ NULL, true);
mutex_lock(&ifmgd->mtx);
}
}
@@ -1640,7 +1832,8 @@ static void ieee80211_sta_bcn_mon_timer(unsigned long data)
if (local->quiescing)
return;
- ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.beacon_loss_work);
+ ieee80211_queue_work(&sdata->local->hw,
+ &sdata->u.mgd.beacon_connection_loss_work);
}
static void ieee80211_sta_conn_mon_timer(unsigned long data)
@@ -1692,7 +1885,7 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
*/
cancel_work_sync(&ifmgd->work);
- cancel_work_sync(&ifmgd->beacon_loss_work);
+ cancel_work_sync(&ifmgd->beacon_connection_loss_work);
if (del_timer_sync(&ifmgd->timer))
set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
@@ -1726,7 +1919,8 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
INIT_WORK(&ifmgd->work, ieee80211_sta_work);
INIT_WORK(&ifmgd->monitor_work, ieee80211_sta_monitor_work);
INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work);
- INIT_WORK(&ifmgd->beacon_loss_work, ieee80211_beacon_loss_work);
+ INIT_WORK(&ifmgd->beacon_connection_loss_work,
+ ieee80211_beacon_connection_loss_work);
setup_timer(&ifmgd->timer, ieee80211_sta_timer,
(unsigned long) sdata);
setup_timer(&ifmgd->bcn_mon_timer, ieee80211_sta_bcn_mon_timer,
@@ -1805,6 +1999,9 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
struct ieee80211_work *wk;
u16 auth_alg;
+ if (req->local_state_change)
+ return 0; /* no need to update mac80211 state */
+
switch (req->auth_type) {
case NL80211_AUTHTYPE_OPEN_SYSTEM:
auth_alg = WLAN_AUTH_OPEN;
@@ -1913,7 +2110,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
}
/* Trying to reassociate - clear previous association state */
- ieee80211_set_disassoc(sdata);
+ ieee80211_set_disassoc(sdata, true);
}
mutex_unlock(&ifmgd->mtx);
@@ -2017,7 +2214,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
if (ifmgd->associated == req->bss) {
bssid = req->bss->bssid;
- ieee80211_set_disassoc(sdata);
+ ieee80211_set_disassoc(sdata, true);
mutex_unlock(&ifmgd->mtx);
} else {
bool not_auth_yet = false;
@@ -2030,7 +2227,8 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
continue;
if (wk->type != IEEE80211_WORK_DIRECT_PROBE &&
- wk->type != IEEE80211_WORK_AUTH)
+ wk->type != IEEE80211_WORK_AUTH &&
+ wk->type != IEEE80211_WORK_ASSOC)
continue;
if (memcmp(req->bss->bssid, wk->filter_ta, ETH_ALEN))
@@ -2060,9 +2258,9 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
printk(KERN_DEBUG "%s: deauthenticating from %pM by local choice (reason=%d)\n",
sdata->name, bssid, req->reason_code);
- ieee80211_send_deauth_disassoc(sdata, bssid,
- IEEE80211_STYPE_DEAUTH, req->reason_code,
- cookie);
+ ieee80211_send_deauth_disassoc(sdata, bssid, IEEE80211_STYPE_DEAUTH,
+ req->reason_code, cookie,
+ !req->local_state_change);
ieee80211_recalc_idle(sdata->local);
@@ -2074,6 +2272,7 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
void *cookie)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ u8 bssid[ETH_ALEN];
mutex_lock(&ifmgd->mtx);
@@ -2091,13 +2290,15 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
printk(KERN_DEBUG "%s: disassociating from %pM by local choice (reason=%d)\n",
sdata->name, req->bss->bssid, req->reason_code);
- ieee80211_set_disassoc(sdata);
+ memcpy(bssid, req->bss->bssid, ETH_ALEN);
+ ieee80211_set_disassoc(sdata, false);
mutex_unlock(&ifmgd->mtx);
ieee80211_send_deauth_disassoc(sdata, req->bss->bssid,
IEEE80211_STYPE_DISASSOC, req->reason_code,
- cookie);
+ cookie, !req->local_state_change);
+ sta_info_destroy_addr(sdata, bssid);
ieee80211_recalc_idle(sdata->local);
@@ -2117,7 +2318,7 @@ int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata,
if ((chan != local->tmp_channel ||
channel_type != local->tmp_channel_type) &&
(chan != local->oper_channel ||
- channel_type != local->oper_channel_type))
+ channel_type != local->_oper_channel_type))
return -EBUSY;
skb = dev_alloc_skb(local->hw.extra_tx_headroom + len);
@@ -2138,3 +2339,15 @@ int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata,
*cookie = (unsigned long) skb;
return 0;
}
+
+void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
+ enum nl80211_cqm_rssi_threshold_event rssi_event,
+ gfp_t gfp)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+
+ trace_api_cqm_rssi_notify(sdata, rssi_event);
+
+ cfg80211_cqm_rssi_notify(sdata->dev, rssi_event, gfp);
+}
+EXPORT_SYMBOL(ieee80211_cqm_rssi_notify);
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 0e64484e861c..75202b295a4e 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -46,7 +46,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
list_for_each_entry_rcu(sta, &local->sta_list, list) {
- set_sta_flags(sta, WLAN_STA_SUSPEND);
+ set_sta_flags(sta, WLAN_STA_BLOCK_BA);
ieee80211_sta_tear_down_BA_sessions(sta);
}
}
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 818abfae9007..f65ce6dcc8e2 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -542,7 +542,7 @@ minstrel_free(void *priv)
kfree(priv);
}
-static struct rate_control_ops mac80211_minstrel = {
+struct rate_control_ops mac80211_minstrel = {
.name = "minstrel",
.tx_status = minstrel_tx_status,
.get_rate = minstrel_get_rate,
diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h
index 38bf4168fc3a..0f5a83370aa6 100644
--- a/net/mac80211/rc80211_minstrel.h
+++ b/net/mac80211/rc80211_minstrel.h
@@ -80,7 +80,18 @@ struct minstrel_priv {
unsigned int lookaround_rate_mrr;
};
+struct minstrel_debugfs_info {
+ size_t len;
+ char buf[];
+};
+
+extern struct rate_control_ops mac80211_minstrel;
void minstrel_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir);
void minstrel_remove_sta_debugfs(void *priv, void *priv_sta);
+/* debugfs */
+int minstrel_stats_open(struct inode *inode, struct file *file);
+ssize_t minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *ppos);
+int minstrel_stats_release(struct inode *inode, struct file *file);
+
#endif
diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c
index 0e1f12b1b6dd..241e76f3fdf2 100644
--- a/net/mac80211/rc80211_minstrel_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_debugfs.c
@@ -53,21 +53,15 @@
#include <net/mac80211.h>
#include "rc80211_minstrel.h"
-struct minstrel_stats_info {
- struct minstrel_sta_info *mi;
- char buf[4096];
- size_t len;
-};
-
-static int
+int
minstrel_stats_open(struct inode *inode, struct file *file)
{
struct minstrel_sta_info *mi = inode->i_private;
- struct minstrel_stats_info *ms;
+ struct minstrel_debugfs_info *ms;
unsigned int i, tp, prob, eprob;
char *p;
- ms = kmalloc(sizeof(*ms), GFP_KERNEL);
+ ms = kmalloc(sizeof(*ms) + 4096, GFP_KERNEL);
if (!ms)
return -ENOMEM;
@@ -107,36 +101,19 @@ minstrel_stats_open(struct inode *inode, struct file *file)
return 0;
}
-static ssize_t
-minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *o)
+ssize_t
+minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *ppos)
{
- struct minstrel_stats_info *ms;
- char *src;
+ struct minstrel_debugfs_info *ms;
ms = file->private_data;
- src = ms->buf;
-
- len = min(len, ms->len);
- if (len <= *o)
- return 0;
-
- src += *o;
- len -= *o;
- *o += len;
-
- if (copy_to_user(buf, src, len))
- return -EFAULT;
-
- return len;
+ return simple_read_from_buffer(buf, len, ppos, ms->buf, ms->len);
}
-static int
+int
minstrel_stats_release(struct inode *inode, struct file *file)
{
- struct minstrel_stats_info *ms = file->private_data;
-
- kfree(ms);
-
+ kfree(file->private_data);
return 0;
}
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 04ea07f0e78a..6e2a7bcd8cb8 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -39,7 +39,7 @@ static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
{
if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
if (likely(skb->len > FCS_LEN))
- skb_trim(skb, skb->len - FCS_LEN);
+ __pskb_trim(skb, skb->len - FCS_LEN);
else {
/* driver bug */
WARN_ON(1);
@@ -81,8 +81,6 @@ ieee80211_rx_radiotap_len(struct ieee80211_local *local,
len += 8;
if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
len += 1;
- if (local->hw.flags & IEEE80211_HW_NOISE_DBM)
- len += 1;
if (len & 1) /* padding for RX_FLAGS if necessary */
len++;
@@ -179,14 +177,6 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
pos++;
}
- /* IEEE80211_RADIOTAP_DBM_ANTNOISE */
- if (local->hw.flags & IEEE80211_HW_NOISE_DBM) {
- *pos = status->noise;
- rthdr->it_present |=
- cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
- pos++;
- }
-
/* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
/* IEEE80211_RADIOTAP_ANTENNA */
@@ -236,6 +226,12 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
present_fcs_len = FCS_LEN;
+ /* make sure hdr->frame_control is on the linear part */
+ if (!pskb_may_pull(origskb, 2)) {
+ dev_kfree_skb(origskb);
+ return NULL;
+ }
+
if (!local->monitors) {
if (should_drop_frame(origskb, present_fcs_len)) {
dev_kfree_skb(origskb);
@@ -493,7 +489,7 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
if (ieee80211_is_action(hdr->frame_control)) {
mgmt = (struct ieee80211_mgmt *)hdr;
- if (mgmt->u.action.category != MESH_PLINK_CATEGORY)
+ if (mgmt->u.action.category != WLAN_CATEGORY_MESH_PLINK)
return RX_DROP_MONITOR;
return RX_CONTINUE;
}
@@ -723,14 +719,16 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
- if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL)
- goto dont_reorder;
+ spin_lock(&sta->lock);
+
+ if (!sta->ampdu_mlme.tid_active_rx[tid])
+ goto dont_reorder_unlock;
tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
/* qos null data frames are excluded */
if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
- goto dont_reorder;
+ goto dont_reorder_unlock;
/* new, potentially un-ordered, ampdu frame - process it */
@@ -742,15 +740,20 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
/* if this mpdu is fragmented - terminate rx aggregation session */
sc = le16_to_cpu(hdr->seq_ctrl);
if (sc & IEEE80211_SCTL_FRAG) {
- ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr,
- tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP);
+ spin_unlock(&sta->lock);
+ __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT,
+ WLAN_REASON_QSTA_REQUIRE_SETUP);
dev_kfree_skb(skb);
return;
}
- if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames))
+ if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames)) {
+ spin_unlock(&sta->lock);
return;
+ }
+ dont_reorder_unlock:
+ spin_unlock(&sta->lock);
dont_reorder:
__skb_queue_tail(frames, skb);
}
@@ -897,6 +900,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
rx->key = key;
return RX_CONTINUE;
} else {
+ u8 keyid;
/*
* The device doesn't give us the IV so we won't be
* able to look up the key. That's ok though, we
@@ -919,7 +923,8 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
* no need to call ieee80211_wep_get_keyidx,
* it verifies a bunch of things we've done already
*/
- keyidx = rx->skb->data[hdrlen + 3] >> 6;
+ skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1);
+ keyidx = keyid >> 6;
rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
@@ -940,6 +945,11 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
return RX_DROP_MONITOR;
}
+ if (skb_linearize(rx->skb))
+ return RX_DROP_UNUSABLE;
+
+ hdr = (struct ieee80211_hdr *)rx->skb->data;
+
/* Check for weak IVs if possible */
if (rx->sta && rx->key->conf.alg == ALG_WEP &&
ieee80211_is_data(hdr->frame_control) &&
@@ -1078,7 +1088,6 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
sta->rx_fragments++;
sta->rx_bytes += rx->skb->len;
sta->last_signal = status->signal;
- sta->last_noise = status->noise;
/*
* Change STA power saving mode only at the end of a frame
@@ -1241,6 +1250,15 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
}
I802_DEBUG_INC(rx->local->rx_handlers_fragments);
+ if (skb_linearize(rx->skb))
+ return RX_DROP_UNUSABLE;
+
+ /*
+ * skb_linearize() might change the skb->data and
+ * previously cached variables (in this case, hdr) need to
+ * be refreshed with the new data.
+ */
+ hdr = (struct ieee80211_hdr *)rx->skb->data;
seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
if (frag == 0) {
@@ -1406,21 +1424,24 @@ static int
ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
__le16 fc = hdr->frame_control;
- int res;
- res = ieee80211_drop_unencrypted(rx, fc);
- if (unlikely(res))
- return res;
+ /*
+ * Pass through unencrypted frames if the hardware has
+ * decrypted them already.
+ */
+ if (status->flag & RX_FLAG_DECRYPTED)
+ return 0;
if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) {
- if (unlikely(ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
+ if (unlikely(!ieee80211_has_protected(fc) &&
+ ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
rx->key))
return -EACCES;
/* BIP does not use Protected field, so need to check MMIE */
if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
- ieee80211_get_mmie_keyidx(rx->skb) < 0 &&
- rx->key))
+ ieee80211_get_mmie_keyidx(rx->skb) < 0))
return -EACCES;
/*
* When using MFP, Action frames are not allowed prior to
@@ -1598,6 +1619,9 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
skb->dev = dev;
__skb_queue_head_init(&frame_list);
+ if (skb_linearize(skb))
+ return RX_DROP_UNUSABLE;
+
ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
rx->sdata->vif.type,
rx->local->hw.extra_tx_headroom);
@@ -1796,10 +1820,12 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
if (ieee80211_is_back_req(bar->frame_control)) {
if (!rx->sta)
return RX_DROP_MONITOR;
+ spin_lock(&rx->sta->lock);
tid = le16_to_cpu(bar->control) >> 12;
- if (rx->sta->ampdu_mlme.tid_state_rx[tid]
- != HT_AGG_STATE_OPERATIONAL)
+ if (!rx->sta->ampdu_mlme.tid_active_rx[tid]) {
+ spin_unlock(&rx->sta->lock);
return RX_DROP_MONITOR;
+ }
tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid];
start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4;
@@ -1813,6 +1839,7 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num,
frames);
kfree_skb(skb);
+ spin_unlock(&rx->sta->lock);
return RX_QUEUED;
}
@@ -1974,8 +2001,8 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
goto handled;
}
break;
- case MESH_PLINK_CATEGORY:
- case MESH_PATH_SEL_CATEGORY:
+ case WLAN_CATEGORY_MESH_PLINK:
+ case WLAN_CATEGORY_MESH_PATH_SEL:
if (ieee80211_vif_is_mesh(&sdata->vif))
return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
break;
@@ -2372,29 +2399,42 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_sub_if_data *sdata;
struct ieee80211_hdr *hdr;
+ __le16 fc;
struct ieee80211_rx_data rx;
int prepares;
struct ieee80211_sub_if_data *prev = NULL;
struct sk_buff *skb_new;
struct sta_info *sta, *tmp;
bool found_sta = false;
+ int err = 0;
- hdr = (struct ieee80211_hdr *)skb->data;
+ fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
memset(&rx, 0, sizeof(rx));
rx.skb = skb;
rx.local = local;
- if (ieee80211_is_data(hdr->frame_control) || ieee80211_is_mgmt(hdr->frame_control))
+ if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
local->dot11ReceivedFragmentCount++;
if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
test_bit(SCAN_OFF_CHANNEL, &local->scanning)))
rx.flags |= IEEE80211_RX_IN_SCAN;
+ if (ieee80211_is_mgmt(fc))
+ err = skb_linearize(skb);
+ else
+ err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
+
+ if (err) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ hdr = (struct ieee80211_hdr *)skb->data;
ieee80211_parse_qos(&rx);
ieee80211_verify_alignment(&rx);
- if (ieee80211_is_data(hdr->frame_control)) {
+ if (ieee80211_is_data(fc)) {
for_each_sta_info(local, hdr->addr2, sta, tmp) {
rx.sta = sta;
found_sta = true;
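
One of the rx.c hunks above switches the WEP key-index lookup from direct skb->data indexing to skb_copy_bits(), since that byte may not sit in the linear part of the skb. The shift itself is unchanged: the fourth IV byte carries the 2-bit key ID in its top two bits. A toy, compile-and-run illustration with invented frame bytes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* 3-byte WEP IV followed by the KeyID byte (key index 2) */
        uint8_t iv[4] = { 0xaa, 0xbb, 0xcc, 2 << 6 };
        unsigned keyidx = iv[3] >> 6;

        printf("key index = %u\n", keyidx);     /* prints 2 */
        return 0;
}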
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 85507bd9e341..e1b0be7a57b9 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -14,6 +14,8 @@
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
+#include <linux/pm_qos_params.h>
+#include <net/sch_generic.h>
#include <linux/slab.h>
#include <net/mac80211.h>
@@ -83,7 +85,7 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
{
struct cfg80211_bss *cbss;
struct ieee80211_bss *bss;
- int clen;
+ int clen, srlen;
s32 signal = 0;
if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
@@ -112,23 +114,24 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
bss->dtim_period = tim_ie->dtim_period;
}
- bss->supp_rates_len = 0;
+ /* replace old supported rates if we get new values */
+ srlen = 0;
if (elems->supp_rates) {
- clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len;
+ clen = IEEE80211_MAX_SUPP_RATES;
if (clen > elems->supp_rates_len)
clen = elems->supp_rates_len;
- memcpy(&bss->supp_rates[bss->supp_rates_len], elems->supp_rates,
- clen);
- bss->supp_rates_len += clen;
+ memcpy(bss->supp_rates, elems->supp_rates, clen);
+ srlen += clen;
}
if (elems->ext_supp_rates) {
- clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len;
+ clen = IEEE80211_MAX_SUPP_RATES - srlen;
if (clen > elems->ext_supp_rates_len)
clen = elems->ext_supp_rates_len;
- memcpy(&bss->supp_rates[bss->supp_rates_len],
- elems->ext_supp_rates, clen);
- bss->supp_rates_len += clen;
+ memcpy(bss->supp_rates + srlen, elems->ext_supp_rates, clen);
+ srlen += clen;
}
+ if (srlen)
+ bss->supp_rates_len = srlen;
bss->wmm_used = elems->wmm_param || elems->wmm_info;
bss->uapsd_supported = is_uapsd_supported(elems);
@@ -246,6 +249,8 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
struct ieee80211_local *local = hw_to_local(hw);
bool was_hw_scan;
+ trace_api_scan_completed(local, aborted);
+
mutex_lock(&local->scan_mtx);
/*
@@ -322,6 +327,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
ieee80211_offchannel_stop_beaconing(local);
+ local->leave_oper_channel_time = 0;
local->next_scan_state = SCAN_DECISION;
local->scan_channel_idx = 0;
@@ -406,7 +412,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
if (local->ops->hw_scan) {
WARN_ON(!ieee80211_prep_hw_scan(local));
- rc = drv_hw_scan(local, local->hw_scan_req);
+ rc = drv_hw_scan(local, sdata, local->hw_scan_req);
} else
rc = ieee80211_start_sw_scan(local);
@@ -426,11 +432,28 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
return rc;
}
+static unsigned long
+ieee80211_scan_get_channel_time(struct ieee80211_channel *chan)
+{
+ /*
+ * TODO: channel switching also consumes quite some time,
+ * add that delay as well to get a better estimation
+ */
+ if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+ return IEEE80211_PASSIVE_CHANNEL_TIME;
+ return IEEE80211_PROBE_DELAY + IEEE80211_CHANNEL_TIME;
+}
+
static int ieee80211_scan_state_decision(struct ieee80211_local *local,
unsigned long *next_delay)
{
bool associated = false;
+ bool tx_empty = true;
+ bool bad_latency;
+ bool listen_int_exceeded;
+ unsigned long min_beacon_int = 0;
struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_channel *next_chan;
/* if no more bands/channels left, complete scan and advance to the idle state */
if (local->scan_channel_idx >= local->scan_req->n_channels) {
@@ -438,7 +461,11 @@ static int ieee80211_scan_state_decision(struct ieee80211_local *local,
return 1;
}
- /* check if at least one STA interface is associated */
+ /*
+ * check if at least one STA interface is associated,
+ * check if at least one STA interface has pending tx frames
+ * and grab the lowest used beacon interval
+ */
mutex_lock(&local->iflist_mtx);
list_for_each_entry(sdata, &local->interfaces, list) {
if (!ieee80211_sdata_running(sdata))
@@ -447,7 +474,16 @@ static int ieee80211_scan_state_decision(struct ieee80211_local *local,
if (sdata->vif.type == NL80211_IFTYPE_STATION) {
if (sdata->u.mgd.associated) {
associated = true;
- break;
+
+ if (sdata->vif.bss_conf.beacon_int <
+ min_beacon_int || min_beacon_int == 0)
+ min_beacon_int =
+ sdata->vif.bss_conf.beacon_int;
+
+ if (!qdisc_all_tx_empty(sdata->dev)) {
+ tx_empty = false;
+ break;
+ }
}
}
}
@@ -456,11 +492,34 @@ static int ieee80211_scan_state_decision(struct ieee80211_local *local,
if (local->scan_channel) {
/*
* we're currently scanning a different channel, let's
- * switch back to the operating channel now if at least
- * one interface is associated. Otherwise just scan the
- * next channel
+ * see if we can scan another channel without interfering
+ * with the current traffic situation.
+ *
+ * Since we don't know if the AP has pending frames for us
+ * we can only check for our tx queues and use the current
+ * pm_qos requirements for rx. Hence, if no tx traffic occurs
+ * at all we will scan as many channels in a row as the pm_qos
+ * latency allows us to. Additionally we also check for the
+ * currently negotiated listen interval to prevent losing
+ * frames unnecessarily.
+ *
+ * Otherwise switch back to the operating channel.
*/
- if (associated)
+ next_chan = local->scan_req->channels[local->scan_channel_idx];
+
+ bad_latency = time_after(jiffies +
+ ieee80211_scan_get_channel_time(next_chan),
+ local->leave_oper_channel_time +
+ usecs_to_jiffies(pm_qos_request(PM_QOS_NETWORK_LATENCY)));
+
+ listen_int_exceeded = time_after(jiffies +
+ ieee80211_scan_get_channel_time(next_chan),
+ local->leave_oper_channel_time +
+ usecs_to_jiffies(min_beacon_int * 1024) *
+ local->hw.conf.listen_interval);
+
+ if (associated && (!tx_empty || bad_latency ||
+ listen_int_exceeded))
local->next_scan_state = SCAN_ENTER_OPER_CHANNEL;
else
local->next_scan_state = SCAN_SET_CHANNEL;
@@ -492,6 +551,9 @@ static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *loca
else
*next_delay = HZ / 10;
+ /* remember when we left the operating channel */
+ local->leave_oper_channel_time = jiffies;
+
/* advance to the next channel to be scanned */
local->next_scan_state = SCAN_SET_CHANNEL;
}
@@ -594,7 +656,7 @@ void ieee80211_scan_work(struct work_struct *work)
}
if (local->hw_scan_req) {
- int rc = drv_hw_scan(local, local->hw_scan_req);
+ int rc = drv_hw_scan(local, sdata, local->hw_scan_req);
mutex_unlock(&local->scan_mtx);
if (rc)
ieee80211_scan_completed(&local->hw, true);
@@ -667,10 +729,12 @@ int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
}
int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
- const u8 *ssid, u8 ssid_len)
+ const u8 *ssid, u8 ssid_len,
+ struct ieee80211_channel *chan)
{
struct ieee80211_local *local = sdata->local;
int ret = -EBUSY;
+ enum nl80211_band band;
mutex_lock(&local->scan_mtx);
@@ -678,6 +742,30 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
if (local->scan_req)
goto unlock;
+ /* fill internal scan request */
+ if (!chan) {
+ int i, nchan = 0;
+
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ if (!local->hw.wiphy->bands[band])
+ continue;
+ for (i = 0;
+ i < local->hw.wiphy->bands[band]->n_channels;
+ i++) {
+ local->int_scan_req->channels[nchan] =
+ &local->hw.wiphy->bands[band]->channels[i];
+ nchan++;
+ }
+ }
+
+ local->int_scan_req->n_channels = nchan;
+ } else {
+ local->int_scan_req->channels[0] = chan;
+ local->int_scan_req->n_channels = 1;
+ }
+
+ local->int_scan_req->ssids = &local->scan_ssid;
+ local->int_scan_req->n_ssids = 1;
memcpy(local->int_scan_req->ssids[0].ssid, ssid, IEEE80211_MAX_SSID_LEN);
local->int_scan_req->ssids[0].ssid_len = ssid_len;
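
The scan-decision hunks above compare jiffies values with time_after(), which stays correct across counter wraparound. A self-contained sketch of that comparison and of the bad_latency test, with invented budget numbers; time_after is re-created locally (in the kernel it lives in <linux/jiffies.h>):

#include <stdio.h>

typedef unsigned long jiffies_t;
#define time_after(a, b)        ((long)((b) - (a)) < 0)

int main(void)
{
        jiffies_t now = (jiffies_t)-10;         /* close to wraparound */
        jiffies_t leave_oper = now - 90;        /* left channel 90 ticks ago */
        jiffies_t budget = 100;                 /* allowed off-channel time */
        jiffies_t chan_time = 25;               /* cost of the next channel */

        /* would scanning the next channel exceed the latency budget? */
        int bad_latency = time_after(now + chan_time, leave_oper + budget);

        printf("bad_latency = %d\n", bad_latency);      /* 1: go back first */
        return 0;
}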
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index fb12cec4d333..730197591ab5 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -250,9 +250,6 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
* enable session_timer's data differentiation. Refer to
* sta_rx_agg_session_timer_expired for usage */
sta->timer_to_tid[i] = i;
- /* rx */
- sta->ampdu_mlme.tid_state_rx[i] = HT_AGG_STATE_IDLE;
- sta->ampdu_mlme.tid_rx[i] = NULL;
/* tx */
sta->ampdu_mlme.tid_state_tx[i] = HT_AGG_STATE_IDLE;
sta->ampdu_mlme.tid_tx[i] = NULL;
@@ -578,7 +575,7 @@ static int sta_info_buffer_expired(struct sta_info *sta,
}
-static void sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
+static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
struct sta_info *sta)
{
unsigned long flags;
@@ -586,7 +583,7 @@ static void sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata;
if (skb_queue_empty(&sta->ps_tx_buf))
- return;
+ return false;
for (;;) {
spin_lock_irqsave(&sta->ps_tx_buf.lock, flags);
@@ -611,6 +608,8 @@ static void sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
if (skb_queue_empty(&sta->ps_tx_buf))
sta_info_clear_tim_bit(sta);
}
+
+ return true;
}
static int __must_check __sta_info_destroy(struct sta_info *sta)
@@ -619,7 +618,7 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
struct ieee80211_sub_if_data *sdata;
struct sk_buff *skb;
unsigned long flags;
- int ret, i;
+ int ret;
might_sleep();
@@ -629,6 +628,15 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
local = sta->local;
sdata = sta->sdata;
+ /*
+ * Before removing the station from the driver and
+ * rate control, it might still start new aggregation
+ * sessions -- block that to make sure the tear-down
+ * will be sufficient.
+ */
+ set_sta_flags(sta, WLAN_STA_BLOCK_BA);
+ ieee80211_sta_tear_down_BA_sessions(sta);
+
spin_lock_irqsave(&local->sta_lock, flags);
ret = sta_info_hash_del(local, sta);
/* this might still be the pending list ... which is fine */
@@ -645,9 +653,6 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
* may mean it is removed from hardware which requires that
* the key->sta pointer is still valid, so flush the key todo
* list here.
- *
- * ieee80211_key_todo() will synchronize_rcu() so after this
- * nothing can reference this sta struct any more.
*/
ieee80211_key_todo();
@@ -679,11 +684,17 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
sdata = sta->sdata;
}
+ /*
+ * At this point, after we wait for an RCU grace period,
+ * neither mac80211 nor the driver can reference this
+ * sta struct any more except by still existing timers
+ * associated with this station that we clean up below.
+ */
+ synchronize_rcu();
+
#ifdef CONFIG_MAC80211_MESH
- if (ieee80211_vif_is_mesh(&sdata->vif)) {
+ if (ieee80211_vif_is_mesh(&sdata->vif))
mesh_accept_plinks_update(sdata);
- del_timer(&sta->plink_timer);
- }
#endif
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
@@ -710,50 +721,6 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL)
dev_kfree_skb_any(skb);
- for (i = 0; i < STA_TID_NUM; i++) {
- struct tid_ampdu_rx *tid_rx;
- struct tid_ampdu_tx *tid_tx;
-
- spin_lock_bh(&sta->lock);
- tid_rx = sta->ampdu_mlme.tid_rx[i];
- /* Make sure timer won't free the tid_rx struct, see below */
- if (tid_rx)
- tid_rx->shutdown = true;
-
- spin_unlock_bh(&sta->lock);
-
- /*
- * Outside spinlock - shutdown is true now so that the timer
- * won't free tid_rx, we have to do that now. Can't let the
- * timer do it because we have to sync the timer outside the
- * lock that it takes itself.
- */
- if (tid_rx) {
- del_timer_sync(&tid_rx->session_timer);
- kfree(tid_rx);
- }
-
- /*
- * No need to do such complications for TX agg sessions, the
- * path leading to freeing the tid_tx struct goes via a call
- * from the driver, and thus needs to look up the sta struct
- * again, which cannot be found when we get here. Hence, we
- * just need to delete the timer and free the aggregation
- * info; we won't be telling the peer about it then but that
- * doesn't matter if we're not talking to it again anyway.
- */
- tid_tx = sta->ampdu_mlme.tid_tx[i];
- if (tid_tx) {
- del_timer_sync(&tid_tx->addba_resp_timer);
- /*
- * STA removed while aggregation session being
- * started? Bit odd, but purge frames anyway.
- */
- skb_queue_purge(&tid_tx->pending);
- kfree(tid_tx);
- }
- }
-
__sta_info_free(local, sta);
return 0;
@@ -790,15 +757,20 @@ static void sta_info_cleanup(unsigned long data)
{
struct ieee80211_local *local = (struct ieee80211_local *) data;
struct sta_info *sta;
+ bool timer_needed = false;
rcu_read_lock();
list_for_each_entry_rcu(sta, &local->sta_list, list)
- sta_info_cleanup_expire_buffered(local, sta);
+ if (sta_info_cleanup_expire_buffered(local, sta))
+ timer_needed = true;
rcu_read_unlock();
if (local->quiescing)
return;
+ if (!timer_needed)
+ return;
+
local->sta_cleanup.expires =
round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL);
add_timer(&local->sta_cleanup);
@@ -883,8 +855,12 @@ struct ieee80211_sta *ieee80211_find_sta_by_hw(struct ieee80211_hw *hw,
struct sta_info *sta, *nxt;
/* Just return a random station ... first in list ... */
- for_each_sta_info(hw_to_local(hw), addr, sta, nxt)
+ for_each_sta_info(hw_to_local(hw), addr, sta, nxt) {
+ if (!sta->uploaded)
+ return NULL;
return &sta->sta;
+ }
+
return NULL;
}
EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_hw);
@@ -892,14 +868,19 @@ EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_hw);
struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_vif *vif,
const u8 *addr)
{
- struct ieee80211_sub_if_data *sdata;
+ struct sta_info *sta;
if (!vif)
return NULL;
- sdata = vif_to_sdata(vif);
+ sta = sta_info_get_bss(vif_to_sdata(vif), addr);
+ if (!sta)
+ return NULL;
+
+ if (!sta->uploaded)
+ return NULL;
- return ieee80211_find_sta_by_hw(&sdata->local->hw, addr);
+ return &sta->sta;
}
EXPORT_SYMBOL(ieee80211_find_sta);
@@ -992,6 +973,8 @@ void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
{
struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
+ trace_api_sta_block_awake(sta->local, pubsta, block);
+
if (block)
set_sta_flags(sta, WLAN_STA_PS_DRIVER);
else
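
The __sta_info_destroy() reordering above is about lifetime: block new BA sessions first, tear down the existing ones, wait out a grace period, and only then free. A rough userspace analog using atomics in place of RCU — illustrative only; the names and the waiting mechanism are not the kernel's:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct sta {
        atomic_bool block_ba;           /* WLAN_STA_BLOCK_BA analog */
        atomic_int readers;             /* outstanding RCU-like readers */
        int tid_active;
};

static void destroy(struct sta *sta)
{
        atomic_store(&sta->block_ba, true);     /* no new BA sessions */
        sta->tid_active = 0;                    /* tear down existing ones */
        while (atomic_load(&sta->readers))      /* "synchronize_rcu()" */
                ;                               /* spin until quiescent */
        free(sta);                              /* nothing references it now */
}

int main(void)
{
        struct sta *sta = calloc(1, sizeof(*sta));

        if (!sta)
                return 1;
        atomic_init(&sta->block_ba, false);
        atomic_init(&sta->readers, 0);
        destroy(sta);
        puts("freed after readers drained");
        return 0;
}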
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 822d84522937..48a5e80957f0 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -35,8 +35,8 @@
* IEEE80211_TX_CTL_CLEAR_PS_FILT control flag) when the next
* frame to this station is transmitted.
* @WLAN_STA_MFP: Management frame protection is used with this STA.
- * @WLAN_STA_SUSPEND: Set/cleared during a suspend/resume cycle.
- * Used to deny ADDBA requests (both TX and RX).
+ * @WLAN_STA_BLOCK_BA: Used to deny ADDBA requests (both TX and RX)
+ * during suspend/resume and station removal.
* @WLAN_STA_PS_DRIVER: driver requires keeping this station in
* power-save mode logically to flush frames that might still
* be in the queues
@@ -57,7 +57,7 @@ enum ieee80211_sta_info_flags {
WLAN_STA_WDS = 1<<7,
WLAN_STA_CLEAR_PS_FILT = 1<<9,
WLAN_STA_MFP = 1<<10,
- WLAN_STA_SUSPEND = 1<<11,
+ WLAN_STA_BLOCK_BA = 1<<11,
WLAN_STA_PS_DRIVER = 1<<12,
WLAN_STA_PSPOLL = 1<<13,
WLAN_STA_DISASSOC = 1<<14,
@@ -106,7 +106,6 @@ struct tid_ampdu_tx {
* @buf_size: buffer size for incoming A-MPDUs
* @timeout: reset timer value (in TUs).
* @dialog_token: dialog token for aggregation session
- * @shutdown: this session is being shut down due to STA removal
*/
struct tid_ampdu_rx {
struct sk_buff **reorder_buf;
@@ -118,7 +117,6 @@ struct tid_ampdu_rx {
u16 buf_size;
u16 timeout;
u8 dialog_token;
- bool shutdown;
};
/**
@@ -156,7 +154,7 @@ enum plink_state {
*/
struct sta_ampdu_mlme {
/* rx */
- u8 tid_state_rx[STA_TID_NUM];
+ bool tid_active_rx[STA_TID_NUM];
struct tid_ampdu_rx *tid_rx[STA_TID_NUM];
/* tx */
u8 tid_state_tx[STA_TID_NUM];
@@ -200,7 +198,6 @@ struct sta_ampdu_mlme {
* @rx_fragments: number of received MPDUs
* @rx_dropped: number of dropped MPDUs from this STA
* @last_signal: signal of last received frame from this STA
- * @last_noise: noise of last received frame from this STA
* @last_seq_ctrl: last received seq/frag number from this STA (per RX queue)
* @tx_filtered_count: number of frames the hardware filtered for this STA
* @tx_retry_failed: number of frames that failed retry
@@ -267,7 +264,6 @@ struct sta_info {
unsigned long rx_fragments;
unsigned long rx_dropped;
int last_signal;
- int last_noise;
__le16 last_seq_ctrl[NUM_RX_DATA_QUEUES];
/* Updated from TX status path only, no locking requirements */
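
The sta_info.h hunk above renames one bit in a flag word; for readers newer to the idiom, a minimal demonstration of how such 1<<n flags are set, cleared, and tested. The helpers only mirror the spirit of set_sta_flags()/clear_sta_flags(), without the locking:

#include <stdio.h>

#define WLAN_STA_MFP            (1 << 10)
#define WLAN_STA_BLOCK_BA       (1 << 11)

int main(void)
{
        unsigned long flags = 0;

        flags |= WLAN_STA_BLOCK_BA;             /* set_sta_flags() */
        printf("BA blocked: %d\n", !!(flags & WLAN_STA_BLOCK_BA));

        flags &= ~WLAN_STA_BLOCK_BA;            /* clear_sta_flags() */
        printf("BA blocked: %d\n", !!(flags & WLAN_STA_BLOCK_BA));
        return 0;
}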
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 56d5b9a6ec5b..94613af009f3 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -171,13 +171,16 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
struct net_device *prev_dev = NULL;
struct sta_info *sta, *tmp;
int retry_count = -1, i;
- bool injected;
+ int rates_idx = -1;
+ bool send_to_cooked;
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
/* the HW cannot have attempted that rate */
if (i >= hw->max_rates) {
info->status.rates[i].idx = -1;
info->status.rates[i].count = 0;
+ } else if (info->status.rates[i].idx >= 0) {
+ rates_idx = i;
}
retry_count += info->status.rates[i].count;
@@ -206,6 +209,10 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
return;
}
+ if ((local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) &&
+ (rates_idx != -1))
+ sta->last_tx_rate = info->status.rates[rates_idx];
+
if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) &&
(ieee80211_is_data_qos(fc))) {
u16 tid, ssn;
@@ -296,11 +303,15 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
/* this was a transmitted frame, but now we want to reuse it */
skb_orphan(skb);
+ /* Need to make a copy before skb->cb gets cleared */
+ send_to_cooked = !!(info->flags & IEEE80211_TX_CTL_INJECTED) ||
+ (type != IEEE80211_FTYPE_DATA);
+
/*
* This is a bit racy but we can avoid a lot of work
* with this test...
*/
- if (!local->monitors && !local->cooked_mntrs) {
+ if (!local->monitors && (!send_to_cooked || !local->cooked_mntrs)) {
dev_kfree_skb(skb);
return;
}
@@ -345,9 +356,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
/* for now report the total retry_count */
rthdr->data_retries = retry_count;
- /* Need to make a copy before skb->cb gets cleared */
- injected = !!(info->flags & IEEE80211_TX_CTL_INJECTED);
-
/* XXX: is this sufficient for BPF? */
skb_set_mac_header(skb, 0);
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -362,8 +370,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
continue;
if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) &&
- !injected &&
- (type == IEEE80211_FTYPE_DATA))
+ !send_to_cooked)
continue;
if (prev_dev) {
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index cfc473e1b050..680bcb7093db 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -429,6 +429,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
struct sta_info *sta = tx->sta;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
+ struct ieee80211_local *local = tx->local;
u32 staflags;
if (unlikely(!sta ||
@@ -476,6 +477,12 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
info->control.vif = &tx->sdata->vif;
info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
skb_queue_tail(&sta->ps_tx_buf, tx->skb);
+
+ if (!timer_pending(&local->sta_cleanup))
+ mod_timer(&local->sta_cleanup,
+ round_jiffies(jiffies +
+ STA_INFO_CLEANUP_INTERVAL));
+
return TX_QUEUED;
}
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
@@ -513,6 +520,8 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
else if (tx->sta && (key = rcu_dereference(tx->sta->key)))
tx->key = key;
else if (ieee80211_is_mgmt(hdr->frame_control) &&
+ is_multicast_ether_addr(hdr->addr1) &&
+ ieee80211_is_robust_mgmt_frame(hdr) &&
(key = rcu_dereference(tx->sdata->default_mgmt_key)))
tx->key = key;
else if ((key = rcu_dereference(tx->sdata->default_key)))
@@ -584,7 +593,8 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
struct ieee80211_hdr *hdr = (void *)tx->skb->data;
struct ieee80211_supported_band *sband;
struct ieee80211_rate *rate;
- int i, len;
+ int i;
+ u32 len;
bool inval = false, rts = false, short_preamble = false;
struct ieee80211_tx_rate_control txrc;
u32 sta_flags;
@@ -593,7 +603,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
sband = tx->local->hw.wiphy->bands[tx->channel->band];
- len = min_t(int, tx->skb->len + FCS_LEN,
+ len = min_t(u32, tx->skb->len + FCS_LEN,
tx->local->hw.wiphy->frag_threshold);
/* set up the tx rate control struct we give the RC algo */
@@ -1142,13 +1152,12 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) {
- unsigned long flags;
struct tid_ampdu_tx *tid_tx;
qc = ieee80211_get_qos_ctl(hdr);
tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
- spin_lock_irqsave(&tx->sta->lock, flags);
+ spin_lock(&tx->sta->lock);
/*
* XXX: This spinlock could be fairly expensive, but see the
* comment in agg-tx.c:ieee80211_agg_tx_operational().
@@ -1173,7 +1182,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
__skb_queue_tail(&tid_tx->pending, skb);
}
- spin_unlock_irqrestore(&tx->sta->lock, flags);
+ spin_unlock(&tx->sta->lock);
if (unlikely(queued))
return TX_QUEUED;
@@ -2011,14 +2020,12 @@ void ieee80211_tx_pending(unsigned long data)
while (!skb_queue_empty(&local->pending[i])) {
struct sk_buff *skb = __skb_dequeue(&local->pending[i]);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_sub_if_data *sdata;
if (WARN_ON(!info->control.vif)) {
kfree_skb(skb);
continue;
}
- sdata = vif_to_sdata(info->control.vif);
spin_unlock_irqrestore(&local->queue_stop_reason_lock,
flags);
@@ -2244,8 +2251,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
info->control.vif = vif;
- info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
- info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
+ info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT |
+ IEEE80211_TX_CTL_ASSIGN_SEQ |
+ IEEE80211_TX_CTL_FIRST_FRAGMENT;
out:
rcu_read_unlock();
return skb;
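
The tx.c change of len from int to u32 matters because wiphy's frag_threshold can be a "no fragmentation" sentinel near UINT_MAX, which a signed min turns negative. A small demonstration, re-creating min_t as the GCC/Clang statement expression the kernel uses; the threshold value is the usual 0xffffffff sentinel:

#include <stdint.h>
#include <stdio.h>

#define min_t(type, a, b) ({ type _a = (a), _b = (b); _a < _b ? _a : _b; })

int main(void)
{
        uint32_t skb_len = 1500 + 4;            /* frame + FCS */
        uint32_t frag_threshold = 0xffffffffu;  /* fragmentation disabled */

        int      bad  = min_t(int, skb_len, frag_threshold);
        uint32_t good = min_t(uint32_t, skb_len, frag_threshold);

        printf("int: %d  u32: %u\n", bad, good);        /* int: -1  u32: 1504 */
        return 0;
}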
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 53af57047435..5b79d552780a 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -270,6 +270,8 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_sub_if_data *sdata;
+ trace_wake_queue(local, queue, reason);
+
if (WARN_ON(queue >= hw->queues))
return;
@@ -312,6 +314,8 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_sub_if_data *sdata;
+ trace_stop_queue(local, queue, reason);
+
if (WARN_ON(queue >= hw->queues))
return;
@@ -796,6 +800,11 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata)
drv_conf_tx(local, queue, &qparam);
}
+
+ /* after reinitializing the QoS TX queue settings to their defaults,
+ * disable QoS entirely */
+ local->hw.conf.flags &= ~IEEE80211_CONF_QOS;
+ drv_config(local, IEEE80211_CONF_CHANGE_QOS);
}
void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
@@ -1135,7 +1144,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
list_for_each_entry_rcu(sta, &local->sta_list, list) {
- clear_sta_flags(sta, WLAN_STA_SUSPEND);
+ clear_sta_flags(sta, WLAN_STA_BLOCK_BA);
}
}
@@ -1151,18 +1160,33 @@ int ieee80211_reconfig(struct ieee80211_local *local)
/* Finally also reconfigure all the BSS information */
list_for_each_entry(sdata, &local->interfaces, list) {
- u32 changed = ~0;
+ u32 changed;
+
if (!ieee80211_sdata_running(sdata))
continue;
+
+ /* common change flags for all interface types */
+ changed = BSS_CHANGED_ERP_CTS_PROT |
+ BSS_CHANGED_ERP_PREAMBLE |
+ BSS_CHANGED_ERP_SLOT |
+ BSS_CHANGED_HT |
+ BSS_CHANGED_BASIC_RATES |
+ BSS_CHANGED_BEACON_INT |
+ BSS_CHANGED_BSSID |
+ BSS_CHANGED_CQM;
+
switch (sdata->vif.type) {
case NL80211_IFTYPE_STATION:
- /* disable beacon change bits */
- changed &= ~(BSS_CHANGED_BEACON |
- BSS_CHANGED_BEACON_ENABLED);
- /* fall through */
+ changed |= BSS_CHANGED_ASSOC;
+ ieee80211_bss_info_change_notify(sdata, changed);
+ break;
case NL80211_IFTYPE_ADHOC:
+ changed |= BSS_CHANGED_IBSS;
+ /* fall through */
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
+ changed |= BSS_CHANGED_BEACON |
+ BSS_CHANGED_BEACON_ENABLED;
ieee80211_bss_info_change_notify(sdata, changed);
break;
case NL80211_IFTYPE_WDS:
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index 15e1ba931b87..be3d4a698692 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -33,6 +33,7 @@
#define IEEE80211_MAX_PROBE_TRIES 5
enum work_action {
+ WORK_ACT_MISMATCH,
WORK_ACT_NONE,
WORK_ACT_TIMEOUT,
WORK_ACT_DONE,
@@ -213,15 +214,25 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
sband = local->hw.wiphy->bands[wk->chan->band];
- /*
- * Get all rates supported by the device and the AP as
- * some APs don't like getting a superset of their rates
- * in the association request (e.g. D-Link DAP 1353 in
- * b-only mode)...
- */
- rates_len = ieee80211_compatible_rates(wk->assoc.supp_rates,
- wk->assoc.supp_rates_len,
- sband, &rates);
+ if (wk->assoc.supp_rates_len) {
+ /*
+ * Get all rates supported by the device and the AP as
+ * some APs don't like getting a superset of their rates
+ * in the association request (e.g. D-Link DAP 1353 in
+ * b-only mode)...
+ */
+ rates_len = ieee80211_compatible_rates(wk->assoc.supp_rates,
+ wk->assoc.supp_rates_len,
+ sband, &rates);
+ } else {
+ /*
+ * In case the AP did not provide any supported rates
+ * information before association, we send information
+ * element(s) with all rates that we support.
+ */
+ rates = ~0;
+ rates_len = sband->n_bitrates;
+ }
skb = alloc_skb(local->hw.extra_tx_headroom +
sizeof(*mgmt) + /* bit too much but doesn't matter */
@@ -575,7 +586,7 @@ ieee80211_rx_mgmt_auth(struct ieee80211_work *wk,
u16 auth_alg, auth_transaction, status_code;
if (wk->type != IEEE80211_WORK_AUTH)
- return WORK_ACT_NONE;
+ return WORK_ACT_MISMATCH;
if (len < 24 + 6)
return WORK_ACT_NONE;
@@ -626,6 +637,9 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_work *wk,
struct ieee802_11_elems elems;
u8 *pos;
+ if (wk->type != IEEE80211_WORK_ASSOC)
+ return WORK_ACT_MISMATCH;
+
/*
* AssocResp and ReassocResp have identical structure, so process both
* of them in this function.
@@ -681,6 +695,12 @@ ieee80211_rx_mgmt_probe_resp(struct ieee80211_work *wk,
ASSERT_WORK_MTX(local);
+ if (wk->type != IEEE80211_WORK_DIRECT_PROBE)
+ return WORK_ACT_MISMATCH;
+
+ if (len < 24 + 12)
+ return WORK_ACT_NONE;
+
baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
if (baselen > len)
return WORK_ACT_NONE;
@@ -695,7 +715,7 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
struct ieee80211_rx_status *rx_status;
struct ieee80211_mgmt *mgmt;
struct ieee80211_work *wk;
- enum work_action rma = WORK_ACT_NONE;
+ enum work_action rma;
u16 fc;
rx_status = (struct ieee80211_rx_status *) skb->cb;
@@ -742,7 +762,17 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
break;
default:
WARN_ON(1);
+ rma = WORK_ACT_NONE;
}
+
+ /*
+ * We've either received an unexpected frame, or we have
+ * multiple work items and need to match the frame to the
+ * right one.
+ */
+ if (rma == WORK_ACT_MISMATCH)
+ continue;
+
/*
* We've processed this frame for that work, so it can't
* belong to another work struct.
@@ -752,6 +782,9 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
}
switch (rma) {
+ case WORK_ACT_MISMATCH:
+ /* ignore this unmatched frame */
+ break;
case WORK_ACT_NONE:
break;
case WORK_ACT_DONE:
@@ -920,11 +953,16 @@ static void ieee80211_work_work(struct work_struct *work)
run_again(local, jiffies + HZ/2);
}
- if (list_empty(&local->work_list) && local->scan_req)
+ mutex_lock(&local->scan_mtx);
+
+ if (list_empty(&local->work_list) && local->scan_req &&
+ !local->scanning)
ieee80211_queue_delayed_work(&local->hw,
&local->scan_work,
round_jiffies_relative(0));
+ mutex_unlock(&local->scan_mtx);
+
mutex_unlock(&local->work_mtx);
ieee80211_recalc_idle(local);
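
The work.c hunks above add WORK_ACT_MISMATCH so each handler can reject frames meant for a different work item instead of silently consuming them, letting the caller keep scanning the work list. The control flow, reduced to a runnable toy with invented types and values:

#include <stdio.h>

enum work_action { WORK_ACT_MISMATCH, WORK_ACT_NONE, WORK_ACT_DONE };
enum work_type { WORK_AUTH, WORK_ASSOC };

struct work { enum work_type type; };

static enum work_action rx_auth(struct work *wk)
{
        if (wk->type != WORK_AUTH)
                return WORK_ACT_MISMATCH;       /* not ours, keep looking */
        return WORK_ACT_DONE;
}

int main(void)
{
        struct work list[] = { { WORK_ASSOC }, { WORK_AUTH } };
        unsigned i;

        for (i = 0; i < 2; i++) {
                enum work_action rma = rx_auth(&list[i]);

                if (rma == WORK_ACT_MISMATCH)
                        continue;               /* try the next work item */
                printf("frame matched work %u\n", i);
                break;
        }
        return 0;
}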
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 18d77b5c351a..8593a77cfea9 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -314,8 +314,39 @@ config NETFILTER_XTABLES
if NETFILTER_XTABLES
+comment "Xtables combined modules"
+
+config NETFILTER_XT_MARK
+ tristate 'nfmark target and match support'
+ default m if NETFILTER_ADVANCED=n
+ ---help---
+ This option adds the "MARK" target and "mark" match.
+
+ Netfilter mark matching allows you to match packets based on the
+ "nfmark" value in the packet.
+ The target allows you to create rules in the "mangle" table which alter
+ the netfilter mark (nfmark) field associated with the packet.
+
+ Prior to routing, the nfmark can influence the routing method (see
+ "Use netfilter MARK value as routing key") and can also be used by
+ other subsystems to change their behavior.
+
+config NETFILTER_XT_CONNMARK
+ tristate 'ctmark target and match support'
+ depends on NF_CONNTRACK
+ depends on NETFILTER_ADVANCED
+ select NF_CONNTRACK_MARK
+ ---help---
+ This option adds the "CONNMARK" target and "connmark" match.
+
+ Netfilter allows you to store a mark value per connection (a.k.a.
+ ctmark), similarly to the packet mark (nfmark). Using this
+ target and match, you can set and match on this mark.
+
# alphabetically ordered list of targets
+comment "Xtables targets"
+
config NETFILTER_XT_TARGET_CLASSIFY
tristate '"CLASSIFY" target support'
depends on NETFILTER_ADVANCED
@@ -332,15 +363,11 @@ config NETFILTER_XT_TARGET_CONNMARK
tristate '"CONNMARK" target support'
depends on NF_CONNTRACK
depends on NETFILTER_ADVANCED
- select NF_CONNTRACK_MARK
- help
- This option adds a `CONNMARK' target, which allows one to manipulate
- the connection mark value. Similar to the MARK target, but
- affects the connection mark value rather than the packet mark value.
-
- If you want to compile it as a module, say M here and read
- <file:Documentation/kbuild/modules.txt>. The module will be called
- ipt_CONNMARK. If unsure, say `N'.
+ select NETFILTER_XT_CONNMARK
+ ---help---
+ This is a backwards-compat option for the user's convenience
+ (e.g. when running oldconfig). It selects
+ CONFIG_NETFILTER_XT_CONNMARK (combined connmark/CONNMARK module).
config NETFILTER_XT_TARGET_CONNSECMARK
tristate '"CONNSECMARK" target support'
@@ -423,16 +450,12 @@ config NETFILTER_XT_TARGET_LED
config NETFILTER_XT_TARGET_MARK
tristate '"MARK" target support'
- default m if NETFILTER_ADVANCED=n
- help
- This option adds a `MARK' target, which allows you to create rules
- in the `mangle' table which alter the netfilter mark (nfmark) field
- associated with the packet prior to routing. This can change
- the routing method (see `Use netfilter MARK value as routing
- key') and can also be used by other subsystems to change their
- behavior.
-
- To compile it as a module, choose M here. If unsure, say N.
+ depends on NETFILTER_ADVANCED
+ select NETFILTER_XT_MARK
+ ---help---
+ This is a backwards-compat option for the user's convenience
+ (e.g. when running oldconfig). It selects
+ CONFIG_NETFILTER_XT_MARK (combined mark/MARK module).
config NETFILTER_XT_TARGET_NFLOG
tristate '"NFLOG" target support'
@@ -479,6 +502,15 @@ config NETFILTER_XT_TARGET_RATEEST
To compile it as a module, choose M here. If unsure, say N.
+config NETFILTER_XT_TARGET_TEE
+ tristate '"TEE" - packet cloning to alternate destiantion'
+ depends on NETFILTER_ADVANCED
+ depends on (IPV6 || IPV6=n)
+ depends on !NF_CONNTRACK || NF_CONNTRACK
+ ---help---
+ This option adds a "TEE" target with which a packet can be cloned and
+ this clone be rerouted to another nexthop.
+
config NETFILTER_XT_TARGET_TPROXY
tristate '"TPROXY" target support (EXPERIMENTAL)'
depends on EXPERIMENTAL
@@ -552,6 +584,10 @@ config NETFILTER_XT_TARGET_TCPOPTSTRIP
This option adds a "TCPOPTSTRIP" target, which allows you to strip
TCP options from TCP packets.
+# alphabetically ordered list of matches
+
+comment "Xtables matches"
+
config NETFILTER_XT_MATCH_CLUSTER
tristate '"cluster" match support'
depends on NF_CONNTRACK
@@ -602,14 +638,11 @@ config NETFILTER_XT_MATCH_CONNMARK
tristate '"connmark" connection mark match support'
depends on NF_CONNTRACK
depends on NETFILTER_ADVANCED
- select NF_CONNTRACK_MARK
- help
- This option adds a `connmark' match, which allows you to match the
- connection mark value previously set for the session by `CONNMARK'.
-
- If you want to compile it as a module, say M here and read
- <file:Documentation/kbuild/modules.txt>. The module will be called
- ipt_connmark. If unsure, say `N'.
+ select NETFILTER_XT_CONNMARK
+ ---help---
+ This is a backwards-compat option for the user's convenience
+ (e.g. when running oldconfig). It selects
+ CONFIG_NETFILTER_XT_CONNMARK (combined connmark/CONNMARK module).
config NETFILTER_XT_MATCH_CONNTRACK
tristate '"conntrack" connection tracking match support'
@@ -733,13 +766,12 @@ config NETFILTER_XT_MATCH_MAC
config NETFILTER_XT_MATCH_MARK
tristate '"mark" match support'
- default m if NETFILTER_ADVANCED=n
- help
- Netfilter mark matching allows you to match packets based on the
- `nfmark' value in the packet. This can be set by the MARK target
- (see below).
-
- To compile it as a module, choose M here. If unsure, say N.
+ depends on NETFILTER_ADVANCED
+ select NETFILTER_XT_MARK
+ ---help---
+ This is a backwards-compat option for the user's convenience
+ (e.g. when running oldconfig). It selects
+ CONFIG_NETFILTER_XT_MARK (combined mark/MARK module).
config NETFILTER_XT_MATCH_MULTIPORT
tristate '"multiport" Multiple port match support'
@@ -751,6 +783,19 @@ config NETFILTER_XT_MATCH_MULTIPORT
To compile it as a module, choose M here. If unsure, say N.
+config NETFILTER_XT_MATCH_OSF
+ tristate '"osf" Passive OS fingerprint match'
+ depends on NETFILTER_ADVANCED && NETFILTER_NETLINK
+ help
+ This option selects the Passive OS Fingerprinting match module
+ that allows to passively match the remote operating system by
+ analyzing incoming TCP SYN packets.
+
+ Rules and loading software can be downloaded from
+ http://www.ioremap.net/projects/osf
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config NETFILTER_XT_MATCH_OWNER
tristate '"owner" match support'
depends on NETFILTER_ADVANCED
@@ -836,13 +881,6 @@ config NETFILTER_XT_MATCH_RECENT
Short options are available by using 'iptables -m recent -h'
Official Website: <http://snowman.net/projects/ipt_recent/>
-config NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
- bool 'Enable obsolete /proc/net/ipt_recent'
- depends on NETFILTER_XT_MATCH_RECENT && PROC_FS
- ---help---
- This option enables the old /proc/net/ipt_recent interface,
- which has been obsoleted by /proc/net/xt_recent.
-
config NETFILTER_XT_MATCH_SCTP
tristate '"sctp" protocol match support (EXPERIMENTAL)'
depends on EXPERIMENTAL
@@ -942,19 +980,6 @@ config NETFILTER_XT_MATCH_U32
Details and examples are in the kernel module source.
-config NETFILTER_XT_MATCH_OSF
- tristate '"osf" Passive OS fingerprint match'
- depends on NETFILTER_ADVANCED && NETFILTER_NETLINK
- help
- This option selects the Passive OS Fingerprinting match module
- that allows to passively match the remote operating system by
- analyzing incoming TCP SYN packets.
-
- Rules and loading software can be downloaded from
- http://www.ioremap.net/projects/osf
-
- To compile it as a module, choose M here. If unsure, say N.
-
endif # NETFILTER_XTABLES
endmenu
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index f873644f02f6..14e3a8fd8180 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -40,15 +40,17 @@ obj-$(CONFIG_NETFILTER_TPROXY) += nf_tproxy_core.o
# generic X tables
obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
+# combos
+obj-$(CONFIG_NETFILTER_XT_MARK) += xt_mark.o
+obj-$(CONFIG_NETFILTER_XT_CONNMARK) += xt_connmark.o
+
# targets
obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o
-obj-$(CONFIG_NETFILTER_XT_TARGET_CONNMARK) += xt_CONNMARK.o
obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o
obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
-obj-$(CONFIG_NETFILTER_XT_TARGET_MARK) += xt_MARK.o
obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o
obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o
obj-$(CONFIG_NETFILTER_XT_TARGET_NOTRACK) += xt_NOTRACK.o
@@ -57,6 +59,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_SECMARK) += xt_SECMARK.o
obj-$(CONFIG_NETFILTER_XT_TARGET_TPROXY) += xt_TPROXY.o
obj-$(CONFIG_NETFILTER_XT_TARGET_TCPMSS) += xt_TCPMSS.o
obj-$(CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP) += xt_TCPOPTSTRIP.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_TEE) += xt_TEE.o
obj-$(CONFIG_NETFILTER_XT_TARGET_TRACE) += xt_TRACE.o
# matches
@@ -64,7 +67,6 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CLUSTER) += xt_cluster.o
obj-$(CONFIG_NETFILTER_XT_MATCH_COMMENT) += xt_comment.o
obj-$(CONFIG_NETFILTER_XT_MATCH_CONNBYTES) += xt_connbytes.o
obj-$(CONFIG_NETFILTER_XT_MATCH_CONNLIMIT) += xt_connlimit.o
-obj-$(CONFIG_NETFILTER_XT_MATCH_CONNMARK) += xt_connmark.o
obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
@@ -76,7 +78,6 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_IPRANGE) += xt_iprange.o
obj-$(CONFIG_NETFILTER_XT_MATCH_LENGTH) += xt_length.o
obj-$(CONFIG_NETFILTER_XT_MATCH_LIMIT) += xt_limit.o
obj-$(CONFIG_NETFILTER_XT_MATCH_MAC) += xt_mac.o
-obj-$(CONFIG_NETFILTER_XT_MATCH_MARK) += xt_mark.o
obj-$(CONFIG_NETFILTER_XT_MATCH_MULTIPORT) += xt_multiport.o
obj-$(CONFIG_NETFILTER_XT_MATCH_OSF) += xt_osf.o
obj-$(CONFIG_NETFILTER_XT_MATCH_OWNER) += xt_owner.o
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 2c7f185dfae4..2ae747a376a5 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -209,8 +209,14 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
*/
from.ip = n_cp->vaddr.ip;
port = n_cp->vport;
- sprintf(buf, "%u,%u,%u,%u,%u,%u", NIPQUAD(from.ip),
- (ntohs(port)>>8)&255, ntohs(port)&255);
+ snprintf(buf, sizeof(buf), "%u,%u,%u,%u,%u,%u",
+ ((unsigned char *)&from.ip)[0],
+ ((unsigned char *)&from.ip)[1],
+ ((unsigned char *)&from.ip)[2],
+ ((unsigned char *)&from.ip)[3],
+ ntohs(port) >> 8,
+ ntohs(port) & 0xFF);
+
buf_len = strlen(buf);
/*
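
The ip_vs_ftp hunk above replaces the removed NIPQUAD() macro with explicit byte extraction when building the FTP PORT argument "h1,h2,h3,h4,p1,p2". The same formatting as a standalone program; the sample address and port are arbitrary:

#include <arpa/inet.h>
#include <stdio.h>

int main(void)
{
        struct in_addr from;
        unsigned short port = htons(51234);
        char buf[24];

        inet_pton(AF_INET, "192.0.2.10", &from);
        snprintf(buf, sizeof(buf), "%u,%u,%u,%u,%u,%u",
                 ((unsigned char *)&from.s_addr)[0],
                 ((unsigned char *)&from.s_addr)[1],
                 ((unsigned char *)&from.s_addr)[2],
                 ((unsigned char *)&from.s_addr)[3],
                 ntohs(port) >> 8,
                 ntohs(port) & 0xFF);
        printf("PORT %s\r\n", buf);     /* PORT 192,0,2,10,200,34 */
        return 0;
}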
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index 7fc49f4cf5ad..2d3d5e4b35f8 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -167,26 +167,24 @@ ip_vs_tcpudp_debug_packet_v4(struct ip_vs_protocol *pp,
ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
if (ih == NULL)
- sprintf(buf, "%s TRUNCATED", pp->name);
+ sprintf(buf, "TRUNCATED");
else if (ih->frag_off & htons(IP_OFFSET))
- sprintf(buf, "%s %pI4->%pI4 frag",
- pp->name, &ih->saddr, &ih->daddr);
+ sprintf(buf, "%pI4->%pI4 frag", &ih->saddr, &ih->daddr);
else {
__be16 _ports[2], *pptr;
pptr = skb_header_pointer(skb, offset + ih->ihl*4,
sizeof(_ports), _ports);
if (pptr == NULL)
- sprintf(buf, "%s TRUNCATED %pI4->%pI4",
- pp->name, &ih->saddr, &ih->daddr);
+ sprintf(buf, "TRUNCATED %pI4->%pI4",
+ &ih->saddr, &ih->daddr);
else
- sprintf(buf, "%s %pI4:%u->%pI4:%u",
- pp->name,
+ sprintf(buf, "%pI4:%u->%pI4:%u",
&ih->saddr, ntohs(pptr[0]),
&ih->daddr, ntohs(pptr[1]));
}
- pr_debug("%s: %s\n", msg, buf);
+ pr_debug("%s: %s %s\n", msg, pp->name, buf);
}
#ifdef CONFIG_IP_VS_IPV6
@@ -201,26 +199,24 @@ ip_vs_tcpudp_debug_packet_v6(struct ip_vs_protocol *pp,
ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
if (ih == NULL)
- sprintf(buf, "%s TRUNCATED", pp->name);
+ sprintf(buf, "TRUNCATED");
else if (ih->nexthdr == IPPROTO_FRAGMENT)
- sprintf(buf, "%s %pI6->%pI6 frag",
- pp->name, &ih->saddr, &ih->daddr);
+ sprintf(buf, "%pI6->%pI6 frag", &ih->saddr, &ih->daddr);
else {
__be16 _ports[2], *pptr;
pptr = skb_header_pointer(skb, offset + sizeof(struct ipv6hdr),
sizeof(_ports), _ports);
if (pptr == NULL)
- sprintf(buf, "%s TRUNCATED %pI6->%pI6",
- pp->name, &ih->saddr, &ih->daddr);
+ sprintf(buf, "TRUNCATED %pI6->%pI6",
+ &ih->saddr, &ih->daddr);
else
- sprintf(buf, "%s %pI6:%u->%pI6:%u",
- pp->name,
+ sprintf(buf, "%pI6:%u->%pI6:%u",
&ih->saddr, ntohs(pptr[0]),
&ih->daddr, ntohs(pptr[1]));
}
- pr_debug("%s: %s\n", msg, buf);
+ pr_debug("%s: %s %s\n", msg, pp->name, buf);
}
#endif
diff --git a/net/netfilter/ipvs/ip_vs_proto_ah_esp.c b/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
index c30b43c36cd7..1892dfc12fdd 100644
--- a/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
@@ -136,12 +136,11 @@ ah_esp_debug_packet_v4(struct ip_vs_protocol *pp, const struct sk_buff *skb,
ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
if (ih == NULL)
- sprintf(buf, "%s TRUNCATED", pp->name);
+ sprintf(buf, "TRUNCATED");
else
- sprintf(buf, "%s %pI4->%pI4",
- pp->name, &ih->saddr, &ih->daddr);
+ sprintf(buf, "%pI4->%pI4", &ih->saddr, &ih->daddr);
- pr_debug("%s: %s\n", msg, buf);
+ pr_debug("%s: %s %s\n", msg, pp->name, buf);
}
#ifdef CONFIG_IP_VS_IPV6
@@ -154,12 +153,11 @@ ah_esp_debug_packet_v6(struct ip_vs_protocol *pp, const struct sk_buff *skb,
ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
if (ih == NULL)
- sprintf(buf, "%s TRUNCATED", pp->name);
+ sprintf(buf, "TRUNCATED");
else
- sprintf(buf, "%s %pI6->%pI6",
- pp->name, &ih->saddr, &ih->daddr);
+ sprintf(buf, "%pI6->%pI6", &ih->saddr, &ih->daddr);
- pr_debug("%s: %s\n", msg, buf);
+ pr_debug("%s: %s %s\n", msg, pp->name, buf);
}
#endif
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 8fb0ae616761..7ba06939829f 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -802,7 +802,7 @@ static int sync_thread_backup(void *data)
ip_vs_backup_mcast_ifn, ip_vs_backup_syncid);
while (!kthread_should_stop()) {
- wait_event_interruptible(*tinfo->sock->sk->sk_sleep,
+ wait_event_interruptible(*sk_sleep(tinfo->sock->sk),
!skb_queue_empty(&tinfo->sock->sk->sk_receive_queue)
|| kthread_should_stop());
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index e450cd6f4eb5..93c15a107b2c 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -270,7 +270,7 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
/* Another hack: avoid icmp_send in ip_fragment */
skb->local_df = 1;
- IP_VS_XMIT(PF_INET, skb, rt);
+ IP_VS_XMIT(NFPROTO_IPV4, skb, rt);
LeaveFunction(10);
return NF_STOLEN;
@@ -334,7 +334,7 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
/* Another hack: avoid icmp_send in ip_fragment */
skb->local_df = 1;
- IP_VS_XMIT(PF_INET6, skb, rt);
+ IP_VS_XMIT(NFPROTO_IPV6, skb, rt);
LeaveFunction(10);
return NF_STOLEN;
@@ -410,7 +410,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
/* Another hack: avoid icmp_send in ip_fragment */
skb->local_df = 1;
- IP_VS_XMIT(PF_INET, skb, rt);
+ IP_VS_XMIT(NFPROTO_IPV4, skb, rt);
LeaveFunction(10);
return NF_STOLEN;
@@ -486,7 +486,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
/* Another hack: avoid icmp_send in ip_fragment */
skb->local_df = 1;
- IP_VS_XMIT(PF_INET6, skb, rt);
+ IP_VS_XMIT(NFPROTO_IPV6, skb, rt);
LeaveFunction(10);
return NF_STOLEN;
@@ -785,7 +785,7 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
/* Another hack: avoid icmp_send in ip_fragment */
skb->local_df = 1;
- IP_VS_XMIT(PF_INET, skb, rt);
+ IP_VS_XMIT(NFPROTO_IPV4, skb, rt);
LeaveFunction(10);
return NF_STOLEN;
@@ -838,7 +838,7 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
/* Another hack: avoid icmp_send in ip_fragment */
skb->local_df = 1;
- IP_VS_XMIT(PF_INET6, skb, rt);
+ IP_VS_XMIT(NFPROTO_IPV6, skb, rt);
LeaveFunction(10);
return NF_STOLEN;
@@ -912,7 +912,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
/* Another hack: avoid icmp_send in ip_fragment */
skb->local_df = 1;
- IP_VS_XMIT(PF_INET, skb, rt);
+ IP_VS_XMIT(NFPROTO_IPV4, skb, rt);
rc = NF_STOLEN;
goto out;
@@ -987,7 +987,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
/* Another hack: avoid icmp_send in ip_fragment */
skb->local_df = 1;
- IP_VS_XMIT(PF_INET6, skb, rt);
+ IP_VS_XMIT(NFPROTO_IPV6, skb, rt);
rc = NF_STOLEN;
goto out;
diff --git a/net/netfilter/nf_conntrack_amanda.c b/net/netfilter/nf_conntrack_amanda.c
index 372e80f07a81..13fd2c55e329 100644
--- a/net/netfilter/nf_conntrack_amanda.c
+++ b/net/netfilter/nf_conntrack_amanda.c
@@ -108,7 +108,7 @@ static int amanda_help(struct sk_buff *skb,
dataoff = protoff + sizeof(struct udphdr);
if (dataoff >= skb->len) {
if (net_ratelimit())
- printk("amanda_help: skblen = %u\n", skb->len);
+ printk(KERN_ERR "amanda_help: skblen = %u\n", skb->len);
return NF_ACCEPT;
}
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 0c9bbe93cc16..b83c530c5e0a 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -319,8 +319,10 @@ begin:
* not the expected one, we must restart lookup.
* We probably met an item that was moved to another chain.
*/
- if (get_nulls_value(n) != hash)
+ if (get_nulls_value(n) != hash) {
+ NF_CT_STAT_INC(net, search_restart);
goto begin;
+ }
local_bh_enable();
return NULL;
@@ -1333,7 +1335,7 @@ static int nf_conntrack_init_init_net(void)
}
nf_conntrack_max = max_factor * nf_conntrack_htable_size;
- printk("nf_conntrack version %s (%u buckets, %d max)\n",
+ printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n",
NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
nf_conntrack_max);
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index f516961a83b4..cdcc7649476b 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -85,7 +85,8 @@ int nf_conntrack_register_notifier(struct nf_ct_event_notifier *new)
struct nf_ct_event_notifier *notify;
mutex_lock(&nf_ct_ecache_mutex);
- notify = rcu_dereference(nf_conntrack_event_cb);
+ notify = rcu_dereference_protected(nf_conntrack_event_cb,
+ lockdep_is_held(&nf_ct_ecache_mutex));
if (notify != NULL) {
ret = -EBUSY;
goto out_unlock;
@@ -105,7 +106,8 @@ void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *new)
struct nf_ct_event_notifier *notify;
mutex_lock(&nf_ct_ecache_mutex);
- notify = rcu_dereference(nf_conntrack_event_cb);
+ notify = rcu_dereference_protected(nf_conntrack_event_cb,
+ lockdep_is_held(&nf_ct_ecache_mutex));
BUG_ON(notify != new);
rcu_assign_pointer(nf_conntrack_event_cb, NULL);
mutex_unlock(&nf_ct_ecache_mutex);
@@ -118,7 +120,8 @@ int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *new)
struct nf_exp_event_notifier *notify;
mutex_lock(&nf_ct_ecache_mutex);
- notify = rcu_dereference(nf_expect_event_cb);
+ notify = rcu_dereference_protected(nf_expect_event_cb,
+ lockdep_is_held(&nf_ct_ecache_mutex));
if (notify != NULL) {
ret = -EBUSY;
goto out_unlock;
@@ -138,7 +141,8 @@ void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *new)
struct nf_exp_event_notifier *notify;
mutex_lock(&nf_ct_ecache_mutex);
- notify = rcu_dereference(nf_expect_event_cb);
+ notify = rcu_dereference_protected(nf_expect_event_cb,
+ lockdep_is_held(&nf_ct_ecache_mutex));
BUG_ON(notify != new);
rcu_assign_pointer(nf_expect_event_cb, NULL);
mutex_unlock(&nf_ct_ecache_mutex);
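
The ecache hunks above switch to rcu_dereference_protected() with a lockdep_is_held() condition: the pointer is only read under the update-side mutex, and that assumption is now asserted rather than implied. A userspace sketch of the pattern, with a mutex-owner field standing in for lockdep — none of this is kernel API:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ecache_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_t ecache_owner;
static int owner_valid;
static void *event_cb;                  /* the protected pointer */

static void *deref_protected(void **p)
{
        /* lockdep_is_held() analog: we must be the current owner */
        assert(owner_valid && pthread_equal(ecache_owner, pthread_self()));
        return *p;
}

int main(void)
{
        int cb = 42;

        pthread_mutex_lock(&ecache_mutex);
        ecache_owner = pthread_self();
        owner_valid = 1;

        event_cb = &cb;
        printf("notifier = %p\n", deref_protected(&event_cb));

        owner_valid = 0;
        pthread_mutex_unlock(&ecache_mutex);
        return 0;
}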
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index 2ae3169e7633..e17cb7c7dd8f 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -573,8 +573,8 @@ static int __init nf_conntrack_ftp_init(void)
ftp[i][j].tuple.src.l3num, ports[i]);
ret = nf_conntrack_helper_register(&ftp[i][j]);
if (ret) {
- printk("nf_ct_ftp: failed to register helper "
- " for pf: %d port: %d\n",
+ printk(KERN_ERR "nf_ct_ftp: failed to register"
+ " helper for pf: %d port: %d\n",
ftp[i][j].tuple.src.l3num, ports[i]);
nf_conntrack_ftp_fini();
return ret;
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index a487c8038044..6eaee7c8a337 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -194,8 +194,7 @@ static int get_tpkt_data(struct sk_buff *skb, unsigned int protoff,
return 0;
}
- if (net_ratelimit())
- printk("nf_ct_h323: incomplete TPKT (fragmented?)\n");
+ pr_debug("nf_ct_h323: incomplete TPKT (fragmented?)\n");
goto clear_out;
}
@@ -608,7 +607,7 @@ static int h245_help(struct sk_buff *skb, unsigned int protoff,
drop:
spin_unlock_bh(&nf_h323_lock);
if (net_ratelimit())
- printk("nf_ct_h245: packet dropped\n");
+ pr_info("nf_ct_h245: packet dropped\n");
return NF_DROP;
}
@@ -1153,7 +1152,7 @@ static int q931_help(struct sk_buff *skb, unsigned int protoff,
drop:
spin_unlock_bh(&nf_h323_lock);
if (net_ratelimit())
- printk("nf_ct_q931: packet dropped\n");
+ pr_info("nf_ct_q931: packet dropped\n");
return NF_DROP;
}
@@ -1728,7 +1727,7 @@ static int ras_help(struct sk_buff *skb, unsigned int protoff,
drop:
spin_unlock_bh(&nf_h323_lock);
if (net_ratelimit())
- printk("nf_ct_ras: packet dropped\n");
+ pr_info("nf_ct_ras: packet dropped\n");
return NF_DROP;
}
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
index 7673930ca342..b394aa318776 100644
--- a/net/netfilter/nf_conntrack_irc.c
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -235,7 +235,7 @@ static int __init nf_conntrack_irc_init(void)
char *tmpname;
if (max_dcc_channels < 1) {
- printk("nf_ct_irc: max_dcc_channels must not be zero\n");
+ printk(KERN_ERR "nf_ct_irc: max_dcc_channels must not be zero\n");
return -EINVAL;
}
@@ -267,7 +267,7 @@ static int __init nf_conntrack_irc_init(void)
ret = nf_conntrack_helper_register(&irc[i]);
if (ret) {
- printk("nf_ct_irc: failed to register helper "
+ printk(KERN_ERR "nf_ct_irc: failed to register helper "
"for pf: %u port: %u\n",
irc[i].tuple.src.l3num, ports[i]);
nf_conntrack_irc_fini();
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index afc52f2ee4ac..c42ff6aa441d 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -427,6 +427,17 @@ ctnetlink_proto_size(const struct nf_conn *ct)
}
static inline size_t
+ctnetlink_counters_size(const struct nf_conn *ct)
+{
+ if (!nf_ct_ext_exist(ct, NF_CT_EXT_ACCT))
+ return 0;
+ return 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
+ + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */
+ + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */
+ ;
+}
+
+static inline size_t
ctnetlink_nlmsg_size(const struct nf_conn *ct)
{
return NLMSG_ALIGN(sizeof(struct nfgenmsg))
@@ -436,11 +447,7 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct)
+ 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
+ nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
+ nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
-#ifdef CONFIG_NF_CT_ACCT
- + 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
- + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */
- + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */
-#endif
+ + ctnetlink_counters_size(ct)
+ nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
+ nla_total_size(0) /* CTA_PROTOINFO */
+ nla_total_size(0) /* CTA_HELP */
@@ -2050,29 +2057,29 @@ static int __init ctnetlink_init(void)
{
int ret;
- printk("ctnetlink v%s: registering with nfnetlink.\n", version);
+ pr_info("ctnetlink v%s: registering with nfnetlink.\n", version);
ret = nfnetlink_subsys_register(&ctnl_subsys);
if (ret < 0) {
- printk("ctnetlink_init: cannot register with nfnetlink.\n");
+ pr_err("ctnetlink_init: cannot register with nfnetlink.\n");
goto err_out;
}
ret = nfnetlink_subsys_register(&ctnl_exp_subsys);
if (ret < 0) {
- printk("ctnetlink_init: cannot register exp with nfnetlink.\n");
+ pr_err("ctnetlink_init: cannot register exp with nfnetlink.\n");
goto err_unreg_subsys;
}
#ifdef CONFIG_NF_CONNTRACK_EVENTS
ret = nf_conntrack_register_notifier(&ctnl_notifier);
if (ret < 0) {
- printk("ctnetlink_init: cannot register notifier.\n");
+ pr_err("ctnetlink_init: cannot register notifier.\n");
goto err_unreg_exp_subsys;
}
ret = nf_ct_expect_register_notifier(&ctnl_notifier_exp);
if (ret < 0) {
- printk("ctnetlink_init: cannot expect register notifier.\n");
+ pr_err("ctnetlink_init: cannot expect register notifier.\n");
goto err_unreg_notifier;
}
#endif
@@ -2093,7 +2100,7 @@ err_out:
static void __exit ctnetlink_exit(void)
{
- printk("ctnetlink: unregistering from nfnetlink.\n");
+ pr_info("ctnetlink: unregistering from nfnetlink.\n");
#ifdef CONFIG_NF_CONNTRACK_EVENTS
nf_ct_expect_unregister_notifier(&ctnl_notifier_exp);
@@ -2102,7 +2109,6 @@ static void __exit ctnetlink_exit(void)
nfnetlink_subsys_unregister(&ctnl_exp_subsys);
nfnetlink_subsys_unregister(&ctnl_subsys);
- return;
}
module_init(ctnetlink_init);
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index a44fa75b5178..5886ba1d52a0 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -14,12 +14,10 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mutex.h>
-#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/percpu.h>
-#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
@@ -119,9 +117,13 @@ void nf_ct_l3proto_module_put(unsigned short l3proto)
{
struct nf_conntrack_l3proto *p;
- /* rcu_read_lock not necessary since the caller holds a reference */
+ /* rcu_read_lock not necessary since the caller holds a reference, but
+ * taken anyway to avoid lockdep warnings in __nf_ct_l3proto_find()
+ */
+ rcu_read_lock();
p = __nf_ct_l3proto_find(l3proto);
module_put(p->me);
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_ct_l3proto_module_put);
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index b68ff15ed979..c6049c2d5ea8 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -717,12 +717,12 @@ static int __init nf_conntrack_proto_sctp_init(void)
ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_sctp4);
if (ret) {
- printk("nf_conntrack_l4proto_sctp4: protocol register failed\n");
+ pr_err("nf_conntrack_l4proto_sctp4: protocol register failed\n");
goto out;
}
ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_sctp6);
if (ret) {
- printk("nf_conntrack_l4proto_sctp6: protocol register failed\n");
+ pr_err("nf_conntrack_l4proto_sctp6: protocol register failed\n");
goto cleanup_sctp4;
}
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index c6cd1b84eddd..b20f4275893c 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1549,8 +1549,8 @@ static int __init nf_conntrack_sip_init(void)
ret = nf_conntrack_helper_register(&sip[i][j]);
if (ret) {
- printk("nf_ct_sip: failed to register helper "
- "for pf: %u port: %u\n",
+ printk(KERN_ERR "nf_ct_sip: failed to register"
+ " helper for pf: %u port: %u\n",
sip[i][j].tuple.src.l3num, ports[i]);
nf_conntrack_sip_fini();
return ret;
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index faa8eb3722b9..eb973fcd67ab 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -252,12 +252,12 @@ static int ct_cpu_seq_show(struct seq_file *seq, void *v)
const struct ip_conntrack_stat *st = v;
if (v == SEQ_START_TOKEN) {
- seq_printf(seq, "entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete\n");
+ seq_printf(seq, "entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete search_restart\n");
return 0;
}
seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x "
- "%08x %08x %08x %08x %08x %08x %08x %08x \n",
+ "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
nr_conntracks,
st->searched,
st->found,
@@ -274,7 +274,8 @@ static int ct_cpu_seq_show(struct seq_file *seq, void *v)
st->expect_new,
st->expect_create,
- st->expect_delete
+ st->expect_delete,
+ st->search_restart
);
return 0;
}
@@ -445,7 +446,7 @@ out_kmemdup:
if (net_eq(net, &init_net))
unregister_sysctl_table(nf_ct_netfilter_header);
out:
- printk("nf_conntrack: can't register to sysctl.\n");
+ printk(KERN_ERR "nf_conntrack: can't register to sysctl.\n");
return -ENOMEM;
}
diff --git a/net/netfilter/nf_conntrack_tftp.c b/net/netfilter/nf_conntrack_tftp.c
index 46e646b2e9b9..75466fd72f4f 100644
--- a/net/netfilter/nf_conntrack_tftp.c
+++ b/net/netfilter/nf_conntrack_tftp.c
@@ -138,8 +138,8 @@ static int __init nf_conntrack_tftp_init(void)
ret = nf_conntrack_helper_register(&tftp[i][j]);
if (ret) {
- printk("nf_ct_tftp: failed to register helper "
- "for pf: %u port: %u\n",
+ printk(KERN_ERR "nf_ct_tftp: failed to register"
+ " helper for pf: %u port: %u\n",
tftp[i][j].tuple.src.l3num, ports[i]);
nf_conntrack_tftp_fini();
return ret;
diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h
index bf6609978af7..770f76432ad0 100644
--- a/net/netfilter/nf_internals.h
+++ b/net/netfilter/nf_internals.h
@@ -6,7 +6,7 @@
#include <linux/netdevice.h>
#ifdef CONFIG_NETFILTER_DEBUG
-#define NFDEBUG(format, args...) printk(format , ## args)
+#define NFDEBUG(format, args...) printk(KERN_DEBUG format , ## args)
#else
#define NFDEBUG(format, args...)
#endif
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 015725a5cd50..7df37fd786bc 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -52,7 +52,8 @@ int nf_log_register(u_int8_t pf, struct nf_logger *logger)
} else {
/* register at end of list to honor first register win */
list_add_tail(&logger->list[pf], &nf_loggers_l[pf]);
- llog = rcu_dereference(nf_loggers[pf]);
+ llog = rcu_dereference_protected(nf_loggers[pf],
+ lockdep_is_held(&nf_log_mutex));
if (llog == NULL)
rcu_assign_pointer(nf_loggers[pf], logger);
}
@@ -70,7 +71,8 @@ void nf_log_unregister(struct nf_logger *logger)
mutex_lock(&nf_log_mutex);
for (i = 0; i < ARRAY_SIZE(nf_loggers); i++) {
- c_logger = rcu_dereference(nf_loggers[i]);
+ c_logger = rcu_dereference_protected(nf_loggers[i],
+ lockdep_is_held(&nf_log_mutex));
if (c_logger == logger)
rcu_assign_pointer(nf_loggers[i], NULL);
list_del(&logger->list[i]);
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index c49ef219899e..78b3cf9c519c 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -9,6 +9,7 @@
#include <linux/rcupdate.h>
#include <net/protocol.h>
#include <net/netfilter/nf_queue.h>
+#include <net/dst.h>
#include "nf_internals.h"
@@ -170,6 +171,7 @@ static int __nf_queue(struct sk_buff *skb,
dev_hold(physoutdev);
}
#endif
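+ /* The entry may sit in a userspace queue for a while; pin a refcounted dst on the skb so the route stays valid until reinjection. */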
+ skb_dst_force(skb);
afinfo->saveroute(skb, entry);
status = qh->outfn(entry, queuenum);
@@ -279,7 +281,6 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
}
rcu_read_unlock();
kfree(entry);
- return;
}
EXPORT_SYMBOL(nf_reinject);
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 6afa3d52ea5f..b4a4532823e8 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -18,12 +18,9 @@
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
-#include <linux/major.h>
-#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
-#include <linux/fcntl.h>
#include <linux/skbuff.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -215,13 +212,13 @@ static struct pernet_operations nfnetlink_net_ops = {
static int __init nfnetlink_init(void)
{
- printk("Netfilter messages via NETLINK v%s.\n", nfversion);
+ pr_info("Netfilter messages via NETLINK v%s.\n", nfversion);
return register_pernet_subsys(&nfnetlink_net_ops);
}
static void __exit nfnetlink_exit(void)
{
- printk("Removing netfilter NETLINK layer.\n");
+ pr_info("Removing netfilter NETLINK layer.\n");
unregister_pernet_subsys(&nfnetlink_net_ops);
}
module_init(nfnetlink_init);
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 203643fb2c52..fc9a211e629e 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -297,7 +297,7 @@ nfulnl_alloc_skb(unsigned int inst_size, unsigned int pkt_size)
n = max(inst_size, pkt_size);
skb = alloc_skb(n, GFP_ATOMIC);
if (!skb) {
- PRINTR("nfnetlink_log: can't alloc whole buffer (%u bytes)\n",
+ pr_notice("nfnetlink_log: can't alloc whole buffer (%u bytes)\n",
inst_size);
if (n > pkt_size) {
@@ -306,7 +306,7 @@ nfulnl_alloc_skb(unsigned int inst_size, unsigned int pkt_size)
skb = alloc_skb(pkt_size, GFP_ATOMIC);
if (!skb)
- PRINTR("nfnetlink_log: can't even alloc %u "
+ pr_err("nfnetlink_log: can't even alloc %u "
"bytes\n", pkt_size);
}
}
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index e70a6ef1f4f2..12e1ab37fcd8 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -246,8 +246,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
break;
case NFQNL_COPY_PACKET:
- if ((entskb->ip_summed == CHECKSUM_PARTIAL ||
- entskb->ip_summed == CHECKSUM_COMPLETE) &&
+ if (entskb->ip_summed == CHECKSUM_PARTIAL &&
skb_checksum_help(entskb)) {
spin_unlock_bh(&queue->lock);
return NULL;
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 665f5beef6ad..445de702b8b7 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -12,7 +12,7 @@
* published by the Free Software Foundation.
*
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/socket.h>
#include <linux/net.h>
@@ -55,12 +55,6 @@ struct xt_af {
static struct xt_af *xt;
-#ifdef DEBUG_IP_FIREWALL_USER
-#define duprintf(format, args...) printk(format , ## args)
-#else
-#define duprintf(format, args...)
-#endif
-
static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
[NFPROTO_UNSPEC] = "x",
[NFPROTO_IPV4] = "ip",
@@ -69,6 +63,9 @@ static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
[NFPROTO_IPV6] = "ip6",
};
+/* Allow this many total (re)entries. */
+static const unsigned int xt_jumpstack_multiplier = 2;
+
/* Registration hooks for targets. */
int
xt_register_target(struct xt_target *target)
@@ -221,6 +218,17 @@ struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
}
EXPORT_SYMBOL(xt_find_match);
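+/* Find match, grabs ref; auto-loads the extension module if needed. Returns ERR_PTR() on error, mirroring xt_request_find_target() below. */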
+struct xt_match *
+xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
+{
+ struct xt_match *match;
+
+ match = try_then_request_module(xt_find_match(nfproto, name, revision),
+ "%st_%s", xt_prefix[nfproto], name);
+ return (match != NULL) ? match : ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL_GPL(xt_request_find_match);
+
/* Find target, grabs ref. Returns ERR_PTR() on error. */
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
{
@@ -257,9 +265,7 @@ struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
target = try_then_request_module(xt_find_target(af, name, revision),
"%st_%s", xt_prefix[af], name);
- if (IS_ERR(target) || !target)
- return NULL;
- return target;
+ return (target != NULL) ? target : ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL_GPL(xt_request_find_target);
@@ -361,6 +367,8 @@ static char *textify_hooks(char *buf, size_t size, unsigned int mask)
int xt_check_match(struct xt_mtchk_param *par,
unsigned int size, u_int8_t proto, bool inv_proto)
{
+ int ret;
+
if (XT_ALIGN(par->match->matchsize) != size &&
par->match->matchsize != -1) {
/*
@@ -397,8 +405,14 @@ int xt_check_match(struct xt_mtchk_param *par,
par->match->proto);
return -EINVAL;
}
- if (par->match->checkentry != NULL && !par->match->checkentry(par))
- return -EINVAL;
+ if (par->match->checkentry != NULL) {
+ ret = par->match->checkentry(par);
+ if (ret < 0)
+ return ret;
+ else if (ret > 0)
+ /* Flag up potential errors. */
+ return -EIO;
+ }
return 0;
}
EXPORT_SYMBOL_GPL(xt_check_match);
@@ -518,6 +532,8 @@ EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
int xt_check_target(struct xt_tgchk_param *par,
unsigned int size, u_int8_t proto, bool inv_proto)
{
+ int ret;
+
if (XT_ALIGN(par->target->targetsize) != size) {
pr_err("%s_tables: %s.%u target: invalid size "
"%u (kernel) != (user) %u\n",
@@ -549,8 +565,14 @@ int xt_check_target(struct xt_tgchk_param *par,
par->target->proto);
return -EINVAL;
}
- if (par->target->checkentry != NULL && !par->target->checkentry(par))
- return -EINVAL;
+ if (par->target->checkentry != NULL) {
+ ret = par->target->checkentry(par);
+ if (ret < 0)
+ return ret;
+ else if (ret > 0)
+ /* Flag up potential errors. */
+ return -EIO;
+ }
return 0;
}
EXPORT_SYMBOL_GPL(xt_check_target);
@@ -662,6 +684,26 @@ void xt_free_table_info(struct xt_table_info *info)
else
vfree(info->entries[cpu]);
}
+
+ if (info->jumpstack != NULL) {
+ if (sizeof(void *) * info->stacksize > PAGE_SIZE) {
+ for_each_possible_cpu(cpu)
+ vfree(info->jumpstack[cpu]);
+ } else {
+ for_each_possible_cpu(cpu)
+ kfree(info->jumpstack[cpu]);
+ }
+ }
+
+ if (sizeof(void **) * nr_cpu_ids > PAGE_SIZE)
+ vfree(info->jumpstack);
+ else
+ kfree(info->jumpstack);
+ if (sizeof(unsigned int) * nr_cpu_ids > PAGE_SIZE)
+ vfree(info->stackptr);
+ else
+ kfree(info->stackptr);
+
kfree(info);
}
EXPORT_SYMBOL(xt_free_table_info);
@@ -706,6 +748,49 @@ EXPORT_SYMBOL_GPL(xt_compat_unlock);
DEFINE_PER_CPU(struct xt_info_lock, xt_info_locks);
EXPORT_PER_CPU_SYMBOL_GPL(xt_info_locks);
+static int xt_jumpstack_alloc(struct xt_table_info *i)
+{
+ unsigned int size;
+ int cpu;
+
+ size = sizeof(unsigned int) * nr_cpu_ids;
+ if (size > PAGE_SIZE)
+ i->stackptr = vmalloc(size);
+ else
+ i->stackptr = kmalloc(size, GFP_KERNEL);
+ if (i->stackptr == NULL)
+ return -ENOMEM;
+ memset(i->stackptr, 0, size);
+
+ size = sizeof(void **) * nr_cpu_ids;
+ if (size > PAGE_SIZE)
+ i->jumpstack = vmalloc(size);
+ else
+ i->jumpstack = kmalloc(size, GFP_KERNEL);
+ if (i->jumpstack == NULL)
+ return -ENOMEM;
+ memset(i->jumpstack, 0, size);
+
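+ /* Size the stack for xt_jumpstack_multiplier complete rule-set traversals, so a re-entered table (e.g. a clone sent back through by TEE) cannot overflow it. */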
+ i->stacksize *= xt_jumpstack_multiplier;
+ size = sizeof(void *) * i->stacksize;
+ for_each_possible_cpu(cpu) {
+ if (size > PAGE_SIZE)
+ i->jumpstack[cpu] = vmalloc_node(size,
+ cpu_to_node(cpu));
+ else
+ i->jumpstack[cpu] = kmalloc_node(size,
+ GFP_KERNEL, cpu_to_node(cpu));
+ if (i->jumpstack[cpu] == NULL)
+ /*
+ * Freeing will be done later on by the callers. The
+ * chain is: xt_replace_table -> __do_replace ->
+ * do_replace -> xt_free_table_info.
+ */
+ return -ENOMEM;
+ }
+
+ return 0;
+}
struct xt_table_info *
xt_replace_table(struct xt_table *table,
@@ -714,6 +799,13 @@ xt_replace_table(struct xt_table *table,
int *error)
{
struct xt_table_info *private;
+ int ret;
+
+ ret = xt_jumpstack_alloc(newinfo);
+ if (ret < 0) {
+ *error = ret;
+ return NULL;
+ }
/* Do the substitution. */
local_bh_disable();
@@ -721,7 +813,7 @@ xt_replace_table(struct xt_table *table,
/* Check inside lock: is the old number correct? */
if (num_counters != private->number) {
- duprintf("num_counters != table->private->number (%u/%u)\n",
+ pr_debug("num_counters != table->private->number (%u/%u)\n",
num_counters, private->number);
local_bh_enable();
*error = -EAGAIN;
@@ -752,6 +844,10 @@ struct xt_table *xt_register_table(struct net *net,
struct xt_table_info *private;
struct xt_table *t, *table;
+ ret = xt_jumpstack_alloc(newinfo);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
/* Don't add one object to multiple lists. */
table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
if (!table) {
@@ -778,7 +874,7 @@ struct xt_table *xt_register_table(struct net *net,
goto unlock;
private = table->private;
- duprintf("table->private->number = %u\n", private->number);
+ pr_debug("table->private->number = %u\n", private->number);
/* save number of initial entries */
private->initial_entries = private->number;
diff --git a/net/netfilter/xt_CLASSIFY.c b/net/netfilter/xt_CLASSIFY.c
index 011bc80dd2a1..c2c0e4abeb99 100644
--- a/net/netfilter/xt_CLASSIFY.c
+++ b/net/netfilter/xt_CLASSIFY.c
@@ -27,7 +27,7 @@ MODULE_ALIAS("ipt_CLASSIFY");
MODULE_ALIAS("ip6t_CLASSIFY");
static unsigned int
-classify_tg(struct sk_buff *skb, const struct xt_target_param *par)
+classify_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_classify_target_info *clinfo = par->targinfo;
diff --git a/net/netfilter/xt_CONNMARK.c b/net/netfilter/xt_CONNMARK.c
deleted file mode 100644
index 593457068ae1..000000000000
--- a/net/netfilter/xt_CONNMARK.c
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * xt_CONNMARK - Netfilter module to modify the connection mark values
- *
- * Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com>
- * by Henrik Nordstrom <hno@marasystems.com>
- * Copyright © CC Computer Consultants GmbH, 2007 - 2008
- * Jan Engelhardt <jengelh@computergmbh.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/ip.h>
-#include <net/checksum.h>
-
-MODULE_AUTHOR("Henrik Nordstrom <hno@marasystems.com>");
-MODULE_DESCRIPTION("Xtables: connection mark modification");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("ipt_CONNMARK");
-MODULE_ALIAS("ip6t_CONNMARK");
-
-#include <linux/netfilter/x_tables.h>
-#include <linux/netfilter/xt_CONNMARK.h>
-#include <net/netfilter/nf_conntrack_ecache.h>
-
-static unsigned int
-connmark_tg(struct sk_buff *skb, const struct xt_target_param *par)
-{
- const struct xt_connmark_tginfo1 *info = par->targinfo;
- enum ip_conntrack_info ctinfo;
- struct nf_conn *ct;
- u_int32_t newmark;
-
- ct = nf_ct_get(skb, &ctinfo);
- if (ct == NULL)
- return XT_CONTINUE;
-
- switch (info->mode) {
- case XT_CONNMARK_SET:
- newmark = (ct->mark & ~info->ctmask) ^ info->ctmark;
- if (ct->mark != newmark) {
- ct->mark = newmark;
- nf_conntrack_event_cache(IPCT_MARK, ct);
- }
- break;
- case XT_CONNMARK_SAVE:
- newmark = (ct->mark & ~info->ctmask) ^
- (skb->mark & info->nfmask);
- if (ct->mark != newmark) {
- ct->mark = newmark;
- nf_conntrack_event_cache(IPCT_MARK, ct);
- }
- break;
- case XT_CONNMARK_RESTORE:
- newmark = (skb->mark & ~info->nfmask) ^
- (ct->mark & info->ctmask);
- skb->mark = newmark;
- break;
- }
-
- return XT_CONTINUE;
-}
-
-static bool connmark_tg_check(const struct xt_tgchk_param *par)
-{
- if (nf_ct_l3proto_try_module_get(par->family) < 0) {
- printk(KERN_WARNING "cannot load conntrack support for "
- "proto=%u\n", par->family);
- return false;
- }
- return true;
-}
-
-static void connmark_tg_destroy(const struct xt_tgdtor_param *par)
-{
- nf_ct_l3proto_module_put(par->family);
-}
-
-static struct xt_target connmark_tg_reg __read_mostly = {
- .name = "CONNMARK",
- .revision = 1,
- .family = NFPROTO_UNSPEC,
- .checkentry = connmark_tg_check,
- .target = connmark_tg,
- .targetsize = sizeof(struct xt_connmark_tginfo1),
- .destroy = connmark_tg_destroy,
- .me = THIS_MODULE,
-};
-
-static int __init connmark_tg_init(void)
-{
- return xt_register_target(&connmark_tg_reg);
-}
-
-static void __exit connmark_tg_exit(void)
-{
- xt_unregister_target(&connmark_tg_reg);
-}
-
-module_init(connmark_tg_init);
-module_exit(connmark_tg_exit);
diff --git a/net/netfilter/xt_CONNSECMARK.c b/net/netfilter/xt_CONNSECMARK.c
index b54c3756fdc3..e04dc282e3bb 100644
--- a/net/netfilter/xt_CONNSECMARK.c
+++ b/net/netfilter/xt_CONNSECMARK.c
@@ -15,6 +15,7 @@
* published by the Free Software Foundation.
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
@@ -22,8 +23,6 @@
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_ecache.h>
-#define PFX "CONNSECMARK: "
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Morris <jmorris@redhat.com>");
MODULE_DESCRIPTION("Xtables: target for copying between connection and security mark");
@@ -65,7 +64,7 @@ static void secmark_restore(struct sk_buff *skb)
}
static unsigned int
-connsecmark_tg(struct sk_buff *skb, const struct xt_target_param *par)
+connsecmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_connsecmark_target_info *info = par->targinfo;
@@ -85,15 +84,16 @@ connsecmark_tg(struct sk_buff *skb, const struct xt_target_param *par)
return XT_CONTINUE;
}
-static bool connsecmark_tg_check(const struct xt_tgchk_param *par)
+static int connsecmark_tg_check(const struct xt_tgchk_param *par)
{
const struct xt_connsecmark_target_info *info = par->targinfo;
+ int ret;
if (strcmp(par->table, "mangle") != 0 &&
strcmp(par->table, "security") != 0) {
- printk(KERN_INFO PFX "target only valid in the \'mangle\' "
- "or \'security\' tables, not \'%s\'.\n", par->table);
- return false;
+ pr_info("target only valid in the \'mangle\' "
+ "or \'security\' tables, not \'%s\'.\n", par->table);
+ return -EINVAL;
}
switch (info->mode) {
@@ -102,16 +102,15 @@ static bool connsecmark_tg_check(const struct xt_tgchk_param *par)
break;
default:
- printk(KERN_INFO PFX "invalid mode: %hu\n", info->mode);
- return false;
+ pr_info("invalid mode: %hu\n", info->mode);
+ return -EINVAL;
}
- if (nf_ct_l3proto_try_module_get(par->family) < 0) {
- printk(KERN_WARNING "can't load conntrack support for "
- "proto=%u\n", par->family);
- return false;
- }
- return true;
+ ret = nf_ct_l3proto_try_module_get(par->family);
+ if (ret < 0)
+ pr_info("cannot load conntrack support for proto=%u\n",
+ par->family);
+ return ret;
}
static void connsecmark_tg_destroy(const struct xt_tgdtor_param *par)
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index ee18b231b950..562bf3266e04 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -20,7 +20,7 @@
#include <net/netfilter/nf_conntrack_zones.h>
static unsigned int xt_ct_target(struct sk_buff *skb,
- const struct xt_target_param *par)
+ const struct xt_action_param *par)
{
const struct xt_ct_target_info *info = par->targinfo;
struct nf_conn *ct = info->ct;
@@ -38,13 +38,13 @@ static unsigned int xt_ct_target(struct sk_buff *skb,
static u8 xt_ct_find_proto(const struct xt_tgchk_param *par)
{
- if (par->family == AF_INET) {
+ if (par->family == NFPROTO_IPV4) {
const struct ipt_entry *e = par->entryinfo;
if (e->ip.invflags & IPT_INV_PROTO)
return 0;
return e->ip.proto;
- } else if (par->family == AF_INET6) {
+ } else if (par->family == NFPROTO_IPV6) {
const struct ip6t_entry *e = par->entryinfo;
if (e->ipv6.invflags & IP6T_INV_PROTO)
@@ -54,16 +54,17 @@ static u8 xt_ct_find_proto(const struct xt_tgchk_param *par)
return 0;
}
-static bool xt_ct_tg_check(const struct xt_tgchk_param *par)
+static int xt_ct_tg_check(const struct xt_tgchk_param *par)
{
struct xt_ct_target_info *info = par->targinfo;
struct nf_conntrack_tuple t;
struct nf_conn_help *help;
struct nf_conn *ct;
+ int ret = 0;
u8 proto;
if (info->flags & ~XT_CT_NOTRACK)
- return false;
+ return -EINVAL;
if (info->flags & XT_CT_NOTRACK) {
ct = &nf_conntrack_untracked;
@@ -76,28 +77,34 @@ static bool xt_ct_tg_check(const struct xt_tgchk_param *par)
goto err1;
#endif
- if (nf_ct_l3proto_try_module_get(par->family) < 0)
+ ret = nf_ct_l3proto_try_module_get(par->family);
+ if (ret < 0)
goto err1;
memset(&t, 0, sizeof(t));
ct = nf_conntrack_alloc(par->net, info->zone, &t, &t, GFP_KERNEL);
+ ret = PTR_ERR(ct);
if (IS_ERR(ct))
goto err2;
+ ret = 0;
if ((info->ct_events || info->exp_events) &&
!nf_ct_ecache_ext_add(ct, info->ct_events, info->exp_events,
GFP_KERNEL))
goto err3;
if (info->helper[0]) {
+ ret = -ENOENT;
proto = xt_ct_find_proto(par);
if (!proto)
goto err3;
+ ret = -ENOMEM;
help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
if (help == NULL)
goto err3;
+ ret = -ENOENT;
help->helper = nf_conntrack_helper_try_module_get(info->helper,
par->family,
proto);
@@ -109,14 +116,14 @@ static bool xt_ct_tg_check(const struct xt_tgchk_param *par)
__set_bit(IPS_CONFIRMED_BIT, &ct->status);
out:
info->ct = ct;
- return true;
+ return 0;
err3:
nf_conntrack_free(ct);
err2:
nf_ct_l3proto_module_put(par->family);
err1:
- return false;
+ return ret;
}
static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par)
@@ -138,7 +145,7 @@ static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par)
static struct xt_target xt_ct_tg __read_mostly = {
.name = "CT",
.family = NFPROTO_UNSPEC,
- .targetsize = XT_ALIGN(sizeof(struct xt_ct_target_info)),
+ .targetsize = sizeof(struct xt_ct_target_info),
.checkentry = xt_ct_tg_check,
.destroy = xt_ct_tg_destroy,
.target = xt_ct_target,
diff --git a/net/netfilter/xt_DSCP.c b/net/netfilter/xt_DSCP.c
index 74ce89260056..0a229191e55b 100644
--- a/net/netfilter/xt_DSCP.c
+++ b/net/netfilter/xt_DSCP.c
@@ -9,7 +9,7 @@
*
* See RFC2474 for a description of the DSCP field within the IP Header.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
@@ -28,7 +28,7 @@ MODULE_ALIAS("ipt_TOS");
MODULE_ALIAS("ip6t_TOS");
static unsigned int
-dscp_tg(struct sk_buff *skb, const struct xt_target_param *par)
+dscp_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_DSCP_info *dinfo = par->targinfo;
u_int8_t dscp = ipv4_get_dsfield(ip_hdr(skb)) >> XT_DSCP_SHIFT;
@@ -45,7 +45,7 @@ dscp_tg(struct sk_buff *skb, const struct xt_target_param *par)
}
static unsigned int
-dscp_tg6(struct sk_buff *skb, const struct xt_target_param *par)
+dscp_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_DSCP_info *dinfo = par->targinfo;
u_int8_t dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> XT_DSCP_SHIFT;
@@ -60,19 +60,19 @@ dscp_tg6(struct sk_buff *skb, const struct xt_target_param *par)
return XT_CONTINUE;
}
-static bool dscp_tg_check(const struct xt_tgchk_param *par)
+static int dscp_tg_check(const struct xt_tgchk_param *par)
{
const struct xt_DSCP_info *info = par->targinfo;
if (info->dscp > XT_DSCP_MAX) {
- printk(KERN_WARNING "DSCP: dscp %x out of range\n", info->dscp);
- return false;
+ pr_info("dscp %x out of range\n", info->dscp);
+ return -EDOM;
}
- return true;
+ return 0;
}
static unsigned int
-tos_tg(struct sk_buff *skb, const struct xt_target_param *par)
+tos_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_tos_target_info *info = par->targinfo;
struct iphdr *iph = ip_hdr(skb);
@@ -92,7 +92,7 @@ tos_tg(struct sk_buff *skb, const struct xt_target_param *par)
}
static unsigned int
-tos_tg6(struct sk_buff *skb, const struct xt_target_param *par)
+tos_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_tos_target_info *info = par->targinfo;
struct ipv6hdr *iph = ipv6_hdr(skb);
diff --git a/net/netfilter/xt_HL.c b/net/netfilter/xt_HL.c
index 10e789e2d12a..95b084800fcc 100644
--- a/net/netfilter/xt_HL.c
+++ b/net/netfilter/xt_HL.c
@@ -9,7 +9,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
@@ -26,7 +26,7 @@ MODULE_DESCRIPTION("Xtables: Hoplimit/TTL Limit field modification target");
MODULE_LICENSE("GPL");
static unsigned int
-ttl_tg(struct sk_buff *skb, const struct xt_target_param *par)
+ttl_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
struct iphdr *iph;
const struct ipt_TTL_info *info = par->targinfo;
@@ -66,7 +66,7 @@ ttl_tg(struct sk_buff *skb, const struct xt_target_param *par)
}
static unsigned int
-hl_tg6(struct sk_buff *skb, const struct xt_target_param *par)
+hl_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
struct ipv6hdr *ip6h;
const struct ip6t_HL_info *info = par->targinfo;
@@ -101,35 +101,33 @@ hl_tg6(struct sk_buff *skb, const struct xt_target_param *par)
return XT_CONTINUE;
}
-static bool ttl_tg_check(const struct xt_tgchk_param *par)
+static int ttl_tg_check(const struct xt_tgchk_param *par)
{
const struct ipt_TTL_info *info = par->targinfo;
if (info->mode > IPT_TTL_MAXMODE) {
- printk(KERN_WARNING "ipt_TTL: invalid or unknown Mode %u\n",
- info->mode);
- return false;
+ pr_info("TTL: invalid or unknown mode %u\n", info->mode);
+ return -EINVAL;
}
if (info->mode != IPT_TTL_SET && info->ttl == 0)
- return false;
- return true;
+ return -EINVAL;
+ return 0;
}
-static bool hl_tg6_check(const struct xt_tgchk_param *par)
+static int hl_tg6_check(const struct xt_tgchk_param *par)
{
const struct ip6t_HL_info *info = par->targinfo;
if (info->mode > IP6T_HL_MAXMODE) {
- printk(KERN_WARNING "ip6t_HL: invalid or unknown Mode %u\n",
- info->mode);
- return false;
+ pr_info("invalid or unknown mode %u\n", info->mode);
+ return -EINVAL;
}
if (info->mode != IP6T_HL_SET && info->hop_limit == 0) {
- printk(KERN_WARNING "ip6t_HL: increment/decrement doesn't "
+ pr_info("increment/decrement does not "
"make sense with value 0\n");
- return false;
+ return -EINVAL;
}
- return true;
+ return 0;
}
static struct xt_target hl_tg_reg[] __read_mostly = {
diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c
index 3271c8e52153..a4140509eea1 100644
--- a/net/netfilter/xt_LED.c
+++ b/net/netfilter/xt_LED.c
@@ -18,7 +18,7 @@
* 02110-1301 USA.
*
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
@@ -32,18 +32,24 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Adam Nielsen <a.nielsen@shikadi.net>");
MODULE_DESCRIPTION("Xtables: trigger LED devices on packet match");
+static LIST_HEAD(xt_led_triggers);
+static DEFINE_MUTEX(xt_led_mutex);
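+/* Serializes trigger-list walks and refcount updates so rules sharing one LED trigger id share a single registration. */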
+
/*
* This is declared in here (the kernel module) only, to avoid having these
* dependencies in userspace code. This is what xt_led_info.internal_data
* points to.
*/
struct xt_led_info_internal {
+ struct list_head list;
+ int refcnt;
+ char *trigger_id;
struct led_trigger netfilter_led_trigger;
struct timer_list timer;
};
static unsigned int
-led_tg(struct sk_buff *skb, const struct xt_target_param *par)
+led_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_led_info *ledinfo = par->targinfo;
struct xt_led_info_internal *ledinternal = ledinfo->internal_data;
@@ -54,7 +60,7 @@ led_tg(struct sk_buff *skb, const struct xt_target_param *par)
*/
if ((ledinfo->delay > 0) && ledinfo->always_blink &&
timer_pending(&ledinternal->timer))
- led_trigger_event(&ledinternal->netfilter_led_trigger,LED_OFF);
+ led_trigger_event(&ledinternal->netfilter_led_trigger, LED_OFF);
led_trigger_event(&ledinternal->netfilter_led_trigger, LED_FULL);
@@ -75,54 +81,86 @@ led_tg(struct sk_buff *skb, const struct xt_target_param *par)
static void led_timeout_callback(unsigned long data)
{
- struct xt_led_info *ledinfo = (struct xt_led_info *)data;
- struct xt_led_info_internal *ledinternal = ledinfo->internal_data;
+ struct xt_led_info_internal *ledinternal = (struct xt_led_info_internal *)data;
led_trigger_event(&ledinternal->netfilter_led_trigger, LED_OFF);
}
-static bool led_tg_check(const struct xt_tgchk_param *par)
+static struct xt_led_info_internal *led_trigger_lookup(const char *name)
+{
+ struct xt_led_info_internal *ledinternal;
+
+ list_for_each_entry(ledinternal, &xt_led_triggers, list) {
+ if (!strcmp(name, ledinternal->netfilter_led_trigger.name))
+ return ledinternal;
+ }
+ return NULL;
+}
+
+static int led_tg_check(const struct xt_tgchk_param *par)
{
struct xt_led_info *ledinfo = par->targinfo;
struct xt_led_info_internal *ledinternal;
int err;
if (ledinfo->id[0] == '\0') {
- printk(KERN_ERR KBUILD_MODNAME ": No 'id' parameter given.\n");
- return false;
+ pr_info("No 'id' parameter given.\n");
+ return -EINVAL;
}
- ledinternal = kzalloc(sizeof(struct xt_led_info_internal), GFP_KERNEL);
- if (!ledinternal) {
- printk(KERN_CRIT KBUILD_MODNAME ": out of memory\n");
- return false;
+ mutex_lock(&xt_led_mutex);
+
+ ledinternal = led_trigger_lookup(ledinfo->id);
+ if (ledinternal) {
+ ledinternal->refcnt++;
+ goto out;
}
- ledinternal->netfilter_led_trigger.name = ledinfo->id;
+ err = -ENOMEM;
+ ledinternal = kzalloc(sizeof(struct xt_led_info_internal), GFP_KERNEL);
+ if (!ledinternal)
+ goto exit_mutex_only;
+
+ ledinternal->trigger_id = kstrdup(ledinfo->id, GFP_KERNEL);
+ if (!ledinternal->trigger_id)
+ goto exit_internal_alloc;
+
+ ledinternal->refcnt = 1;
+ ledinternal->netfilter_led_trigger.name = ledinternal->trigger_id;
err = led_trigger_register(&ledinternal->netfilter_led_trigger);
if (err) {
- printk(KERN_CRIT KBUILD_MODNAME
- ": led_trigger_register() failed\n");
+ pr_warning("led_trigger_register() failed\n");
if (err == -EEXIST)
- printk(KERN_ERR KBUILD_MODNAME
- ": Trigger name is already in use.\n");
+ pr_warning("Trigger name is already in use.\n");
goto exit_alloc;
}
/* See if we need to set up a timer */
if (ledinfo->delay > 0)
setup_timer(&ledinternal->timer, led_timeout_callback,
- (unsigned long)ledinfo);
+ (unsigned long)ledinternal);
+
+ list_add_tail(&ledinternal->list, &xt_led_triggers);
+
+out:
+ mutex_unlock(&xt_led_mutex);
ledinfo->internal_data = ledinternal;
- return true;
+ return 0;
exit_alloc:
+ kfree(ledinternal->trigger_id);
+
+exit_internal_alloc:
kfree(ledinternal);
- return false;
+exit_mutex_only:
+ mutex_unlock(&xt_led_mutex);
+
+ return err;
}
static void led_tg_destroy(const struct xt_tgdtor_param *par)
@@ -130,10 +168,23 @@ static void led_tg_destroy(const struct xt_tgdtor_param *par)
const struct xt_led_info *ledinfo = par->targinfo;
struct xt_led_info_internal *ledinternal = ledinfo->internal_data;
+ mutex_lock(&xt_led_mutex);
+
+ if (--ledinternal->refcnt) {
+ mutex_unlock(&xt_led_mutex);
+ return;
+ }
+
+ list_del(&ledinternal->list);
+
if (ledinfo->delay > 0)
del_timer_sync(&ledinternal->timer);
led_trigger_unregister(&ledinternal->netfilter_led_trigger);
+
+ mutex_unlock(&xt_led_mutex);
+
+ kfree(ledinternal->trigger_id);
kfree(ledinternal);
}
@@ -142,7 +193,7 @@ static struct xt_target led_tg_reg __read_mostly = {
.revision = 0,
.family = NFPROTO_UNSPEC,
.target = led_tg,
- .targetsize = XT_ALIGN(sizeof(struct xt_led_info)),
+ .targetsize = sizeof(struct xt_led_info),
.checkentry = led_tg_check,
.destroy = led_tg_destroy,
.me = THIS_MODULE,
diff --git a/net/netfilter/xt_MARK.c b/net/netfilter/xt_MARK.c
deleted file mode 100644
index 225f8d11e173..000000000000
--- a/net/netfilter/xt_MARK.c
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * xt_MARK - Netfilter module to modify the NFMARK field of an skb
- *
- * (C) 1999-2001 Marc Boucher <marc@mbsi.ca>
- * Copyright © CC Computer Consultants GmbH, 2007 - 2008
- * Jan Engelhardt <jengelh@computergmbh.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/ip.h>
-#include <net/checksum.h>
-
-#include <linux/netfilter/x_tables.h>
-#include <linux/netfilter/xt_MARK.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
-MODULE_DESCRIPTION("Xtables: packet mark modification");
-MODULE_ALIAS("ipt_MARK");
-MODULE_ALIAS("ip6t_MARK");
-
-static unsigned int
-mark_tg(struct sk_buff *skb, const struct xt_target_param *par)
-{
- const struct xt_mark_tginfo2 *info = par->targinfo;
-
- skb->mark = (skb->mark & ~info->mask) ^ info->mark;
- return XT_CONTINUE;
-}
-
-static struct xt_target mark_tg_reg __read_mostly = {
- .name = "MARK",
- .revision = 2,
- .family = NFPROTO_UNSPEC,
- .target = mark_tg,
- .targetsize = sizeof(struct xt_mark_tginfo2),
- .me = THIS_MODULE,
-};
-
-static int __init mark_tg_init(void)
-{
- return xt_register_target(&mark_tg_reg);
-}
-
-static void __exit mark_tg_exit(void)
-{
- xt_unregister_target(&mark_tg_reg);
-}
-
-module_init(mark_tg_init);
-module_exit(mark_tg_exit);
diff --git a/net/netfilter/xt_NFLOG.c b/net/netfilter/xt_NFLOG.c
index a57c5cf018ec..a17dd0f589b2 100644
--- a/net/netfilter/xt_NFLOG.c
+++ b/net/netfilter/xt_NFLOG.c
@@ -22,7 +22,7 @@ MODULE_ALIAS("ipt_NFLOG");
MODULE_ALIAS("ip6t_NFLOG");
static unsigned int
-nflog_tg(struct sk_buff *skb, const struct xt_target_param *par)
+nflog_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_nflog_info *info = par->targinfo;
struct nf_loginfo li;
@@ -37,15 +37,15 @@ nflog_tg(struct sk_buff *skb, const struct xt_target_param *par)
return XT_CONTINUE;
}
-static bool nflog_tg_check(const struct xt_tgchk_param *par)
+static int nflog_tg_check(const struct xt_tgchk_param *par)
{
const struct xt_nflog_info *info = par->targinfo;
if (info->flags & ~XT_NFLOG_MASK)
- return false;
+ return -EINVAL;
if (info->prefix[sizeof(info->prefix) - 1] != '\0')
- return false;
- return true;
+ return -EINVAL;
+ return 0;
}
static struct xt_target nflog_tg_reg __read_mostly = {
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
index 12dcd7007c3e..039cce1bde3d 100644
--- a/net/netfilter/xt_NFQUEUE.c
+++ b/net/netfilter/xt_NFQUEUE.c
@@ -31,7 +31,7 @@ static u32 jhash_initval __read_mostly;
static bool rnd_inited __read_mostly;
static unsigned int
-nfqueue_tg(struct sk_buff *skb, const struct xt_target_param *par)
+nfqueue_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_NFQ_info *tinfo = par->targinfo;
@@ -49,17 +49,6 @@ static u32 hash_v4(const struct sk_buff *skb)
return jhash_2words((__force u32)ipaddr, iph->protocol, jhash_initval);
}
-static unsigned int
-nfqueue_tg4_v1(struct sk_buff *skb, const struct xt_target_param *par)
-{
- const struct xt_NFQ_info_v1 *info = par->targinfo;
- u32 queue = info->queuenum;
-
- if (info->queues_total > 1)
- queue = hash_v4(skb) % info->queues_total + queue;
- return NF_QUEUE_NR(queue);
-}
-
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
static u32 hash_v6(const struct sk_buff *skb)
{
@@ -73,20 +62,26 @@ static u32 hash_v6(const struct sk_buff *skb)
return jhash2((__force u32 *)addr, ARRAY_SIZE(addr), jhash_initval);
}
+#endif
static unsigned int
-nfqueue_tg6_v1(struct sk_buff *skb, const struct xt_target_param *par)
+nfqueue_tg_v1(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_NFQ_info_v1 *info = par->targinfo;
u32 queue = info->queuenum;
- if (info->queues_total > 1)
- queue = hash_v6(skb) % info->queues_total + queue;
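+ /* Balance flows over the configured queue range by address hash; v4 and v6 now share one target callback. */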
+ if (info->queues_total > 1) {
+ if (par->family == NFPROTO_IPV4)
+ queue = hash_v4(skb) % info->queues_total + queue;
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+ else if (par->family == NFPROTO_IPV6)
+ queue = hash_v6(skb) % info->queues_total + queue;
+#endif
+ }
return NF_QUEUE_NR(queue);
}
-#endif
-static bool nfqueue_tg_v1_check(const struct xt_tgchk_param *par)
+static int nfqueue_tg_v1_check(const struct xt_tgchk_param *par)
{
const struct xt_NFQ_info_v1 *info = par->targinfo;
u32 maxid;
@@ -97,15 +92,15 @@ static bool nfqueue_tg_v1_check(const struct xt_tgchk_param *par)
}
if (info->queues_total == 0) {
pr_err("NFQUEUE: number of total queues is 0\n");
- return false;
+ return -EINVAL;
}
maxid = info->queues_total - 1 + info->queuenum;
if (maxid > 0xffff) {
pr_err("NFQUEUE: number of queues (%u) out of range (got %u)\n",
info->queues_total, maxid);
- return false;
+ return -ERANGE;
}
- return true;
+ return 0;
}
static struct xt_target nfqueue_tg_reg[] __read_mostly = {
@@ -119,23 +114,12 @@ static struct xt_target nfqueue_tg_reg[] __read_mostly = {
{
.name = "NFQUEUE",
.revision = 1,
- .family = NFPROTO_IPV4,
- .checkentry = nfqueue_tg_v1_check,
- .target = nfqueue_tg4_v1,
- .targetsize = sizeof(struct xt_NFQ_info_v1),
- .me = THIS_MODULE,
- },
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
- {
- .name = "NFQUEUE",
- .revision = 1,
- .family = NFPROTO_IPV6,
+ .family = NFPROTO_UNSPEC,
.checkentry = nfqueue_tg_v1_check,
- .target = nfqueue_tg6_v1,
+ .target = nfqueue_tg_v1,
.targetsize = sizeof(struct xt_NFQ_info_v1),
.me = THIS_MODULE,
},
-#endif
};
static int __init nfqueue_tg_init(void)
diff --git a/net/netfilter/xt_NOTRACK.c b/net/netfilter/xt_NOTRACK.c
index e7a0a54fd4ea..512b9123252f 100644
--- a/net/netfilter/xt_NOTRACK.c
+++ b/net/netfilter/xt_NOTRACK.c
@@ -13,7 +13,7 @@ MODULE_ALIAS("ipt_NOTRACK");
MODULE_ALIAS("ip6t_NOTRACK");
static unsigned int
-notrack_tg(struct sk_buff *skb, const struct xt_target_param *par)
+notrack_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
/* Previously seen (loopback)? Ignore. */
if (skb->nfct != NULL)
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index d16d55df4f61..69c01e10f8af 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -73,7 +73,7 @@ void xt_rateest_put(struct xt_rateest *est)
EXPORT_SYMBOL_GPL(xt_rateest_put);
static unsigned int
-xt_rateest_tg(struct sk_buff *skb, const struct xt_target_param *par)
+xt_rateest_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_rateest_target_info *info = par->targinfo;
struct gnet_stats_basic_packed *stats = &info->est->bstats;
@@ -86,7 +86,7 @@ xt_rateest_tg(struct sk_buff *skb, const struct xt_target_param *par)
return XT_CONTINUE;
}
-static bool xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
+static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
{
struct xt_rateest_target_info *info = par->targinfo;
struct xt_rateest *est;
@@ -94,6 +94,7 @@ static bool xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
struct nlattr opt;
struct gnet_estimator est;
} cfg;
+ int ret;
if (unlikely(!rnd_inited)) {
get_random_bytes(&jhash_rnd, sizeof(jhash_rnd));
@@ -110,12 +111,13 @@ static bool xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
(info->interval != est->params.interval ||
info->ewma_log != est->params.ewma_log)) {
xt_rateest_put(est);
- return false;
+ return -EINVAL;
}
info->est = est;
- return true;
+ return 0;
}
+ ret = -ENOMEM;
est = kzalloc(sizeof(*est), GFP_KERNEL);
if (!est)
goto err1;
@@ -131,19 +133,19 @@ static bool xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
cfg.est.interval = info->interval;
cfg.est.ewma_log = info->ewma_log;
- if (gen_new_estimator(&est->bstats, &est->rstats, &est->lock,
- &cfg.opt) < 0)
+ ret = gen_new_estimator(&est->bstats, &est->rstats,
+ &est->lock, &cfg.opt);
+ if (ret < 0)
goto err2;
info->est = est;
xt_rateest_hash_insert(est);
-
- return true;
+ return 0;
err2:
kfree(est);
err1:
- return false;
+ return ret;
}
static void xt_rateest_tg_destroy(const struct xt_tgdtor_param *par)
diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c
index 7a6f9e6f5dfa..23b2d6c486b5 100644
--- a/net/netfilter/xt_SECMARK.c
+++ b/net/netfilter/xt_SECMARK.c
@@ -12,6 +12,7 @@
* published by the Free Software Foundation.
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/selinux.h>
@@ -29,7 +30,7 @@ MODULE_ALIAS("ip6t_SECMARK");
static u8 mode;
static unsigned int
-secmark_tg(struct sk_buff *skb, const struct xt_target_param *par)
+secmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
u32 secmark = 0;
const struct xt_secmark_target_info *info = par->targinfo;
@@ -49,7 +50,7 @@ secmark_tg(struct sk_buff *skb, const struct xt_target_param *par)
return XT_CONTINUE;
}
-static bool checkentry_selinux(struct xt_secmark_target_info *info)
+static int checkentry_selinux(struct xt_secmark_target_info *info)
{
int err;
struct xt_secmark_target_selinux_info *sel = &info->u.sel;
@@ -59,58 +60,59 @@ static bool checkentry_selinux(struct xt_secmark_target_info *info)
err = selinux_string_to_sid(sel->selctx, &sel->selsid);
if (err) {
if (err == -EINVAL)
- printk(KERN_INFO PFX "invalid SELinux context \'%s\'\n",
- sel->selctx);
- return false;
+ pr_info("invalid SELinux context \'%s\'\n",
+ sel->selctx);
+ return err;
}
if (!sel->selsid) {
- printk(KERN_INFO PFX "unable to map SELinux context \'%s\'\n",
- sel->selctx);
- return false;
+ pr_info("unable to map SELinux context \'%s\'\n", sel->selctx);
+ return -ENOENT;
}
err = selinux_secmark_relabel_packet_permission(sel->selsid);
if (err) {
- printk(KERN_INFO PFX "unable to obtain relabeling permission\n");
- return false;
+ pr_info("unable to obtain relabeling permission\n");
+ return err;
}
selinux_secmark_refcount_inc();
- return true;
+ return 0;
}
-static bool secmark_tg_check(const struct xt_tgchk_param *par)
+static int secmark_tg_check(const struct xt_tgchk_param *par)
{
struct xt_secmark_target_info *info = par->targinfo;
+ int err;
if (strcmp(par->table, "mangle") != 0 &&
strcmp(par->table, "security") != 0) {
- printk(KERN_INFO PFX "target only valid in the \'mangle\' "
- "or \'security\' tables, not \'%s\'.\n", par->table);
- return false;
+ pr_info("target only valid in the \'mangle\' "
+ "or \'security\' tables, not \'%s\'.\n", par->table);
+ return -EINVAL;
}
if (mode && mode != info->mode) {
- printk(KERN_INFO PFX "mode already set to %hu cannot mix with "
- "rules for mode %hu\n", mode, info->mode);
- return false;
+ pr_info("mode already set to %hu cannot mix with "
+ "rules for mode %hu\n", mode, info->mode);
+ return -EINVAL;
}
switch (info->mode) {
case SECMARK_MODE_SEL:
- if (!checkentry_selinux(info))
- return false;
+ err = checkentry_selinux(info);
+ if (err < 0)
+ return err;
break;
default:
- printk(KERN_INFO PFX "invalid mode: %hu\n", info->mode);
- return false;
+ pr_info("invalid mode: %hu\n", info->mode);
+ return -EINVAL;
}
if (!mode)
mode = info->mode;
- return true;
+ return 0;
}
static void secmark_tg_destroy(const struct xt_tgdtor_param *par)
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index c5f4b9919e9a..62ec021fbd50 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -7,7 +7,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
@@ -68,15 +68,14 @@ tcpmss_mangle_packet(struct sk_buff *skb,
if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
if (dst_mtu(skb_dst(skb)) <= minlen) {
if (net_ratelimit())
- printk(KERN_ERR "xt_TCPMSS: "
- "unknown or invalid path-MTU (%u)\n",
+ pr_err("unknown or invalid path-MTU (%u)\n",
dst_mtu(skb_dst(skb)));
return -1;
}
if (in_mtu <= minlen) {
if (net_ratelimit())
- printk(KERN_ERR "xt_TCPMSS: unknown or "
- "invalid path-MTU (%u)\n", in_mtu);
+ pr_err("unknown or invalid path-MTU (%u)\n",
+ in_mtu);
return -1;
}
newmss = min(dst_mtu(skb_dst(skb)), in_mtu) - minlen;
@@ -173,7 +172,7 @@ static u_int32_t tcpmss_reverse_mtu(const struct sk_buff *skb,
}
static unsigned int
-tcpmss_tg4(struct sk_buff *skb, const struct xt_target_param *par)
+tcpmss_tg4(struct sk_buff *skb, const struct xt_action_param *par)
{
struct iphdr *iph = ip_hdr(skb);
__be16 newlen;
@@ -196,7 +195,7 @@ tcpmss_tg4(struct sk_buff *skb, const struct xt_target_param *par)
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
static unsigned int
-tcpmss_tg6(struct sk_buff *skb, const struct xt_target_param *par)
+tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
struct ipv6hdr *ipv6h = ipv6_hdr(skb);
u8 nexthdr;
@@ -236,7 +235,7 @@ static inline bool find_syn_match(const struct xt_entry_match *m)
return false;
}
-static bool tcpmss_tg4_check(const struct xt_tgchk_param *par)
+static int tcpmss_tg4_check(const struct xt_tgchk_param *par)
{
const struct xt_tcpmss_info *info = par->targinfo;
const struct ipt_entry *e = par->entryinfo;
@@ -246,19 +245,19 @@ static bool tcpmss_tg4_check(const struct xt_tgchk_param *par)
(par->hook_mask & ~((1 << NF_INET_FORWARD) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_POST_ROUTING))) != 0) {
- printk("xt_TCPMSS: path-MTU clamping only supported in "
- "FORWARD, OUTPUT and POSTROUTING hooks\n");
- return false;
+ pr_info("path-MTU clamping only supported in "
+ "FORWARD, OUTPUT and POSTROUTING hooks\n");
+ return -EINVAL;
}
xt_ematch_foreach(ematch, e)
if (find_syn_match(ematch))
- return true;
- printk("xt_TCPMSS: Only works on TCP SYN packets\n");
- return false;
+ return 0;
+ pr_info("Only works on TCP SYN packets\n");
+ return -EINVAL;
}
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
-static bool tcpmss_tg6_check(const struct xt_tgchk_param *par)
+static int tcpmss_tg6_check(const struct xt_tgchk_param *par)
{
const struct xt_tcpmss_info *info = par->targinfo;
const struct ip6t_entry *e = par->entryinfo;
@@ -268,15 +267,15 @@ static bool tcpmss_tg6_check(const struct xt_tgchk_param *par)
(par->hook_mask & ~((1 << NF_INET_FORWARD) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_POST_ROUTING))) != 0) {
- printk("xt_TCPMSS: path-MTU clamping only supported in "
- "FORWARD, OUTPUT and POSTROUTING hooks\n");
- return false;
+ pr_info("path-MTU clamping only supported in "
+ "FORWARD, OUTPUT and POSTROUTING hooks\n");
+ return -EINVAL;
}
xt_ematch_foreach(ematch, e)
if (find_syn_match(ematch))
- return true;
- printk("xt_TCPMSS: Only works on TCP SYN packets\n");
- return false;
+ return 0;
+ pr_info("Only works on TCP SYN packets\n");
+ return -EINVAL;
}
#endif
diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c
index 9dd8c8ef63eb..9dc9ecfdd546 100644
--- a/net/netfilter/xt_TCPOPTSTRIP.c
+++ b/net/netfilter/xt_TCPOPTSTRIP.c
@@ -3,7 +3,6 @@
*
* Copyright (C) 2007 Sven Schnelle <svens@bitebene.org>
* Copyright © CC Computer Consultants GmbH, 2007
- * Contact: Jan Engelhardt <jengelh@computergmbh.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -75,7 +74,7 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
}
static unsigned int
-tcpoptstrip_tg4(struct sk_buff *skb, const struct xt_target_param *par)
+tcpoptstrip_tg4(struct sk_buff *skb, const struct xt_action_param *par)
{
return tcpoptstrip_mangle_packet(skb, par->targinfo, ip_hdrlen(skb),
sizeof(struct iphdr) + sizeof(struct tcphdr));
@@ -83,7 +82,7 @@ tcpoptstrip_tg4(struct sk_buff *skb, const struct xt_target_param *par)
#if defined(CONFIG_IP6_NF_MANGLE) || defined(CONFIG_IP6_NF_MANGLE_MODULE)
static unsigned int
-tcpoptstrip_tg6(struct sk_buff *skb, const struct xt_target_param *par)
+tcpoptstrip_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
struct ipv6hdr *ipv6h = ipv6_hdr(skb);
int tcphoff;
@@ -136,7 +135,7 @@ static void __exit tcpoptstrip_tg_exit(void)
module_init(tcpoptstrip_tg_init);
module_exit(tcpoptstrip_tg_exit);
-MODULE_AUTHOR("Sven Schnelle <svens@bitebene.org>, Jan Engelhardt <jengelh@computergmbh.de>");
+MODULE_AUTHOR("Sven Schnelle <svens@bitebene.org>, Jan Engelhardt <jengelh@medozas.de>");
MODULE_DESCRIPTION("Xtables: TCP option stripping");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_TCPOPTSTRIP");
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
new file mode 100644
index 000000000000..d7920d9f49e9
--- /dev/null
+++ b/net/netfilter/xt_TEE.c
@@ -0,0 +1,309 @@
+/*
+ * "TEE" target extension for Xtables
+ * Copyright © Sebastian Claßen, 2007
+ * Jan Engelhardt, 2007-2010
+ *
+ * based on ipt_ROUTE.c from Cédric de Launois
+ * <delaunois@info.ucl.be>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 or later, as published by the Free Software Foundation.
+ */
+#include <linux/ip.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/route.h>
+#include <linux/skbuff.h>
+#include <linux/notifier.h>
+#include <net/checksum.h>
+#include <net/icmp.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <net/route.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_TEE.h>
+
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+# define WITH_CONNTRACK 1
+# include <net/netfilter/nf_conntrack.h>
+#endif
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+# define WITH_IPV6 1
+#endif
+
+struct xt_tee_priv {
+ struct notifier_block notifier;
+ struct xt_tee_tginfo *tginfo;
+ int oif;
+};
+
+static const union nf_inet_addr tee_zero_address;
+static DEFINE_PER_CPU(bool, tee_active);
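+/* Per-cpu reentrancy flag: set while a cloned packet re-enters the stack so TEE does not duplicate its own duplicates. */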
+
+static struct net *pick_net(struct sk_buff *skb)
+{
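+ /* Route the clone in the namespace of the device it arrived on (or is routed to); fall back to init_net when CONFIG_NET_NS is off. */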
+#ifdef CONFIG_NET_NS
+ const struct dst_entry *dst;
+
+ if (skb->dev != NULL)
+ return dev_net(skb->dev);
+ dst = skb_dst(skb);
+ if (dst != NULL && dst->dev != NULL)
+ return dev_net(dst->dev);
+#endif
+ return &init_net;
+}
+
+static bool
+tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info)
+{
+ const struct iphdr *iph = ip_hdr(skb);
+ struct net *net = pick_net(skb);
+ struct rtable *rt;
+ struct flowi fl;
+
+ memset(&fl, 0, sizeof(fl));
+ if (info->priv) {
+ if (info->priv->oif == -1)
+ return false;
+ fl.oif = info->priv->oif;
+ }
+ fl.nl_u.ip4_u.daddr = info->gw.ip;
+ fl.nl_u.ip4_u.tos = RT_TOS(iph->tos);
+ fl.nl_u.ip4_u.scope = RT_SCOPE_UNIVERSE;
+ if (ip_route_output_key(net, &rt, &fl) != 0)
+ return false;
+
+ dst_release(skb_dst(skb));
+ skb_dst_set(skb, &rt->u.dst);
+ skb->dev = rt->u.dst.dev;
+ skb->protocol = htons(ETH_P_IP);
+ return true;
+}
+
+static unsigned int
+tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
+{
+ const struct xt_tee_tginfo *info = par->targinfo;
+ struct iphdr *iph;
+
+ if (percpu_read(tee_active))
+ return XT_CONTINUE;
+ /*
+ * Copy the skb, and route the copy. Will later return %XT_CONTINUE for
+ * the original skb, which should continue on its way as if nothing has
+ * happened. The copy should be independently delivered to the TEE
+ * --gateway.
+ */
+ skb = pskb_copy(skb, GFP_ATOMIC);
+ if (skb == NULL)
+ return XT_CONTINUE;
+
+#ifdef WITH_CONNTRACK
+ /* Avoid counting cloned packets towards the original connection. */
+ nf_conntrack_put(skb->nfct);
+ skb->nfct = &nf_conntrack_untracked.ct_general;
+ skb->nfctinfo = IP_CT_NEW;
+ nf_conntrack_get(skb->nfct);
+#endif
+ /*
+ * If we are in PREROUTING/INPUT, the checksum must be recalculated
+ * since the length could have changed as a result of defragmentation.
+ *
+ * We also decrease the TTL to mitigate potential TEE loops
+ * between two hosts.
+ *
+ * Set %IP_DF so that the original source is notified of a potentially
+ * decreased MTU on the clone route. IPv6 does this too.
+ */
+ iph = ip_hdr(skb);
+ iph->frag_off |= htons(IP_DF);
+ if (par->hooknum == NF_INET_PRE_ROUTING ||
+ par->hooknum == NF_INET_LOCAL_IN)
+ --iph->ttl;
+ ip_send_check(iph);
+
+ if (tee_tg_route4(skb, info)) {
+ percpu_write(tee_active, true);
+ ip_local_out(skb);
+ percpu_write(tee_active, false);
+ } else {
+ kfree_skb(skb);
+ }
+ return XT_CONTINUE;
+}
+
+#ifdef WITH_IPV6
+static bool
+tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info)
+{
+ const struct ipv6hdr *iph = ipv6_hdr(skb);
+ struct net *net = pick_net(skb);
+ struct dst_entry *dst;
+ struct flowi fl;
+
+ memset(&fl, 0, sizeof(fl));
+ if (info->priv) {
+ if (info->priv->oif == -1)
+ return false;
+ fl.oif = info->priv->oif;
+ }
+ fl.nl_u.ip6_u.daddr = info->gw.in6;
+ fl.nl_u.ip6_u.flowlabel = ((iph->flow_lbl[0] & 0xF) << 16) |
+ (iph->flow_lbl[1] << 8) | iph->flow_lbl[2];
+ dst = ip6_route_output(net, NULL, &fl);
+ if (dst == NULL)
+ return false;
+
+ dst_release(skb_dst(skb));
+ skb_dst_set(skb, dst);
+ skb->dev = dst->dev;
+ skb->protocol = htons(ETH_P_IPV6);
+ return true;
+}
+
+static unsigned int
+tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
+{
+ const struct xt_tee_tginfo *info = par->targinfo;
+
+ if (percpu_read(tee_active))
+ return XT_CONTINUE;
+ skb = pskb_copy(skb, GFP_ATOMIC);
+ if (skb == NULL)
+ return XT_CONTINUE;
+
+#ifdef WITH_CONNTRACK
+ nf_conntrack_put(skb->nfct);
+ skb->nfct = &nf_conntrack_untracked.ct_general;
+ skb->nfctinfo = IP_CT_NEW;
+ nf_conntrack_get(skb->nfct);
+#endif
+ if (par->hooknum == NF_INET_PRE_ROUTING ||
+ par->hooknum == NF_INET_LOCAL_IN) {
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+ --iph->hop_limit;
+ }
+ if (tee_tg_route6(skb, info)) {
+ percpu_write(tee_active, true);
+ ip6_local_out(skb);
+ percpu_write(tee_active, false);
+ } else {
+ kfree_skb(skb);
+ }
+ return XT_CONTINUE;
+}
+#endif /* WITH_IPV6 */
+
+static int tee_netdev_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ struct net_device *dev = ptr;
+ struct xt_tee_priv *priv;
+
+ priv = container_of(this, struct xt_tee_priv, notifier);
+ switch (event) {
+ case NETDEV_REGISTER:
+ if (!strcmp(dev->name, priv->tginfo->oif))
+ priv->oif = dev->ifindex;
+ break;
+ case NETDEV_UNREGISTER:
+ if (dev->ifindex == priv->oif)
+ priv->oif = -1;
+ break;
+ case NETDEV_CHANGENAME:
+ if (!strcmp(dev->name, priv->tginfo->oif))
+ priv->oif = dev->ifindex;
+ else if (dev->ifindex == priv->oif)
+ priv->oif = -1;
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int tee_tg_check(const struct xt_tgchk_param *par)
+{
+ struct xt_tee_tginfo *info = par->targinfo;
+ struct xt_tee_priv *priv;
+
+ /* 0.0.0.0 and :: not allowed */
+ if (memcmp(&info->gw, &tee_zero_address,
+ sizeof(tee_zero_address)) == 0)
+ return -EINVAL;
+
+ if (info->oif[0]) {
+ if (info->oif[sizeof(info->oif)-1] != '\0')
+ return -EINVAL;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (priv == NULL)
+ return -ENOMEM;
+
+ priv->tginfo = info;
+ priv->oif = -1;
+ priv->notifier.notifier_call = tee_netdev_event;
+ info->priv = priv;
+
+ register_netdevice_notifier(&priv->notifier);
+ } else
+ info->priv = NULL;
+
+ return 0;
+}
+
+static void tee_tg_destroy(const struct xt_tgdtor_param *par)
+{
+ struct xt_tee_tginfo *info = par->targinfo;
+
+ if (info->priv) {
+ unregister_netdevice_notifier(&info->priv->notifier);
+ kfree(info->priv);
+ }
+}
+
+static struct xt_target tee_tg_reg[] __read_mostly = {
+ {
+ .name = "TEE",
+ .revision = 1,
+ .family = NFPROTO_IPV4,
+ .target = tee_tg4,
+ .targetsize = sizeof(struct xt_tee_tginfo),
+ .checkentry = tee_tg_check,
+ .destroy = tee_tg_destroy,
+ .me = THIS_MODULE,
+ },
+#ifdef WITH_IPV6
+ {
+ .name = "TEE",
+ .revision = 1,
+ .family = NFPROTO_IPV6,
+ .target = tee_tg6,
+ .targetsize = sizeof(struct xt_tee_tginfo),
+ .checkentry = tee_tg_check,
+ .destroy = tee_tg_destroy,
+ .me = THIS_MODULE,
+ },
+#endif
+};
+
+static int __init tee_tg_init(void)
+{
+ return xt_register_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
+}
+
+static void __exit tee_tg_exit(void)
+{
+ xt_unregister_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
+}
+
+module_init(tee_tg_init);
+module_exit(tee_tg_exit);
+MODULE_AUTHOR("Sebastian Claßen <sebastian.classen@freenet.ag>");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
+MODULE_DESCRIPTION("Xtables: Reroute packet copy");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_TEE");
+MODULE_ALIAS("ip6t_TEE");
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index 1340c2fa3621..e1a0dedac258 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -9,7 +9,7 @@
* published by the Free Software Foundation.
*
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
@@ -25,7 +25,7 @@
#include <net/netfilter/nf_tproxy_core.h>
static unsigned int
-tproxy_tg(struct sk_buff *skb, const struct xt_target_param *par)
+tproxy_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct iphdr *iph = ip_hdr(skb);
const struct xt_tproxy_target_info *tgi = par->targinfo;
@@ -59,17 +59,17 @@ tproxy_tg(struct sk_buff *skb, const struct xt_target_param *par)
return NF_DROP;
}
-static bool tproxy_tg_check(const struct xt_tgchk_param *par)
+static int tproxy_tg_check(const struct xt_tgchk_param *par)
{
const struct ipt_ip *i = par->entryinfo;
if ((i->proto == IPPROTO_TCP || i->proto == IPPROTO_UDP)
&& !(i->invflags & IPT_INV_PROTO))
- return true;
+ return 0;
- pr_info("xt_TPROXY: Can be used only in combination with "
+ pr_info("Can be used only in combination with "
"either -p tcp or -p udp\n");
- return false;
+ return -EINVAL;
}
static struct xt_target tproxy_tg_reg __read_mostly = {
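The tproxy_tg_check conversion above is the pattern repeated throughout this series: checkentry callbacks stop returning bool and start returning 0 or a negative errno, so the core can report why a rule was refused instead of a bare failure. A schematic of the two conventions, using a hypothetical extension rather than any real xtables API:

#include <errno.h>
#include <stdbool.h>

/* Old convention: the reason for rejection is lost. */
static bool example_check_old(unsigned int flags, unsigned int valid)
{
	return (flags & ~valid) == 0;
}

/* New convention: 0 accepts the rule, -errno explains the rejection. */
static int example_check_new(unsigned int flags, unsigned int valid)
{
	if (flags & ~valid)
		return -EINVAL;	/* unknown flag bits in the rule */
	return 0;
}

This is also why several checks in the series return distinct codes such as -EDOM or -ERANGE rather than a uniform -EINVAL.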
diff --git a/net/netfilter/xt_TRACE.c b/net/netfilter/xt_TRACE.c
index fbb04b86c46b..df48967af382 100644
--- a/net/netfilter/xt_TRACE.c
+++ b/net/netfilter/xt_TRACE.c
@@ -11,7 +11,7 @@ MODULE_ALIAS("ipt_TRACE");
MODULE_ALIAS("ip6t_TRACE");
static unsigned int
-trace_tg(struct sk_buff *skb, const struct xt_target_param *par)
+trace_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
skb->nf_trace = 1;
return XT_CONTINUE;
diff --git a/net/netfilter/xt_cluster.c b/net/netfilter/xt_cluster.c
index 225ee3ecd69d..30b95a1c1c89 100644
--- a/net/netfilter/xt_cluster.c
+++ b/net/netfilter/xt_cluster.c
@@ -5,6 +5,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
@@ -85,7 +86,7 @@ xt_cluster_is_multicast_addr(const struct sk_buff *skb, u_int8_t family)
}
static bool
-xt_cluster_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+xt_cluster_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
struct sk_buff *pskb = (struct sk_buff *)skb;
const struct xt_cluster_match_info *info = par->matchinfo;
@@ -131,22 +132,22 @@ xt_cluster_mt(const struct sk_buff *skb, const struct xt_match_param *par)
!!(info->flags & XT_CLUSTER_F_INV);
}
-static bool xt_cluster_mt_checkentry(const struct xt_mtchk_param *par)
+static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par)
{
struct xt_cluster_match_info *info = par->matchinfo;
if (info->total_nodes > XT_CLUSTER_NODES_MAX) {
- printk(KERN_ERR "xt_cluster: you have exceeded the maximum "
- "number of cluster nodes (%u > %u)\n",
- info->total_nodes, XT_CLUSTER_NODES_MAX);
- return false;
+ pr_info("you have exceeded the maximum "
+ "number of cluster nodes (%u > %u)\n",
+ info->total_nodes, XT_CLUSTER_NODES_MAX);
+ return -EINVAL;
}
if (info->node_mask >= (1ULL << info->total_nodes)) {
- printk(KERN_ERR "xt_cluster: this node mask cannot be "
- "higher than the total number of nodes\n");
- return false;
+ pr_info("this node mask cannot be "
+ "higher than the total number of nodes\n");
+ return -EDOM;
}
- return true;
+ return 0;
}
static struct xt_match xt_cluster_match __read_mostly = {
diff --git a/net/netfilter/xt_comment.c b/net/netfilter/xt_comment.c
index e82179832acd..5c861d2f21ca 100644
--- a/net/netfilter/xt_comment.c
+++ b/net/netfilter/xt_comment.c
@@ -16,7 +16,7 @@ MODULE_ALIAS("ipt_comment");
MODULE_ALIAS("ip6t_comment");
static bool
-comment_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+comment_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
/* We always match */
return true;
diff --git a/net/netfilter/xt_connbytes.c b/net/netfilter/xt_connbytes.c
index 955e6598a7f0..73517835303d 100644
--- a/net/netfilter/xt_connbytes.c
+++ b/net/netfilter/xt_connbytes.c
@@ -1,6 +1,7 @@
/* Kernel module to match connection tracking byte counter.
* GPL (C) 2002 Martin Devera (devik@cdi.cz).
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/skbuff.h>
@@ -17,7 +18,7 @@ MODULE_ALIAS("ipt_connbytes");
MODULE_ALIAS("ip6t_connbytes");
static bool
-connbytes_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+connbytes_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_connbytes_info *sinfo = par->matchinfo;
const struct nf_conn *ct;
@@ -92,27 +93,26 @@ connbytes_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return what >= sinfo->count.from;
}
-static bool connbytes_mt_check(const struct xt_mtchk_param *par)
+static int connbytes_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_connbytes_info *sinfo = par->matchinfo;
+ int ret;
if (sinfo->what != XT_CONNBYTES_PKTS &&
sinfo->what != XT_CONNBYTES_BYTES &&
sinfo->what != XT_CONNBYTES_AVGPKT)
- return false;
+ return -EINVAL;
if (sinfo->direction != XT_CONNBYTES_DIR_ORIGINAL &&
sinfo->direction != XT_CONNBYTES_DIR_REPLY &&
sinfo->direction != XT_CONNBYTES_DIR_BOTH)
- return false;
-
- if (nf_ct_l3proto_try_module_get(par->family) < 0) {
- printk(KERN_WARNING "can't load conntrack support for "
- "proto=%u\n", par->family);
- return false;
- }
+ return -EINVAL;
- return true;
+ ret = nf_ct_l3proto_try_module_get(par->family);
+ if (ret < 0)
+ pr_info("cannot load conntrack support for proto=%u\n",
+ par->family);
+ return ret;
}
static void connbytes_mt_destroy(const struct xt_mtdtor_param *par)
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 388ca4596098..5c5b6b921b84 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -5,13 +5,13 @@
* Nov 2002: Martin Bene <martin.bene@icomedias.com>:
* only ignore TIME_WAIT or gone connections
* (C) CC Computer Consultants GmbH, 2007
- * Contact: <jengelh@computergmbh.de>
*
* based on ...
*
* Kernel module to match connection tracking information.
* GPL (C) 1999 Rusty Russell (rusty@rustcorp.com.au).
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
@@ -173,7 +173,7 @@ static int count_them(struct net *net,
}
static bool
-connlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
struct net *net = dev_net(par->in ? par->in : par->out);
const struct xt_connlimit_info *info = par->matchinfo;
@@ -206,44 +206,46 @@ connlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par)
if (connections < 0) {
/* kmalloc failed, drop it entirely */
- *par->hotdrop = true;
+ par->hotdrop = true;
return false;
}
return (connections > info->limit) ^ info->inverse;
hotdrop:
- *par->hotdrop = true;
+ par->hotdrop = true;
return false;
}
-static bool connlimit_mt_check(const struct xt_mtchk_param *par)
+static int connlimit_mt_check(const struct xt_mtchk_param *par)
{
struct xt_connlimit_info *info = par->matchinfo;
unsigned int i;
+ int ret;
if (unlikely(!connlimit_rnd_inited)) {
get_random_bytes(&connlimit_rnd, sizeof(connlimit_rnd));
connlimit_rnd_inited = true;
}
- if (nf_ct_l3proto_try_module_get(par->family) < 0) {
- printk(KERN_WARNING "cannot load conntrack support for "
- "address family %u\n", par->family);
- return false;
+ ret = nf_ct_l3proto_try_module_get(par->family);
+ if (ret < 0) {
+ pr_info("cannot load conntrack support for "
+ "address family %u\n", par->family);
+ return ret;
}
/* init private data */
info->data = kmalloc(sizeof(struct xt_connlimit_data), GFP_KERNEL);
if (info->data == NULL) {
nf_ct_l3proto_module_put(par->family);
- return false;
+ return -ENOMEM;
}
spin_lock_init(&info->data->lock);
for (i = 0; i < ARRAY_SIZE(info->data->iphash); ++i)
INIT_LIST_HEAD(&info->data->iphash[i]);
- return true;
+ return 0;
}
static void connlimit_mt_destroy(const struct xt_mtdtor_param *par)
diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c
index 122aa8b0147b..7278145e6a68 100644
--- a/net/netfilter/xt_connmark.c
+++ b/net/netfilter/xt_connmark.c
@@ -1,10 +1,10 @@
/*
- * xt_connmark - Netfilter module to match connection mark values
+ * xt_connmark - Netfilter module to operate on connection marks
*
* Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com>
* by Henrik Nordstrom <hno@marasystems.com>
* Copyright © CC Computer Consultants GmbH, 2007 - 2008
- * Jan Engelhardt <jengelh@computergmbh.de>
+ * Jan Engelhardt <jengelh@medozas.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -24,17 +24,74 @@
#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_connmark.h>
MODULE_AUTHOR("Henrik Nordstrom <hno@marasystems.com>");
-MODULE_DESCRIPTION("Xtables: connection mark match");
+MODULE_DESCRIPTION("Xtables: connection mark operations");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_CONNMARK");
+MODULE_ALIAS("ip6t_CONNMARK");
MODULE_ALIAS("ipt_connmark");
MODULE_ALIAS("ip6t_connmark");
+static unsigned int
+connmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
+{
+ const struct xt_connmark_tginfo1 *info = par->targinfo;
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct;
+ u_int32_t newmark;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ if (ct == NULL)
+ return XT_CONTINUE;
+
+ switch (info->mode) {
+ case XT_CONNMARK_SET:
+ newmark = (ct->mark & ~info->ctmask) ^ info->ctmark;
+ if (ct->mark != newmark) {
+ ct->mark = newmark;
+ nf_conntrack_event_cache(IPCT_MARK, ct);
+ }
+ break;
+ case XT_CONNMARK_SAVE:
+ newmark = (ct->mark & ~info->ctmask) ^
+ (skb->mark & info->nfmask);
+ if (ct->mark != newmark) {
+ ct->mark = newmark;
+ nf_conntrack_event_cache(IPCT_MARK, ct);
+ }
+ break;
+ case XT_CONNMARK_RESTORE:
+ newmark = (skb->mark & ~info->nfmask) ^
+ (ct->mark & info->ctmask);
+ skb->mark = newmark;
+ break;
+ }
+
+ return XT_CONTINUE;
+}
+
+static int connmark_tg_check(const struct xt_tgchk_param *par)
+{
+ int ret;
+
+ ret = nf_ct_l3proto_try_module_get(par->family);
+ if (ret < 0)
+ pr_info("cannot load conntrack support for proto=%u\n",
+ par->family);
+ return ret;
+}
+
+static void connmark_tg_destroy(const struct xt_tgdtor_param *par)
+{
+ nf_ct_l3proto_module_put(par->family);
+}
+
static bool
-connmark_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+connmark_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_connmark_mtinfo1 *info = par->matchinfo;
enum ip_conntrack_info ctinfo;
@@ -47,14 +104,15 @@ connmark_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return ((ct->mark & info->mask) == info->mark) ^ info->invert;
}
-static bool connmark_mt_check(const struct xt_mtchk_param *par)
+static int connmark_mt_check(const struct xt_mtchk_param *par)
{
- if (nf_ct_l3proto_try_module_get(par->family) < 0) {
- printk(KERN_WARNING "cannot load conntrack support for "
- "proto=%u\n", par->family);
- return false;
- }
- return true;
+ int ret;
+
+ ret = nf_ct_l3proto_try_module_get(par->family);
+ if (ret < 0)
+ pr_info("cannot load conntrack support for proto=%u\n",
+ par->family);
+ return ret;
}
static void connmark_mt_destroy(const struct xt_mtdtor_param *par)
@@ -62,6 +120,17 @@ static void connmark_mt_destroy(const struct xt_mtdtor_param *par)
nf_ct_l3proto_module_put(par->family);
}
+static struct xt_target connmark_tg_reg __read_mostly = {
+ .name = "CONNMARK",
+ .revision = 1,
+ .family = NFPROTO_UNSPEC,
+ .checkentry = connmark_tg_check,
+ .target = connmark_tg,
+ .targetsize = sizeof(struct xt_connmark_tginfo1),
+ .destroy = connmark_tg_destroy,
+ .me = THIS_MODULE,
+};
+
static struct xt_match connmark_mt_reg __read_mostly = {
.name = "connmark",
.revision = 1,
@@ -75,12 +144,23 @@ static struct xt_match connmark_mt_reg __read_mostly = {
static int __init connmark_mt_init(void)
{
- return xt_register_match(&connmark_mt_reg);
+ int ret;
+
+ ret = xt_register_target(&connmark_tg_reg);
+ if (ret < 0)
+ return ret;
+ ret = xt_register_match(&connmark_mt_reg);
+ if (ret < 0) {
+ xt_unregister_target(&connmark_tg_reg);
+ return ret;
+ }
+ return 0;
}
static void __exit connmark_mt_exit(void)
{
xt_unregister_match(&connmark_mt_reg);
+ xt_unregister_target(&connmark_tg_reg);
}
module_init(connmark_mt_init);
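All three CONNMARK modes above reduce to one expression, new = (old & ~mask) ^ value: the mask clears the bits being replaced and the xor writes them (and toggles any preserved bits that value sets outside the mask). A small worked example of SET, SAVE and RESTORE with hypothetical mark values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ctmark = 0x000000aa, skbmark = 0x0000ff00;
	uint32_t ctmask = 0x000000ff, nfmask = 0x0000ff00;

	/* XT_CONNMARK_SET: replace the ctmask bits of ct->mark. */
	uint32_t set = (ctmark & ~ctmask) ^ 0x00000011;

	/* XT_CONNMARK_SAVE: copy skb->mark (under nfmask) into ct->mark. */
	uint32_t save = (ctmark & ~ctmask) ^ (skbmark & nfmask);

	/* XT_CONNMARK_RESTORE: copy ct->mark (under ctmask) into skb->mark. */
	uint32_t restore = (skbmark & ~nfmask) ^ (ctmark & ctmask);

	/* prints set=0x11 save=0xff00 restore=0xaa */
	printf("set=%#x save=%#x restore=%#x\n", set, save, restore);
	return 0;
}

With a full mask the xor degenerates to plain assignment, which is what lets one revision of the target express set-, and-, or- and xor-style operations.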
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c
index ae66305f0fe5..39681f10291c 100644
--- a/net/netfilter/xt_conntrack.c
+++ b/net/netfilter/xt_conntrack.c
@@ -9,7 +9,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/ipv6.h>
@@ -113,7 +113,7 @@ ct_proto_port_check(const struct xt_conntrack_mtinfo2 *info,
}
static bool
-conntrack_mt(const struct sk_buff *skb, const struct xt_match_param *par,
+conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par,
u16 state_mask, u16 status_mask)
{
const struct xt_conntrack_mtinfo2 *info = par->matchinfo;
@@ -191,7 +191,7 @@ conntrack_mt(const struct sk_buff *skb, const struct xt_match_param *par,
}
static bool
-conntrack_mt_v1(const struct sk_buff *skb, const struct xt_match_param *par)
+conntrack_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_conntrack_mtinfo1 *info = par->matchinfo;
@@ -199,21 +199,22 @@ conntrack_mt_v1(const struct sk_buff *skb, const struct xt_match_param *par)
}
static bool
-conntrack_mt_v2(const struct sk_buff *skb, const struct xt_match_param *par)
+conntrack_mt_v2(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_conntrack_mtinfo2 *info = par->matchinfo;
return conntrack_mt(skb, par, info->state_mask, info->status_mask);
}
-static bool conntrack_mt_check(const struct xt_mtchk_param *par)
+static int conntrack_mt_check(const struct xt_mtchk_param *par)
{
- if (nf_ct_l3proto_try_module_get(par->family) < 0) {
- printk(KERN_WARNING "can't load conntrack support for "
- "proto=%u\n", par->family);
- return false;
- }
- return true;
+ int ret;
+
+ ret = nf_ct_l3proto_try_module_get(par->family);
+ if (ret < 0)
+ pr_info("cannot load conntrack support for proto=%u\n",
+ par->family);
+ return ret;
}
static void conntrack_mt_destroy(const struct xt_mtdtor_param *par)
diff --git a/net/netfilter/xt_dccp.c b/net/netfilter/xt_dccp.c
index 395af5943ffd..b63d2a3d80ba 100644
--- a/net/netfilter/xt_dccp.c
+++ b/net/netfilter/xt_dccp.c
@@ -96,7 +96,7 @@ match_option(u_int8_t option, const struct sk_buff *skb, unsigned int protoff,
}
static bool
-dccp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+dccp_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_dccp_info *info = par->matchinfo;
const struct dccp_hdr *dh;
@@ -107,7 +107,7 @@ dccp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
dh = skb_header_pointer(skb, par->thoff, sizeof(_dh), &_dh);
if (dh == NULL) {
- *par->hotdrop = true;
+ par->hotdrop = true;
return false;
}
@@ -120,17 +120,21 @@ dccp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
&& DCCHECK(match_types(dh, info->typemask),
XT_DCCP_TYPE, info->flags, info->invflags)
&& DCCHECK(match_option(info->option, skb, par->thoff, dh,
- par->hotdrop),
+ &par->hotdrop),
XT_DCCP_OPTION, info->flags, info->invflags);
}
-static bool dccp_mt_check(const struct xt_mtchk_param *par)
+static int dccp_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_dccp_info *info = par->matchinfo;
- return !(info->flags & ~XT_DCCP_VALID_FLAGS)
- && !(info->invflags & ~XT_DCCP_VALID_FLAGS)
- && !(info->invflags & ~info->flags);
+ if (info->flags & ~XT_DCCP_VALID_FLAGS)
+ return -EINVAL;
+ if (info->invflags & ~XT_DCCP_VALID_FLAGS)
+ return -EINVAL;
+ if (info->invflags & ~info->flags)
+ return -EINVAL;
+ return 0;
}
static struct xt_match dccp_mt_reg[] __read_mostly = {
diff --git a/net/netfilter/xt_dscp.c b/net/netfilter/xt_dscp.c
index 0280d3a8c161..64670fc5d0e1 100644
--- a/net/netfilter/xt_dscp.c
+++ b/net/netfilter/xt_dscp.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
@@ -25,7 +25,7 @@ MODULE_ALIAS("ipt_tos");
MODULE_ALIAS("ip6t_tos");
static bool
-dscp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+dscp_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_dscp_info *info = par->matchinfo;
u_int8_t dscp = ipv4_get_dsfield(ip_hdr(skb)) >> XT_DSCP_SHIFT;
@@ -34,7 +34,7 @@ dscp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
}
static bool
-dscp_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
+dscp_mt6(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_dscp_info *info = par->matchinfo;
u_int8_t dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> XT_DSCP_SHIFT;
@@ -42,23 +42,23 @@ dscp_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
return (dscp == info->dscp) ^ !!info->invert;
}
-static bool dscp_mt_check(const struct xt_mtchk_param *par)
+static int dscp_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_dscp_info *info = par->matchinfo;
if (info->dscp > XT_DSCP_MAX) {
- printk(KERN_ERR "xt_dscp: dscp %x out of range\n", info->dscp);
- return false;
+ pr_info("dscp %x out of range\n", info->dscp);
+ return -EDOM;
}
- return true;
+ return 0;
}
-static bool tos_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+static bool tos_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_tos_match_info *info = par->matchinfo;
- if (par->match->family == NFPROTO_IPV4)
+ if (par->family == NFPROTO_IPV4)
return ((ip_hdr(skb)->tos & info->tos_mask) ==
info->tos_value) ^ !!info->invert;
else
diff --git a/net/netfilter/xt_esp.c b/net/netfilter/xt_esp.c
index 609439967c2c..171ba82b5902 100644
--- a/net/netfilter/xt_esp.c
+++ b/net/netfilter/xt_esp.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/in.h>
@@ -24,25 +24,19 @@ MODULE_DESCRIPTION("Xtables: IPsec-ESP packet match");
MODULE_ALIAS("ipt_esp");
MODULE_ALIAS("ip6t_esp");
-#if 0
-#define duprintf(format, args...) printk(format , ## args)
-#else
-#define duprintf(format, args...)
-#endif
-
/* Returns 1 if the spi is matched by the range, 0 otherwise */
static inline bool
spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, bool invert)
{
bool r;
- duprintf("esp spi_match:%c 0x%x <= 0x%x <= 0x%x", invert ? '!' : ' ',
- min, spi, max);
+ pr_debug("spi_match:%c 0x%x <= 0x%x <= 0x%x\n",
+ invert ? '!' : ' ', min, spi, max);
r = (spi >= min && spi <= max) ^ invert;
- duprintf(" result %s\n", r ? "PASS" : "FAILED");
+ pr_debug(" result %s\n", r ? "PASS" : "FAILED");
return r;
}
-static bool esp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+static bool esp_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ip_esp_hdr *eh;
struct ip_esp_hdr _esp;
@@ -57,8 +51,8 @@ static bool esp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
/* We've been asked to examine this packet, and we
* can't. Hence, no choice but to drop.
*/
- duprintf("Dropping evil ESP tinygram.\n");
- *par->hotdrop = true;
+ pr_debug("Dropping evil ESP tinygram.\n");
+ par->hotdrop = true;
return false;
}
@@ -66,16 +60,16 @@ static bool esp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
!!(espinfo->invflags & XT_ESP_INV_SPI));
}
-static bool esp_mt_check(const struct xt_mtchk_param *par)
+static int esp_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_esp *espinfo = par->matchinfo;
if (espinfo->invflags & ~XT_ESP_INV_MASK) {
- duprintf("xt_esp: unknown flags %X\n", espinfo->invflags);
- return false;
+ pr_debug("unknown flags %X\n", espinfo->invflags);
+ return -EINVAL;
}
- return true;
+ return 0;
}
static struct xt_match esp_mt_reg[] __read_mostly = {
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 215a64835de8..b46a8390896d 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -7,6 +7,7 @@
*
* Development of this code was funded by Astaro AG, http://www.astaro.com/
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/random.h>
@@ -36,7 +37,7 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
-MODULE_AUTHOR("Jan Engelhardt <jengelh@computergmbh.de>");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
MODULE_DESCRIPTION("Xtables: per hash-bucket rate-limit match");
MODULE_ALIAS("ipt_hashlimit");
MODULE_ALIAS("ip6t_hashlimit");
@@ -80,12 +81,14 @@ struct dsthash_ent {
struct dsthash_dst dst;
/* modified structure members in the end */
+ spinlock_t lock;
unsigned long expires; /* precalculated expiry time */
struct {
unsigned long prev; /* last modification */
u_int32_t credit;
u_int32_t credit_cap, cost;
} rateinfo;
+ struct rcu_head rcu;
};
struct xt_hashlimit_htable {
@@ -142,9 +145,11 @@ dsthash_find(const struct xt_hashlimit_htable *ht,
u_int32_t hash = hash_dst(ht, dst);
if (!hlist_empty(&ht->hash[hash])) {
- hlist_for_each_entry(ent, pos, &ht->hash[hash], node)
- if (dst_cmp(ent, dst))
+ hlist_for_each_entry_rcu(ent, pos, &ht->hash[hash], node)
+ if (dst_cmp(ent, dst)) {
+ spin_lock(&ent->lock);
return ent;
+ }
}
return NULL;
}
@@ -156,9 +161,10 @@ dsthash_alloc_init(struct xt_hashlimit_htable *ht,
{
struct dsthash_ent *ent;
+ spin_lock(&ht->lock);
/* initialize hash with random val at the time we allocate
* the first hashtable entry */
- if (!ht->rnd_initialized) {
+ if (unlikely(!ht->rnd_initialized)) {
get_random_bytes(&ht->rnd, sizeof(ht->rnd));
ht->rnd_initialized = true;
}
@@ -166,106 +172,40 @@ dsthash_alloc_init(struct xt_hashlimit_htable *ht,
if (ht->cfg.max && ht->count >= ht->cfg.max) {
/* FIXME: do something. question is what.. */
if (net_ratelimit())
- printk(KERN_WARNING
- "xt_hashlimit: max count of %u reached\n",
- ht->cfg.max);
- return NULL;
- }
-
- ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC);
+ pr_err("max count of %u reached\n", ht->cfg.max);
+ ent = NULL;
+ } else
+ ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC);
if (!ent) {
if (net_ratelimit())
- printk(KERN_ERR
- "xt_hashlimit: can't allocate dsthash_ent\n");
- return NULL;
- }
- memcpy(&ent->dst, dst, sizeof(ent->dst));
+ pr_err("cannot allocate dsthash_ent\n");
+ } else {
+ memcpy(&ent->dst, dst, sizeof(ent->dst));
+ spin_lock_init(&ent->lock);
- hlist_add_head(&ent->node, &ht->hash[hash_dst(ht, dst)]);
- ht->count++;
+ spin_lock(&ent->lock);
+ hlist_add_head_rcu(&ent->node, &ht->hash[hash_dst(ht, dst)]);
+ ht->count++;
+ }
+ spin_unlock(&ht->lock);
return ent;
}
-static inline void
-dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent)
+static void dsthash_free_rcu(struct rcu_head *head)
{
- hlist_del(&ent->node);
+ struct dsthash_ent *ent = container_of(head, struct dsthash_ent, rcu);
+
kmem_cache_free(hashlimit_cachep, ent);
- ht->count--;
}
-static void htable_gc(unsigned long htlong);
-static int htable_create_v0(struct net *net, struct xt_hashlimit_info *minfo, u_int8_t family)
+static inline void
+dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent)
{
- struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
- struct xt_hashlimit_htable *hinfo;
- unsigned int size;
- unsigned int i;
-
- if (minfo->cfg.size)
- size = minfo->cfg.size;
- else {
- size = ((totalram_pages << PAGE_SHIFT) / 16384) /
- sizeof(struct list_head);
- if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
- size = 8192;
- if (size < 16)
- size = 16;
- }
- /* FIXME: don't use vmalloc() here or anywhere else -HW */
- hinfo = vmalloc(sizeof(struct xt_hashlimit_htable) +
- sizeof(struct list_head) * size);
- if (!hinfo) {
- printk(KERN_ERR "xt_hashlimit: unable to create hashtable\n");
- return -1;
- }
- minfo->hinfo = hinfo;
-
- /* copy match config into hashtable config */
- hinfo->cfg.mode = minfo->cfg.mode;
- hinfo->cfg.avg = minfo->cfg.avg;
- hinfo->cfg.burst = minfo->cfg.burst;
- hinfo->cfg.max = minfo->cfg.max;
- hinfo->cfg.gc_interval = minfo->cfg.gc_interval;
- hinfo->cfg.expire = minfo->cfg.expire;
-
- if (family == NFPROTO_IPV4)
- hinfo->cfg.srcmask = hinfo->cfg.dstmask = 32;
- else
- hinfo->cfg.srcmask = hinfo->cfg.dstmask = 128;
-
- hinfo->cfg.size = size;
- if (!hinfo->cfg.max)
- hinfo->cfg.max = 8 * hinfo->cfg.size;
- else if (hinfo->cfg.max < hinfo->cfg.size)
- hinfo->cfg.max = hinfo->cfg.size;
-
- for (i = 0; i < hinfo->cfg.size; i++)
- INIT_HLIST_HEAD(&hinfo->hash[i]);
-
- hinfo->use = 1;
- hinfo->count = 0;
- hinfo->family = family;
- hinfo->rnd_initialized = false;
- spin_lock_init(&hinfo->lock);
- hinfo->pde = proc_create_data(minfo->name, 0,
- (family == NFPROTO_IPV4) ?
- hashlimit_net->ipt_hashlimit : hashlimit_net->ip6t_hashlimit,
- &dl_file_ops, hinfo);
- if (!hinfo->pde) {
- vfree(hinfo);
- return -1;
- }
- hinfo->net = net;
-
- setup_timer(&hinfo->timer, htable_gc, (unsigned long )hinfo);
- hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval);
- add_timer(&hinfo->timer);
-
- hlist_add_head(&hinfo->node, &hashlimit_net->htables);
-
- return 0;
+ hlist_del_rcu(&ent->node);
+ call_rcu_bh(&ent->rcu, dsthash_free_rcu);
+ ht->count--;
}
+static void htable_gc(unsigned long htlong);
static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo,
u_int8_t family)
@@ -288,10 +228,8 @@ static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo,
/* FIXME: don't use vmalloc() here or anywhere else -HW */
hinfo = vmalloc(sizeof(struct xt_hashlimit_htable) +
sizeof(struct list_head) * size);
- if (hinfo == NULL) {
- printk(KERN_ERR "xt_hashlimit: unable to create hashtable\n");
- return -1;
- }
+ if (hinfo == NULL)
+ return -ENOMEM;
minfo->hinfo = hinfo;
/* copy match config into hashtable config */
@@ -317,7 +255,7 @@ static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo,
&dl_file_ops, hinfo);
if (hinfo->pde == NULL) {
vfree(hinfo);
- return -1;
+ return -ENOMEM;
}
hinfo->net = net;
@@ -578,58 +516,7 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
}
static bool
-hashlimit_mt_v0(const struct sk_buff *skb, const struct xt_match_param *par)
-{
- const struct xt_hashlimit_info *r = par->matchinfo;
- struct xt_hashlimit_htable *hinfo = r->hinfo;
- unsigned long now = jiffies;
- struct dsthash_ent *dh;
- struct dsthash_dst dst;
-
- if (hashlimit_init_dst(hinfo, &dst, skb, par->thoff) < 0)
- goto hotdrop;
-
- spin_lock_bh(&hinfo->lock);
- dh = dsthash_find(hinfo, &dst);
- if (!dh) {
- dh = dsthash_alloc_init(hinfo, &dst);
- if (!dh) {
- spin_unlock_bh(&hinfo->lock);
- goto hotdrop;
- }
-
- dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire);
- dh->rateinfo.prev = jiffies;
- dh->rateinfo.credit = user2credits(hinfo->cfg.avg *
- hinfo->cfg.burst);
- dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg *
- hinfo->cfg.burst);
- dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
- } else {
- /* update expiration timeout */
- dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
- rateinfo_recalc(dh, now);
- }
-
- if (dh->rateinfo.credit >= dh->rateinfo.cost) {
- /* We're underlimit. */
- dh->rateinfo.credit -= dh->rateinfo.cost;
- spin_unlock_bh(&hinfo->lock);
- return true;
- }
-
- spin_unlock_bh(&hinfo->lock);
-
- /* default case: we're overlimit, thus don't match */
- return false;
-
-hotdrop:
- *par->hotdrop = true;
- return false;
-}
-
-static bool
-hashlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_hashlimit_mtinfo1 *info = par->matchinfo;
struct xt_hashlimit_htable *hinfo = info->hinfo;
@@ -640,15 +527,14 @@ hashlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par)
if (hashlimit_init_dst(hinfo, &dst, skb, par->thoff) < 0)
goto hotdrop;
- spin_lock_bh(&hinfo->lock);
+ rcu_read_lock_bh();
dh = dsthash_find(hinfo, &dst);
if (dh == NULL) {
dh = dsthash_alloc_init(hinfo, &dst);
if (dh == NULL) {
- spin_unlock_bh(&hinfo->lock);
+ rcu_read_unlock_bh();
goto hotdrop;
}
-
dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire);
dh->rateinfo.prev = jiffies;
dh->rateinfo.credit = user2credits(hinfo->cfg.avg *
@@ -665,96 +551,58 @@ hashlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par)
if (dh->rateinfo.credit >= dh->rateinfo.cost) {
/* below the limit */
dh->rateinfo.credit -= dh->rateinfo.cost;
- spin_unlock_bh(&hinfo->lock);
+ spin_unlock(&dh->lock);
+ rcu_read_unlock_bh();
return !(info->cfg.mode & XT_HASHLIMIT_INVERT);
}
- spin_unlock_bh(&hinfo->lock);
+ spin_unlock(&dh->lock);
+ rcu_read_unlock_bh();
/* default match is underlimit - so over the limit, we need to invert */
return info->cfg.mode & XT_HASHLIMIT_INVERT;
hotdrop:
- *par->hotdrop = true;
+ par->hotdrop = true;
return false;
}
-static bool hashlimit_mt_check_v0(const struct xt_mtchk_param *par)
-{
- struct net *net = par->net;
- struct xt_hashlimit_info *r = par->matchinfo;
-
- /* Check for overflow. */
- if (r->cfg.burst == 0 ||
- user2credits(r->cfg.avg * r->cfg.burst) < user2credits(r->cfg.avg)) {
- printk(KERN_ERR "xt_hashlimit: overflow, try lower: %u/%u\n",
- r->cfg.avg, r->cfg.burst);
- return false;
- }
- if (r->cfg.mode == 0 ||
- r->cfg.mode > (XT_HASHLIMIT_HASH_DPT |
- XT_HASHLIMIT_HASH_DIP |
- XT_HASHLIMIT_HASH_SIP |
- XT_HASHLIMIT_HASH_SPT))
- return false;
- if (!r->cfg.gc_interval)
- return false;
- if (!r->cfg.expire)
- return false;
- if (r->name[sizeof(r->name) - 1] != '\0')
- return false;
-
- mutex_lock(&hashlimit_mutex);
- r->hinfo = htable_find_get(net, r->name, par->match->family);
- if (!r->hinfo && htable_create_v0(net, r, par->match->family) != 0) {
- mutex_unlock(&hashlimit_mutex);
- return false;
- }
- mutex_unlock(&hashlimit_mutex);
-
- return true;
-}
-
-static bool hashlimit_mt_check(const struct xt_mtchk_param *par)
+static int hashlimit_mt_check(const struct xt_mtchk_param *par)
{
struct net *net = par->net;
struct xt_hashlimit_mtinfo1 *info = par->matchinfo;
+ int ret;
/* Check for overflow. */
if (info->cfg.burst == 0 ||
user2credits(info->cfg.avg * info->cfg.burst) <
user2credits(info->cfg.avg)) {
- printk(KERN_ERR "xt_hashlimit: overflow, try lower: %u/%u\n",
- info->cfg.avg, info->cfg.burst);
- return false;
+ pr_info("overflow, try lower: %u/%u\n",
+ info->cfg.avg, info->cfg.burst);
+ return -ERANGE;
}
if (info->cfg.gc_interval == 0 || info->cfg.expire == 0)
- return false;
+ return -EINVAL;
if (info->name[sizeof(info->name)-1] != '\0')
- return false;
- if (par->match->family == NFPROTO_IPV4) {
+ return -EINVAL;
+ if (par->family == NFPROTO_IPV4) {
if (info->cfg.srcmask > 32 || info->cfg.dstmask > 32)
- return false;
+ return -EINVAL;
} else {
if (info->cfg.srcmask > 128 || info->cfg.dstmask > 128)
- return false;
+ return -EINVAL;
}
mutex_lock(&hashlimit_mutex);
- info->hinfo = htable_find_get(net, info->name, par->match->family);
- if (!info->hinfo && htable_create(net, info, par->match->family) != 0) {
- mutex_unlock(&hashlimit_mutex);
- return false;
+ info->hinfo = htable_find_get(net, info->name, par->family);
+ if (info->hinfo == NULL) {
+ ret = htable_create(net, info, par->family);
+ if (ret < 0) {
+ mutex_unlock(&hashlimit_mutex);
+ return ret;
+ }
}
mutex_unlock(&hashlimit_mutex);
- return true;
-}
-
-static void
-hashlimit_mt_destroy_v0(const struct xt_mtdtor_param *par)
-{
- const struct xt_hashlimit_info *r = par->matchinfo;
-
- htable_put(r->hinfo);
+ return 0;
}
static void hashlimit_mt_destroy(const struct xt_mtdtor_param *par)
@@ -764,47 +612,8 @@ static void hashlimit_mt_destroy(const struct xt_mtdtor_param *par)
htable_put(info->hinfo);
}
-#ifdef CONFIG_COMPAT
-struct compat_xt_hashlimit_info {
- char name[IFNAMSIZ];
- struct hashlimit_cfg cfg;
- compat_uptr_t hinfo;
- compat_uptr_t master;
-};
-
-static void hashlimit_mt_compat_from_user(void *dst, const void *src)
-{
- int off = offsetof(struct compat_xt_hashlimit_info, hinfo);
-
- memcpy(dst, src, off);
- memset(dst + off, 0, sizeof(struct compat_xt_hashlimit_info) - off);
-}
-
-static int hashlimit_mt_compat_to_user(void __user *dst, const void *src)
-{
- int off = offsetof(struct compat_xt_hashlimit_info, hinfo);
-
- return copy_to_user(dst, src, off) ? -EFAULT : 0;
-}
-#endif
-
static struct xt_match hashlimit_mt_reg[] __read_mostly = {
{
- .name = "hashlimit",
- .revision = 0,
- .family = NFPROTO_IPV4,
- .match = hashlimit_mt_v0,
- .matchsize = sizeof(struct xt_hashlimit_info),
-#ifdef CONFIG_COMPAT
- .compatsize = sizeof(struct compat_xt_hashlimit_info),
- .compat_from_user = hashlimit_mt_compat_from_user,
- .compat_to_user = hashlimit_mt_compat_to_user,
-#endif
- .checkentry = hashlimit_mt_check_v0,
- .destroy = hashlimit_mt_destroy_v0,
- .me = THIS_MODULE
- },
- {
.name = "hashlimit",
.revision = 1,
.family = NFPROTO_IPV4,
@@ -816,20 +625,6 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = {
},
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
{
- .name = "hashlimit",
- .family = NFPROTO_IPV6,
- .match = hashlimit_mt_v0,
- .matchsize = sizeof(struct xt_hashlimit_info),
-#ifdef CONFIG_COMPAT
- .compatsize = sizeof(struct compat_xt_hashlimit_info),
- .compat_from_user = hashlimit_mt_compat_from_user,
- .compat_to_user = hashlimit_mt_compat_to_user,
-#endif
- .checkentry = hashlimit_mt_check_v0,
- .destroy = hashlimit_mt_destroy_v0,
- .me = THIS_MODULE
- },
- {
.name = "hashlimit",
.revision = 1,
.family = NFPROTO_IPV6,
@@ -888,12 +683,15 @@ static void dl_seq_stop(struct seq_file *s, void *v)
static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
struct seq_file *s)
{
+ int res;
+
+ spin_lock(&ent->lock);
/* recalculate to show accurate numbers */
rateinfo_recalc(ent, jiffies);
switch (family) {
case NFPROTO_IPV4:
- return seq_printf(s, "%ld %pI4:%u->%pI4:%u %u %u %u\n",
+ res = seq_printf(s, "%ld %pI4:%u->%pI4:%u %u %u %u\n",
(long)(ent->expires - jiffies)/HZ,
&ent->dst.ip.src,
ntohs(ent->dst.src_port),
@@ -901,9 +699,10 @@ static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
ntohs(ent->dst.dst_port),
ent->rateinfo.credit, ent->rateinfo.credit_cap,
ent->rateinfo.cost);
+ break;
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
case NFPROTO_IPV6:
- return seq_printf(s, "%ld %pI6:%u->%pI6:%u %u %u %u\n",
+ res = seq_printf(s, "%ld %pI6:%u->%pI6:%u %u %u %u\n",
(long)(ent->expires - jiffies)/HZ,
&ent->dst.ip6.src,
ntohs(ent->dst.src_port),
@@ -911,11 +710,14 @@ static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
ntohs(ent->dst.dst_port),
ent->rateinfo.credit, ent->rateinfo.credit_cap,
ent->rateinfo.cost);
+ break;
#endif
default:
BUG();
- return 0;
+ res = 0;
}
+ spin_unlock(&ent->lock);
+ return res;
}
static int dl_seq_show(struct seq_file *s, void *v)
@@ -1024,7 +826,7 @@ static int __init hashlimit_mt_init(void)
sizeof(struct dsthash_ent), 0, 0,
NULL);
if (!hashlimit_cachep) {
- printk(KERN_ERR "xt_hashlimit: unable to create slab cache\n");
+ pr_warning("unable to create slab cache\n");
goto err2;
}
return 0;
@@ -1039,9 +841,11 @@ err1:
static void __exit hashlimit_mt_exit(void)
{
- kmem_cache_destroy(hashlimit_cachep);
xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg));
unregister_pernet_subsys(&hashlimit_net_ops);
+
+ rcu_barrier_bh();
+ kmem_cache_destroy(hashlimit_cachep);
}
module_init(hashlimit_mt_init);
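The hashlimit rework above moves the packet path from one per-table spinlock to RCU: buckets are walked under rcu_read_lock_bh(), dsthash_find() hands back the entry with its new per-entry ent->lock already held, insertions still serialize on ht->lock, and frees defer through call_rcu_bh() so a concurrent reader never sees a recycled entry. A condensed pthread sketch of the lookup-then-lock-entry shape (illustrative names; real RCU readers take no lock at all):

#include <pthread.h>
#include <stddef.h>

struct entry {
	struct entry *next;
	int key;
	pthread_mutex_t lock;	/* per-entry, replaces the global table lock */
	unsigned long credit;
};

/* Walk the chain and return the entry locked, as dsthash_find() now does.
 * The kernel walk runs under rcu_read_lock_bh() and mutates the list only
 * via the hlist_*_rcu() helpers plus ht->lock, which this sketch omits. */
static struct entry *find_and_lock(struct entry *head, int key)
{
	struct entry *e;

	for (e = head; e != NULL; e = e->next)
		if (e->key == key) {
			pthread_mutex_lock(&e->lock);
			return e;
		}
	return NULL;
}

static void spend_credit(struct entry *e)
{
	if (e->credit > 0)
		e->credit--;	/* rate state is touched under e->lock only */
	pthread_mutex_unlock(&e->lock);
}

int main(void)
{
	struct entry e = { NULL, 1, PTHREAD_MUTEX_INITIALIZER, 5 };
	struct entry *hit = find_and_lock(&e, 1);

	if (hit != NULL)
		spend_credit(hit);
	return 0;
}

The payoff is that two flows hashing to different entries no longer contend on a single lock while updating their rate counters.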
diff --git a/net/netfilter/xt_helper.c b/net/netfilter/xt_helper.c
index 64fc7f277221..9f4ab00c8050 100644
--- a/net/netfilter/xt_helper.c
+++ b/net/netfilter/xt_helper.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
@@ -24,7 +24,7 @@ MODULE_ALIAS("ip6t_helper");
static bool
-helper_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+helper_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_helper_info *info = par->matchinfo;
const struct nf_conn *ct;
@@ -54,17 +54,19 @@ helper_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return ret;
}
-static bool helper_mt_check(const struct xt_mtchk_param *par)
+static int helper_mt_check(const struct xt_mtchk_param *par)
{
struct xt_helper_info *info = par->matchinfo;
+ int ret;
- if (nf_ct_l3proto_try_module_get(par->family) < 0) {
- printk(KERN_WARNING "can't load conntrack support for "
- "proto=%u\n", par->family);
- return false;
+ ret = nf_ct_l3proto_try_module_get(par->family);
+ if (ret < 0) {
+ pr_info("cannot load conntrack support for proto=%u\n",
+ par->family);
+ return ret;
}
info->name[29] = '\0';
- return true;
+ return 0;
}
static void helper_mt_destroy(const struct xt_mtdtor_param *par)
diff --git a/net/netfilter/xt_hl.c b/net/netfilter/xt_hl.c
index 7726154c87b2..7d12221ead89 100644
--- a/net/netfilter/xt_hl.c
+++ b/net/netfilter/xt_hl.c
@@ -25,7 +25,7 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_ttl");
MODULE_ALIAS("ip6t_hl");
-static bool ttl_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+static bool ttl_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ipt_ttl_info *info = par->matchinfo;
const u8 ttl = ip_hdr(skb)->ttl;
@@ -39,16 +39,12 @@ static bool ttl_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return ttl < info->ttl;
case IPT_TTL_GT:
return ttl > info->ttl;
- default:
- printk(KERN_WARNING "ipt_ttl: unknown mode %d\n",
- info->mode);
- return false;
}
return false;
}
-static bool hl_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
+static bool hl_mt6(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ip6t_hl_info *info = par->matchinfo;
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
@@ -56,20 +52,12 @@ static bool hl_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
switch (info->mode) {
case IP6T_HL_EQ:
return ip6h->hop_limit == info->hop_limit;
- break;
case IP6T_HL_NE:
return ip6h->hop_limit != info->hop_limit;
- break;
case IP6T_HL_LT:
return ip6h->hop_limit < info->hop_limit;
- break;
case IP6T_HL_GT:
return ip6h->hop_limit > info->hop_limit;
- break;
- default:
- printk(KERN_WARNING "ip6t_hl: unknown mode %d\n",
- info->mode);
- return false;
}
return false;
diff --git a/net/netfilter/xt_iprange.c b/net/netfilter/xt_iprange.c
index ffc96387d556..88f7c3511c72 100644
--- a/net/netfilter/xt_iprange.c
+++ b/net/netfilter/xt_iprange.c
@@ -8,6 +8,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
@@ -16,7 +17,7 @@
#include <linux/netfilter/xt_iprange.h>
static bool
-iprange_mt4(const struct sk_buff *skb, const struct xt_match_param *par)
+iprange_mt4(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_iprange_mtinfo *info = par->matchinfo;
const struct iphdr *iph = ip_hdr(skb);
@@ -67,7 +68,7 @@ iprange_ipv6_sub(const struct in6_addr *a, const struct in6_addr *b)
}
static bool
-iprange_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
+iprange_mt6(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_iprange_mtinfo *info = par->matchinfo;
const struct ipv6hdr *iph = ipv6_hdr(skb);
diff --git a/net/netfilter/xt_length.c b/net/netfilter/xt_length.c
index c4871ca6c86d..176e5570a999 100644
--- a/net/netfilter/xt_length.c
+++ b/net/netfilter/xt_length.c
@@ -21,7 +21,7 @@ MODULE_ALIAS("ipt_length");
MODULE_ALIAS("ip6t_length");
static bool
-length_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+length_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_length_info *info = par->matchinfo;
u_int16_t pktlen = ntohs(ip_hdr(skb)->tot_len);
@@ -30,7 +30,7 @@ length_mt(const struct sk_buff *skb, const struct xt_match_param *par)
}
static bool
-length_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
+length_mt6(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_length_info *info = par->matchinfo;
const u_int16_t pktlen = ntohs(ipv6_hdr(skb)->payload_len) +
diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c
index e5d7e1ffb1a4..32b7a579a032 100644
--- a/net/netfilter/xt_limit.c
+++ b/net/netfilter/xt_limit.c
@@ -5,6 +5,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
#include <linux/module.h>
@@ -64,7 +65,7 @@ static DEFINE_SPINLOCK(limit_lock);
#define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ)
static bool
-limit_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+limit_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_rateinfo *r = par->matchinfo;
struct xt_limit_priv *priv = r->master;
@@ -98,7 +99,7 @@ user2credits(u_int32_t user)
return (user * HZ * CREDITS_PER_JIFFY) / XT_LIMIT_SCALE;
}
-static bool limit_mt_check(const struct xt_mtchk_param *par)
+static int limit_mt_check(const struct xt_mtchk_param *par)
{
struct xt_rateinfo *r = par->matchinfo;
struct xt_limit_priv *priv;
@@ -106,14 +107,14 @@ static bool limit_mt_check(const struct xt_mtchk_param *par)
/* Check for overflow. */
if (r->burst == 0
|| user2credits(r->avg * r->burst) < user2credits(r->avg)) {
- printk("Overflow in xt_limit, try lower: %u/%u\n",
- r->avg, r->burst);
- return false;
+ pr_info("Overflow, try lower: %u/%u\n",
+ r->avg, r->burst);
+ return -ERANGE;
}
priv = kmalloc(sizeof(*priv), GFP_KERNEL);
if (priv == NULL)
- return false;
+ return -ENOMEM;
/* For SMP, we only want to use one set of state. */
r->master = priv;
@@ -125,7 +126,7 @@ static bool limit_mt_check(const struct xt_mtchk_param *par)
r->credit_cap = user2credits(r->avg * r->burst); /* Credits full. */
r->cost = user2credits(r->avg);
}
- return true;
+ return 0;
}
static void limit_mt_destroy(const struct xt_mtdtor_param *par)
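xt_limit implements a token bucket: credit refills with elapsed jiffies up to credit_cap = user2credits(avg * burst), and each matching packet spends cost = user2credits(avg); the new -ERANGE path above fires when that multiplication would wrap. A simplified stand-alone sketch of the scheme (generic units, illustrative names; the kernel's credit scaling via CREDITS_PER_JIFFY is omitted):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct bucket {
	uint64_t prev;		/* last refill time, in ticks */
	uint32_t credit;	/* tokens currently available */
	uint32_t credit_cap;	/* burst ceiling: cost * burst */
	uint32_t cost;		/* tokens one packet consumes */
};

static bool bucket_allow(struct bucket *b, uint64_t now, uint32_t tokens_per_tick)
{
	uint64_t gained = (now - b->prev) * tokens_per_tick;

	b->prev = now;
	if (gained + b->credit > b->credit_cap)
		b->credit = b->credit_cap;	/* clamp at the burst size */
	else
		b->credit += (uint32_t)gained;

	if (b->credit >= b->cost) {
		b->credit -= b->cost;		/* under the limit: match */
		return true;
	}
	return false;				/* over the limit */
}

int main(void)
{
	struct bucket b = { 0, 2, 2, 1 };	/* burst of 2, cost 1 */
	uint64_t t;

	for (t = 1; t <= 4; t++)
		printf("tick %llu: %s\n", (unsigned long long)t,
		       bucket_allow(&b, t, 0) ? "match" : "over limit");
	return 0;
}

With no refill (tokens_per_tick = 0), the two burst tokens are spent on the first two packets and the rest exceed the limit, mirroring how r->credit_cap and r->cost interact in limit_mt().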
diff --git a/net/netfilter/xt_mac.c b/net/netfilter/xt_mac.c
index c2007116ce5b..8160f6b1435d 100644
--- a/net/netfilter/xt_mac.c
+++ b/net/netfilter/xt_mac.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/skbuff.h>
+#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
@@ -24,16 +25,20 @@ MODULE_DESCRIPTION("Xtables: MAC address match");
MODULE_ALIAS("ipt_mac");
MODULE_ALIAS("ip6t_mac");
-static bool mac_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+static bool mac_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
- const struct xt_mac_info *info = par->matchinfo;
-
- /* Is mac pointer valid? */
- return skb_mac_header(skb) >= skb->head &&
- skb_mac_header(skb) + ETH_HLEN <= skb->data
- /* If so, compare... */
- && ((!compare_ether_addr(eth_hdr(skb)->h_source, info->srcaddr))
- ^ info->invert);
+ const struct xt_mac_info *info = par->matchinfo;
+ bool ret;
+
+ if (skb->dev == NULL || skb->dev->type != ARPHRD_ETHER)
+ return false;
+ if (skb_mac_header(skb) < skb->head)
+ return false;
+ if (skb_mac_header(skb) + ETH_HLEN > skb->data)
+ return false;
+ ret = compare_ether_addr(eth_hdr(skb)->h_source, info->srcaddr) == 0;
+ ret ^= info->invert;
+ return ret;
}
static struct xt_match mac_mt_reg __read_mostly = {
diff --git a/net/netfilter/xt_mark.c b/net/netfilter/xt_mark.c
index 1db07d8125f8..23345238711b 100644
--- a/net/netfilter/xt_mark.c
+++ b/net/netfilter/xt_mark.c
@@ -18,18 +18,38 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
-MODULE_DESCRIPTION("Xtables: packet mark match");
+MODULE_DESCRIPTION("Xtables: packet mark operations");
MODULE_ALIAS("ipt_mark");
MODULE_ALIAS("ip6t_mark");
+MODULE_ALIAS("ipt_MARK");
+MODULE_ALIAS("ip6t_MARK");
+
+static unsigned int
+mark_tg(struct sk_buff *skb, const struct xt_action_param *par)
+{
+ const struct xt_mark_tginfo2 *info = par->targinfo;
+
+ skb->mark = (skb->mark & ~info->mask) ^ info->mark;
+ return XT_CONTINUE;
+}
static bool
-mark_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+mark_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_mark_mtinfo1 *info = par->matchinfo;
return ((skb->mark & info->mask) == info->mark) ^ info->invert;
}
+static struct xt_target mark_tg_reg __read_mostly = {
+ .name = "MARK",
+ .revision = 2,
+ .family = NFPROTO_UNSPEC,
+ .target = mark_tg,
+ .targetsize = sizeof(struct xt_mark_tginfo2),
+ .me = THIS_MODULE,
+};
+
static struct xt_match mark_mt_reg __read_mostly = {
.name = "mark",
.revision = 1,
@@ -41,12 +61,23 @@ static struct xt_match mark_mt_reg __read_mostly = {
static int __init mark_mt_init(void)
{
- return xt_register_match(&mark_mt_reg);
+ int ret;
+
+ ret = xt_register_target(&mark_tg_reg);
+ if (ret < 0)
+ return ret;
+ ret = xt_register_match(&mark_mt_reg);
+ if (ret < 0) {
+ xt_unregister_target(&mark_tg_reg);
+ return ret;
+ }
+ return 0;
}
static void __exit mark_mt_exit(void)
{
xt_unregister_match(&mark_mt_reg);
+ xt_unregister_target(&mark_tg_reg);
}
module_init(mark_mt_init);
diff --git a/net/netfilter/xt_multiport.c b/net/netfilter/xt_multiport.c
index d06bb2dd3900..ac1d3c3d09e7 100644
--- a/net/netfilter/xt_multiport.c
+++ b/net/netfilter/xt_multiport.c
@@ -8,7 +8,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/udp.h>
@@ -26,29 +26,6 @@ MODULE_DESCRIPTION("Xtables: multiple port matching for TCP, UDP, UDP-Lite, SCTP
MODULE_ALIAS("ipt_multiport");
MODULE_ALIAS("ip6t_multiport");
-#if 0
-#define duprintf(format, args...) printk(format , ## args)
-#else
-#define duprintf(format, args...)
-#endif
-
-/* Returns 1 if the port is matched by the test, 0 otherwise. */
-static inline bool
-ports_match_v0(const u_int16_t *portlist, enum xt_multiport_flags flags,
- u_int8_t count, u_int16_t src, u_int16_t dst)
-{
- unsigned int i;
- for (i = 0; i < count; i++) {
- if (flags != XT_MULTIPORT_DESTINATION && portlist[i] == src)
- return true;
-
- if (flags != XT_MULTIPORT_SOURCE && portlist[i] == dst)
- return true;
- }
-
- return false;
-}
-
/* Returns 1 if the port is matched by the test, 0 otherwise. */
static inline bool
ports_match_v1(const struct xt_multiport_v1 *minfo,
@@ -63,7 +40,7 @@ ports_match_v1(const struct xt_multiport_v1 *minfo,
if (minfo->pflags[i]) {
/* range port matching */
e = minfo->ports[++i];
- duprintf("src or dst matches with %d-%d?\n", s, e);
+ pr_debug("src or dst matches with %d-%d?\n", s, e);
if (minfo->flags == XT_MULTIPORT_SOURCE
&& src >= s && src <= e)
@@ -77,7 +54,7 @@ ports_match_v1(const struct xt_multiport_v1 *minfo,
return true ^ minfo->invert;
} else {
/* exact port matching */
- duprintf("src or dst matches with %d?\n", s);
+ pr_debug("src or dst matches with %d?\n", s);
if (minfo->flags == XT_MULTIPORT_SOURCE
&& src == s)
@@ -95,31 +72,7 @@ ports_match_v1(const struct xt_multiport_v1 *minfo,
}
static bool
-multiport_mt_v0(const struct sk_buff *skb, const struct xt_match_param *par)
-{
- const __be16 *pptr;
- __be16 _ports[2];
- const struct xt_multiport *multiinfo = par->matchinfo;
-
- if (par->fragoff != 0)
- return false;
-
- pptr = skb_header_pointer(skb, par->thoff, sizeof(_ports), _ports);
- if (pptr == NULL) {
- /* We've been asked to examine this packet, and we
- * can't. Hence, no choice but to drop.
- */
- duprintf("xt_multiport: Dropping evil offset=0 tinygram.\n");
- *par->hotdrop = true;
- return false;
- }
-
- return ports_match_v0(multiinfo->ports, multiinfo->flags,
- multiinfo->count, ntohs(pptr[0]), ntohs(pptr[1]));
-}
-
-static bool
-multiport_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+multiport_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const __be16 *pptr;
__be16 _ports[2];
@@ -133,8 +86,8 @@ multiport_mt(const struct sk_buff *skb, const struct xt_match_param *par)
/* We've been asked to examine this packet, and we
* can't. Hence, no choice but to drop.
*/
- duprintf("xt_multiport: Dropping evil offset=0 tinygram.\n");
- *par->hotdrop = true;
+ pr_debug("Dropping evil offset=0 tinygram.\n");
+ par->hotdrop = true;
return false;
}
@@ -158,55 +111,28 @@ check(u_int16_t proto,
&& count <= XT_MULTI_PORTS;
}
-static bool multiport_mt_check_v0(const struct xt_mtchk_param *par)
-{
- const struct ipt_ip *ip = par->entryinfo;
- const struct xt_multiport *multiinfo = par->matchinfo;
-
- return check(ip->proto, ip->invflags, multiinfo->flags,
- multiinfo->count);
-}
-
-static bool multiport_mt_check(const struct xt_mtchk_param *par)
+static int multiport_mt_check(const struct xt_mtchk_param *par)
{
const struct ipt_ip *ip = par->entryinfo;
const struct xt_multiport_v1 *multiinfo = par->matchinfo;
return check(ip->proto, ip->invflags, multiinfo->flags,
- multiinfo->count);
+ multiinfo->count) ? 0 : -EINVAL;
}
-static bool multiport_mt6_check_v0(const struct xt_mtchk_param *par)
-{
- const struct ip6t_ip6 *ip = par->entryinfo;
- const struct xt_multiport *multiinfo = par->matchinfo;
-
- return check(ip->proto, ip->invflags, multiinfo->flags,
- multiinfo->count);
-}
-
-static bool multiport_mt6_check(const struct xt_mtchk_param *par)
+static int multiport_mt6_check(const struct xt_mtchk_param *par)
{
const struct ip6t_ip6 *ip = par->entryinfo;
const struct xt_multiport_v1 *multiinfo = par->matchinfo;
return check(ip->proto, ip->invflags, multiinfo->flags,
- multiinfo->count);
+ multiinfo->count) ? 0 : -EINVAL;
}
static struct xt_match multiport_mt_reg[] __read_mostly = {
{
.name = "multiport",
.family = NFPROTO_IPV4,
- .revision = 0,
- .checkentry = multiport_mt_check_v0,
- .match = multiport_mt_v0,
- .matchsize = sizeof(struct xt_multiport),
- .me = THIS_MODULE,
- },
- {
- .name = "multiport",
- .family = NFPROTO_IPV4,
.revision = 1,
.checkentry = multiport_mt_check,
.match = multiport_mt,
@@ -216,15 +142,6 @@ static struct xt_match multiport_mt_reg[] __read_mostly = {
{
.name = "multiport",
.family = NFPROTO_IPV6,
- .revision = 0,
- .checkentry = multiport_mt6_check_v0,
- .match = multiport_mt_v0,
- .matchsize = sizeof(struct xt_multiport),
- .me = THIS_MODULE,
- },
- {
- .name = "multiport",
- .family = NFPROTO_IPV6,
.revision = 1,
.checkentry = multiport_mt6_check,
.match = multiport_mt,
diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c
index 4169e200588d..4327e101c047 100644
--- a/net/netfilter/xt_osf.c
+++ b/net/netfilter/xt_osf.c
@@ -16,7 +16,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
@@ -193,8 +193,8 @@ static inline int xt_osf_ttl(const struct sk_buff *skb, const struct xt_osf_info
return ip->ttl == f_ttl;
}
-static bool xt_osf_match_packet(const struct sk_buff *skb,
- const struct xt_match_param *p)
+static bool
+xt_osf_match_packet(const struct sk_buff *skb, struct xt_action_param *p)
{
const struct xt_osf_info *info = p->matchinfo;
const struct iphdr *ip = ip_hdr(skb);
@@ -382,14 +382,14 @@ static int __init xt_osf_init(void)
err = nfnetlink_subsys_register(&xt_osf_nfnetlink);
if (err < 0) {
- printk(KERN_ERR "Failed (%d) to register OSF nsfnetlink helper.\n", err);
+ pr_err("Failed to register OSF nsfnetlink helper (%d)\n", err);
goto err_out_exit;
}
err = xt_register_match(&xt_osf_match);
if (err) {
- printk(KERN_ERR "Failed (%d) to register OS fingerprint "
- "matching module.\n", err);
+ pr_err("Failed to register OS fingerprint "
+ "matching module (%d)\n", err);
goto err_out_remove;
}
diff --git a/net/netfilter/xt_owner.c b/net/netfilter/xt_owner.c
index d24c76dffee2..772d7389b337 100644
--- a/net/netfilter/xt_owner.c
+++ b/net/netfilter/xt_owner.c
@@ -18,7 +18,7 @@
#include <linux/netfilter/xt_owner.h>
static bool
-owner_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+owner_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_owner_match_info *info = par->matchinfo;
const struct file *filp;
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
index 8d28ca5848bc..d7ca16b8b8df 100644
--- a/net/netfilter/xt_physdev.c
+++ b/net/netfilter/xt_physdev.c
@@ -7,7 +7,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter_bridge.h>
@@ -22,7 +22,7 @@ MODULE_ALIAS("ip6t_physdev");
static bool
-physdev_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+physdev_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
const struct xt_physdev_info *info = par->matchinfo;
@@ -83,25 +83,25 @@ match_outdev:
return (!!ret ^ !(info->invert & XT_PHYSDEV_OP_OUT));
}
-static bool physdev_mt_check(const struct xt_mtchk_param *par)
+static int physdev_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_physdev_info *info = par->matchinfo;
if (!(info->bitmask & XT_PHYSDEV_OP_MASK) ||
info->bitmask & ~XT_PHYSDEV_OP_MASK)
- return false;
+ return -EINVAL;
if (info->bitmask & XT_PHYSDEV_OP_OUT &&
(!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) ||
info->invert & XT_PHYSDEV_OP_BRIDGED) &&
par->hook_mask & ((1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_FORWARD) | (1 << NF_INET_POST_ROUTING))) {
- printk(KERN_WARNING "physdev match: using --physdev-out in the "
- "OUTPUT, FORWARD and POSTROUTING chains for non-bridged "
- "traffic is not supported anymore.\n");
+ pr_info("using --physdev-out in the OUTPUT, FORWARD and "
+ "POSTROUTING chains for non-bridged traffic is not "
+ "supported anymore.\n");
if (par->hook_mask & (1 << NF_INET_LOCAL_OUT))
- return false;
+ return -EINVAL;
}
- return true;
+ return 0;
}
static struct xt_match physdev_mt_reg __read_mostly = {
diff --git a/net/netfilter/xt_pkttype.c b/net/netfilter/xt_pkttype.c
index 69da1d3a1d85..5b645cb598fc 100644
--- a/net/netfilter/xt_pkttype.c
+++ b/net/netfilter/xt_pkttype.c
@@ -23,7 +23,7 @@ MODULE_ALIAS("ipt_pkttype");
MODULE_ALIAS("ip6t_pkttype");
static bool
-pkttype_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+pkttype_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_pkttype_info *info = par->matchinfo;
u_int8_t type;
diff --git a/net/netfilter/xt_policy.c b/net/netfilter/xt_policy.c
index 4cbfebda8fa1..f23e97bb42d7 100644
--- a/net/netfilter/xt_policy.c
+++ b/net/netfilter/xt_policy.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
@@ -110,15 +110,15 @@ match_policy_out(const struct sk_buff *skb, const struct xt_policy_info *info,
}
static bool
-policy_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+policy_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_policy_info *info = par->matchinfo;
int ret;
if (info->flags & XT_POLICY_MATCH_IN)
- ret = match_policy_in(skb, info, par->match->family);
+ ret = match_policy_in(skb, info, par->family);
else
- ret = match_policy_out(skb, info, par->match->family);
+ ret = match_policy_out(skb, info, par->family);
if (ret < 0)
ret = info->flags & XT_POLICY_MATCH_NONE ? true : false;
@@ -128,32 +128,29 @@ policy_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return ret;
}
-static bool policy_mt_check(const struct xt_mtchk_param *par)
+static int policy_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_policy_info *info = par->matchinfo;
if (!(info->flags & (XT_POLICY_MATCH_IN|XT_POLICY_MATCH_OUT))) {
- printk(KERN_ERR "xt_policy: neither incoming nor "
- "outgoing policy selected\n");
- return false;
+ pr_info("neither incoming nor outgoing policy selected\n");
+ return -EINVAL;
}
if (par->hook_mask & ((1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_IN)) && info->flags & XT_POLICY_MATCH_OUT) {
- printk(KERN_ERR "xt_policy: output policy not valid in "
- "PRE_ROUTING and INPUT\n");
- return false;
+ pr_info("output policy not valid in PREROUTING and INPUT\n");
+ return -EINVAL;
}
if (par->hook_mask & ((1 << NF_INET_POST_ROUTING) |
(1 << NF_INET_LOCAL_OUT)) && info->flags & XT_POLICY_MATCH_IN) {
- printk(KERN_ERR "xt_policy: input policy not valid in "
- "POST_ROUTING and OUTPUT\n");
- return false;
+ pr_info("input policy not valid in POSTROUTING and OUTPUT\n");
+ return -EINVAL;
}
if (info->len > XT_POLICY_MAX_ELEM) {
- printk(KERN_ERR "xt_policy: too many policy elements\n");
- return false;
+ pr_info("too many policy elements\n");
+ return -EINVAL;
}
- return true;
+ return 0;
}
static struct xt_match policy_mt_reg[] __read_mostly = {
diff --git a/net/netfilter/xt_quota.c b/net/netfilter/xt_quota.c
index 2d5562498c43..b4f7dfea5980 100644
--- a/net/netfilter/xt_quota.c
+++ b/net/netfilter/xt_quota.c
@@ -23,7 +23,7 @@ MODULE_ALIAS("ip6t_quota");
static DEFINE_SPINLOCK(quota_lock);
static bool
-quota_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+quota_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
struct xt_quota_info *q = (void *)par->matchinfo;
struct xt_quota_priv *priv = q->master;
@@ -44,19 +44,19 @@ quota_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return ret;
}
-static bool quota_mt_check(const struct xt_mtchk_param *par)
+static int quota_mt_check(const struct xt_mtchk_param *par)
{
struct xt_quota_info *q = par->matchinfo;
if (q->flags & ~XT_QUOTA_MASK)
- return false;
+ return -EINVAL;
q->master = kmalloc(sizeof(*q->master), GFP_KERNEL);
if (q->master == NULL)
- return false;
+ return -ENOMEM;
q->master->quota = q->quota;
- return true;
+ return 0;
}
static void quota_mt_destroy(const struct xt_mtdtor_param *par)
diff --git a/net/netfilter/xt_rateest.c b/net/netfilter/xt_rateest.c
index 4fc6a917f6de..76a083184d8e 100644
--- a/net/netfilter/xt_rateest.c
+++ b/net/netfilter/xt_rateest.c
@@ -15,7 +15,7 @@
static bool
-xt_rateest_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+xt_rateest_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_rateest_match_info *info = par->matchinfo;
struct gnet_stats_rate_est *r;
@@ -74,10 +74,11 @@ xt_rateest_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return ret;
}
-static bool xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
+static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
{
struct xt_rateest_match_info *info = par->matchinfo;
struct xt_rateest *est1, *est2;
+ int ret = -EINVAL;
if (hweight32(info->flags & (XT_RATEEST_MATCH_ABS |
XT_RATEEST_MATCH_REL)) != 1)
@@ -95,6 +96,7 @@ static bool xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
goto err1;
}
+ ret = -ENOENT;
est1 = xt_rateest_lookup(info->name1);
if (!est1)
goto err1;
@@ -109,12 +111,12 @@ static bool xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
info->est1 = est1;
info->est2 = est2;
- return true;
+ return 0;
err2:
xt_rateest_put(est1);
err1:
- return false;
+ return ret;
}
static void xt_rateest_mt_destroy(const struct xt_mtdtor_param *par)
diff --git a/net/netfilter/xt_realm.c b/net/netfilter/xt_realm.c
index 484d1689bfde..459a7b256eb2 100644
--- a/net/netfilter/xt_realm.c
+++ b/net/netfilter/xt_realm.c
@@ -22,7 +22,7 @@ MODULE_DESCRIPTION("Xtables: Routing realm match");
MODULE_ALIAS("ipt_realm");
static bool
-realm_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+realm_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_realm_info *info = par->matchinfo;
const struct dst_entry *dst = skb_dst(skb);
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 834b736857cb..76aec6a44762 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -12,6 +12,7 @@
* Author: Stephen Frost <sfrost@snowman.net>
* Copyright 2002-2003, Stephen Frost, 2.5.x port by laforge@netfilter.org
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
@@ -35,8 +36,8 @@
#include <linux/netfilter/xt_recent.h>
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
-MODULE_AUTHOR("Jan Engelhardt <jengelh@computergmbh.de>");
-MODULE_DESCRIPTION("Xtables: \"recently-seen\" host matching for IPv4");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
+MODULE_DESCRIPTION("Xtables: \"recently-seen\" host matching");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_recent");
MODULE_ALIAS("ip6t_recent");
@@ -51,14 +52,14 @@ module_param(ip_list_tot, uint, 0400);
module_param(ip_pkt_list_tot, uint, 0400);
module_param(ip_list_hash_size, uint, 0400);
module_param(ip_list_perms, uint, 0400);
-module_param(ip_list_uid, uint, 0400);
-module_param(ip_list_gid, uint, 0400);
+module_param(ip_list_uid, uint, S_IRUGO | S_IWUSR);
+module_param(ip_list_gid, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ip_list_tot, "number of IPs to remember per list");
MODULE_PARM_DESC(ip_pkt_list_tot, "number of packets per IP address to remember (max. 255)");
MODULE_PARM_DESC(ip_list_hash_size, "size of hash table used to look up IPs");
MODULE_PARM_DESC(ip_list_perms, "permissions on /proc/net/xt_recent/* files");
-MODULE_PARM_DESC(ip_list_uid,"owner of /proc/net/xt_recent/* files");
-MODULE_PARM_DESC(ip_list_gid,"owning group of /proc/net/xt_recent/* files");
+MODULE_PARM_DESC(ip_list_uid, "default owner of /proc/net/xt_recent/* files");
+MODULE_PARM_DESC(ip_list_gid, "default owning group of /proc/net/xt_recent/* files");
struct recent_entry {
struct list_head list;
@@ -84,9 +85,6 @@ struct recent_net {
struct list_head tables;
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *xt_recent;
-#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
- struct proc_dir_entry *ipt_recent;
-#endif
#endif
};
@@ -147,6 +145,25 @@ static void recent_entry_remove(struct recent_table *t, struct recent_entry *e)
t->entries--;
}
+/*
+ * Drop entries with timestamps older than 'time'.
+ */
+static void recent_entry_reap(struct recent_table *t, unsigned long time)
+{
+ struct recent_entry *e;
+
+ /*
+ * The head of the LRU list is always the oldest entry.
+ */
+ e = list_entry(t->lru_list.next, struct recent_entry, lru_list);
+
+ /*
+ * The last time stamp is the most recent.
+ */
+ if (time_after(time, e->stamps[e->index-1]))
+ recent_entry_remove(t, e);
+}
+
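
recent_entry_reap() relies on the LRU ordering this file already maintains: the list head is the stalest entry and stamps[index-1] is that entry's most recent hit, so one comparison suffices and at most one entry is reaped per matched packet. A sketch of the intended call site, assuming the cutoff is the same window recent_mt() computes for check/update:

	/* Sketch only: inside recent_mt(); info->seconds is guaranteed
	 * non-zero for XT_RECENT_REAP by recent_mt_check() below. */
	unsigned long time = jiffies - info->seconds * HZ;

	if (info->check_set & XT_RECENT_REAP)
		recent_entry_reap(t, time);	/* time_after() is wraparound-safe */
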
static struct recent_entry *
recent_entry_init(struct recent_table *t, const union nf_inet_addr *addr,
u_int16_t family, u_int8_t ttl)
@@ -207,7 +224,7 @@ static void recent_table_flush(struct recent_table *t)
}
static bool
-recent_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+recent_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
struct net *net = dev_net(par->in ? par->in : par->out);
struct recent_net *recent_net = recent_pernet(net);
@@ -218,7 +235,7 @@ recent_mt(const struct sk_buff *skb, const struct xt_match_param *par)
u_int8_t ttl;
bool ret = info->invert;
- if (par->match->family == NFPROTO_IPV4) {
+ if (par->family == NFPROTO_IPV4) {
const struct iphdr *iph = ip_hdr(skb);
if (info->side == XT_RECENT_DEST)
@@ -244,14 +261,14 @@ recent_mt(const struct sk_buff *skb, const struct xt_match_param *par)
spin_lock_bh(&recent_lock);
t = recent_table_lookup(recent_net, info->name);
- e = recent_entry_lookup(t, &addr, par->match->family,
+ e = recent_entry_lookup(t, &addr, par->family,
(info->check_set & XT_RECENT_TTL) ? ttl : 0);
if (e == NULL) {
if (!(info->check_set & XT_RECENT_SET))
goto out;
- e = recent_entry_init(t, &addr, par->match->family, ttl);
+ e = recent_entry_init(t, &addr, par->family, ttl);
if (e == NULL)
- *par->hotdrop = true;
+ par->hotdrop = true;
ret = !ret;
goto out;
}
@@ -273,6 +290,10 @@ recent_mt(const struct sk_buff *skb, const struct xt_match_param *par)
break;
}
}
+
+ /* info->seconds must be non-zero */
+ if (info->check_set & XT_RECENT_REAP)
+ recent_entry_reap(t, time);
}
if (info->check_set & XT_RECENT_SET ||
@@ -285,7 +306,7 @@ out:
return ret;
}
-static bool recent_mt_check(const struct xt_mtchk_param *par)
+static int recent_mt_check(const struct xt_mtchk_param *par)
{
struct recent_net *recent_net = recent_pernet(par->net);
const struct xt_recent_mtinfo *info = par->matchinfo;
@@ -294,41 +315,51 @@ static bool recent_mt_check(const struct xt_mtchk_param *par)
struct proc_dir_entry *pde;
#endif
unsigned i;
- bool ret = false;
+ int ret = -EINVAL;
if (unlikely(!hash_rnd_inited)) {
get_random_bytes(&hash_rnd, sizeof(hash_rnd));
hash_rnd_inited = true;
}
+ if (info->check_set & ~XT_RECENT_VALID_FLAGS) {
+ pr_info("Unsupported user space flags (%08x)\n",
+ info->check_set);
+ return -EINVAL;
+ }
if (hweight8(info->check_set &
(XT_RECENT_SET | XT_RECENT_REMOVE |
XT_RECENT_CHECK | XT_RECENT_UPDATE)) != 1)
- return false;
+ return -EINVAL;
if ((info->check_set & (XT_RECENT_SET | XT_RECENT_REMOVE)) &&
- (info->seconds || info->hit_count))
- return false;
+ (info->seconds || info->hit_count ||
+ (info->check_set & XT_RECENT_MODIFIERS)))
+ return -EINVAL;
+ if ((info->check_set & XT_RECENT_REAP) && !info->seconds)
+ return -EINVAL;
if (info->hit_count > ip_pkt_list_tot) {
- pr_info(KBUILD_MODNAME ": hitcount (%u) is larger than "
+ pr_info("hitcount (%u) is larger than "
"packets to be remembered (%u)\n",
info->hit_count, ip_pkt_list_tot);
- return false;
+ return -EINVAL;
}
if (info->name[0] == '\0' ||
strnlen(info->name, XT_RECENT_NAME_LEN) == XT_RECENT_NAME_LEN)
- return false;
+ return -EINVAL;
mutex_lock(&recent_mutex);
t = recent_table_lookup(recent_net, info->name);
if (t != NULL) {
t->refcnt++;
- ret = true;
+ ret = 0;
goto out;
}
t = kzalloc(sizeof(*t) + sizeof(t->iphash[0]) * ip_list_hash_size,
GFP_KERNEL);
- if (t == NULL)
+ if (t == NULL) {
+ ret = -ENOMEM;
goto out;
+ }
t->refcnt = 1;
strcpy(t->name, info->name);
INIT_LIST_HEAD(&t->lru_list);
@@ -339,26 +370,16 @@ static bool recent_mt_check(const struct xt_mtchk_param *par)
&recent_mt_fops, t);
if (pde == NULL) {
kfree(t);
- goto out;
- }
- pde->uid = ip_list_uid;
- pde->gid = ip_list_gid;
-#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
- pde = proc_create_data(t->name, ip_list_perms, recent_net->ipt_recent,
- &recent_old_fops, t);
- if (pde == NULL) {
- remove_proc_entry(t->name, recent_net->xt_recent);
- kfree(t);
+ ret = -ENOMEM;
goto out;
}
pde->uid = ip_list_uid;
pde->gid = ip_list_gid;
#endif
-#endif
spin_lock_bh(&recent_lock);
list_add_tail(&t->list, &recent_net->tables);
spin_unlock_bh(&recent_lock);
- ret = true;
+ ret = 0;
out:
mutex_unlock(&recent_mutex);
return ret;
@@ -377,9 +398,6 @@ static void recent_mt_destroy(const struct xt_mtdtor_param *par)
list_del(&t->list);
spin_unlock_bh(&recent_lock);
#ifdef CONFIG_PROC_FS
-#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
- remove_proc_entry(t->name, recent_net->ipt_recent);
-#endif
remove_proc_entry(t->name, recent_net->xt_recent);
#endif
recent_table_flush(t);
@@ -471,84 +489,6 @@ static int recent_seq_open(struct inode *inode, struct file *file)
return 0;
}
-#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
-static int recent_old_seq_open(struct inode *inode, struct file *filp)
-{
- static bool warned_of_old;
-
- if (unlikely(!warned_of_old)) {
- printk(KERN_INFO KBUILD_MODNAME ": Use of /proc/net/ipt_recent"
- " is deprecated; use /proc/net/xt_recent.\n");
- warned_of_old = true;
- }
- return recent_seq_open(inode, filp);
-}
-
-static ssize_t recent_old_proc_write(struct file *file,
- const char __user *input,
- size_t size, loff_t *loff)
-{
- const struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
- struct recent_table *t = pde->data;
- struct recent_entry *e;
- char buf[sizeof("+255.255.255.255")], *c = buf;
- union nf_inet_addr addr = {};
- int add;
-
- if (size > sizeof(buf))
- size = sizeof(buf);
- if (copy_from_user(buf, input, size))
- return -EFAULT;
-
- c = skip_spaces(c);
-
- if (size - (c - buf) < 5)
- return c - buf;
- if (!strncmp(c, "clear", 5)) {
- c += 5;
- spin_lock_bh(&recent_lock);
- recent_table_flush(t);
- spin_unlock_bh(&recent_lock);
- return c - buf;
- }
-
- switch (*c) {
- case '-':
- add = 0;
- c++;
- break;
- case '+':
- c++;
- default:
- add = 1;
- break;
- }
- addr.ip = in_aton(c);
-
- spin_lock_bh(&recent_lock);
- e = recent_entry_lookup(t, &addr, NFPROTO_IPV4, 0);
- if (e == NULL) {
- if (add)
- recent_entry_init(t, &addr, NFPROTO_IPV4, 0);
- } else {
- if (add)
- recent_entry_update(t, e);
- else
- recent_entry_remove(t, e);
- }
- spin_unlock_bh(&recent_lock);
- return size;
-}
-
-static const struct file_operations recent_old_fops = {
- .open = recent_old_seq_open,
- .read = seq_read,
- .write = recent_old_proc_write,
- .release = seq_release_private,
- .owner = THIS_MODULE,
-};
-#endif
-
static ssize_t
recent_mt_proc_write(struct file *file, const char __user *input,
size_t size, loff_t *loff)
@@ -585,7 +525,7 @@ recent_mt_proc_write(struct file *file, const char __user *input,
add = true;
break;
default:
- printk(KERN_INFO KBUILD_MODNAME ": Need +ip, -ip or /\n");
+ pr_info("Need \"+ip\", \"-ip\" or \"/\"\n");
return -EINVAL;
}
@@ -600,8 +540,7 @@ recent_mt_proc_write(struct file *file, const char __user *input,
}
if (!succ) {
- printk(KERN_INFO KBUILD_MODNAME ": illegal address written "
- "to procfs\n");
+ pr_info("illegal address written to procfs\n");
return -EINVAL;
}
@@ -637,21 +576,11 @@ static int __net_init recent_proc_net_init(struct net *net)
recent_net->xt_recent = proc_mkdir("xt_recent", net->proc_net);
if (!recent_net->xt_recent)
return -ENOMEM;
-#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
- recent_net->ipt_recent = proc_mkdir("ipt_recent", net->proc_net);
- if (!recent_net->ipt_recent) {
- proc_net_remove(net, "xt_recent");
- return -ENOMEM;
- }
-#endif
return 0;
}
static void __net_exit recent_proc_net_exit(struct net *net)
{
-#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
- proc_net_remove(net, "ipt_recent");
-#endif
proc_net_remove(net, "xt_recent");
}
#else
diff --git a/net/netfilter/xt_sctp.c b/net/netfilter/xt_sctp.c
index a189ada9128f..c04fcf385c59 100644
--- a/net/netfilter/xt_sctp.c
+++ b/net/netfilter/xt_sctp.c
@@ -1,3 +1,4 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/ip.h>
@@ -15,12 +16,6 @@ MODULE_DESCRIPTION("Xtables: SCTP protocol packet match");
MODULE_ALIAS("ipt_sctp");
MODULE_ALIAS("ip6t_sctp");
-#ifdef DEBUG_SCTP
-#define duprintf(format, args...) printk(format , ## args)
-#else
-#define duprintf(format, args...)
-#endif
-
#define SCCHECK(cond, option, flag, invflag) (!((flag) & (option)) \
|| (!!((invflag) & (option)) ^ (cond)))
@@ -52,7 +47,7 @@ match_packet(const struct sk_buff *skb,
const struct xt_sctp_flag_info *flag_info = info->flag_info;
int flag_count = info->flag_count;
-#ifdef DEBUG_SCTP
+#ifdef DEBUG
int i = 0;
#endif
@@ -62,17 +57,19 @@ match_packet(const struct sk_buff *skb,
do {
sch = skb_header_pointer(skb, offset, sizeof(_sch), &_sch);
if (sch == NULL || sch->length == 0) {
- duprintf("Dropping invalid SCTP packet.\n");
+ pr_debug("Dropping invalid SCTP packet.\n");
*hotdrop = true;
return false;
}
-
- duprintf("Chunk num: %d\toffset: %d\ttype: %d\tlength: %d\tflags: %x\n",
- ++i, offset, sch->type, htons(sch->length), sch->flags);
-
+#ifdef DEBUG
+ pr_debug("Chunk num: %d\toffset: %d\ttype: %d\tlength: %d"
+ "\tflags: %x\n",
+ ++i, offset, sch->type, htons(sch->length),
+ sch->flags);
+#endif
offset += (ntohs(sch->length) + 3) & ~3;
- duprintf("skb->len: %d\toffset: %d\n", skb->len, offset);
+ pr_debug("skb->len: %d\toffset: %d\n", skb->len, offset);
if (SCTP_CHUNKMAP_IS_SET(info->chunkmap, sch->type)) {
switch (chunk_match_type) {
@@ -117,24 +114,24 @@ match_packet(const struct sk_buff *skb,
}
static bool
-sctp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+sctp_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_sctp_info *info = par->matchinfo;
const sctp_sctphdr_t *sh;
sctp_sctphdr_t _sh;
if (par->fragoff != 0) {
- duprintf("Dropping non-first fragment.. FIXME\n");
+ pr_debug("Dropping non-first fragment.. FIXME\n");
return false;
}
sh = skb_header_pointer(skb, par->thoff, sizeof(_sh), &_sh);
if (sh == NULL) {
- duprintf("Dropping evil TCP offset=0 tinygram.\n");
- *par->hotdrop = true;
+ pr_debug("Dropping evil TCP offset=0 tinygram.\n");
+ par->hotdrop = true;
return false;
}
- duprintf("spt: %d\tdpt: %d\n", ntohs(sh->source), ntohs(sh->dest));
+ pr_debug("spt: %d\tdpt: %d\n", ntohs(sh->source), ntohs(sh->dest));
return SCCHECK(ntohs(sh->source) >= info->spts[0]
&& ntohs(sh->source) <= info->spts[1],
@@ -143,22 +140,26 @@ sctp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
&& ntohs(sh->dest) <= info->dpts[1],
XT_SCTP_DEST_PORTS, info->flags, info->invflags)
&& SCCHECK(match_packet(skb, par->thoff + sizeof(sctp_sctphdr_t),
- info, par->hotdrop),
+ info, &par->hotdrop),
XT_SCTP_CHUNK_TYPES, info->flags, info->invflags);
}
-static bool sctp_mt_check(const struct xt_mtchk_param *par)
+static int sctp_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_sctp_info *info = par->matchinfo;
- return !(info->flags & ~XT_SCTP_VALID_FLAGS)
- && !(info->invflags & ~XT_SCTP_VALID_FLAGS)
- && !(info->invflags & ~info->flags)
- && ((!(info->flags & XT_SCTP_CHUNK_TYPES)) ||
- (info->chunk_match_type &
- (SCTP_CHUNK_MATCH_ALL
- | SCTP_CHUNK_MATCH_ANY
- | SCTP_CHUNK_MATCH_ONLY)));
+ if (info->flags & ~XT_SCTP_VALID_FLAGS)
+ return -EINVAL;
+ if (info->invflags & ~XT_SCTP_VALID_FLAGS)
+ return -EINVAL;
+ if (info->invflags & ~info->flags)
+ return -EINVAL;
+ if (!(info->flags & XT_SCTP_CHUNK_TYPES))
+ return 0;
+ if (info->chunk_match_type & (SCTP_CHUNK_MATCH_ALL |
+ SCTP_CHUNK_MATCH_ANY | SCTP_CHUNK_MATCH_ONLY))
+ return 0;
+ return -EINVAL;
}
static struct xt_match sctp_mt_reg[] __read_mostly = {
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 6a902564d24f..3d54c236a1ba 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -9,7 +9,7 @@
* published by the Free Software Foundation.
*
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
@@ -88,7 +88,7 @@ extract_icmp_fields(const struct sk_buff *skb,
static bool
-socket_match(const struct sk_buff *skb, const struct xt_match_param *par,
+socket_match(const struct sk_buff *skb, struct xt_action_param *par,
const struct xt_socket_mtinfo1 *info)
{
const struct iphdr *iph = ip_hdr(skb);
@@ -165,8 +165,7 @@ socket_match(const struct sk_buff *skb, const struct xt_match_param *par,
sk = NULL;
}
- pr_debug("socket match: proto %u %08x:%u -> %08x:%u "
- "(orig %08x:%u) sock %p\n",
+ pr_debug("proto %u %08x:%u -> %08x:%u (orig %08x:%u) sock %p\n",
protocol, ntohl(saddr), ntohs(sport),
ntohl(daddr), ntohs(dport),
ntohl(iph->daddr), hp ? ntohs(hp->dest) : 0, sk);
@@ -175,13 +174,13 @@ socket_match(const struct sk_buff *skb, const struct xt_match_param *par,
}
static bool
-socket_mt_v0(const struct sk_buff *skb, const struct xt_match_param *par)
+socket_mt_v0(const struct sk_buff *skb, struct xt_action_param *par)
{
return socket_match(skb, par, NULL);
}
static bool
-socket_mt_v1(const struct sk_buff *skb, const struct xt_match_param *par)
+socket_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
{
return socket_match(skb, par, par->matchinfo);
}
diff --git a/net/netfilter/xt_state.c b/net/netfilter/xt_state.c
index 4c946cbd731f..e12e053d3782 100644
--- a/net/netfilter/xt_state.c
+++ b/net/netfilter/xt_state.c
@@ -21,7 +21,7 @@ MODULE_ALIAS("ipt_state");
MODULE_ALIAS("ip6t_state");
static bool
-state_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+state_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_state_info *sinfo = par->matchinfo;
enum ip_conntrack_info ctinfo;
@@ -37,50 +37,40 @@ state_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return (sinfo->statemask & statebit);
}
-static bool state_mt_check(const struct xt_mtchk_param *par)
+static int state_mt_check(const struct xt_mtchk_param *par)
{
- if (nf_ct_l3proto_try_module_get(par->match->family) < 0) {
- printk(KERN_WARNING "can't load conntrack support for "
- "proto=%u\n", par->match->family);
- return false;
- }
- return true;
+ int ret;
+
+ ret = nf_ct_l3proto_try_module_get(par->family);
+ if (ret < 0)
+ pr_info("cannot load conntrack support for proto=%u\n",
+ par->family);
+ return ret;
}
static void state_mt_destroy(const struct xt_mtdtor_param *par)
{
- nf_ct_l3proto_module_put(par->match->family);
+ nf_ct_l3proto_module_put(par->family);
}
-static struct xt_match state_mt_reg[] __read_mostly = {
- {
- .name = "state",
- .family = NFPROTO_IPV4,
- .checkentry = state_mt_check,
- .match = state_mt,
- .destroy = state_mt_destroy,
- .matchsize = sizeof(struct xt_state_info),
- .me = THIS_MODULE,
- },
- {
- .name = "state",
- .family = NFPROTO_IPV6,
- .checkentry = state_mt_check,
- .match = state_mt,
- .destroy = state_mt_destroy,
- .matchsize = sizeof(struct xt_state_info),
- .me = THIS_MODULE,
- },
+static struct xt_match state_mt_reg __read_mostly = {
+ .name = "state",
+ .family = NFPROTO_UNSPEC,
+ .checkentry = state_mt_check,
+ .match = state_mt,
+ .destroy = state_mt_destroy,
+ .matchsize = sizeof(struct xt_state_info),
+ .me = THIS_MODULE,
};
static int __init state_mt_init(void)
{
- return xt_register_matches(state_mt_reg, ARRAY_SIZE(state_mt_reg));
+ return xt_register_match(&state_mt_reg);
}
static void __exit state_mt_exit(void)
{
- xt_unregister_matches(state_mt_reg, ARRAY_SIZE(state_mt_reg));
+ xt_unregister_match(&state_mt_reg);
}
module_init(state_mt_init);
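
Collapsing the per-family array into one NFPROTO_UNSPEC registration works because nothing in this match is family-specific; the family of the rule under evaluation now arrives at run time via the parameter block, which is what the par->match->family to par->family switch above is about. A sketch, with example_check as a hypothetical callback:

	/* Sketch only: for an NFPROTO_UNSPEC match, par->match->family
	 * is no longer meaningful; par->family carries the real table
	 * family (NFPROTO_IPV4 or NFPROTO_IPV6) per rule. */
	static int example_check(const struct xt_mtchk_param *par)
	{
		return nf_ct_l3proto_try_module_get(par->family);
	}
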
diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
index 937ce0633e99..96e62b8fd6b1 100644
--- a/net/netfilter/xt_statistic.c
+++ b/net/netfilter/xt_statistic.c
@@ -30,7 +30,7 @@ MODULE_ALIAS("ip6t_statistic");
static DEFINE_SPINLOCK(nth_lock);
static bool
-statistic_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_statistic_info *info = par->matchinfo;
bool ret = info->flags & XT_STATISTIC_INVERT;
@@ -53,22 +53,20 @@ statistic_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return ret;
}
-static bool statistic_mt_check(const struct xt_mtchk_param *par)
+static int statistic_mt_check(const struct xt_mtchk_param *par)
{
struct xt_statistic_info *info = par->matchinfo;
if (info->mode > XT_STATISTIC_MODE_MAX ||
info->flags & ~XT_STATISTIC_MASK)
- return false;
+ return -EINVAL;
info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
- if (info->master == NULL) {
- printk(KERN_ERR KBUILD_MODNAME ": Out of memory\n");
- return false;
- }
+ if (info->master == NULL)
+ return -ENOMEM;
info->master->count = info->u.nth.count;
- return true;
+ return 0;
}
static void statistic_mt_destroy(const struct xt_mtdtor_param *par)
diff --git a/net/netfilter/xt_string.c b/net/netfilter/xt_string.c
index 96801ffd8af8..d3c48b14ab94 100644
--- a/net/netfilter/xt_string.c
+++ b/net/netfilter/xt_string.c
@@ -23,16 +23,14 @@ MODULE_ALIAS("ipt_string");
MODULE_ALIAS("ip6t_string");
static bool
-string_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+string_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_string_info *conf = par->matchinfo;
struct ts_state state;
- int invert;
+ bool invert;
memset(&state, 0, sizeof(struct ts_state));
-
- invert = (par->match->revision == 0 ? conf->u.v0.invert :
- conf->u.v1.flags & XT_STRING_FLAG_INVERT);
+ invert = conf->u.v1.flags & XT_STRING_FLAG_INVERT;
return (skb_find_text((struct sk_buff *)skb, conf->from_offset,
conf->to_offset, conf->config, &state)
@@ -41,7 +39,7 @@ string_mt(const struct sk_buff *skb, const struct xt_match_param *par)
#define STRING_TEXT_PRIV(m) ((struct xt_string_info *)(m))
-static bool string_mt_check(const struct xt_mtchk_param *par)
+static int string_mt_check(const struct xt_mtchk_param *par)
{
struct xt_string_info *conf = par->matchinfo;
struct ts_config *ts_conf;
@@ -49,26 +47,23 @@ static bool string_mt_check(const struct xt_mtchk_param *par)
/* Damn, can't handle this case properly with iptables... */
if (conf->from_offset > conf->to_offset)
- return false;
+ return -EINVAL;
if (conf->algo[XT_STRING_MAX_ALGO_NAME_SIZE - 1] != '\0')
- return false;
+ return -EINVAL;
if (conf->patlen > XT_STRING_MAX_PATTERN_SIZE)
- return false;
- if (par->match->revision == 1) {
- if (conf->u.v1.flags &
- ~(XT_STRING_FLAG_IGNORECASE | XT_STRING_FLAG_INVERT))
- return false;
- if (conf->u.v1.flags & XT_STRING_FLAG_IGNORECASE)
- flags |= TS_IGNORECASE;
- }
+ return -EINVAL;
+ if (conf->u.v1.flags &
+ ~(XT_STRING_FLAG_IGNORECASE | XT_STRING_FLAG_INVERT))
+ return -EINVAL;
+ if (conf->u.v1.flags & XT_STRING_FLAG_IGNORECASE)
+ flags |= TS_IGNORECASE;
ts_conf = textsearch_prepare(conf->algo, conf->pattern, conf->patlen,
GFP_KERNEL, flags);
if (IS_ERR(ts_conf))
- return false;
+ return PTR_ERR(ts_conf);
conf->config = ts_conf;
-
- return true;
+ return 0;
}
static void string_mt_destroy(const struct xt_mtdtor_param *par)
@@ -76,38 +71,25 @@ static void string_mt_destroy(const struct xt_mtdtor_param *par)
textsearch_destroy(STRING_TEXT_PRIV(par->matchinfo)->config);
}
-static struct xt_match xt_string_mt_reg[] __read_mostly = {
- {
- .name = "string",
- .revision = 0,
- .family = NFPROTO_UNSPEC,
- .checkentry = string_mt_check,
- .match = string_mt,
- .destroy = string_mt_destroy,
- .matchsize = sizeof(struct xt_string_info),
- .me = THIS_MODULE
- },
- {
- .name = "string",
- .revision = 1,
- .family = NFPROTO_UNSPEC,
- .checkentry = string_mt_check,
- .match = string_mt,
- .destroy = string_mt_destroy,
- .matchsize = sizeof(struct xt_string_info),
- .me = THIS_MODULE
- },
+static struct xt_match xt_string_mt_reg __read_mostly = {
+ .name = "string",
+ .revision = 1,
+ .family = NFPROTO_UNSPEC,
+ .checkentry = string_mt_check,
+ .match = string_mt,
+ .destroy = string_mt_destroy,
+ .matchsize = sizeof(struct xt_string_info),
+ .me = THIS_MODULE,
};
static int __init string_mt_init(void)
{
- return xt_register_matches(xt_string_mt_reg,
- ARRAY_SIZE(xt_string_mt_reg));
+ return xt_register_match(&xt_string_mt_reg);
}
static void __exit string_mt_exit(void)
{
- xt_unregister_matches(xt_string_mt_reg, ARRAY_SIZE(xt_string_mt_reg));
+ xt_unregister_match(&xt_string_mt_reg);
}
module_init(string_mt_init);
diff --git a/net/netfilter/xt_tcpmss.c b/net/netfilter/xt_tcpmss.c
index 4809b34b10f8..c53d4d18eadf 100644
--- a/net/netfilter/xt_tcpmss.c
+++ b/net/netfilter/xt_tcpmss.c
@@ -25,7 +25,7 @@ MODULE_ALIAS("ipt_tcpmss");
MODULE_ALIAS("ip6t_tcpmss");
static bool
-tcpmss_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+tcpmss_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_tcpmss_match_info *info = par->matchinfo;
const struct tcphdr *th;
@@ -73,7 +73,7 @@ out:
return info->invert;
dropit:
- *par->hotdrop = true;
+ par->hotdrop = true;
return false;
}
diff --git a/net/netfilter/xt_tcpudp.c b/net/netfilter/xt_tcpudp.c
index 1ebdc4934eed..c14d4645daa3 100644
--- a/net/netfilter/xt_tcpudp.c
+++ b/net/netfilter/xt_tcpudp.c
@@ -1,3 +1,4 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/module.h>
#include <net/ip.h>
@@ -19,13 +20,6 @@ MODULE_ALIAS("ipt_tcp");
MODULE_ALIAS("ip6t_udp");
MODULE_ALIAS("ip6t_tcp");
-#ifdef DEBUG_IP_FIREWALL_USER
-#define duprintf(format, args...) printk(format , ## args)
-#else
-#define duprintf(format, args...)
-#endif
-
-
/* Returns 1 if the port is matched by the range, 0 otherwise */
static inline bool
port_match(u_int16_t min, u_int16_t max, u_int16_t port, bool invert)
@@ -46,7 +40,7 @@ tcp_find_option(u_int8_t option,
u_int8_t _opt[60 - sizeof(struct tcphdr)];
unsigned int i;
- duprintf("tcp_match: finding option\n");
+ pr_debug("finding option\n");
if (!optlen)
return invert;
@@ -68,7 +62,7 @@ tcp_find_option(u_int8_t option,
return invert;
}
-static bool tcp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+static bool tcp_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct tcphdr *th;
struct tcphdr _tcph;
@@ -82,8 +76,8 @@ static bool tcp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
flag overwrite to pass the direction checks.
*/
if (par->fragoff == 1) {
- duprintf("Dropping evil TCP offset=1 frag.\n");
- *par->hotdrop = true;
+ pr_debug("Dropping evil TCP offset=1 frag.\n");
+ par->hotdrop = true;
}
/* Must not be a fragment. */
return false;
@@ -95,8 +89,8 @@ static bool tcp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
if (th == NULL) {
/* We've been asked to examine this packet, and we
can't. Hence, no choice but to drop. */
- duprintf("Dropping evil TCP offset=0 tinygram.\n");
- *par->hotdrop = true;
+ pr_debug("Dropping evil TCP offset=0 tinygram.\n");
+ par->hotdrop = true;
return false;
}
@@ -114,27 +108,27 @@ static bool tcp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return false;
if (tcpinfo->option) {
if (th->doff * 4 < sizeof(_tcph)) {
- *par->hotdrop = true;
+ par->hotdrop = true;
return false;
}
if (!tcp_find_option(tcpinfo->option, skb, par->thoff,
th->doff*4 - sizeof(_tcph),
tcpinfo->invflags & XT_TCP_INV_OPTION,
- par->hotdrop))
+ &par->hotdrop))
return false;
}
return true;
}
-static bool tcp_mt_check(const struct xt_mtchk_param *par)
+static int tcp_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_tcp *tcpinfo = par->matchinfo;
/* Must specify no unknown invflags */
- return !(tcpinfo->invflags & ~XT_TCP_INV_MASK);
+ return (tcpinfo->invflags & ~XT_TCP_INV_MASK) ? -EINVAL : 0;
}
-static bool udp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+static bool udp_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct udphdr *uh;
struct udphdr _udph;
@@ -148,8 +142,8 @@ static bool udp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
if (uh == NULL) {
/* We've been asked to examine this packet, and we
can't. Hence, no choice but to drop. */
- duprintf("Dropping evil UDP tinygram.\n");
- *par->hotdrop = true;
+ pr_debug("Dropping evil UDP tinygram.\n");
+ par->hotdrop = true;
return false;
}
@@ -161,12 +155,12 @@ static bool udp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
!!(udpinfo->invflags & XT_UDP_INV_DSTPT));
}
-static bool udp_mt_check(const struct xt_mtchk_param *par)
+static int udp_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_udp *udpinfo = par->matchinfo;
/* Must specify no unknown invflags */
- return !(udpinfo->invflags & ~XT_UDP_INV_MASK);
+ return (udpinfo->invflags & ~XT_UDP_INV_MASK) ? -EINVAL : 0;
}
static struct xt_match tcpudp_mt_reg[] __read_mostly = {
diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c
index 93acaa59d108..c48975ff8ea2 100644
--- a/net/netfilter/xt_time.c
+++ b/net/netfilter/xt_time.c
@@ -1,7 +1,6 @@
/*
* xt_time
* Copyright © CC Computer Consultants GmbH, 2007
- * Contact: <jengelh@computergmbh.de>
*
* based on ipt_time by Fabrice MARIE <fabrice@netfilter.org>
* This is a module which is used for time matching
@@ -149,11 +148,10 @@ static void localtime_3(struct xtm *r, time_t time)
}
r->month = i + 1;
- return;
}
static bool
-time_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+time_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_time_info *info = par->matchinfo;
unsigned int packet_time;
@@ -218,18 +216,18 @@ time_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return true;
}
-static bool time_mt_check(const struct xt_mtchk_param *par)
+static int time_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_time_info *info = par->matchinfo;
if (info->daytime_start > XT_TIME_MAX_DAYTIME ||
info->daytime_stop > XT_TIME_MAX_DAYTIME) {
- printk(KERN_WARNING "xt_time: invalid argument - start or "
- "stop time greater than 23:59:59\n");
- return false;
+ pr_info("invalid argument - start or "
+ "stop time greater than 23:59:59\n");
+ return -EDOM;
}
- return true;
+ return 0;
}
static struct xt_match xt_time_mt_reg __read_mostly = {
@@ -264,7 +262,7 @@ static void __exit time_mt_exit(void)
module_init(time_mt_init);
module_exit(time_mt_exit);
-MODULE_AUTHOR("Jan Engelhardt <jengelh@computergmbh.de>");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
MODULE_DESCRIPTION("Xtables: time-based matching");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_time");
diff --git a/net/netfilter/xt_u32.c b/net/netfilter/xt_u32.c
index 24a527624500..a95b50342dbb 100644
--- a/net/netfilter/xt_u32.c
+++ b/net/netfilter/xt_u32.c
@@ -3,7 +3,6 @@
*
* Original author: Don Cohen <don@isis.cs3-inc.com>
* (C) CC Computer Consultants GmbH, 2007
- * Contact: <jengelh@computergmbh.de>
*/
#include <linux/module.h>
@@ -87,7 +86,7 @@ static bool u32_match_it(const struct xt_u32 *data,
return true;
}
-static bool u32_mt(const struct sk_buff *skb, const struct xt_match_param *par)
+static bool u32_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_u32 *data = par->matchinfo;
bool ret;
@@ -117,7 +116,7 @@ static void __exit u32_mt_exit(void)
module_init(u32_mt_init);
module_exit(u32_mt_exit);
-MODULE_AUTHOR("Jan Engelhardt <jengelh@computergmbh.de>");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
MODULE_DESCRIPTION("Xtables: arbitrary byte matching");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_u32");
diff --git a/net/netlabel/netlabel_addrlist.h b/net/netlabel/netlabel_addrlist.h
index 07ae7fd82be1..1c1c093cf279 100644
--- a/net/netlabel/netlabel_addrlist.h
+++ b/net/netlabel/netlabel_addrlist.h
@@ -130,7 +130,6 @@ static inline void netlbl_af4list_audit_addr(struct audit_buffer *audit_buf,
int src, const char *dev,
__be32 addr, __be32 mask)
{
- return;
}
#endif
@@ -203,7 +202,6 @@ static inline void netlbl_af6list_audit_addr(struct audit_buffer *audit_buf,
const struct in6_addr *addr,
const struct in6_addr *mask)
{
- return;
}
#endif
#endif /* IPV6 */
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index a3d64aabe2f7..e2b0a680dd56 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -670,7 +670,6 @@ static void netlbl_unlhsh_condremove_iface(struct netlbl_unlhsh_iface *iface)
unlhsh_condremove_failure:
spin_unlock(&netlbl_unlhsh_lock);
- return;
}
/**
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 795424396aff..a2eb965207d3 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -545,7 +545,7 @@ static int netlink_autobind(struct socket *sock)
struct hlist_head *head;
struct sock *osk;
struct hlist_node *node;
- s32 pid = current->tgid;
+ s32 pid = task_tgid_vnr(current);
int err;
static s32 rover = -4097;
@@ -978,6 +978,8 @@ struct netlink_broadcast_data {
int delivered;
gfp_t allocation;
struct sk_buff *skb, *skb2;
+ int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
+ void *tx_data;
};
static inline int do_one_broadcast(struct sock *sk,
@@ -1020,6 +1022,9 @@ static inline int do_one_broadcast(struct sock *sk,
p->failure = 1;
if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
p->delivery_failure = 1;
+ } else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
+ kfree_skb(p->skb2);
+ p->skb2 = NULL;
} else if (sk_filter(sk, p->skb2)) {
kfree_skb(p->skb2);
p->skb2 = NULL;
@@ -1038,8 +1043,10 @@ out:
return 0;
}
-int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
- u32 group, gfp_t allocation)
+int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 pid,
+ u32 group, gfp_t allocation,
+ int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
+ void *filter_data)
{
struct net *net = sock_net(ssk);
struct netlink_broadcast_data info;
@@ -1059,6 +1066,8 @@ int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
info.allocation = allocation;
info.skb = skb;
info.skb2 = NULL;
+ info.tx_filter = filter;
+ info.tx_data = filter_data;
/* While we sleep in clone, do not allow to change socket list */
@@ -1083,6 +1092,14 @@ int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
}
return -ESRCH;
}
+EXPORT_SYMBOL(netlink_broadcast_filtered);
+
+int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
+ u32 group, gfp_t allocation)
+{
+ return netlink_broadcast_filtered(ssk, skb, pid, group, allocation,
+ NULL, NULL);
+}
EXPORT_SYMBOL(netlink_broadcast);
struct netlink_set_err_data {
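
netlink_broadcast_filtered() lets the caller veto delivery per destination socket: the filter runs against each candidate socket with the cloned skb, and a nonzero return suppresses that receiver. A sketch of a caller-supplied filter, assuming a hypothetical policy of delivering only within one network namespace:

	/* Sketch only: example_ns_filter is illustrative and not part
	 * of this patch. */
	static int example_ns_filter(struct sock *dsk, struct sk_buff *skb,
				     void *data)
	{
		return sock_net(dsk) != data;	/* nonzero: skip dsk */
	}

	err = netlink_broadcast_filtered(ssk, skb, 0, group, GFP_KERNEL,
					 example_ns_filter, net);
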
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 06438fa2b1e5..aa4308afcc7f 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -21,15 +21,17 @@
static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
-static inline void genl_lock(void)
+void genl_lock(void)
{
mutex_lock(&genl_mutex);
}
+EXPORT_SYMBOL(genl_lock);
-static inline void genl_unlock(void)
+void genl_unlock(void)
{
mutex_unlock(&genl_mutex);
}
+EXPORT_SYMBOL(genl_unlock);
#define GENL_FAM_TAB_SIZE 16
#define GENL_FAM_TAB_MASK (GENL_FAM_TAB_SIZE - 1)
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index fa07f044b599..06cb02796a0e 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -739,7 +739,7 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr,
DEFINE_WAIT(wait);
for (;;) {
- prepare_to_wait(sk->sk_sleep, &wait,
+ prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
if (sk->sk_state != TCP_SYN_SENT)
break;
@@ -752,7 +752,7 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr,
err = -ERESTARTSYS;
break;
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
if (err)
goto out_release;
}
@@ -798,7 +798,7 @@ static int nr_accept(struct socket *sock, struct socket *newsock, int flags)
* hooked into the SABM we saved
*/
for (;;) {
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
skb = skb_dequeue(&sk->sk_receive_queue);
if (skb)
break;
@@ -816,7 +816,7 @@ static int nr_accept(struct socket *sock, struct socket *newsock, int flags)
err = -ERESTARTSYS;
break;
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
if (err)
goto out_release;
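
The sk_sleep() conversions here and in the files below are one-for-one replacements of direct sk->sk_sleep dereferences with the accessor, so the wait queue's location inside struct sock can later change in a single place. The canonical wait loop keeps its shape:

	/* Sketch only: the post-conversion wait idiom; condition_met
	 * stands in for the protocol-specific wakeup condition. */
	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	if (!condition_met)
		schedule();
	finish_wait(sk_sleep(sk), &wait);
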
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 243946d4809d..2078a277e06b 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -82,6 +82,7 @@
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
+#include <linux/errqueue.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
@@ -315,6 +316,8 @@ static inline struct packet_sock *pkt_sk(struct sock *sk)
static void packet_sock_destruct(struct sock *sk)
{
+ skb_queue_purge(&sk->sk_error_queue);
+
WARN_ON(atomic_read(&sk->sk_rmem_alloc));
WARN_ON(atomic_read(&sk->sk_wmem_alloc));
@@ -483,6 +486,9 @@ retry:
skb->dev = dev;
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
+ err = sock_tx_timestamp(msg, sk, skb_tx(skb));
+ if (err < 0)
+ goto out_unlock;
dev_queue_xmit(skb);
rcu_read_unlock();
@@ -1188,6 +1194,9 @@ static int packet_snd(struct socket *sock,
err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
if (err)
goto out_free;
+ err = sock_tx_timestamp(msg, sk, skb_tx(skb));
+ if (err < 0)
+ goto out_free;
skb->protocol = proto;
skb->dev = dev;
@@ -1487,6 +1496,51 @@ out:
return err;
}
+static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
+{
+ struct sock_exterr_skb *serr;
+ struct sk_buff *skb, *skb2;
+ int copied, err;
+
+ err = -EAGAIN;
+ skb = skb_dequeue(&sk->sk_error_queue);
+ if (skb == NULL)
+ goto out;
+
+ copied = skb->len;
+ if (copied > len) {
+ msg->msg_flags |= MSG_TRUNC;
+ copied = len;
+ }
+ err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ if (err)
+ goto out_free_skb;
+
+ sock_recv_timestamp(msg, sk, skb);
+
+ serr = SKB_EXT_ERR(skb);
+ put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
+ sizeof(serr->ee), &serr->ee);
+
+ msg->msg_flags |= MSG_ERRQUEUE;
+ err = copied;
+
+ /* Reset and regenerate socket error */
+ spin_lock_bh(&sk->sk_error_queue.lock);
+ sk->sk_err = 0;
+ if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
+ sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
+ spin_unlock_bh(&sk->sk_error_queue.lock);
+ sk->sk_error_report(sk);
+ } else
+ spin_unlock_bh(&sk->sk_error_queue.lock);
+
+out_free_skb:
+ kfree_skb(skb);
+out:
+ return err;
+}
+
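
packet_recv_error() is the receive half of the TX-timestamp plumbing added above: completions queued on sk_error_queue reach userspace through MSG_ERRQUEUE as a SOL_PACKET/PACKET_TX_TIMESTAMP control message carrying the sock_extended_err. A userspace sketch, assuming fd is a PF_PACKET socket with timestamping requested and handle_ee() a hypothetical consumer:

	/* Sketch only: drain one TX-timestamp notification. */
	char ctrl[256];
	struct msghdr msg = {
		.msg_control	= ctrl,
		.msg_controllen	= sizeof(ctrl),
	};
	struct cmsghdr *cm;

	if (recvmsg(fd, &msg, MSG_ERRQUEUE) >= 0)
		for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
			if (cm->cmsg_level == SOL_PACKET &&
			    cm->cmsg_type == PACKET_TX_TIMESTAMP)
				handle_ee((struct sock_extended_err *)CMSG_DATA(cm));
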
/*
* Pull a packet from our receive queue and hand it to the user.
* If necessary we block.
@@ -1502,7 +1556,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
int vnet_hdr_len = 0;
err = -EINVAL;
- if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
+ if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
goto out;
#if 0
@@ -1511,6 +1565,11 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
return -ENODEV;
#endif
+ if (flags & MSG_ERRQUEUE) {
+ err = packet_recv_error(sk, msg, len);
+ goto out;
+ }
+
/*
* Call the generic datagram receiver. This handles all sorts
* of horrible races and re-entrancy so we can forget about it
@@ -1692,9 +1751,9 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
if (i->alen != dev->addr_len)
return -EINVAL;
if (what > 0)
- return dev_mc_add(dev, i->addr, i->alen, 0);
+ return dev_mc_add(dev, i->addr);
else
- return dev_mc_delete(dev, i->addr, i->alen, 0);
+ return dev_mc_del(dev, i->addr);
break;
case PACKET_MR_PROMISC:
return dev_set_promiscuity(dev, what);
@@ -1706,9 +1765,9 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
if (i->alen != dev->addr_len)
return -EINVAL;
if (what > 0)
- return dev_unicast_add(dev, i->addr);
+ return dev_uc_add(dev, i->addr);
else
- return dev_unicast_delete(dev, i->addr);
+ return dev_uc_del(dev, i->addr);
break;
default:
break;
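
dev_mc_add()/dev_mc_del() and dev_uc_add()/dev_uc_del() are the renamed secondary-address entry points; they take only the device and the address, inferring the length from dev->addr_len, which is why the alen and global arguments vanish from these call sites. Roughly:

	/* Sketch only: old vs. new secondary-address calls. */
	err = dev_mc_add(dev, mac);	/* was dev_mc_add(dev, mac, dev->addr_len, 0) */
	err = dev_mc_del(dev, mac);	/* was dev_mc_delete(dev, mac, dev->addr_len, 0) */
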
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index e2a95762abd3..af4d38bc3b22 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -664,12 +664,12 @@ static int pep_wait_connreq(struct sock *sk, int noblock)
if (signal_pending(tsk))
return sock_intr_errno(timeo);
- prepare_to_wait_exclusive(&sk->sk_socket->wait, &wait,
+ prepare_to_wait_exclusive(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
- finish_wait(&sk->sk_socket->wait, &wait);
+ finish_wait(sk_sleep(sk), &wait);
}
return 0;
@@ -910,10 +910,10 @@ disabled:
goto out;
}
- prepare_to_wait(&sk->sk_socket->wait, &wait,
+ prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
done = sk_wait_event(sk, &timeo, atomic_read(&pn->tx_credits));
- finish_wait(&sk->sk_socket->wait, &wait);
+ finish_wait(sk_sleep(sk), &wait);
if (sk->sk_state != TCP_ESTABLISHED)
goto disabled;
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index 9b4ced6e0968..c33da6576942 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -46,9 +46,16 @@ struct phonet_net {
int phonet_net_id __read_mostly;
+static struct phonet_net *phonet_pernet(struct net *net)
+{
+ BUG_ON(!net);
+
+ return net_generic(net, phonet_net_id);
+}
+
struct phonet_device_list *phonet_device_list(struct net *net)
{
- struct phonet_net *pnn = net_generic(net, phonet_net_id);
+ struct phonet_net *pnn = phonet_pernet(net);
return &pnn->pndevs;
}
@@ -261,7 +268,7 @@ static int phonet_device_autoconf(struct net_device *dev)
static void phonet_route_autodel(struct net_device *dev)
{
- struct phonet_net *pnn = net_generic(dev_net(dev), phonet_net_id);
+ struct phonet_net *pnn = phonet_pernet(dev_net(dev));
unsigned i;
DECLARE_BITMAP(deleted, 64);
@@ -313,7 +320,7 @@ static struct notifier_block phonet_device_notifier = {
/* Per-namespace Phonet devices handling */
static int __net_init phonet_init_net(struct net *net)
{
- struct phonet_net *pnn = net_generic(net, phonet_net_id);
+ struct phonet_net *pnn = phonet_pernet(net);
if (!proc_net_fops_create(net, "phonet", 0, &pn_sock_seq_fops))
return -ENOMEM;
@@ -326,7 +333,7 @@ static int __net_init phonet_init_net(struct net *net)
static void __net_exit phonet_exit_net(struct net *net)
{
- struct phonet_net *pnn = net_generic(net, phonet_net_id);
+ struct phonet_net *pnn = phonet_pernet(net);
struct net_device *dev;
unsigned i;
@@ -376,7 +383,7 @@ void phonet_device_exit(void)
int phonet_route_add(struct net_device *dev, u8 daddr)
{
- struct phonet_net *pnn = net_generic(dev_net(dev), phonet_net_id);
+ struct phonet_net *pnn = phonet_pernet(dev_net(dev));
struct phonet_routes *routes = &pnn->routes;
int err = -EEXIST;
@@ -393,7 +400,7 @@ int phonet_route_add(struct net_device *dev, u8 daddr)
int phonet_route_del(struct net_device *dev, u8 daddr)
{
- struct phonet_net *pnn = net_generic(dev_net(dev), phonet_net_id);
+ struct phonet_net *pnn = phonet_pernet(dev_net(dev));
struct phonet_routes *routes = &pnn->routes;
daddr = daddr >> 2;
@@ -413,7 +420,7 @@ int phonet_route_del(struct net_device *dev, u8 daddr)
struct net_device *phonet_route_get(struct net *net, u8 daddr)
{
- struct phonet_net *pnn = net_generic(net, phonet_net_id);
+ struct phonet_net *pnn = phonet_pernet(net);
struct phonet_routes *routes = &pnn->routes;
struct net_device *dev;
@@ -428,7 +435,7 @@ struct net_device *phonet_route_get(struct net *net, u8 daddr)
struct net_device *phonet_route_output(struct net *net, u8 daddr)
{
- struct phonet_net *pnn = net_generic(net, phonet_net_id);
+ struct phonet_net *pnn = phonet_pernet(net);
struct phonet_routes *routes = &pnn->routes;
struct net_device *dev;
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index c785bfd0744f..6e9848bf0370 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -265,7 +265,7 @@ static unsigned int pn_socket_poll(struct file *file, struct socket *sock,
struct pep_sock *pn = pep_sk(sk);
unsigned int mask = 0;
- poll_wait(file, &sock->wait, wait);
+ poll_wait(file, sk_sleep(sk), wait);
switch (sk->sk_state) {
case TCP_LISTEN:
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index f81862baf4d0..aebfecbdb841 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -158,9 +158,10 @@ static unsigned int rds_poll(struct file *file, struct socket *sock,
unsigned int mask = 0;
unsigned long flags;
- poll_wait(file, sk->sk_sleep, wait);
+ poll_wait(file, sk_sleep(sk), wait);
- poll_wait(file, &rds_poll_waitq, wait);
+ if (rs->rs_seen_congestion)
+ poll_wait(file, &rds_poll_waitq, wait);
read_lock_irqsave(&rs->rs_recv_lock, flags);
if (!rs->rs_cong_monitor) {
@@ -182,6 +183,10 @@ static unsigned int rds_poll(struct file *file, struct socket *sock,
mask |= (POLLOUT | POLLWRNORM);
read_unlock_irqrestore(&rs->rs_recv_lock, flags);
+ /* clear state any time we wake a seen-congested socket */
+ if (mask)
+ rs->rs_seen_congestion = 0;
+
return mask;
}
@@ -447,7 +452,6 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len,
struct rds_info_lengths *lens)
{
struct rds_sock *rs;
- struct sock *sk;
struct rds_incoming *inc;
unsigned long flags;
unsigned int total = 0;
@@ -457,7 +461,6 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len,
spin_lock_irqsave(&rds_sock_lock, flags);
list_for_each_entry(rs, &rds_sock_list, rs_item) {
- sk = rds_rs_to_sk(rs);
read_lock(&rs->rs_recv_lock);
/* XXX too lazy to maintain counts.. */
diff --git a/net/rds/cong.c b/net/rds/cong.c
index f1da27ceb064..0871a29f0780 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -219,8 +219,6 @@ void rds_cong_queue_updates(struct rds_cong_map *map)
spin_lock_irqsave(&rds_cong_lock, flags);
list_for_each_entry(conn, &map->m_conn_list, c_map_item) {
- if (conn->c_loopback)
- continue;
if (!test_and_set_bit(0, &conn->c_map_queued)) {
rds_stats_inc(s_cong_update_queued);
queue_delayed_work(rds_wq, &conn->c_send_w, 0);
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 88d0856cb797..10ed0d55f759 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -204,9 +204,10 @@ static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
break;
default:
- rds_ib_conn_error(conn, "RDS/IB: Fatal QP Event %u "
+ rdsdebug("Fatal QP Event %u "
"- connection %pI4->%pI4, reconnecting\n",
event->event, &conn->c_laddr, &conn->c_faddr);
+ rds_conn_drop(conn);
break;
}
}
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 059989fdb7d7..a54cd63f9e35 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -235,8 +235,8 @@ void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
flush_workqueue(rds_wq);
rds_ib_flush_mr_pool(pool, 1);
- BUG_ON(atomic_read(&pool->item_count));
- BUG_ON(atomic_read(&pool->free_pinned));
+ WARN_ON(atomic_read(&pool->item_count));
+ WARN_ON(atomic_read(&pool->free_pinned));
kfree(pool);
}
@@ -441,6 +441,7 @@ static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
/* FIXME we need a way to tell a r/w MR
* from a r/o MR */
+ BUG_ON(in_interrupt());
set_page_dirty(page);
put_page(page);
}
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index c7dd11b835f0..c74e9904a6b2 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -469,8 +469,8 @@ static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credi
set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
rds_ib_stats_inc(s_ib_ack_send_failure);
- /* Need to finesse this later. */
- BUG();
+
+ rds_ib_conn_error(ic->conn, "sending ack failed\n");
} else
rds_ib_stats_inc(s_ib_ack_sent);
}
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index a10fab6886d1..17fa80803ab0 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -243,8 +243,12 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
struct rds_message *rm;
rm = rds_send_get_message(conn, send->s_op);
- if (rm)
+ if (rm) {
+ if (rm->m_rdma_op)
+ rds_ib_send_unmap_rdma(ic, rm->m_rdma_op);
rds_ib_send_rdma_complete(rm, wc.status);
+ rds_message_put(rm);
+ }
}
oldest = (oldest + 1) % ic->i_send_ring.w_nr;
@@ -482,6 +486,13 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
BUG_ON(off % RDS_FRAG_SIZE);
BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));
+ /* Do not send cong updates to IB loopback */
+ if (conn->c_loopback
+ && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
+ rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
+ return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
+ }
+
/* FIXME we may overallocate here */
if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
i = 1;
@@ -574,8 +585,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
adv_credits += posted;
BUG_ON(adv_credits > 255);
- } else if (ic->i_rm != rm)
- BUG();
+ }
send = &ic->i_sends[pos];
first = send;
@@ -714,8 +724,8 @@ add_header:
ic->i_rm = prev->s_rm;
prev->s_rm = NULL;
}
- /* Finesse this later */
- BUG();
+
+ rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
goto out;
}
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index 3e9460f935d8..a9d951b4fbae 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -157,9 +157,11 @@ static void rds_iw_qp_event_handler(struct ib_event *event, void *data)
case IB_EVENT_QP_REQ_ERR:
case IB_EVENT_QP_FATAL:
default:
- rds_iw_conn_error(conn, "RDS/IW: Fatal QP Event %u - connection %pI4->%pI4...reconnecting\n",
+ rdsdebug("Fatal QP Event %u "
+ "- connection %pI4->%pI4, reconnecting\n",
event->event, &conn->c_laddr,
&conn->c_faddr);
+ rds_conn_drop(conn);
break;
}
}
diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
index da43ee840ca3..3d479067d54d 100644
--- a/net/rds/iw_recv.c
+++ b/net/rds/iw_recv.c
@@ -469,8 +469,8 @@ static void rds_iw_send_ack(struct rds_iw_connection *ic, unsigned int adv_credi
set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
rds_iw_stats_inc(s_iw_ack_send_failure);
- /* Need to finesse this later. */
- BUG();
+
+ rds_iw_conn_error(ic->conn, "sending ack failed\n");
} else
rds_iw_stats_inc(s_iw_ack_sent);
}
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c
index 1379e9d66a78..52182ff7519e 100644
--- a/net/rds/iw_send.c
+++ b/net/rds/iw_send.c
@@ -616,8 +616,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
rds_iw_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
adv_credits += posted;
BUG_ON(adv_credits > 255);
- } else if (ic->i_rm != rm)
- BUG();
+ }
send = &ic->i_sends[pos];
first = send;
diff --git a/net/rds/loop.c b/net/rds/loop.c
index 0d7a159158b8..dd9879379457 100644
--- a/net/rds/loop.c
+++ b/net/rds/loop.c
@@ -81,16 +81,9 @@ static int rds_loop_xmit_cong_map(struct rds_connection *conn,
struct rds_cong_map *map,
unsigned long offset)
{
- unsigned long i;
-
BUG_ON(offset);
BUG_ON(map != conn->c_lcong);
- for (i = 0; i < RDS_CONG_MAP_PAGES; i++) {
- memcpy((void *)conn->c_fcong->m_page_addrs[i],
- (void *)map->m_page_addrs[i], PAGE_SIZE);
- }
-
rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 5ce9437cad67..75fd13bb631b 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -439,8 +439,10 @@ void rds_rdma_free_op(struct rds_rdma_op *ro)
/* Mark page dirty if it was possibly modified, which
* is the case for a RDMA_READ which copies from remote
* to local memory */
- if (!ro->r_write)
+ if (!ro->r_write) {
+ BUG_ON(in_interrupt());
set_page_dirty(page);
+ }
put_page(page);
}
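
[Editor's note] The BUG_ON(in_interrupt()) added above documents a real constraint: set_page_dirty() may take mapping locks that are not interrupt-safe, so pages written by an RDMA READ must be dirtied from process context. The assertion turns a latent locking violation into an immediate, debuggable failure. The invariant, annotated:

	/* Only RDMA READs (r_write == 0) copied data into these pages, so
	 * only then must they be marked dirty -- and never from interrupt
	 * context, where set_page_dirty()'s locking is unsafe. */
	if (!ro->r_write) {
		BUG_ON(in_interrupt());
		set_page_dirty(page);
	}
	put_page(page);
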
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index 7b155081b4dc..e599ba2f950d 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -101,7 +101,7 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
break;
case RDMA_CM_EVENT_DISCONNECTED:
- printk(KERN_WARNING "RDS/RDMA: DISCONNECT event - dropping connection "
+ rdsdebug("DISCONNECT event - dropping connection "
"%pI4->%pI4\n", &conn->c_laddr,
&conn->c_faddr);
rds_conn_drop(conn);
@@ -109,8 +109,7 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
default:
/* things like device disconnect? */
- printk(KERN_ERR "unknown event %u\n", event->event);
- BUG();
+ printk(KERN_ERR "RDS: unknown event %u!\n", event->event);
break;
}
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 85d6f897ecc7..c224b5bb3ba9 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -388,6 +388,8 @@ struct rds_sock {
/* flag indicating we were congested or not */
int rs_congested;
+ /* seen congestion (ENOBUFS) when sending? */
+ int rs_seen_congestion;
/* rs_lock protects all these adjacent members before the newline */
spinlock_t rs_lock;
@@ -490,7 +492,7 @@ void rds_sock_put(struct rds_sock *rs);
void rds_wake_sk_sleep(struct rds_sock *rs);
static inline void __rds_wake_sk_sleep(struct sock *sk)
{
- wait_queue_head_t *waitq = sk->sk_sleep;
+ wait_queue_head_t *waitq = sk_sleep(sk);
if (!sock_flag(sk, SOCK_DEAD) && waitq)
wake_up(waitq);
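
[Editor's note] This header change is part of a tree-wide conversion visible throughout the rest of this diff (recv.c, send.c, rose, rxrpc): open-coded sk->sk_sleep reads become sk_sleep(sk). At its introduction the accessor was a trivial wrapper, which is what makes the mechanical conversion safe; a later step reroutes it through the RCU-managed sk->sk_wq (see the rxrpc hunk below). The wrapper, as recollected:

	/* First form of the accessor: an inline wrapper so every caller
	 * goes through one choke point. */
	static inline wait_queue_head_t *sk_sleep(struct sock *sk)
	{
		return sk->sk_sleep;
	}

	/* Later form (after the socket_wq conversion), roughly:
	 *	return &rcu_dereference_raw(sk->sk_wq)->wait;	*/
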
diff --git a/net/rds/recv.c b/net/rds/recv.c
index e2a2b9344f7b..795a00b7f2cb 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -432,7 +432,7 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
break;
}
- timeo = wait_event_interruptible_timeout(*sk->sk_sleep,
+ timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
(!list_empty(&rs->rs_notify_queue) ||
rs->rs_cong_notify ||
rds_next_incoming(rs, &inc)), timeo);
diff --git a/net/rds/send.c b/net/rds/send.c
index f04b929ded92..9c1c6bcaa6c9 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -508,12 +508,13 @@ EXPORT_SYMBOL_GPL(rds_send_get_message);
*/
void rds_send_remove_from_sock(struct list_head *messages, int status)
{
- unsigned long flags = 0; /* silence gcc :P */
+ unsigned long flags;
struct rds_sock *rs = NULL;
struct rds_message *rm;
- local_irq_save(flags);
while (!list_empty(messages)) {
+ int was_on_sock = 0;
+
rm = list_entry(messages->next, struct rds_message,
m_conn_item);
list_del_init(&rm->m_conn_item);
@@ -528,20 +529,19 @@ void rds_send_remove_from_sock(struct list_head *messages, int status)
* while we're messing with it. It does not prevent the
* message from being removed from the socket, though.
*/
- spin_lock(&rm->m_rs_lock);
+ spin_lock_irqsave(&rm->m_rs_lock, flags);
if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
goto unlock_and_drop;
if (rs != rm->m_rs) {
if (rs) {
- spin_unlock(&rs->rs_lock);
rds_wake_sk_sleep(rs);
sock_put(rds_rs_to_sk(rs));
}
rs = rm->m_rs;
- spin_lock(&rs->rs_lock);
sock_hold(rds_rs_to_sk(rs));
}
+ spin_lock(&rs->rs_lock);
if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
struct rds_rdma_op *ro = rm->m_rdma_op;
@@ -558,21 +558,22 @@ void rds_send_remove_from_sock(struct list_head *messages, int status)
notifier->n_status = status;
rm->m_rdma_op->r_notifier = NULL;
}
- rds_message_put(rm);
+ was_on_sock = 1;
rm->m_rs = NULL;
}
+ spin_unlock(&rs->rs_lock);
unlock_and_drop:
- spin_unlock(&rm->m_rs_lock);
+ spin_unlock_irqrestore(&rm->m_rs_lock, flags);
rds_message_put(rm);
+ if (was_on_sock)
+ rds_message_put(rm);
}
if (rs) {
- spin_unlock(&rs->rs_lock);
rds_wake_sk_sleep(rs);
sock_put(rds_rs_to_sk(rs));
}
- local_irq_restore(flags);
}
/*
@@ -634,9 +635,6 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
list_move(&rm->m_sock_item, &list);
rds_send_sndbuf_remove(rs, rm);
clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
-
- /* If this is a RDMA operation, notify the app. */
- __rds_rdma_send_complete(rs, rm, RDS_RDMA_CANCELED);
}
/* order flag updates with the rs lock */
@@ -645,9 +643,6 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
spin_unlock_irqrestore(&rs->rs_lock, flags);
- if (wake)
- rds_wake_sk_sleep(rs);
-
conn = NULL;
/* now remove the messages from the conn list as needed */
@@ -655,6 +650,10 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
/* We do this here rather than in the loop above, so that
* we don't have to nest m_rs_lock under rs->rs_lock */
spin_lock_irqsave(&rm->m_rs_lock, flags2);
+ /* If this is a RDMA operation, notify the app. */
+ spin_lock(&rs->rs_lock);
+ __rds_rdma_send_complete(rs, rm, RDS_RDMA_CANCELED);
+ spin_unlock(&rs->rs_lock);
rm->m_rs = NULL;
spin_unlock_irqrestore(&rm->m_rs_lock, flags2);
@@ -683,6 +682,9 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
if (conn)
spin_unlock_irqrestore(&conn->c_lock, flags);
+ if (wake)
+ rds_wake_sk_sleep(rs);
+
while (!list_empty(&list)) {
rm = list_entry(list.next, struct rds_message, m_sock_item);
list_del_init(&rm->m_sock_item);
@@ -816,7 +818,7 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
int ret = 0;
int queued = 0, allocated_mr = 0;
int nonblock = msg->msg_flags & MSG_DONTWAIT;
- long timeo = sock_rcvtimeo(sk, nonblock);
+ long timeo = sock_sndtimeo(sk, nonblock);
/* Mirror Linux UDP mirror of BSD error message compatibility */
/* XXX: Perhaps MSG_MORE someday */
@@ -895,8 +897,10 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
- if (ret)
+ if (ret) {
+ rs->rs_seen_congestion = 1;
goto out;
+ }
while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
dport, &queued)) {
@@ -911,7 +915,7 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
goto out;
}
- timeo = wait_event_interruptible_timeout(*sk->sk_sleep,
+ timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
rds_send_queue_rm(rs, conn, rm,
rs->rs_bound_port,
dport,
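
[Editor's note] The rds_send_remove_from_sock() rework above changes both the lock order (rs_lock now nests inside m_rs_lock, taken per message with spin_lock_irqsave instead of one long local_irq_save region) and the reference counting: the message's on-socket reference is no longer dropped while locks are held. A hedged sketch of the deferred-put pattern it adopts:

	/* Decide under the lock, release references after unlocking, so
	 * the final rds_message_put() -- which may free the message --
	 * can never run with m_rs_lock held. */
	int was_on_sock = 0;

	spin_lock_irqsave(&rm->m_rs_lock, flags);
	if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
		was_on_sock = 1;
	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	rds_message_put(rm);		/* ref held by the conn list */
	if (was_on_sock)
		rds_message_put(rm);	/* ref the socket was holding */
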
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index 056256285987..c397524c039c 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -141,7 +141,7 @@ void rds_tcp_conn_shutdown(struct rds_connection *conn)
release_sock(sock->sk);
sock_release(sock);
- };
+ }
if (tc->t_tinc) {
rds_inc_put(&tc->t_tinc->ti_inc);
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index e08ec912d8b0..1aba6878fa5d 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -98,6 +98,7 @@ int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
goto out;
}
+ rds_stats_add(s_copy_to_user, to_copy);
size -= to_copy;
ret += to_copy;
skb_off += to_copy;
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index 34fdcc059e54..a28b895ff0d1 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -240,7 +240,9 @@ void rds_tcp_write_space(struct sock *sk)
tc->t_last_seen_una = rds_tcp_snd_una(tc);
rds_send_drop_acked(conn, rds_tcp_snd_una(tc), rds_tcp_is_acked);
- queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+ if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
+ queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+
out:
read_unlock(&sk->sk_callback_lock);
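
[Editor's note] The write-space test above is a standard anti-thrash watermark: (wmem_alloc << 1) <= sndbuf means at least half the send buffer has drained before the send worker is kicked again, matching the heuristic core socket code uses for POLLOUT. For comparison, sock_writeable() is, as recollected from this era:

	/* include/net/sock.h: writable once in-flight bytes drop below
	 * half the send buffer. */
	static inline int sock_writeable(const struct sock *sk)
	{
		return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
	}
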
diff --git a/net/rds/threads.c b/net/rds/threads.c
index 00fa10e59af8..786c20eaaf5e 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -259,7 +259,7 @@ void rds_threads_exit(void)
int __init rds_threads_init(void)
{
- rds_wq = create_singlethread_workqueue("krdsd");
+ rds_wq = create_workqueue("krdsd");
if (rds_wq == NULL)
return -ENOMEM;
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index a9fa86f65983..51875a0c5d48 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -629,6 +629,49 @@ static ssize_t rfkill_persistent_show(struct device *dev,
return sprintf(buf, "%d\n", rfkill->persistent);
}
+static ssize_t rfkill_hard_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct rfkill *rfkill = to_rfkill(dev);
+
+ return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0 );
+}
+
+static ssize_t rfkill_soft_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct rfkill *rfkill = to_rfkill(dev);
+
+ return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0 );
+}
+
+static ssize_t rfkill_soft_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rfkill *rfkill = to_rfkill(dev);
+ unsigned long state;
+ int err;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ err = strict_strtoul(buf, 0, &state);
+ if (err)
+ return err;
+
+ if (state > 1 )
+ return -EINVAL;
+
+ mutex_lock(&rfkill_global_mutex);
+ rfkill_set_block(rfkill, state);
+ mutex_unlock(&rfkill_global_mutex);
+
+ return err ?: count;
+}
+
static u8 user_state_from_blocked(unsigned long state)
{
if (state & RFKILL_BLOCK_HW)
@@ -644,14 +687,8 @@ static ssize_t rfkill_state_show(struct device *dev,
char *buf)
{
struct rfkill *rfkill = to_rfkill(dev);
- unsigned long flags;
- u32 state;
-
- spin_lock_irqsave(&rfkill->lock, flags);
- state = rfkill->state;
- spin_unlock_irqrestore(&rfkill->lock, flags);
- return sprintf(buf, "%d\n", user_state_from_blocked(state));
+ return sprintf(buf, "%d\n", user_state_from_blocked(rfkill->state));
}
static ssize_t rfkill_state_store(struct device *dev,
@@ -701,6 +738,8 @@ static struct device_attribute rfkill_dev_attrs[] = {
__ATTR(persistent, S_IRUGO, rfkill_persistent_show, NULL),
__ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store),
__ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store),
+ __ATTR(soft, S_IRUGO|S_IWUSR, rfkill_soft_show, rfkill_soft_store),
+ __ATTR(hard, S_IRUGO, rfkill_hard_show, NULL),
__ATTR_NULL
};
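
[Editor's note] The two new attributes expose the hardware and software block bits separately: hard is read-only, soft is writable with CAP_NET_ADMIN. A hedged userspace example; the rfkill0 path is an assumption for illustration:

	#include <fcntl.h>
	#include <unistd.h>

	/* Toggle the soft-block bit of one rfkill device via the new
	 * sysfs attribute; requires CAP_NET_ADMIN (typically root). */
	static int rfkill_soft_block(const char *path, int block)
	{
		/* path: e.g. "/sys/class/rfkill/rfkill0/soft" (assumed) */
		int fd = open(path, O_WRONLY);

		if (fd < 0)
			return -1;
		if (write(fd, block ? "1" : "0", 1) != 1) {
			close(fd);
			return -1;
		}
		return close(fd);
	}
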
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 4fb711a035f4..8e45e76a95f5 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -845,7 +845,7 @@ rose_try_next_neigh:
DEFINE_WAIT(wait);
for (;;) {
- prepare_to_wait(sk->sk_sleep, &wait,
+ prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
if (sk->sk_state != TCP_SYN_SENT)
break;
@@ -858,7 +858,7 @@ rose_try_next_neigh:
err = -ERESTARTSYS;
break;
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
if (err)
goto out_release;
@@ -911,7 +911,7 @@ static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
* hooked into the SABM we saved
*/
for (;;) {
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
skb = skb_dequeue(&sk->sk_receive_queue);
if (skb)
@@ -930,7 +930,7 @@ static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
err = -ERESTARTSYS;
break;
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
if (err)
goto out_release;
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index c060095b27ce..0b9bb2085ce4 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -62,13 +62,15 @@ static inline int rxrpc_writable(struct sock *sk)
static void rxrpc_write_space(struct sock *sk)
{
_enter("%p", sk);
- read_lock(&sk->sk_callback_lock);
+ rcu_read_lock();
if (rxrpc_writable(sk)) {
- if (sk_has_sleeper(sk))
- wake_up_interruptible(sk->sk_sleep);
+ struct socket_wq *wq = rcu_dereference(sk->sk_wq);
+
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible(&wq->wait);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
}
- read_unlock(&sk->sk_callback_lock);
+ rcu_read_unlock();
}
/*
@@ -589,7 +591,7 @@ static unsigned int rxrpc_poll(struct file *file, struct socket *sock,
unsigned int mask;
struct sock *sk = sock->sk;
- sock_poll_wait(file, sk->sk_sleep, wait);
+ sock_poll_wait(file, sk_sleep(sk), wait);
mask = 0;
/* the socket is readable if there are any messages waiting on the Rx
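
[Editor's note] The rxrpc_write_space() hunk above converts the callback from the sk_callback_lock read lock to RCU over the new socket_wq, and from sk_has_sleeper() to wq_has_sleeper(). The helper exists to avoid lost wakeups: its barrier orders the waker's state update against the waitqueue_active() check, pairing with the barrier the sleeper side issues in poll. Its shape, as recollected:

	/* The smp_mb() pairs with the barrier in sock_poll_wait(), so
	 * either the sleeper sees the new state or the waker sees the
	 * sleeper. */
	static inline int wq_has_sleeper(struct socket_wq *wq)
	{
		smp_mb();
		return wq && waitqueue_active(&wq->wait);
	}
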
diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
index 60c2b94e6b54..0c65013e3bfe 100644
--- a/net/rxrpc/ar-recvmsg.c
+++ b/net/rxrpc/ar-recvmsg.c
@@ -91,7 +91,7 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
/* wait for a message to turn up */
release_sock(&rx->sk);
- prepare_to_wait_exclusive(rx->sk.sk_sleep, &wait,
+ prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
TASK_INTERRUPTIBLE);
ret = sock_error(&rx->sk);
if (ret)
@@ -102,7 +102,7 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
goto wait_interrupted;
timeo = schedule_timeout(timeo);
}
- finish_wait(rx->sk.sk_sleep, &wait);
+ finish_wait(sk_sleep(&rx->sk), &wait);
lock_sock(&rx->sk);
continue;
}
@@ -356,7 +356,7 @@ csum_copy_error:
wait_interrupted:
ret = sock_intr_errno(timeo);
wait_error:
- finish_wait(rx->sk.sk_sleep, &wait);
+ finish_wait(sk_sleep(&rx->sk), &wait);
if (continue_call)
rxrpc_put_call(continue_call);
if (copied)
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index d8e0171d9a4b..972378f47f3c 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -153,7 +153,7 @@ int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb,
} else if (type == RTM_GETACTION) {
return tcf_dump_walker(skb, cb, a, hinfo);
} else {
- printk("tcf_generic_walker: unknown action %d\n", type);
+ WARN(1, "tcf_generic_walker: unknown action %d\n", type);
return -EINVAL;
}
}
@@ -403,8 +403,9 @@ void tcf_action_destroy(struct tc_action *act, int bind)
module_put(a->ops->owner);
act = act->next;
kfree(a);
- } else { /*FIXME: Remove later - catch insertion bugs*/
- printk("tcf_action_destroy: BUG? destroying NULL ops\n");
+ } else {
+ /*FIXME: Remove later - catch insertion bugs*/
+ WARN(1, "tcf_action_destroy: BUG? destroying NULL ops\n");
act = act->next;
kfree(a);
}
@@ -668,7 +669,8 @@ nlmsg_failure:
}
static int
-act_get_notify(u32 pid, struct nlmsghdr *n, struct tc_action *a, int event)
+act_get_notify(struct net *net, u32 pid, struct nlmsghdr *n,
+ struct tc_action *a, int event)
{
struct sk_buff *skb;
@@ -680,7 +682,7 @@ act_get_notify(u32 pid, struct nlmsghdr *n, struct tc_action *a, int event)
return -EINVAL;
}
- return rtnl_unicast(skb, &init_net, pid);
+ return rtnl_unicast(skb, net, pid);
}
static struct tc_action *
@@ -743,14 +745,15 @@ static struct tc_action *create_a(int i)
act = kzalloc(sizeof(*act), GFP_KERNEL);
if (act == NULL) {
- printk("create_a: failed to alloc!\n");
+ pr_debug("create_a: failed to alloc!\n");
return NULL;
}
act->order = i;
return act;
}
-static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
+static int tca_action_flush(struct net *net, struct nlattr *nla,
+ struct nlmsghdr *n, u32 pid)
{
struct sk_buff *skb;
unsigned char *b;
@@ -764,13 +767,13 @@ static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
int err = -ENOMEM;
if (a == NULL) {
- printk("tca_action_flush: couldnt create tc_action\n");
+ pr_debug("tca_action_flush: couldnt create tc_action\n");
return err;
}
skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb) {
- printk("tca_action_flush: failed skb alloc\n");
+ pr_debug("tca_action_flush: failed skb alloc\n");
kfree(a);
return err;
}
@@ -809,7 +812,7 @@ static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
nlh->nlmsg_flags |= NLM_F_ROOT;
module_put(a->ops->owner);
kfree(a);
- err = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
+ err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
if (err > 0)
return 0;
@@ -826,7 +829,8 @@ noflush_out:
}
static int
-tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event)
+tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
+ u32 pid, int event)
{
int i, ret;
struct nlattr *tb[TCA_ACT_MAX_PRIO+1];
@@ -838,7 +842,7 @@ tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event)
if (event == RTM_DELACTION && n->nlmsg_flags&NLM_F_ROOT) {
if (tb[1] != NULL)
- return tca_action_flush(tb[1], n, pid);
+ return tca_action_flush(net, tb[1], n, pid);
else
return -EINVAL;
}
@@ -859,7 +863,7 @@ tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event)
}
if (event == RTM_GETACTION)
- ret = act_get_notify(pid, n, head, event);
+ ret = act_get_notify(net, pid, n, head, event);
else { /* delete */
struct sk_buff *skb;
@@ -878,7 +882,7 @@ tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event)
/* now do the delete */
tcf_action_destroy(head, 0);
- ret = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC,
+ ret = rtnetlink_send(skb, net, pid, RTNLGRP_TC,
n->nlmsg_flags&NLM_F_ECHO);
if (ret > 0)
return 0;
@@ -889,8 +893,8 @@ err:
return ret;
}
-static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event,
- u16 flags)
+static int tcf_add_notify(struct net *net, struct tc_action *a,
+ u32 pid, u32 seq, int event, u16 flags)
{
struct tcamsg *t;
struct nlmsghdr *nlh;
@@ -923,7 +927,7 @@ static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event,
nlh->nlmsg_len = skb_tail_pointer(skb) - b;
NETLINK_CB(skb).dst_group = RTNLGRP_TC;
- err = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, flags&NLM_F_ECHO);
+ err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, flags&NLM_F_ECHO);
if (err > 0)
err = 0;
return err;
@@ -936,7 +940,8 @@ nlmsg_failure:
static int
-tcf_action_add(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int ovr)
+tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
+ u32 pid, int ovr)
{
int ret = 0;
struct tc_action *act;
@@ -954,7 +959,7 @@ tcf_action_add(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int ovr)
/* dump then free all the actions after update; inserted policy
* stays intact
* */
- ret = tcf_add_notify(act, pid, seq, RTM_NEWACTION, n->nlmsg_flags);
+ ret = tcf_add_notify(net, act, pid, seq, RTM_NEWACTION, n->nlmsg_flags);
for (a = act; a; a = act) {
act = a->next;
kfree(a);
@@ -970,15 +975,12 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
u32 pid = skb ? NETLINK_CB(skb).pid : 0;
int ret = 0, ovr = 0;
- if (!net_eq(net, &init_net))
- return -EINVAL;
-
ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL);
if (ret < 0)
return ret;
if (tca[TCA_ACT_TAB] == NULL) {
- printk("tc_ctl_action: received NO action attribs\n");
+ pr_notice("tc_ctl_action: received NO action attribs\n");
return -EINVAL;
}
@@ -995,15 +997,17 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
if (n->nlmsg_flags&NLM_F_REPLACE)
ovr = 1;
replay:
- ret = tcf_action_add(tca[TCA_ACT_TAB], n, pid, ovr);
+ ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, pid, ovr);
if (ret == -EAGAIN)
goto replay;
break;
case RTM_DELACTION:
- ret = tca_action_gd(tca[TCA_ACT_TAB], n, pid, RTM_DELACTION);
+ ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
+ pid, RTM_DELACTION);
break;
case RTM_GETACTION:
- ret = tca_action_gd(tca[TCA_ACT_TAB], n, pid, RTM_GETACTION);
+ ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
+ pid, RTM_GETACTION);
break;
default:
BUG();
@@ -1043,7 +1047,6 @@ find_dump_kind(const struct nlmsghdr *n)
static int
tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct net *net = sock_net(skb->sk);
struct nlmsghdr *nlh;
unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
@@ -1053,11 +1056,8 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
struct tcamsg *t = (struct tcamsg *) NLMSG_DATA(cb->nlh);
struct nlattr *kind = find_dump_kind(cb->nlh);
- if (!net_eq(net, &init_net))
- return 0;
-
if (kind == NULL) {
- printk("tc_dump_action: action bad kind\n");
+ pr_info("tc_dump_action: action bad kind\n");
return 0;
}
@@ -1070,7 +1070,8 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
a.ops = a_o;
if (a_o->walk == NULL) {
- printk("tc_dump_action: %s !capable of dumping table\n", a_o->kind);
+ WARN(1, "tc_dump_action: %s !capable of dumping table\n",
+ a_o->kind);
goto nla_put_failure;
}
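
[Editor's note] The act_api.c changes are one mechanical transformation applied everywhere: drop the !net_eq(net, &init_net) bail-outs and thread the caller's struct net from the rtnetlink entry point down to every rtnl_unicast()/rtnetlink_send() leaf, which is what makes tc actions usable in non-initial network namespaces. The pattern, sketched (the function name is illustrative):

	static int tc_ctl_action_sketch(struct sk_buff *skb, struct nlmsghdr *n,
					struct nlattr *tab)
	{
		struct net *net = sock_net(skb->sk);	/* resolve once at entry */

		/* ...then pass it down instead of hard-coding &init_net,
		 * all the way to rtnetlink_send(skb, net, pid, ...). */
		return tca_action_gd(net, tab, n, NETLINK_CB(skb).pid,
				     RTM_GETACTION);
	}
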
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index e7f796aec657..8406c6654990 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -202,9 +202,9 @@ MODULE_LICENSE("GPL");
static int __init gact_init_module(void)
{
#ifdef CONFIG_GACT_PROB
- printk("GACT probability on\n");
+ printk(KERN_INFO "GACT probability on\n");
#else
- printk("GACT probability NOT on\n");
+ printk(KERN_INFO "GACT probability NOT on\n");
#endif
return tcf_register_action(&act_gact_ops);
}
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index da27a170b6b7..c7e59e6ec349 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -47,8 +47,8 @@ static int ipt_init_target(struct ipt_entry_target *t, char *table, unsigned int
target = xt_request_find_target(AF_INET, t->u.user.name,
t->u.user.revision);
- if (!target)
- return -ENOENT;
+ if (IS_ERR(target))
+ return PTR_ERR(target);
t->u.kernel.target = target;
par.table = table;
@@ -199,7 +199,7 @@ static int tcf_ipt(struct sk_buff *skb, struct tc_action *a,
{
int ret = 0, result = 0;
struct tcf_ipt *ipt = a->priv;
- struct xt_target_param par;
+ struct xt_action_param par;
if (skb_cloned(skb)) {
if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
@@ -235,7 +235,8 @@ static int tcf_ipt(struct sk_buff *skb, struct tc_action *a,
break;
default:
if (net_ratelimit())
- printk("Bogus netfilter code %d assume ACCEPT\n", ret);
+ pr_notice("tc filter: Bogus netfilter code"
+ " %d assume ACCEPT\n", ret);
result = TC_POLICE_OK;
break;
}
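
[Editor's note] The act_ipt.c fix adapts to a calling-convention change in the xtables lookup helpers: xt_request_find_target() stopped returning NULL on failure and now encodes the errno in the pointer, so callers must test with IS_ERR()/PTR_ERR(). The general pattern:

	struct xt_target *target;

	target = xt_request_find_target(AF_INET, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target))
		return PTR_ERR(target);	/* e.g. -ENOENT */
	t->u.kernel.target = target;
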
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index c046682054eb..c0b6863e3b87 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -164,8 +164,8 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a,
dev = m->tcfm_dev;
if (!(dev->flags & IFF_UP)) {
if (net_ratelimit())
- printk("mirred to Houston: device %s is gone!\n",
- dev->name);
+ pr_notice("tc mirred to Houston: device %s is gone!\n",
+ dev->name);
goto out;
}
@@ -252,7 +252,7 @@ MODULE_LICENSE("GPL");
static int __init mirred_init_module(void)
{
- printk("Mirror/redirect action on\n");
+ pr_info("Mirror/redirect action on\n");
return tcf_register_action(&act_mirred_ops);
}
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index b7dcfedc802e..fdbd0b7bd840 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -158,11 +158,13 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
}
if (offset % 4) {
- printk("offset must be on 32 bit boundaries\n");
+ pr_info("tc filter pedit"
+ " offset must be on 32 bit boundaries\n");
goto bad;
}
if (offset > 0 && offset > skb->len) {
- printk("offset %d cant exceed pkt length %d\n",
+ pr_info("tc filter pedit"
+ " offset %d cant exceed pkt length %d\n",
offset, skb->len);
goto bad;
}
@@ -176,9 +178,8 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
if (munged)
skb->tc_verd = SET_TC_MUNGED(skb->tc_verd);
goto done;
- } else {
- printk("pedit BUG: index %d\n", p->tcf_index);
- }
+ } else
+ WARN(1, "pedit BUG: index %d\n", p->tcf_index);
bad:
p->tcf_qstats.overlimits++;
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 622ca809c15c..1b4bc691d7d1 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -49,7 +49,7 @@ static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result
* Example if this was the 3rd packet and the string was "hello"
* then it would look like "hello_3" (without quotes)
**/
- printk("simple: %s_%d\n",
+ pr_info("simple: %s_%d\n",
(char *)d->tcfd_defdata, d->tcf_bstats.packets);
spin_unlock(&d->tcf_lock);
return d->tcf_action;
@@ -205,7 +205,7 @@ static int __init simp_init_module(void)
{
int ret = tcf_register_action(&act_simp_ops);
if (!ret)
- printk("Simple TC action Loaded\n");
+ pr_info("Simple TC action Loaded\n");
return ret;
}
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index f082b27ff46d..5fd0c28ef79a 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -99,8 +99,9 @@ out:
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);
-static int tfilter_notify(struct sk_buff *oskb, struct nlmsghdr *n,
- struct tcf_proto *tp, unsigned long fh, int event);
+static int tfilter_notify(struct net *net, struct sk_buff *oskb,
+ struct nlmsghdr *n, struct tcf_proto *tp,
+ unsigned long fh, int event);
/* Select new prio value from the range, managed by kernel. */
@@ -138,9 +139,6 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
int err;
int tp_created = 0;
- if (!net_eq(net, &init_net))
- return -EINVAL;
-
replay:
t = NLMSG_DATA(n);
protocol = TC_H_MIN(t->tcm_info);
@@ -159,7 +157,7 @@ replay:
/* Find head of filter chain. */
/* Find link */
- dev = __dev_get_by_index(&init_net, t->tcm_ifindex);
+ dev = __dev_get_by_index(net, t->tcm_ifindex);
if (dev == NULL)
return -ENODEV;
@@ -283,7 +281,7 @@ replay:
*back = tp->next;
spin_unlock_bh(root_lock);
- tfilter_notify(skb, n, tp, fh, RTM_DELTFILTER);
+ tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER);
tcf_destroy(tp);
err = 0;
goto errout;
@@ -306,10 +304,10 @@ replay:
case RTM_DELTFILTER:
err = tp->ops->delete(tp, fh);
if (err == 0)
- tfilter_notify(skb, n, tp, fh, RTM_DELTFILTER);
+ tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER);
goto errout;
case RTM_GETTFILTER:
- err = tfilter_notify(skb, n, tp, fh, RTM_NEWTFILTER);
+ err = tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER);
goto errout;
default:
err = -EINVAL;
@@ -325,7 +323,7 @@ replay:
*back = tp;
spin_unlock_bh(root_lock);
}
- tfilter_notify(skb, n, tp, fh, RTM_NEWTFILTER);
+ tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER);
} else {
if (tp_created)
tcf_destroy(tp);
@@ -371,8 +369,9 @@ nla_put_failure:
return -1;
}
-static int tfilter_notify(struct sk_buff *oskb, struct nlmsghdr *n,
- struct tcf_proto *tp, unsigned long fh, int event)
+static int tfilter_notify(struct net *net, struct sk_buff *oskb,
+ struct nlmsghdr *n, struct tcf_proto *tp,
+ unsigned long fh, int event)
{
struct sk_buff *skb;
u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;
@@ -386,7 +385,7 @@ static int tfilter_notify(struct sk_buff *oskb, struct nlmsghdr *n,
return -EINVAL;
}
- return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC,
+ return rtnetlink_send(skb, net, pid, RTNLGRP_TC,
n->nlmsg_flags & NLM_F_ECHO);
}
@@ -419,12 +418,9 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
const struct Qdisc_class_ops *cops;
struct tcf_dump_args arg;
- if (!net_eq(net, &init_net))
- return 0;
-
if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
return skb->len;
- if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
+ if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
return skb->len;
if (!tcm->tcm_parent)
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 6ed61b10e002..f73542d2cdd0 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -602,7 +602,6 @@ static unsigned long flow_get(struct tcf_proto *tp, u32 handle)
static void flow_put(struct tcf_proto *tp, unsigned long f)
{
- return;
}
static int flow_dump(struct tcf_proto *tp, unsigned long fh,
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 17c5dfc67320..96275422c619 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -211,7 +211,7 @@ check_terminal:
deadloop:
if (net_ratelimit())
- printk("cls_u32: dead loop\n");
+ printk(KERN_WARNING "cls_u32: dead loop\n");
return -1;
}
@@ -768,15 +768,15 @@ static struct tcf_proto_ops cls_u32_ops __read_mostly = {
static int __init init_u32(void)
{
- printk("u32 classifier\n");
+ pr_info("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
- printk(" Performance counters on\n");
+ pr_info(" Performance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_IND
- printk(" input device check on \n");
+ pr_info(" input device check on\n");
#endif
#ifdef CONFIG_NET_CLS_ACT
- printk(" Actions configured \n");
+ pr_info(" Actions configured\n");
#endif
return register_tcf_proto_ops(&cls_u32_ops);
}
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index e782bdeedc58..5e37da961f80 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -527,7 +527,8 @@ pop_stack:
stack_overflow:
if (net_ratelimit())
- printk("Local stack overflow, increase NET_EMATCH_STACK\n");
+ printk(KERN_WARNING "tc ematch: local stack overflow,"
+ " increase NET_EMATCH_STACK\n");
return -1;
}
EXPORT_SYMBOL(__tcf_em_tree_match);
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 145268ca57cf..fe35c1f338c2 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -35,10 +35,12 @@
#include <net/netlink.h>
#include <net/pkt_sched.h>
-static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n, u32 clid,
+static int qdisc_notify(struct net *net, struct sk_buff *oskb,
+ struct nlmsghdr *n, u32 clid,
struct Qdisc *old, struct Qdisc *new);
-static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
- struct Qdisc *q, unsigned long cl, int event);
+static int tclass_notify(struct net *net, struct sk_buff *oskb,
+ struct nlmsghdr *n, struct Qdisc *q,
+ unsigned long cl, int event);
/*
@@ -639,11 +641,12 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
}
EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
-static void notify_and_destroy(struct sk_buff *skb, struct nlmsghdr *n, u32 clid,
+static void notify_and_destroy(struct net *net, struct sk_buff *skb,
+ struct nlmsghdr *n, u32 clid,
struct Qdisc *old, struct Qdisc *new)
{
if (new || old)
- qdisc_notify(skb, n, clid, old, new);
+ qdisc_notify(net, skb, n, clid, old, new);
if (old)
qdisc_destroy(old);
@@ -663,6 +666,7 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
struct Qdisc *new, struct Qdisc *old)
{
struct Qdisc *q = old;
+ struct net *net = dev_net(dev);
int err = 0;
if (parent == NULL) {
@@ -699,12 +703,13 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
}
if (!ingress) {
- notify_and_destroy(skb, n, classid, dev->qdisc, new);
+ notify_and_destroy(net, skb, n, classid,
+ dev->qdisc, new);
if (new && !new->ops->attach)
atomic_inc(&new->refcnt);
dev->qdisc = new ? : &noop_qdisc;
} else {
- notify_and_destroy(skb, n, classid, old, new);
+ notify_and_destroy(net, skb, n, classid, old, new);
}
if (dev->flags & IFF_UP)
@@ -722,7 +727,7 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
err = -ENOENT;
}
if (!err)
- notify_and_destroy(skb, n, classid, old, new);
+ notify_and_destroy(net, skb, n, classid, old, new);
}
return err;
}
@@ -948,10 +953,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
struct Qdisc *p = NULL;
int err;
- if (!net_eq(net, &init_net))
- return -EINVAL;
-
- if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
+ if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
return -ENODEV;
err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@@ -991,7 +993,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
if ((err = qdisc_graft(dev, p, skb, n, clid, NULL, q)) != 0)
return err;
} else {
- qdisc_notify(skb, n, clid, NULL, q);
+ qdisc_notify(net, skb, n, clid, NULL, q);
}
return 0;
}
@@ -1010,16 +1012,13 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
struct Qdisc *q, *p;
int err;
- if (!net_eq(net, &init_net))
- return -EINVAL;
-
replay:
/* Reinit, just in case something touches this. */
tcm = NLMSG_DATA(n);
clid = tcm->tcm_parent;
q = p = NULL;
- if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
+ if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
return -ENODEV;
err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@@ -1106,7 +1105,7 @@ replay:
return -EINVAL;
err = qdisc_change(q, tca);
if (err == 0)
- qdisc_notify(skb, n, clid, NULL, q);
+ qdisc_notify(net, skb, n, clid, NULL, q);
return err;
create_n_graft:
@@ -1196,8 +1195,9 @@ nla_put_failure:
return -1;
}
-static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n,
- u32 clid, struct Qdisc *old, struct Qdisc *new)
+static int qdisc_notify(struct net *net, struct sk_buff *oskb,
+ struct nlmsghdr *n, u32 clid,
+ struct Qdisc *old, struct Qdisc *new)
{
struct sk_buff *skb;
u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;
@@ -1216,7 +1216,7 @@ static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n,
}
if (skb->len)
- return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
+ return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
err_out:
kfree_skb(skb);
@@ -1275,15 +1275,12 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
int s_idx, s_q_idx;
struct net_device *dev;
- if (!net_eq(net, &init_net))
- return 0;
-
s_idx = cb->args[0];
s_q_idx = q_idx = cb->args[1];
rcu_read_lock();
idx = 0;
- for_each_netdev_rcu(&init_net, dev) {
+ for_each_netdev_rcu(net, dev) {
struct netdev_queue *dev_queue;
if (idx < s_idx)
@@ -1335,10 +1332,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
u32 qid = TC_H_MAJ(clid);
int err;
- if (!net_eq(net, &init_net))
- return -EINVAL;
-
- if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
+ if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
return -ENODEV;
err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@@ -1419,10 +1413,10 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
if (cops->delete)
err = cops->delete(q, cl);
if (err == 0)
- tclass_notify(skb, n, q, cl, RTM_DELTCLASS);
+ tclass_notify(net, skb, n, q, cl, RTM_DELTCLASS);
goto out;
case RTM_GETTCLASS:
- err = tclass_notify(skb, n, q, cl, RTM_NEWTCLASS);
+ err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
goto out;
default:
err = -EINVAL;
@@ -1435,7 +1429,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
if (cops->change)
err = cops->change(q, clid, pid, tca, &new_cl);
if (err == 0)
- tclass_notify(skb, n, q, new_cl, RTM_NEWTCLASS);
+ tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
out:
if (cl)
@@ -1487,8 +1481,9 @@ nla_put_failure:
return -1;
}
-static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
- struct Qdisc *q, unsigned long cl, int event)
+static int tclass_notify(struct net *net, struct sk_buff *oskb,
+ struct nlmsghdr *n, struct Qdisc *q,
+ unsigned long cl, int event)
{
struct sk_buff *skb;
u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;
@@ -1502,7 +1497,7 @@ static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
return -EINVAL;
}
- return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
+ return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
}
struct qdisc_dump_args
@@ -1577,12 +1572,9 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
struct net_device *dev;
int t, s_t;
- if (!net_eq(net, &init_net))
- return 0;
-
if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
return 0;
- if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
+ if ((dev = dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
return 0;
s_t = cb->args[0];
@@ -1645,9 +1637,12 @@ reclassify:
tp = otp;
if (verd++ >= MAX_REC_LOOP) {
- printk("rule prio %u protocol %02x reclassify loop, "
- "packet dropped\n",
- tp->prio&0xffff, ntohs(tp->protocol));
+ if (net_ratelimit())
+ printk(KERN_NOTICE
+ "%s: packet reclassify loop"
+ " rule prio %u protocol %02x\n",
+ tp->q->ops->id,
+ tp->prio & 0xffff, ntohs(tp->protocol));
return TC_ACT_SHOT;
}
skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
@@ -1692,7 +1687,7 @@ static int psched_show(struct seq_file *seq, void *v)
static int psched_open(struct inode *inode, struct file *file)
{
- return single_open(file, psched_show, PDE(inode)->data);
+ return single_open(file, psched_show, NULL);
}
static const struct file_operations psched_fops = {
@@ -1702,15 +1697,53 @@ static const struct file_operations psched_fops = {
.llseek = seq_lseek,
.release = single_release,
};
+
+static int __net_init psched_net_init(struct net *net)
+{
+ struct proc_dir_entry *e;
+
+ e = proc_net_fops_create(net, "psched", 0, &psched_fops);
+ if (e == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void __net_exit psched_net_exit(struct net *net)
+{
+ proc_net_remove(net, "psched");
+}
+#else
+static int __net_init psched_net_init(struct net *net)
+{
+ return 0;
+}
+
+static void __net_exit psched_net_exit(struct net *net)
+{
+}
#endif
+static struct pernet_operations psched_net_ops = {
+ .init = psched_net_init,
+ .exit = psched_net_exit,
+};
+
static int __init pktsched_init(void)
{
+ int err;
+
+ err = register_pernet_subsys(&psched_net_ops);
+ if (err) {
+ printk(KERN_ERR "pktsched_init: "
+ "cannot initialize per netns operations\n");
+ return err;
+ }
+
register_qdisc(&pfifo_qdisc_ops);
register_qdisc(&bfifo_qdisc_ops);
register_qdisc(&pfifo_head_drop_qdisc_ops);
register_qdisc(&mq_qdisc_ops);
- proc_net_fops_create(&init_net, "psched", 0, &psched_fops);
rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL);
rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index ff4dd53eeff0..a63029ef3edd 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -26,6 +26,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <net/pkt_sched.h>
+#include <net/dst.h>
/* Main transmission queue. */
@@ -40,6 +41,7 @@
static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
+ skb_dst_force(skb);
q->gso_skb = skb;
q->qstats.requeues++;
q->q.qlen++; /* it's still part of the queue */
@@ -94,7 +96,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
* Another cpu is holding lock, requeue & delay xmits for
* some time.
*/
- __get_cpu_var(netdev_rx_stat).cpu_collision++;
+ __get_cpu_var(softnet_data).cpu_collision++;
ret = dev_requeue_skb(skb, q);
}
@@ -179,7 +181,7 @@ static inline int qdisc_restart(struct Qdisc *q)
skb = dequeue_skb(q);
if (unlikely(!skb))
return 0;
-
+ WARN_ON_ONCE(skb_dst_is_noref(skb));
root_lock = qdisc_lock(q);
dev = qdisc_dev(q);
txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
@@ -529,7 +531,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
unsigned int size;
int err = -ENOBUFS;
- /* ensure that the Qdisc and the private data are 32-byte aligned */
+ /* ensure that the Qdisc and the private data are 64-byte aligned */
size = QDISC_ALIGN(sizeof(*sch));
size += ops->priv_size + (QDISC_ALIGNTO - 1);
@@ -591,6 +593,13 @@ void qdisc_reset(struct Qdisc *qdisc)
}
EXPORT_SYMBOL(qdisc_reset);
+static void qdisc_rcu_free(struct rcu_head *head)
+{
+ struct Qdisc *qdisc = container_of(head, struct Qdisc, rcu_head);
+
+ kfree((char *) qdisc - qdisc->padded);
+}
+
void qdisc_destroy(struct Qdisc *qdisc)
{
const struct Qdisc_ops *ops = qdisc->ops;
@@ -614,7 +623,11 @@ void qdisc_destroy(struct Qdisc *qdisc)
dev_put(qdisc_dev(qdisc));
kfree_skb(qdisc->gso_skb);
- kfree((char *) qdisc - qdisc->padded);
+ /*
+ * gen_estimator est_timer() might access qdisc->q.lock,
+ * wait a RCU grace period before freeing qdisc.
+ */
+ call_rcu(&qdisc->rcu_head, qdisc_rcu_free);
}
EXPORT_SYMBOL(qdisc_destroy);
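
[Editor's note] qdisc_destroy() can run while gen_estimator's est_timer() is still dereferencing qdisc->q.lock under RCU, so the final kfree() is deferred by a grace period. Note the hunk presumes a field this diff does not show: struct Qdisc must carry an rcu_head for call_rcu() to use. A hedged reconstruction:

	struct Qdisc {
		/* existing members elided */
		struct rcu_head		rcu_head;	/* for qdisc_rcu_free() */
	};

	/* Freeing then becomes: decide now, free after a grace period. */
	call_rcu(&qdisc->rcu_head, qdisc_rcu_free);
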
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index b38b39c60752..abd904be4287 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -617,7 +617,6 @@ rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
rtsc->y = y;
rtsc->dx = dx;
rtsc->dy = dy;
- return;
}
static void
@@ -1155,7 +1154,7 @@ static struct hfsc_class *
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
struct hfsc_sched *q = qdisc_priv(sch);
- struct hfsc_class *cl;
+ struct hfsc_class *head, *cl;
struct tcf_result res;
struct tcf_proto *tcf;
int result;
@@ -1166,6 +1165,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
return cl;
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+ head = &q->root;
tcf = q->root.filter_list;
while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
@@ -1180,6 +1180,8 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
if ((cl = (struct hfsc_class *)res.class) == NULL) {
if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
break; /* filter selected invalid classid */
+ if (cl->level >= head->level)
+ break; /* filter may only point downwards */
}
if (cl->level == 0)
@@ -1187,6 +1189,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
/* apply inner filter chain */
tcf = cl->filter_list;
+ head = cl;
}
/* classification failed, try default class */
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index a9e646bdb605..f10e34a68445 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -44,7 +44,6 @@ static void ingress_put(struct Qdisc *sch, unsigned long cl)
static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
- return;
}
static struct tcf_proto **ingress_find_tcf(struct Qdisc *sch, unsigned long cl)
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index b2aba3f5e6fa..fe91e50f9d98 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -174,7 +174,6 @@ static unsigned long mq_get(struct Qdisc *sch, u32 classid)
static void mq_put(struct Qdisc *sch, unsigned long cl)
{
- return;
}
static int mq_dump_class(struct Qdisc *sch, unsigned long cl,
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index c50876cd8704..6ae251279fc2 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -340,7 +340,6 @@ static unsigned long multiq_bind(struct Qdisc *sch, unsigned long parent,
static void multiq_put(struct Qdisc *q, unsigned long cl)
{
- return;
}
static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 81672e0c1b25..0748fb1e3a49 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -303,7 +303,6 @@ static unsigned long prio_bind(struct Qdisc *sch, unsigned long parent, u32 clas
static void prio_put(struct Qdisc *q, unsigned long cl)
{
- return;
}
static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb,
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 072cdf442f8e..8d42bb3ba540 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -303,7 +303,6 @@ static unsigned long red_get(struct Qdisc *sch, u32 classid)
static void red_put(struct Qdisc *sch, unsigned long arg)
{
- return;
}
static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index c5a9ac566007..c65762823f5e 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -123,8 +123,8 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
case htons(ETH_P_IP):
{
const struct iphdr *iph = ip_hdr(skb);
- h = iph->daddr;
- h2 = iph->saddr ^ iph->protocol;
+ h = (__force u32)iph->daddr;
+ h2 = (__force u32)iph->saddr ^ iph->protocol;
if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
(iph->protocol == IPPROTO_TCP ||
iph->protocol == IPPROTO_UDP ||
@@ -138,8 +138,8 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
case htons(ETH_P_IPV6):
{
struct ipv6hdr *iph = ipv6_hdr(skb);
- h = iph->daddr.s6_addr32[3];
- h2 = iph->saddr.s6_addr32[3] ^ iph->nexthdr;
+ h = (__force u32)iph->daddr.s6_addr32[3];
+ h2 = (__force u32)iph->saddr.s6_addr32[3] ^ iph->nexthdr;
if (iph->nexthdr == IPPROTO_TCP ||
iph->nexthdr == IPPROTO_UDP ||
iph->nexthdr == IPPROTO_UDPLITE ||
@@ -150,7 +150,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
break;
}
default:
- h = (unsigned long)skb_dst(skb) ^ skb->protocol;
+ h = (unsigned long)skb_dst(skb) ^ (__force u32)skb->protocol;
h2 = (unsigned long)skb->sk;
}
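
[Editor's note] The sch_sfq.c casts do not change generated code; they exist for sparse's endian checking. iph->daddr is __be32, and feeding it to a hash as a u32 is deliberate (byte order is irrelevant when the value is treated as opaque bits), so __force documents the intent and silences the warning:

	/* sparse would warn on the implicit __be32 -> u32 conversion; the
	 * value only feeds a hash, so the cast is made explicit. */
	const struct iphdr *iph = ip_hdr(skb);
	u32 h = (__force u32)iph->daddr;
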
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 8fb8107ab188..0991c640cd3e 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -273,7 +273,11 @@ static int tbf_change(struct Qdisc* sch, struct nlattr *opt)
if (max_size < 0)
goto done;
- if (qopt->limit > 0) {
+ if (q->qdisc != &noop_qdisc) {
+ err = fifo_set_limit(q->qdisc, qopt->limit);
+ if (err)
+ goto done;
+ } else if (qopt->limit > 0) {
child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
if (IS_ERR(child)) {
err = PTR_ERR(child);
diff --git a/net/sctp/Kconfig b/net/sctp/Kconfig
index 58b3e882a187..126b014eb79b 100644
--- a/net/sctp/Kconfig
+++ b/net/sctp/Kconfig
@@ -37,6 +37,18 @@ menuconfig IP_SCTP
if IP_SCTP
+config NET_SCTPPROBE
+ tristate "SCTP: Association probing"
+ depends on PROC_FS && KPROBES
+ ---help---
+ This module allows for capturing the changes to SCTP association
+ state in response to incoming packets. It is used for debugging
+ SCTP congestion control algorithms. If you don't understand
+ what was just said, you don't need it: say N.
+
+ To compile this code as a module, choose M here: the
+ module will be called sctp_probe.
+
config SCTP_DBG_MSG
bool "SCTP: Debug messages"
help
diff --git a/net/sctp/Makefile b/net/sctp/Makefile
index 6b794734380a..5c30b7a873df 100644
--- a/net/sctp/Makefile
+++ b/net/sctp/Makefile
@@ -3,6 +3,7 @@
#
obj-$(CONFIG_IP_SCTP) += sctp.o
+obj-$(CONFIG_NET_SCTPPROBE) += sctp_probe.o
sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \
protocol.o endpointola.o associola.o \
@@ -11,6 +12,8 @@ sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \
tsnmap.o bind_addr.o socket.o primitive.o \
output.o input.o debug.o ssnmap.o auth.o
+sctp_probe-y := probe.o
+
sctp-$(CONFIG_SCTP_DBG_OBJCNT) += objcnt.o
sctp-$(CONFIG_PROC_FS) += proc.o
sctp-$(CONFIG_SYSCTL) += sysctl.o
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 99c93ee98ad9..e41feff19e43 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -87,9 +87,6 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
/* Retrieve the SCTP per socket area. */
sp = sctp_sk((struct sock *)sk);
- /* Init all variables to a known value. */
- memset(asoc, 0, sizeof(struct sctp_association));
-
/* Discarding const is appropriate here. */
asoc->ep = (struct sctp_endpoint *)ep;
sctp_endpoint_hold(asoc->ep);
@@ -762,7 +759,8 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
asoc->peer.retran_path = peer;
}
- if (asoc->peer.active_path == asoc->peer.retran_path) {
+ if (asoc->peer.active_path == asoc->peer.retran_path &&
+ peer->state != SCTP_UNCONFIRMED) {
asoc->peer.retran_path = peer;
}
@@ -818,8 +816,6 @@ void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc,
if (t != primary)
sctp_assoc_rm_peer(asoc, t);
}
-
- return;
}
/* Engage in transport control operations.
@@ -1320,12 +1316,13 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc)
/* Keep track of the next transport in case
* we don't find any active transport.
*/
- if (!next)
+ if (t->state != SCTP_UNCONFIRMED && !next)
next = t;
}
}
- asoc->peer.retran_path = t;
+ if (t)
+ asoc->peer.retran_path = t;
SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association"
" %p addr: ",
@@ -1485,7 +1482,7 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned len)
if (asoc->rwnd >= len) {
asoc->rwnd -= len;
if (over) {
- asoc->rwnd_press = asoc->rwnd;
+ asoc->rwnd_press += asoc->rwnd;
asoc->rwnd = 0;
}
} else {
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index 3eab6db59a37..476caaf100ed 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -58,9 +58,9 @@ static void sctp_datamsg_init(struct sctp_datamsg *msg)
msg->send_failed = 0;
msg->send_error = 0;
msg->can_abandon = 0;
+ msg->can_delay = 1;
msg->expires_at = 0;
INIT_LIST_HEAD(&msg->chunks);
- msg->msg_size = 0;
}
/* Allocate and initialize datamsg. */
@@ -157,7 +157,6 @@ static void sctp_datamsg_assign(struct sctp_datamsg *msg, struct sctp_chunk *chu
{
sctp_datamsg_hold(msg);
chunk->msg = msg;
- msg->msg_size += chunk->skb->len;
}
@@ -247,6 +246,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
if (msg_len >= first_len) {
msg_len -= first_len;
whole = 1;
+ msg->can_delay = 0;
}
/* How many full sized? How many bytes leftover? */
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 7ec09ba03a1c..e10acc01c75f 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -70,8 +70,6 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
struct sctp_shared_key *null_key;
int err;
- memset(ep, 0, sizeof(struct sctp_endpoint));
-
ep->digest = kzalloc(SCTP_SIGNATURE_SIZE, gfp);
if (!ep->digest)
return NULL;
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 2a570184e5a9..ea2192444ce6 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -440,11 +440,25 @@ void sctp_icmp_proto_unreachable(struct sock *sk,
{
SCTP_DEBUG_PRINTK("%s\n", __func__);
- sctp_do_sm(SCTP_EVENT_T_OTHER,
- SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
- asoc->state, asoc->ep, asoc, t,
- GFP_ATOMIC);
+ if (sock_owned_by_user(sk)) {
+ if (timer_pending(&t->proto_unreach_timer))
+ return;
+ else {
+ if (!mod_timer(&t->proto_unreach_timer,
+ jiffies + (HZ/20)))
+ sctp_association_hold(asoc);
+ }
+
+ } else {
+ if (timer_pending(&t->proto_unreach_timer) &&
+ del_timer(&t->proto_unreach_timer))
+ sctp_association_put(asoc);
+ sctp_do_sm(SCTP_EVENT_T_OTHER,
+ SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
+ asoc->state, asoc->ep, asoc, t,
+ GFP_ATOMIC);
+ }
}
/* Common lookup code for icmp/icmpv6 error handler. */
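
[Editor's note] sctp_icmp_proto_unreachable() may run while a process holds the socket lock (sock_owned_by_user()), when the state machine cannot safely run; the fix defers the event through a short (HZ/20) per-transport timer. The reference counting rides on the timer-API return values, a detail worth spelling out:

	/* mod_timer() returns 0 if the timer was inactive, 1 if it was
	 * already pending: take an association ref only when a new timer
	 * was actually armed, so each pending timer owns exactly one ref. */
	if (!mod_timer(&t->proto_unreach_timer, jiffies + (HZ / 20)))
		sctp_association_hold(asoc);

	/* Symmetrically, del_timer() returns 1 only if it cancelled a
	 * pending timer -- the only case where there is a ref to drop. */
	if (del_timer(&t->proto_unreach_timer))
		sctp_association_put(asoc);
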
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 9fb5d37c37ad..732689140fb8 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -232,7 +232,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
if (!(transport->param_flags & SPP_PMTUD_ENABLE))
skb->local_df = 1;
- return ip6_xmit(sk, skb, &fl, np->opt, 0);
+ return ip6_xmit(sk, skb, &fl, np->opt);
}
/* Returns the dst cache entry for the given source and destination ip
@@ -277,20 +277,7 @@ static struct dst_entry *sctp_v6_get_dst(struct sctp_association *asoc,
static inline int sctp_v6_addr_match_len(union sctp_addr *s1,
union sctp_addr *s2)
{
- struct in6_addr *a1 = &s1->v6.sin6_addr;
- struct in6_addr *a2 = &s2->v6.sin6_addr;
- int i, j;
-
- for (i = 0; i < 4 ; i++) {
- __be32 a1xora2;
-
- a1xora2 = a1->s6_addr32[i] ^ a2->s6_addr32[i];
-
- if ((j = fls(ntohl(a1xora2))))
- return (i * 32 + 32 - j);
- }
-
- return (i*32);
+ return ipv6_addr_diff(&s1->v6.sin6_addr, &s2->v6.sin6_addr);
}
/* Fills in the source address(saddr) based on the destination address(daddr)
@@ -372,13 +359,13 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
}
read_lock_bh(&in6_dev->lock);
- for (ifp = in6_dev->addr_list; ifp; ifp = ifp->if_next) {
+ list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
/* Add the address to the local list. */
addr = t_new(struct sctp_sockaddr_entry, GFP_ATOMIC);
if (addr) {
addr->a.v6.sin6_family = AF_INET6;
addr->a.v6.sin6_port = 0;
- addr->a.v6.sin6_addr = ifp->addr;
+ ipv6_addr_copy(&addr->a.v6.sin6_addr, &ifp->addr);
addr->a.v6.sin6_scope_id = dev->ifindex;
addr->valid = 1;
INIT_LIST_HEAD(&addr->list);
@@ -419,7 +406,7 @@ static void sctp_v6_from_sk(union sctp_addr *addr, struct sock *sk)
{
addr->v6.sin6_family = AF_INET6;
addr->v6.sin6_port = 0;
- addr->v6.sin6_addr = inet6_sk(sk)->rcv_saddr;
+ ipv6_addr_copy(&addr->v6.sin6_addr, &inet6_sk(sk)->rcv_saddr);
}
/* Initialize sk->sk_rcv_saddr from sctp_addr. */
@@ -432,7 +419,7 @@ static void sctp_v6_to_sk_saddr(union sctp_addr *addr, struct sock *sk)
inet6_sk(sk)->rcv_saddr.s6_addr32[3] =
addr->v4.sin_addr.s_addr;
} else {
- inet6_sk(sk)->rcv_saddr = addr->v6.sin6_addr;
+ ipv6_addr_copy(&inet6_sk(sk)->rcv_saddr, &addr->v6.sin6_addr);
}
}
@@ -445,7 +432,7 @@ static void sctp_v6_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
inet6_sk(sk)->daddr.s6_addr32[2] = htonl(0x0000ffff);
inet6_sk(sk)->daddr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
} else {
- inet6_sk(sk)->daddr = addr->v6.sin6_addr;
+ ipv6_addr_copy(&inet6_sk(sk)->daddr, &addr->v6.sin6_addr);
}
}
diff --git a/net/sctp/output.c b/net/sctp/output.c
index fad261d41ec2..a646681f5acd 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -429,24 +429,17 @@ int sctp_packet_transmit(struct sctp_packet *packet)
list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
list_del_init(&chunk->list);
if (sctp_chunk_is_data(chunk)) {
+ /* 6.3.1 C4) When data is in flight and when allowed
+ * by rule C5, a new RTT measurement MUST be made each
+ * round trip. Furthermore, new RTT measurements
+ * SHOULD be made no more than once per round-trip
+ * for a given destination transport address.
+ */
- if (!chunk->resent) {
-
- /* 6.3.1 C4) When data is in flight and when allowed
- * by rule C5, a new RTT measurement MUST be made each
- * round trip. Furthermore, new RTT measurements
- * SHOULD be made no more than once per round-trip
- * for a given destination transport address.
- */
-
- if (!tp->rto_pending) {
- chunk->rtt_in_progress = 1;
- tp->rto_pending = 1;
- }
+ if (!tp->rto_pending) {
+ chunk->rtt_in_progress = 1;
+ tp->rto_pending = 1;
}
-
- chunk->resent = 1;
-
has_data = 1;
}
@@ -681,7 +674,7 @@ static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
* Don't delay large message writes that may have been
* fragmeneted into small peices.
*/
- if ((len < max) && (chunk->msg->msg_size < max)) {
+ if ((len < max) && chunk->msg->can_delay) {
retval = SCTP_XMIT_NAGLE_DELAY;
goto finish;
}
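
[Editor's note] The output.c hunk pairs with the chunk.c change earlier in this diff: the msg_size bookkeeping is replaced by a boolean. A datamsg that was ever fragmented (msg_len >= first_len) clears can_delay, so Nagle-style delay now applies only to genuinely small, single-chunk messages rather than to the small tail piece of a large write. The resulting test:

	/* Delay only if the chunk fits the packet under construction AND
	 * the message as a whole was never fragmented. */
	if (len < max && chunk->msg->can_delay)
		return SCTP_XMIT_NAGLE_DELAY;
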
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index abfc0b8dee74..c04b2eb59186 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -62,7 +62,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
struct list_head *transmitted_queue,
struct sctp_transport *transport,
struct sctp_sackhdr *sack,
- __u32 highest_new_tsn);
+ __u32 *highest_new_tsn);
static void sctp_mark_missing(struct sctp_outq *q,
struct list_head *transmitted_queue,
@@ -80,7 +80,6 @@ static inline void sctp_outq_head_data(struct sctp_outq *q,
{
list_add(&ch->list, &q->out_chunk_list);
q->out_qlen += ch->skb->len;
- return;
}
/* Take data from the front of the queue. */
@@ -103,7 +102,6 @@ static inline void sctp_outq_tail_data(struct sctp_outq *q,
{
list_add_tail(&ch->list, &q->out_chunk_list);
q->out_qlen += ch->skb->len;
- return;
}
/*
@@ -308,7 +306,7 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
/* If it is data, queue it up, otherwise, send it
* immediately.
*/
- if (SCTP_CID_DATA == chunk->chunk_hdr->type) {
+ if (sctp_chunk_is_data(chunk)) {
/* Is it OK to queue data chunks? */
/* From 9. Termination of Association
*
@@ -598,11 +596,23 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
if (fast_rtx && !chunk->fast_retransmit)
continue;
+redo:
/* Attempt to append this chunk to the packet. */
status = sctp_packet_append_chunk(pkt, chunk);
switch (status) {
case SCTP_XMIT_PMTU_FULL:
+ if (!pkt->has_data && !pkt->has_cookie_echo) {
+ /* If this packet did not contain DATA then
+ * retransmission did not happen, so do it
+ * again. We'll ignore the error here since
+ * control chunks are already freed so there
+ * is nothing we can do.
+ */
+ sctp_packet_transmit(pkt);
+ goto redo;
+ }
+
/* Send this packet. */
error = sctp_packet_transmit(pkt);
@@ -647,14 +657,6 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
if (chunk->fast_retransmit == SCTP_NEED_FRTX)
chunk->fast_retransmit = SCTP_DONT_FRTX;
- /* Force start T3-rtx timer when fast retransmitting
- * the earliest outstanding TSN
- */
- if (!timer && fast_rtx &&
- ntohl(chunk->subh.data_hdr->tsn) ==
- asoc->ctsn_ack_point + 1)
- timer = 2;
-
q->empty = 0;
break;
}
@@ -854,6 +856,12 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
if (status != SCTP_XMIT_OK) {
/* put the chunk back */
list_add(&chunk->list, &q->control_chunk_list);
+ } else if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) {
+ /* PR-SCTP C5) If a FORWARD TSN is sent, the
+ * sender MUST assure that at least one T3-rtx
+ * timer is running.
+ */
+ sctp_transport_reset_timers(transport);
}
break;
@@ -906,8 +914,7 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
rtx_timeout, &start_timer);
if (start_timer)
- sctp_transport_reset_timers(transport,
- start_timer-1);
+ sctp_transport_reset_timers(transport);
/* This can happen on COOKIE-ECHO resend. Only
* one chunk can get bundled with a COOKIE-ECHO.
@@ -1040,7 +1047,7 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
list_add_tail(&chunk->transmitted_list,
&transport->transmitted);
- sctp_transport_reset_timers(transport, 0);
+ sctp_transport_reset_timers(transport);
q->empty = 0;
@@ -1100,32 +1107,6 @@ static void sctp_sack_update_unack_data(struct sctp_association *assoc,
assoc->unack_data = unack_data;
}
-/* Return the highest new tsn that is acknowledged by the given SACK chunk. */
-static __u32 sctp_highest_new_tsn(struct sctp_sackhdr *sack,
- struct sctp_association *asoc)
-{
- struct sctp_transport *transport;
- struct sctp_chunk *chunk;
- __u32 highest_new_tsn, tsn;
- struct list_head *transport_list = &asoc->peer.transport_addr_list;
-
- highest_new_tsn = ntohl(sack->cum_tsn_ack);
-
- list_for_each_entry(transport, transport_list, transports) {
- list_for_each_entry(chunk, &transport->transmitted,
- transmitted_list) {
- tsn = ntohl(chunk->subh.data_hdr->tsn);
-
- if (!chunk->tsn_gap_acked &&
- TSN_lt(highest_new_tsn, tsn) &&
- sctp_acked(sack, tsn))
- highest_new_tsn = tsn;
- }
- }
-
- return highest_new_tsn;
-}
-
/* This is where we REALLY process a SACK.
*
* Process the SACK against the outqueue. Mostly, this just frees
@@ -1145,6 +1126,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
struct sctp_transport *primary = asoc->peer.primary_path;
int count_of_newacks = 0;
int gap_ack_blocks;
+ u8 accum_moved = 0;
/* Grab the association's destination address list. */
transport_list = &asoc->peer.transport_addr_list;
@@ -1193,18 +1175,15 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
if (gap_ack_blocks)
highest_tsn += ntohs(frags[gap_ack_blocks - 1].gab.end);
- if (TSN_lt(asoc->highest_sacked, highest_tsn)) {
- highest_new_tsn = highest_tsn;
+ if (TSN_lt(asoc->highest_sacked, highest_tsn))
asoc->highest_sacked = highest_tsn;
- } else {
- highest_new_tsn = sctp_highest_new_tsn(sack, asoc);
- }
+ highest_new_tsn = sack_ctsn;
/* Run through the retransmit queue. Credit bytes received
* and free those chunks that we can.
*/
- sctp_check_transmitted(q, &q->retransmit, NULL, sack, highest_new_tsn);
+ sctp_check_transmitted(q, &q->retransmit, NULL, sack, &highest_new_tsn);
/* Run through the transmitted queue.
* Credit bytes received and free those chunks which we can.
@@ -1213,7 +1192,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
*/
list_for_each_entry(transport, transport_list, transports) {
sctp_check_transmitted(q, &transport->transmitted,
- transport, sack, highest_new_tsn);
+ transport, sack, &highest_new_tsn);
/*
* SFR-CACC algorithm:
* C) Let count_of_newacks be the number of
@@ -1223,16 +1202,22 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
count_of_newacks ++;
}
+ /* Move the Cumulative TSN Ack Point if appropriate. */
+ if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn)) {
+ asoc->ctsn_ack_point = sack_ctsn;
+ accum_moved = 1;
+ }
+
if (gap_ack_blocks) {
+
+ if (asoc->fast_recovery && accum_moved)
+ highest_new_tsn = highest_tsn;
+
list_for_each_entry(transport, transport_list, transports)
sctp_mark_missing(q, &transport->transmitted, transport,
highest_new_tsn, count_of_newacks);
}
- /* Move the Cumulative TSN Ack Point if appropriate. */
- if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn))
- asoc->ctsn_ack_point = sack_ctsn;
-
/* Update unack_data field in the assoc. */
sctp_sack_update_unack_data(asoc, sack);
@@ -1315,7 +1300,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
struct list_head *transmitted_queue,
struct sctp_transport *transport,
struct sctp_sackhdr *sack,
- __u32 highest_new_tsn_in_sack)
+ __u32 *highest_new_tsn_in_sack)
{
struct list_head *lchunk;
struct sctp_chunk *tchunk;
@@ -1387,7 +1372,6 @@ static void sctp_check_transmitted(struct sctp_outq *q,
* instance).
*/
if (!tchunk->tsn_gap_acked &&
- !tchunk->resent &&
tchunk->rtt_in_progress) {
tchunk->rtt_in_progress = 0;
rtt = jiffies - tchunk->sent_at;
@@ -1404,6 +1388,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
*/
if (!tchunk->tsn_gap_acked) {
tchunk->tsn_gap_acked = 1;
+ *highest_new_tsn_in_sack = tsn;
bytes_acked += sctp_data_size(tchunk);
if (!tchunk->transport)
migrate_bytes += sctp_data_size(tchunk);
@@ -1677,7 +1662,8 @@ static void sctp_mark_missing(struct sctp_outq *q,
struct sctp_chunk *chunk;
__u32 tsn;
char do_fast_retransmit = 0;
- struct sctp_transport *primary = q->asoc->peer.primary_path;
+ struct sctp_association *asoc = q->asoc;
+ struct sctp_transport *primary = asoc->peer.primary_path;
list_for_each_entry(chunk, transmitted_queue, transmitted_list) {
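
Note on the outqueue.c changes: with sctp_highest_new_tsn() gone, the SACK
path seeds highest_new_tsn from the cumulative TSN ack and lets
sctp_check_transmitted() raise it whenever a chunk is newly gap-acked,
saving the extra walk over every transmitted list. A condensed sketch of
the flow (simplified from the hunks above):

    __u32 highest_new_tsn = sack_ctsn;      /* start at cum_tsn_ack */

    sctp_check_transmitted(q, &q->retransmit, NULL, sack, &highest_new_tsn);
    list_for_each_entry(transport, transport_list, transports)
            sctp_check_transmitted(q, &transport->transmitted,
                                   transport, sack, &highest_new_tsn);
    /* inside sctp_check_transmitted(), per chunk newly acked by this SACK:
     *         *highest_new_tsn_in_sack = tsn;
     */
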
diff --git a/net/sctp/probe.c b/net/sctp/probe.c
new file mode 100644
index 000000000000..db3a42b8b349
--- /dev/null
+++ b/net/sctp/probe.c
@@ -0,0 +1,214 @@
+/*
+ * sctp_probe - Observe the SCTP flow with kprobes.
+ *
+ * The idea for this came from Werner Almesberger's umlsim
+ * Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org>
+ *
+ * Modified for SCTP from Stephen Hemminger's code
+ * Copyright (C) 2010, Wei Yongjun <yjwei@cn.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <linux/socket.h>
+#include <linux/sctp.h>
+#include <linux/proc_fs.h>
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+#include <linux/kfifo.h>
+#include <linux/time.h>
+#include <net/net_namespace.h>
+
+#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
+
+MODULE_AUTHOR("Wei Yongjun <yjwei@cn.fujitsu.com>");
+MODULE_DESCRIPTION("SCTP snooper");
+MODULE_LICENSE("GPL");
+
+static int port __read_mostly = 0;
+MODULE_PARM_DESC(port, "Port to match (0=all)");
+module_param(port, int, 0);
+
+static int bufsize __read_mostly = 64 * 1024;
+MODULE_PARM_DESC(bufsize, "Log buffer size (default 64k)");
+module_param(bufsize, int, 0);
+
+static int full __read_mostly = 1;
+MODULE_PARM_DESC(full, "Full log (1=every ack packet received, 0=only cwnd changes)");
+module_param(full, int, 0);
+
+static const char procname[] = "sctpprobe";
+
+static struct {
+ struct kfifo fifo;
+ spinlock_t lock;
+ wait_queue_head_t wait;
+ struct timespec tstart;
+} sctpw;
+
+static void printl(const char *fmt, ...)
+{
+ va_list args;
+ int len;
+ char tbuf[256];
+
+ va_start(args, fmt);
+ len = vscnprintf(tbuf, sizeof(tbuf), fmt, args);
+ va_end(args);
+
+ kfifo_in_locked(&sctpw.fifo, tbuf, len, &sctpw.lock);
+ wake_up(&sctpw.wait);
+}
+
+static int sctpprobe_open(struct inode *inode, struct file *file)
+{
+ kfifo_reset(&sctpw.fifo);
+ getnstimeofday(&sctpw.tstart);
+
+ return 0;
+}
+
+static ssize_t sctpprobe_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ int error = 0, cnt = 0;
+ unsigned char *tbuf;
+
+ if (!buf)
+ return -EINVAL;
+
+ if (len == 0)
+ return 0;
+
+ tbuf = vmalloc(len);
+ if (!tbuf)
+ return -ENOMEM;
+
+ error = wait_event_interruptible(sctpw.wait,
+ kfifo_len(&sctpw.fifo) != 0);
+ if (error)
+ goto out_free;
+
+ cnt = kfifo_out_locked(&sctpw.fifo, tbuf, len, &sctpw.lock);
+ error = copy_to_user(buf, tbuf, cnt) ? -EFAULT : 0;
+
+out_free:
+ vfree(tbuf);
+
+ return error ? error : cnt;
+}
+
+static const struct file_operations sctpprobe_fops = {
+ .owner = THIS_MODULE,
+ .open = sctpprobe_open,
+ .read = sctpprobe_read,
+};
+
+sctp_disposition_t jsctp_sf_eat_sack(const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const sctp_subtype_t type,
+ void *arg,
+ sctp_cmd_seq_t *commands)
+{
+ struct sctp_transport *sp;
+ static __u32 lcwnd = 0;
+ struct timespec now;
+
+ sp = asoc->peer.primary_path;
+
+ if ((full || sp->cwnd != lcwnd) &&
+ (!port || asoc->peer.port == port ||
+ ep->base.bind_addr.port == port)) {
+ lcwnd = sp->cwnd;
+
+ getnstimeofday(&now);
+ now = timespec_sub(now, sctpw.tstart);
+
+ printl("%lu.%06lu ", (unsigned long) now.tv_sec,
+ (unsigned long) now.tv_nsec / NSEC_PER_USEC);
+
+ printl("%p %5d %5d %5d %8d %5d ", asoc,
+ ep->base.bind_addr.port, asoc->peer.port,
+ asoc->pathmtu, asoc->peer.rwnd, asoc->unack_data);
+
+ list_for_each_entry(sp, &asoc->peer.transport_addr_list,
+ transports) {
+ if (sp == asoc->peer.primary_path)
+ printl("*");
+
+ if (sp->ipaddr.sa.sa_family == AF_INET)
+ printl("%pI4 ", &sp->ipaddr.v4.sin_addr);
+ else
+ printl("%pI6 ", &sp->ipaddr.v6.sin6_addr);
+
+ printl("%2u %8u %8u %8u %8u %8u ",
+ sp->state, sp->cwnd, sp->ssthresh,
+ sp->flight_size, sp->partial_bytes_acked,
+ sp->pathmtu);
+ }
+ printl("\n");
+ }
+
+ jprobe_return();
+ return 0;
+}
+
+static struct jprobe sctp_recv_probe = {
+ .kp = {
+ .symbol_name = "sctp_sf_eat_sack_6_2",
+ },
+ .entry = jsctp_sf_eat_sack,
+};
+
+static __init int sctpprobe_init(void)
+{
+ int ret = -ENOMEM;
+
+ init_waitqueue_head(&sctpw.wait);
+ spin_lock_init(&sctpw.lock);
+ if (kfifo_alloc(&sctpw.fifo, bufsize, GFP_KERNEL))
+ return ret;
+
+ if (!proc_net_fops_create(&init_net, procname, S_IRUSR,
+ &sctpprobe_fops))
+ goto free_kfifo;
+
+ ret = register_jprobe(&sctp_recv_probe);
+ if (ret)
+ goto remove_proc;
+
+ pr_info("SCTP probe registered (port=%d)\n", port);
+
+ return 0;
+
+remove_proc:
+ proc_net_remove(&init_net, procname);
+free_kfifo:
+ kfifo_free(&sctpw.fifo);
+ return ret;
+}
+
+static __exit void sctpprobe_exit(void)
+{
+ kfifo_free(&sctpw.fifo);
+ proc_net_remove(&init_net, procname);
+ unregister_jprobe(&sctp_recv_probe);
+}
+
+module_init(sctpprobe_init);
+module_exit(sctpprobe_exit);
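
Usage note: assuming the module builds as sctp_probe.ko (the Makefile hunk
is not part of this excerpt), a typical session would be "modprobe
sctp_probe port=0 full=1" followed by "cat /proc/net/sctpprobe", which
streams one line per received SACK: timestamp, association pointer,
local/peer ports, path MTU, peer rwnd and unacked data, then per-transport
state with the primary path marked by "*".
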
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 784bcc9a979d..61aacfbbaa92 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -181,7 +181,6 @@ static void * sctp_eps_seq_start(struct seq_file *seq, loff_t *pos)
static void sctp_eps_seq_stop(struct seq_file *seq, void *v)
{
- return;
}
@@ -286,7 +285,6 @@ static void * sctp_assocs_seq_start(struct seq_file *seq, loff_t *pos)
static void sctp_assocs_seq_stop(struct seq_file *seq, void *v)
{
- return;
}
@@ -409,7 +407,6 @@ static void *sctp_remaddr_seq_next(struct seq_file *seq, void *v, loff_t *pos)
static void sctp_remaddr_seq_stop(struct seq_file *seq, void *v)
{
- return;
}
static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index a56f98e82f92..182749867c72 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -474,13 +474,17 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
memset(&fl, 0x0, sizeof(struct flowi));
fl.fl4_dst = daddr->v4.sin_addr.s_addr;
+ fl.fl_ip_dport = daddr->v4.sin_port;
fl.proto = IPPROTO_SCTP;
if (asoc) {
fl.fl4_tos = RT_CONN_FLAGS(asoc->base.sk);
fl.oif = asoc->base.sk->sk_bound_dev_if;
+ fl.fl_ip_sport = htons(asoc->base.bind_addr.port);
}
- if (saddr)
+ if (saddr) {
fl.fl4_src = saddr->v4.sin_addr.s_addr;
+ fl.fl_ip_sport = saddr->v4.sin_port;
+ }
SCTP_DEBUG_PRINTK("%s: DST:%pI4, SRC:%pI4 - ",
__func__, &fl.fl4_dst, &fl.fl4_src);
@@ -528,6 +532,7 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
if ((laddr->state == SCTP_ADDR_SRC) &&
(AF_INET == laddr->a.sa.sa_family)) {
fl.fl4_src = laddr->a.v4.sin_addr.s_addr;
+ fl.fl_ip_sport = laddr->a.v4.sin_port;
if (!ip_route_output_key(&init_net, &rt, &fl)) {
dst = &rt->u.dst;
goto out_unlock;
@@ -854,7 +859,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb,
IP_PMTUDISC_DO : IP_PMTUDISC_DONT;
SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
- return ip_queue_xmit(skb, 0);
+ return ip_queue_xmit(skb);
}
static struct sctp_af sctp_af_inet;
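
Note on the protocol.c hunks: the flow key used for route lookups now
carries the SCTP port numbers, so policy routing (and, presumably, IPsec
policy matching) can select on ports rather than addresses alone. A
condensed view of the key construction, reusing names from the hunk
(sketch only):

    struct flowi fl;

    memset(&fl, 0, sizeof(fl));
    fl.proto       = IPPROTO_SCTP;
    fl.fl4_dst     = daddr->v4.sin_addr.s_addr;
    fl.fl_ip_dport = daddr->v4.sin_port;                       /* new */
    if (asoc)
            fl.fl_ip_sport = htons(asoc->base.bind_addr.port); /* new */
    if (saddr) {
            fl.fl4_src     = saddr->v4.sin_addr.s_addr;
            fl.fl_ip_sport = saddr->v4.sin_port;               /* new */
    }
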
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 0fd5b4c88358..bd2a50b482ac 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -108,7 +108,7 @@ static const struct sctp_paramhdr prsctp_param = {
cpu_to_be16(sizeof(struct sctp_paramhdr)),
};
-/* A helper to initialize to initialize an op error inside a
+/* A helper to initialize an op error inside a
* provided chunk, as most cause codes will be embedded inside an
* abort chunk.
*/
@@ -125,6 +125,29 @@ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err);
}
+/* A helper to initialize an op error inside a
+ * provided chunk, as most cause codes will be embedded inside an
+ * abort chunk. Differs from sctp_init_cause in that it won't oops
+ * if there isn't enough space in the op error chunk
+ */
+int sctp_init_cause_fixed(struct sctp_chunk *chunk, __be16 cause_code,
+ size_t paylen)
+{
+ sctp_errhdr_t err;
+ __u16 len;
+
+ /* Cause code constants are now defined in network order. */
+ err.cause = cause_code;
+ len = sizeof(sctp_errhdr_t) + paylen;
+ err.length = htons(len);
+
+ if (skb_tailroom(chunk->skb) < len)
+ return -ENOSPC;
+ chunk->subh.err_hdr = sctp_addto_chunk_fixed(chunk,
+ sizeof(sctp_errhdr_t),
+ &err);
+ return 0;
+}
/* 3.3.2 Initiation (INIT) (1)
*
* This chunk is used to initiate a SCTP association between two
@@ -422,10 +445,17 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
if (!retval)
goto nomem_chunk;
- /* Per the advice in RFC 2960 6.4, send this reply to
- * the source of the INIT packet.
+ /* RFC 2960 6.4 Multi-homed SCTP Endpoints
+ *
+ * An endpoint SHOULD transmit reply chunks (e.g., SACK,
+ * HEARTBEAT ACK, etc.) to the same destination transport
+ * address from which it received the DATA or control chunk
+ * to which it is replying.
+ *
+ * [INIT ACK back to where the INIT came from.]
*/
retval->transport = chunk->transport;
+
retval->subh.init_hdr =
sctp_addto_chunk(retval, sizeof(initack), &initack);
retval->param_hdr.v = sctp_addto_chunk(retval, addrs_len, addrs.v);
@@ -464,18 +494,6 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
/* We need to remove the const qualifier at this point. */
retval->asoc = (struct sctp_association *) asoc;
- /* RFC 2960 6.4 Multi-homed SCTP Endpoints
- *
- * An endpoint SHOULD transmit reply chunks (e.g., SACK,
- * HEARTBEAT ACK, * etc.) to the same destination transport
- * address from which it received the DATA or control chunk
- * to which it is replying.
- *
- * [INIT ACK back to where the INIT came from.]
- */
- if (chunk)
- retval->transport = chunk->transport;
-
nomem_chunk:
kfree(cookie);
nomem_cookie:
@@ -1132,6 +1150,24 @@ nodata:
return retval;
}
+/* Create an Operation Error chunk of a fixed size,
+ * specifically, max(asoc->pathmtu, SCTP_DEFAULT_MAXSEGMENT)
+ * This is a helper function to allocate an error chunk
+ * for those invalid parameter codes in which we may not want
+ * to report all the errors, if the incoming chunk is large.
+ */
+static inline struct sctp_chunk *sctp_make_op_error_fixed(
+ const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk)
+{
+ size_t size = asoc ? asoc->pathmtu : 0;
+
+ if (!size)
+ size = SCTP_DEFAULT_MAXSEGMENT;
+
+ return sctp_make_op_error_space(asoc, chunk, size);
+}
+
/* Create an Operation Error chunk. */
struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
const struct sctp_chunk *chunk,
@@ -1213,7 +1249,6 @@ struct sctp_chunk *sctp_chunkify(struct sk_buff *skb,
INIT_LIST_HEAD(&retval->list);
retval->skb = skb;
retval->asoc = (struct sctp_association *)asoc;
- retval->resent = 0;
retval->has_tsn = 0;
retval->has_ssn = 0;
retval->rtt_in_progress = 0;
@@ -1374,6 +1409,18 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
return target;
}
+/* Append bytes to the end of a chunk. Returns NULL if there isn't sufficient
+ * space in the chunk
+ */
+void *sctp_addto_chunk_fixed(struct sctp_chunk *chunk,
+ int len, const void *data)
+{
+ if (skb_tailroom(chunk->skb) >= len)
+ return sctp_addto_chunk(chunk, len, data);
+ else
+ return NULL;
+}
+
/* Append bytes from user space to the end of a chunk. Will panic if
* chunk is not big enough.
* Returns a kernel err value.
@@ -1977,13 +2024,12 @@ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc,
* returning multiple unknown parameters.
*/
if (NULL == *errp)
- *errp = sctp_make_op_error_space(asoc, chunk,
- ntohs(chunk->chunk_hdr->length));
+ *errp = sctp_make_op_error_fixed(asoc, chunk);
if (*errp) {
- sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
+ sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM,
WORD_ROUND(ntohs(param.p->length)));
- sctp_addto_chunk(*errp,
+ sctp_addto_chunk_fixed(*errp,
WORD_ROUND(ntohs(param.p->length)),
param.v);
} else {
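
Note on the *_fixed helpers above: they exist so that reporting unknown
parameters can never outgrow the error chunk that was allocated for it.
sctp_make_op_error_fixed() caps the allocation at the path MTU (falling
back to SCTP_DEFAULT_MAXSEGMENT), while sctp_init_cause_fixed() and
sctp_addto_chunk_fixed() both refuse to write past the skb's tailroom
instead of oopsing. The guard they share reduces to this sketch:

    /* Sketch of the common bounds check in the *_fixed variants. */
    if (skb_tailroom(chunk->skb) < len)
            return -ENOSPC;     /* report what fits; drop the rest */
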
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index d5ae450b6f02..f5e5e27cac5e 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -397,6 +397,41 @@ out_unlock:
sctp_transport_put(transport);
}
+/* Handle the timeout of the ICMP protocol unreachable timer. Trigger
+ * the correct state machine transition that will close the association.
+ */
+void sctp_generate_proto_unreach_event(unsigned long data)
+{
+ struct sctp_transport *transport = (struct sctp_transport *) data;
+ struct sctp_association *asoc = transport->asoc;
+
+ sctp_bh_lock_sock(asoc->base.sk);
+ if (sock_owned_by_user(asoc->base.sk)) {
+ SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __func__);
+
+ /* Try again later. */
+ if (!mod_timer(&transport->proto_unreach_timer,
+ jiffies + (HZ/20)))
+ sctp_association_hold(asoc);
+ goto out_unlock;
+ }
+
+ /* Is this structure just waiting around for us to actually
+ * get destroyed?
+ */
+ if (asoc->base.dead)
+ goto out_unlock;
+
+ sctp_do_sm(SCTP_EVENT_T_OTHER,
+ SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
+ asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);
+
+out_unlock:
+ sctp_bh_unlock_sock(asoc->base.sk);
+ sctp_association_put(asoc);
+}
+
+
/* Inject a SACK Timeout event into the state machine. */
static void sctp_generate_sack_event(unsigned long data)
{
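
Note: sctp_generate_proto_unreach_event() follows the usual SCTP timer
pattern: if the socket is owned by user context, the work is re-armed for
a short delay (HZ/20) rather than run inline, and the association
reference is taken only when mod_timer() actually (re)armed the timer. The
arming side is expected to live in the ICMP handler (assumption:
net/sctp/input.c, which is not part of this excerpt) and would look
roughly like:

    /* Hypothetical caller sketch; the real arming site is not shown here. */
    if (timer_pending(&t->proto_unreach_timer))
            return;
    if (!mod_timer(&t->proto_unreach_timer, jiffies + (HZ/20)))
            sctp_association_hold(asoc);
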
@@ -697,11 +732,15 @@ static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds,
{
struct sctp_transport *t;
- t = sctp_assoc_choose_alter_transport(asoc,
+ if (chunk->transport)
+ t = chunk->transport;
+ else {
+ t = sctp_assoc_choose_alter_transport(asoc,
asoc->shutdown_last_sent_to);
+ chunk->transport = t;
+ }
asoc->shutdown_last_sent_to = t;
asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto;
- chunk->transport = t;
}
/* Helper function to change the state of an association. */
@@ -853,8 +892,6 @@ static void sctp_cmd_process_fwdtsn(struct sctp_ulpq *ulpq,
sctp_walk_fwdtsn(skip, chunk) {
sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
}
-
- return;
}
/* Helper function to remove the association non-primary peer
@@ -873,8 +910,6 @@ static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
sctp_assoc_del_peer(asoc, &t->ipaddr);
}
}
-
- return;
}
/* Helper function to set sk_err on a 1-1 style socket. */
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 44a1ab03a3f0..ca44917872d2 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3720,9 +3720,6 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
SCTP_DBG_OBJCNT_INC(sock);
- /* Set socket backlog limit. */
- sk->sk_backlog.limit = sysctl_sctp_rmem[1];
-
local_bh_disable();
percpu_counter_inc(&sctp_sockets_allocated);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
@@ -4387,7 +4384,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
transports) {
memcpy(&temp, &from->ipaddr, sizeof(temp));
sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
- addrlen = sctp_get_af_specific(sk->sk_family)->sockaddr_len;
+ addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
if (space_left < addrlen)
return -ENOMEM;
if (copy_to_user(to, &temp, addrlen))
@@ -5436,6 +5433,8 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
rover++;
if ((rover < low) || (rover > high))
rover = low;
+ if (inet_is_reserved_local_port(rover))
+ continue;
index = sctp_phashfn(rover);
head = &sctp_port_hashtable[index];
sctp_spin_lock(&head->lock);
@@ -5482,7 +5481,6 @@ pp_found:
*/
int reuse = sk->sk_reuse;
struct sock *sk2;
- struct hlist_node *node;
SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n");
if (pp->fastreuse && sk->sk_reuse &&
@@ -5703,7 +5701,7 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
struct sctp_sock *sp = sctp_sk(sk);
unsigned int mask;
- poll_wait(file, sk->sk_sleep, wait);
+ poll_wait(file, sk_sleep(sk), wait);
/* A TCP-style listening socket becomes readable when the accept queue
* is not empty.
@@ -5944,7 +5942,7 @@ static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p)
int error;
DEFINE_WAIT(wait);
- prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
/* Socket errors? */
error = sock_error(sk);
@@ -5981,14 +5979,14 @@ static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p)
sctp_lock_sock(sk);
ready:
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
return 0;
interrupted:
error = sock_intr_errno(*timeo_p);
out:
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
*err = error;
return error;
}
@@ -6062,14 +6060,14 @@ static void __sctp_write_space(struct sctp_association *asoc)
wake_up_interruptible(&asoc->wait);
if (sctp_writeable(sk)) {
- if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible(sk->sk_sleep);
+ if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
+ wake_up_interruptible(sk_sleep(sk));
/* Note that we try to include the Async I/O support
* here by modeling from the current TCP/UDP code.
* We have not tested with it yet.
*/
- if (sock->fasync_list &&
+ if (sock->wq->fasync_list &&
!(sk->sk_shutdown & SEND_SHUTDOWN))
sock_wake_async(sock,
SOCK_WAKE_SPACE, POLL_OUT);
@@ -6191,12 +6189,15 @@ do_nonblock:
void sctp_data_ready(struct sock *sk, int len)
{
- read_lock_bh(&sk->sk_callback_lock);
- if (sk_has_sleeper(sk))
- wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN |
+ struct socket_wq *wq;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
POLLRDNORM | POLLRDBAND);
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
- read_unlock_bh(&sk->sk_callback_lock);
+ rcu_read_unlock();
}
/* If socket sndbuf has changed, wake up all per association waiters. */
@@ -6307,7 +6308,7 @@ static int sctp_wait_for_accept(struct sock *sk, long timeo)
for (;;) {
- prepare_to_wait_exclusive(sk->sk_sleep, &wait,
+ prepare_to_wait_exclusive(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
if (list_empty(&ep->asocs)) {
@@ -6333,7 +6334,7 @@ static int sctp_wait_for_accept(struct sock *sk, long timeo)
break;
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
return err;
}
@@ -6343,7 +6344,7 @@ static void sctp_wait_for_close(struct sock *sk, long timeout)
DEFINE_WAIT(wait);
do {
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
if (list_empty(&sctp_sk(sk)->ep->asocs))
break;
sctp_release_sock(sk);
@@ -6351,7 +6352,7 @@ static void sctp_wait_for_close(struct sock *sk, long timeout)
sctp_lock_sock(sk);
} while (!signal_pending(current) && timeout);
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
}
static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk)
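
Note: the net/sctp/socket.c hunks replace direct sk->sk_sleep dereferences
with the sk_sleep() accessor. With the socket_wq conversion later in this
diff, the accessor resolves through the RCU-managed wait queue; its shape
is roughly the following (a sketch, assuming the include/net/sock.h
definition that accompanies this series):

    static inline wait_queue_head_t *sk_sleep(struct sock *sk)
    {
            return &sk->sk_wq->wait;
    }
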
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index be4d63d5a5cc..132046cb82fc 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -64,9 +64,6 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
/* Copy in the address. */
peer->ipaddr = *addr;
peer->af_specific = sctp_get_af_specific(addr->sa.sa_family);
- peer->asoc = NULL;
-
- peer->dst = NULL;
memset(&peer->saddr, 0, sizeof(union sctp_addr));
/* From 6.3.1 RTO Calculation:
@@ -76,52 +73,32 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
* parameter 'RTO.Initial'.
*/
peer->rto = msecs_to_jiffies(sctp_rto_initial);
- peer->rtt = 0;
- peer->rttvar = 0;
- peer->srtt = 0;
- peer->rto_pending = 0;
- peer->hb_sent = 0;
- peer->fast_recovery = 0;
peer->last_time_heard = jiffies;
peer->last_time_ecne_reduced = jiffies;
- peer->init_sent_count = 0;
-
peer->param_flags = SPP_HB_DISABLE |
SPP_PMTUD_ENABLE |
SPP_SACKDELAY_ENABLE;
- peer->hbinterval = 0;
/* Initialize the default path max_retrans. */
peer->pathmaxrxt = sctp_max_retrans_path;
- peer->error_count = 0;
INIT_LIST_HEAD(&peer->transmitted);
INIT_LIST_HEAD(&peer->send_ready);
INIT_LIST_HEAD(&peer->transports);
- peer->T3_rtx_timer.expires = 0;
- peer->hb_timer.expires = 0;
-
setup_timer(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event,
(unsigned long)peer);
setup_timer(&peer->hb_timer, sctp_generate_heartbeat_event,
(unsigned long)peer);
+ setup_timer(&peer->proto_unreach_timer,
+ sctp_generate_proto_unreach_event, (unsigned long)peer);
/* Initialize the 64-bit random nonce sent with heartbeat. */
get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce));
atomic_set(&peer->refcnt, 1);
- peer->dead = 0;
-
- peer->malloced = 0;
-
- /* Initialize the state information for SFR-CACC */
- peer->cacc.changeover_active = 0;
- peer->cacc.cycling_changeover = 0;
- peer->cacc.next_tsn_at_change = 0;
- peer->cacc.cacc_saw_newack = 0;
return peer;
}
@@ -171,6 +148,10 @@ void sctp_transport_free(struct sctp_transport *transport)
del_timer(&transport->T3_rtx_timer))
sctp_transport_put(transport);
+ /* Delete the ICMP proto unreachable timer if it's active. */
+ if (timer_pending(&transport->proto_unreach_timer) &&
+ del_timer(&transport->proto_unreach_timer))
+ sctp_association_put(transport->asoc);
sctp_transport_put(transport);
}
@@ -195,7 +176,7 @@ static void sctp_transport_destroy(struct sctp_transport *transport)
/* Start T3_rtx timer if it is not already running and update the heartbeat
* timer. This routine is called every time a DATA chunk is sent.
*/
-void sctp_transport_reset_timers(struct sctp_transport *transport, int force)
+void sctp_transport_reset_timers(struct sctp_transport *transport)
{
/* RFC 2960 6.3.2 Retransmission Timer Rules
*
@@ -205,7 +186,7 @@ void sctp_transport_reset_timers(struct sctp_transport *transport, int force)
* address.
*/
- if (force || !timer_pending(&transport->T3_rtx_timer))
+ if (!timer_pending(&transport->T3_rtx_timer))
if (!mod_timer(&transport->T3_rtx_timer,
jiffies + transport->rto))
sctp_transport_hold(transport);
@@ -403,15 +384,16 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
void sctp_transport_raise_cwnd(struct sctp_transport *transport,
__u32 sack_ctsn, __u32 bytes_acked)
{
+ struct sctp_association *asoc = transport->asoc;
__u32 cwnd, ssthresh, flight_size, pba, pmtu;
cwnd = transport->cwnd;
flight_size = transport->flight_size;
/* See if we need to exit Fast Recovery first */
- if (transport->fast_recovery &&
- TSN_lte(transport->fast_recovery_exit, sack_ctsn))
- transport->fast_recovery = 0;
+ if (asoc->fast_recovery &&
+ TSN_lte(asoc->fast_recovery_exit, sack_ctsn))
+ asoc->fast_recovery = 0;
/* The appropriate cwnd increase algorithm is performed if, and only
 * if the cumulative TSN would advance and the congestion window is
@@ -440,7 +422,7 @@ void sctp_transport_raise_cwnd(struct sctp_transport *transport,
* 2) the destination's path MTU. This upper bound protects
* against the ACK-Splitting attack outlined in [SAVAGE99].
*/
- if (transport->fast_recovery)
+ if (asoc->fast_recovery)
return;
if (bytes_acked > pmtu)
@@ -491,6 +473,8 @@ void sctp_transport_raise_cwnd(struct sctp_transport *transport,
void sctp_transport_lower_cwnd(struct sctp_transport *transport,
sctp_lower_cwnd_t reason)
{
+ struct sctp_association *asoc = transport->asoc;
+
switch (reason) {
case SCTP_LOWER_CWND_T3_RTX:
/* RFC 2960 Section 7.2.3, sctpimpguide
@@ -501,11 +485,11 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
* partial_bytes_acked = 0
*/
transport->ssthresh = max(transport->cwnd/2,
- 4*transport->asoc->pathmtu);
- transport->cwnd = transport->asoc->pathmtu;
+ 4*asoc->pathmtu);
+ transport->cwnd = asoc->pathmtu;
- /* T3-rtx also clears fast recovery on the transport */
- transport->fast_recovery = 0;
+ /* T3-rtx also clears fast recovery */
+ asoc->fast_recovery = 0;
break;
case SCTP_LOWER_CWND_FAST_RTX:
@@ -521,15 +505,15 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
* cwnd = ssthresh
* partial_bytes_acked = 0
*/
- if (transport->fast_recovery)
+ if (asoc->fast_recovery)
return;
/* Mark Fast recovery */
- transport->fast_recovery = 1;
- transport->fast_recovery_exit = transport->asoc->next_tsn - 1;
+ asoc->fast_recovery = 1;
+ asoc->fast_recovery_exit = asoc->next_tsn - 1;
transport->ssthresh = max(transport->cwnd/2,
- 4*transport->asoc->pathmtu);
+ 4*asoc->pathmtu);
transport->cwnd = transport->ssthresh;
break;
@@ -549,7 +533,7 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
if (time_after(jiffies, transport->last_time_ecne_reduced +
transport->rtt)) {
transport->ssthresh = max(transport->cwnd/2,
- 4*transport->asoc->pathmtu);
+ 4*asoc->pathmtu);
transport->cwnd = transport->ssthresh;
transport->last_time_ecne_reduced = jiffies;
}
@@ -565,7 +549,7 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
* interval.
*/
transport->cwnd = max(transport->cwnd/2,
- 4*transport->asoc->pathmtu);
+ 4*asoc->pathmtu);
break;
}
@@ -650,7 +634,6 @@ void sctp_transport_reset(struct sctp_transport *t)
t->error_count = 0;
t->rto_pending = 0;
t->hb_sent = 0;
- t->fast_recovery = 0;
/* Initialize the state information for SFR-CACC */
t->cacc.changeover_active = 0;
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 3a448536f0b6..c7f7e49609cb 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -955,7 +955,6 @@ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
* ordering and deliver them if needed.
*/
sctp_ulpq_reap_ordered(ulpq, sid);
- return;
}
static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
@@ -1064,7 +1063,6 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
}
sk_mem_reclaim(asoc->base.sk);
- return;
}
diff --git a/net/socket.c b/net/socket.c
index 5e8d0af3c0e7..f9f7d0872cac 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -252,9 +252,14 @@ static struct inode *sock_alloc_inode(struct super_block *sb)
ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL);
if (!ei)
return NULL;
- init_waitqueue_head(&ei->socket.wait);
+ ei->socket.wq = kmalloc(sizeof(struct socket_wq), GFP_KERNEL);
+ if (!ei->socket.wq) {
+ kmem_cache_free(sock_inode_cachep, ei);
+ return NULL;
+ }
+ init_waitqueue_head(&ei->socket.wq->wait);
+ ei->socket.wq->fasync_list = NULL;
- ei->socket.fasync_list = NULL;
ei->socket.state = SS_UNCONNECTED;
ei->socket.flags = 0;
ei->socket.ops = NULL;
@@ -264,10 +269,21 @@ static struct inode *sock_alloc_inode(struct super_block *sb)
return &ei->vfs_inode;
}
+
+static void wq_free_rcu(struct rcu_head *head)
+{
+ struct socket_wq *wq = container_of(head, struct socket_wq, rcu);
+
+ kfree(wq);
+}
+
static void sock_destroy_inode(struct inode *inode)
{
- kmem_cache_free(sock_inode_cachep,
- container_of(inode, struct socket_alloc, vfs_inode));
+ struct socket_alloc *ei;
+
+ ei = container_of(inode, struct socket_alloc, vfs_inode);
+ call_rcu(&ei->socket.wq->rcu, wq_free_rcu);
+ kmem_cache_free(sock_inode_cachep, ei);
}
static void init_once(void *foo)
@@ -513,7 +529,7 @@ void sock_release(struct socket *sock)
module_put(owner);
}
- if (sock->fasync_list)
+ if (sock->wq->fasync_list)
printk(KERN_ERR "sock_release: fasync list not empty!\n");
percpu_sub(sockets_in_use, 1);
@@ -620,10 +636,9 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP,
sizeof(tv), &tv);
} else {
- struct timespec ts;
- skb_get_timestampns(skb, &ts);
+ skb_get_timestampns(skb, &ts[0]);
put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS,
- sizeof(ts), &ts);
+ sizeof(ts[0]), &ts[0]);
}
}
@@ -656,13 +671,13 @@ inline void sock_recv_drops(struct msghdr *msg, struct sock *sk, struct sk_buff
sizeof(__u32), &skb->dropcount);
}
-void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
+void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb)
{
sock_recv_timestamp(msg, sk, skb);
sock_recv_drops(msg, sk, skb);
}
-EXPORT_SYMBOL_GPL(sock_recv_ts_and_drops);
+EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops);
static inline int __sock_recvmsg_nosec(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
@@ -1068,87 +1083,44 @@ static int sock_close(struct inode *inode, struct file *filp)
* 1. fasync_list is modified only under process context socket lock
* i.e. under semaphore.
* 2. fasync_list is used under read_lock(&sk->sk_callback_lock)
- * or under socket lock.
- * 3. fasync_list can be used from softirq context, so that
- * modification under socket lock have to be enhanced with
- * write_lock_bh(&sk->sk_callback_lock).
- * --ANK (990710)
+ * or under socket lock
*/
static int sock_fasync(int fd, struct file *filp, int on)
{
- struct fasync_struct *fa, *fna = NULL, **prev;
- struct socket *sock;
- struct sock *sk;
-
- if (on) {
- fna = kmalloc(sizeof(struct fasync_struct), GFP_KERNEL);
- if (fna == NULL)
- return -ENOMEM;
- }
-
- sock = filp->private_data;
+ struct socket *sock = filp->private_data;
+ struct sock *sk = sock->sk;
- sk = sock->sk;
- if (sk == NULL) {
- kfree(fna);
+ if (sk == NULL)
return -EINVAL;
- }
lock_sock(sk);
- spin_lock(&filp->f_lock);
- if (on)
- filp->f_flags |= FASYNC;
- else
- filp->f_flags &= ~FASYNC;
- spin_unlock(&filp->f_lock);
+ fasync_helper(fd, filp, on, &sock->wq->fasync_list);
- prev = &(sock->fasync_list);
-
- for (fa = *prev; fa != NULL; prev = &fa->fa_next, fa = *prev)
- if (fa->fa_file == filp)
- break;
-
- if (on) {
- if (fa != NULL) {
- write_lock_bh(&sk->sk_callback_lock);
- fa->fa_fd = fd;
- write_unlock_bh(&sk->sk_callback_lock);
-
- kfree(fna);
- goto out;
- }
- fna->fa_file = filp;
- fna->fa_fd = fd;
- fna->magic = FASYNC_MAGIC;
- fna->fa_next = sock->fasync_list;
- write_lock_bh(&sk->sk_callback_lock);
- sock->fasync_list = fna;
+ if (!sock->wq->fasync_list)
+ sock_reset_flag(sk, SOCK_FASYNC);
+ else
sock_set_flag(sk, SOCK_FASYNC);
- write_unlock_bh(&sk->sk_callback_lock);
- } else {
- if (fa != NULL) {
- write_lock_bh(&sk->sk_callback_lock);
- *prev = fa->fa_next;
- if (!sock->fasync_list)
- sock_reset_flag(sk, SOCK_FASYNC);
- write_unlock_bh(&sk->sk_callback_lock);
- kfree(fa);
- }
- }
-out:
- release_sock(sock->sk);
+ release_sock(sk);
return 0;
}
-/* This function may be called only under socket lock or callback_lock */
+/* This function may be called only under socket lock or callback_lock or rcu_lock */
int sock_wake_async(struct socket *sock, int how, int band)
{
- if (!sock || !sock->fasync_list)
+ struct socket_wq *wq;
+
+ if (!sock)
return -1;
+ rcu_read_lock();
+ wq = rcu_dereference(sock->wq);
+ if (!wq || !wq->fasync_list) {
+ rcu_read_unlock();
+ return -1;
+ }
switch (how) {
case SOCK_WAKE_WAITD:
if (test_bit(SOCK_ASYNC_WAITDATA, &sock->flags))
@@ -1160,11 +1132,12 @@ int sock_wake_async(struct socket *sock, int how, int band)
/* fall through */
case SOCK_WAKE_IO:
call_kill:
- __kill_fasync(sock->fasync_list, SIGIO, band);
+ kill_fasync(&wq->fasync_list, SIGIO, band);
break;
case SOCK_WAKE_URG:
- __kill_fasync(sock->fasync_list, SIGURG, band);
+ kill_fasync(&wq->fasync_list, SIGURG, band);
}
+ rcu_read_unlock();
return 0;
}
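
Note: after this conversion, everything that used to hang off
socket->fasync_list and the socket's wait queue lives in one RCU-managed
struct socket_wq, freed via call_rcu() in sock_destroy_inode(). Readers
take rcu_read_lock() instead of sk_callback_lock, as sctp_data_ready()
earlier in this diff already shows; the generic reader pattern is:

    /* Sketch: reader side of the RCU-protected socket_wq. */
    struct socket_wq *wq;

    rcu_read_lock();
    wq = rcu_dereference(sk->sk_wq);
    if (wq_has_sleeper(wq))
            wake_up_interruptible_poll(&wq->wait, POLLIN);
    rcu_read_unlock();
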
@@ -2642,7 +2615,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
return dev_ioctl(net, cmd, uifr);
default:
return -EINVAL;
- };
+ }
}
static int siocdevprivate_ioctl(struct net *net, unsigned int cmd,
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index f394fc190a49..73affb8624fa 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -236,10 +236,15 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan)
list_for_each_entry_safe(cred, next, &cred_unused, cr_lru) {
- /* Enforce a 60 second garbage collection moratorium */
- if (time_in_range_open(cred->cr_expire, expired, jiffies) &&
+ if (nr_to_scan-- == 0)
+ break;
+ /*
+ * Enforce a 60 second garbage collection moratorium
+ * Note that the cred_unused list must be time-ordered.
+ */
+ if (time_in_range(cred->cr_expire, expired, jiffies) &&
test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0)
- continue;
+ return 0;
list_del_init(&cred->cr_lru);
number_cred_unused--;
@@ -252,13 +257,10 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan)
get_rpccred(cred);
list_add_tail(&cred->cr_lru, free);
rpcauth_unhash_cred_locked(cred);
- nr_to_scan--;
}
spin_unlock(cache_lock);
- if (nr_to_scan == 0)
- break;
}
- return nr_to_scan;
+ return (number_cred_unused / 100) * sysctl_vfs_cache_pressure;
}
/*
@@ -270,11 +272,12 @@ rpcauth_cache_shrinker(int nr_to_scan, gfp_t gfp_mask)
LIST_HEAD(free);
int res;
+ if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
+ return (nr_to_scan == 0) ? 0 : -1;
if (list_empty(&cred_unused))
return 0;
spin_lock(&rpc_credcache_lock);
- nr_to_scan = rpcauth_prune_expired(&free, nr_to_scan);
- res = (number_cred_unused / 100) * sysctl_vfs_cache_pressure;
+ res = rpcauth_prune_expired(&free, nr_to_scan);
spin_unlock(&rpc_credcache_lock);
rpcauth_destroy_credlist(&free);
return res;
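
Note on the auth.c rework: the nr_to_scan accounting moves into
rpcauth_prune_expired(), whose return value now carries the shrinker's
cache-pressure estimate directly (unused credentials scaled by
sysctl_vfs_cache_pressure). Because cred_unused is time-ordered, the first
entry still inside the 60-second moratorium ends the scan early, and a GFP
mask without GFP_KERNEL only reports pressure (or -1) instead of pruning.
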
diff --git a/net/sunrpc/auth_gss/Makefile b/net/sunrpc/auth_gss/Makefile
index 4de8bcf26fa7..74a231735f67 100644
--- a/net/sunrpc/auth_gss/Makefile
+++ b/net/sunrpc/auth_gss/Makefile
@@ -10,7 +10,7 @@ auth_rpcgss-objs := auth_gss.o gss_generic_token.o \
obj-$(CONFIG_RPCSEC_GSS_KRB5) += rpcsec_gss_krb5.o
rpcsec_gss_krb5-objs := gss_krb5_mech.o gss_krb5_seal.o gss_krb5_unseal.o \
- gss_krb5_seqnum.o gss_krb5_wrap.o gss_krb5_crypto.o
+ gss_krb5_seqnum.o gss_krb5_wrap.o gss_krb5_crypto.o gss_krb5_keys.o
obj-$(CONFIG_RPCSEC_GSS_SPKM3) += rpcsec_gss_spkm3.o
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index c389ccf6437d..8da2a0e68574 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -57,11 +57,14 @@ static const struct rpc_authops authgss_ops;
static const struct rpc_credops gss_credops;
static const struct rpc_credops gss_nullops;
+#define GSS_RETRY_EXPIRED 5
+static unsigned int gss_expired_cred_retry_delay = GSS_RETRY_EXPIRED;
+
#ifdef RPC_DEBUG
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif
-#define GSS_CRED_SLACK 1024
+#define GSS_CRED_SLACK (RPC_MAX_AUTH_SIZE * 2)
/* length of a krb5 verifier (48), plus data added before arguments when
* using integrity (two 4-byte integers): */
#define GSS_VERF_SLACK 100
@@ -229,7 +232,7 @@ gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct
p = ERR_PTR(-EFAULT);
goto err;
}
- ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx);
+ ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx, GFP_NOFS);
if (ret < 0) {
p = ERR_PTR(ret);
goto err;
@@ -350,6 +353,24 @@ gss_unhash_msg(struct gss_upcall_msg *gss_msg)
}
static void
+gss_handle_downcall_result(struct gss_cred *gss_cred, struct gss_upcall_msg *gss_msg)
+{
+ switch (gss_msg->msg.errno) {
+ case 0:
+ if (gss_msg->ctx == NULL)
+ break;
+ clear_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
+ gss_cred_set_ctx(&gss_cred->gc_base, gss_msg->ctx);
+ break;
+ case -EKEYEXPIRED:
+ set_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
+ }
+ gss_cred->gc_upcall_timestamp = jiffies;
+ gss_cred->gc_upcall = NULL;
+ rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
+}
+
+static void
gss_upcall_callback(struct rpc_task *task)
{
struct gss_cred *gss_cred = container_of(task->tk_msg.rpc_cred,
@@ -358,13 +379,9 @@ gss_upcall_callback(struct rpc_task *task)
struct inode *inode = &gss_msg->inode->vfs_inode;
spin_lock(&inode->i_lock);
- if (gss_msg->ctx)
- gss_cred_set_ctx(task->tk_msg.rpc_cred, gss_msg->ctx);
- else
- task->tk_status = gss_msg->msg.errno;
- gss_cred->gc_upcall = NULL;
- rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
+ gss_handle_downcall_result(gss_cred, gss_msg);
spin_unlock(&inode->i_lock);
+ task->tk_status = gss_msg->msg.errno;
gss_release_msg(gss_msg);
}
@@ -377,11 +394,12 @@ static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg)
static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
struct rpc_clnt *clnt, int machine_cred)
{
+ struct gss_api_mech *mech = gss_msg->auth->mech;
char *p = gss_msg->databuf;
int len = 0;
gss_msg->msg.len = sprintf(gss_msg->databuf, "mech=%s uid=%d ",
- gss_msg->auth->mech->gm_name,
+ mech->gm_name,
gss_msg->uid);
p += gss_msg->msg.len;
if (clnt->cl_principal) {
@@ -398,6 +416,11 @@ static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
p += len;
gss_msg->msg.len += len;
}
+ if (mech->gm_upcall_enctypes) {
+ len = sprintf(p, mech->gm_upcall_enctypes);
+ p += len;
+ gss_msg->msg.len += len;
+ }
len = sprintf(p, "\n");
gss_msg->msg.len += len;
@@ -507,18 +530,16 @@ gss_refresh_upcall(struct rpc_task *task)
spin_lock(&inode->i_lock);
if (gss_cred->gc_upcall != NULL)
rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL);
- else if (gss_msg->ctx != NULL) {
- gss_cred_set_ctx(task->tk_msg.rpc_cred, gss_msg->ctx);
- gss_cred->gc_upcall = NULL;
- rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
- } else if (gss_msg->msg.errno >= 0) {
+ else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) {
task->tk_timeout = 0;
gss_cred->gc_upcall = gss_msg;
/* gss_upcall_callback will release the reference to gss_upcall_msg */
atomic_inc(&gss_msg->count);
rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback);
- } else
+ } else {
+ gss_handle_downcall_result(gss_cred, gss_msg);
err = gss_msg->msg.errno;
+ }
spin_unlock(&inode->i_lock);
gss_release_msg(gss_msg);
out:
@@ -1117,6 +1138,23 @@ static int gss_renew_cred(struct rpc_task *task)
return 0;
}
+static int gss_cred_is_negative_entry(struct rpc_cred *cred)
+{
+ if (test_bit(RPCAUTH_CRED_NEGATIVE, &cred->cr_flags)) {
+ unsigned long now = jiffies;
+ unsigned long begin, expire;
+ struct gss_cred *gss_cred;
+
+ gss_cred = container_of(cred, struct gss_cred, gc_base);
+ begin = gss_cred->gc_upcall_timestamp;
+ expire = begin + gss_expired_cred_retry_delay * HZ;
+
+ if (time_in_range_open(now, begin, expire))
+ return 1;
+ }
+ return 0;
+}
+
/*
* Refresh credentials. XXX - finish
*/
@@ -1126,6 +1164,9 @@ gss_refresh(struct rpc_task *task)
struct rpc_cred *cred = task->tk_msg.rpc_cred;
int ret = 0;
+ if (gss_cred_is_negative_entry(cred))
+ return -EKEYEXPIRED;
+
if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) &&
!test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags)) {
ret = gss_renew_cred(task);
@@ -1316,15 +1357,21 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
inpages = snd_buf->pages + first;
snd_buf->pages = rqstp->rq_enc_pages;
snd_buf->page_base -= first << PAGE_CACHE_SHIFT;
- /* Give the tail its own page, in case we need extra space in the
- * head when wrapping: */
+ /*
+ * Give the tail its own page, in case we need extra space in the
+ * head when wrapping:
+ *
+ * call_allocate() allocates twice the slack space required
+ * by the authentication flavor to rq_callsize.
+ * For GSS, slack is GSS_CRED_SLACK.
+ */
if (snd_buf->page_len || snd_buf->tail[0].iov_len) {
tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]);
memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len);
snd_buf->tail[0].iov_base = tmp;
}
maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
- /* RPC_SLACK_SPACE should prevent this ever happening: */
+ /* slack space should prevent this ever happening: */
BUG_ON(snd_buf->len > snd_buf->buflen);
status = -EIO;
/* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
@@ -1573,5 +1620,11 @@ static void __exit exit_rpcsec_gss(void)
}
MODULE_LICENSE("GPL");
+module_param_named(expired_cred_retry_delay,
+ gss_expired_cred_retry_delay,
+ uint, 0644);
+MODULE_PARM_DESC(expired_cred_retry_delay, "Timeout (in seconds) until "
+ "the RPC engine retries an expired credential");
+
module_init(init_rpcsec_gss)
module_exit(exit_rpcsec_gss)
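
Note: the negative-entry cache above makes a credential whose upcall
returned -EKEYEXPIRED short-circuit gss_refresh() with -EKEYEXPIRED for
gss_expired_cred_retry_delay seconds instead of re-driving an upcall on
every RPC. Since the parameter is registered with mode 0644 it should be
tunable at runtime, presumably via
/sys/module/auth_rpcgss/parameters/expired_cred_retry_delay (assuming the
module is named auth_rpcgss).
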
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index e9b636176687..75ee993ea057 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -1,7 +1,7 @@
/*
* linux/net/sunrpc/gss_krb5_crypto.c
*
- * Copyright (c) 2000 The Regents of the University of Michigan.
+ * Copyright (c) 2000-2008 The Regents of the University of Michigan.
* All rights reserved.
*
* Andy Adamson <andros@umich.edu>
@@ -41,6 +41,7 @@
#include <linux/crypto.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
+#include <linux/random.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/sunrpc/xdr.h>
@@ -58,13 +59,13 @@ krb5_encrypt(
{
u32 ret = -EINVAL;
struct scatterlist sg[1];
- u8 local_iv[16] = {0};
+ u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
if (length % crypto_blkcipher_blocksize(tfm) != 0)
goto out;
- if (crypto_blkcipher_ivsize(tfm) > 16) {
+ if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
dprintk("RPC: gss_k5encrypt: tfm iv size too large %d\n",
crypto_blkcipher_ivsize(tfm));
goto out;
@@ -92,13 +93,13 @@ krb5_decrypt(
{
u32 ret = -EINVAL;
struct scatterlist sg[1];
- u8 local_iv[16] = {0};
+ u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
if (length % crypto_blkcipher_blocksize(tfm) != 0)
goto out;
- if (crypto_blkcipher_ivsize(tfm) > 16) {
+ if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
dprintk("RPC: gss_k5decrypt: tfm iv size too large %d\n",
crypto_blkcipher_ivsize(tfm));
goto out;
@@ -123,21 +124,155 @@ checksummer(struct scatterlist *sg, void *data)
return crypto_hash_update(desc, sg, sg->length);
}
-/* checksum the plaintext data and hdrlen bytes of the token header */
-s32
-make_checksum(char *cksumname, char *header, int hdrlen, struct xdr_buf *body,
- int body_offset, struct xdr_netobj *cksum)
+static int
+arcfour_hmac_md5_usage_to_salt(unsigned int usage, u8 salt[4])
+{
+ unsigned int ms_usage;
+
+ switch (usage) {
+ case KG_USAGE_SIGN:
+ ms_usage = 15;
+ break;
+ case KG_USAGE_SEAL:
+ ms_usage = 13;
+ break;
+ default:
+		return -EINVAL;
+ }
+ salt[0] = (ms_usage >> 0) & 0xff;
+ salt[1] = (ms_usage >> 8) & 0xff;
+ salt[2] = (ms_usage >> 16) & 0xff;
+ salt[3] = (ms_usage >> 24) & 0xff;
+
+ return 0;
+}
+
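
Note: for the arcfour (RC4-HMAC) enctype the GSS usage value is first
mapped to the corresponding Microsoft usage number and serialized
little-endian into a 4-byte salt that feeds the initial MD5 pass. For
example (a sketch using the helper above):

    u8 salt[4];

    /* KG_USAGE_SIGN maps to ms_usage 15, stored little-endian: */
    arcfour_hmac_md5_usage_to_salt(KG_USAGE_SIGN, salt);
    /* salt == { 0x0f, 0x00, 0x00, 0x00 } */
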
+static u32
+make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
+ struct xdr_buf *body, int body_offset, u8 *cksumkey,
+ unsigned int usage, struct xdr_netobj *cksumout)
{
- struct hash_desc desc; /* XXX add to ctx? */
+ struct hash_desc desc;
struct scatterlist sg[1];
int err;
+ u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
+ u8 rc4salt[4];
+ struct crypto_hash *md5;
+ struct crypto_hash *hmac_md5;
+
+ if (cksumkey == NULL)
+ return GSS_S_FAILURE;
+
+ if (cksumout->len < kctx->gk5e->cksumlength) {
+ dprintk("%s: checksum buffer length, %u, too small for %s\n",
+ __func__, cksumout->len, kctx->gk5e->name);
+ return GSS_S_FAILURE;
+ }
+
+ if (arcfour_hmac_md5_usage_to_salt(usage, rc4salt)) {
+ dprintk("%s: invalid usage value %u\n", __func__, usage);
+ return GSS_S_FAILURE;
+ }
+
+ md5 = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(md5))
+ return GSS_S_FAILURE;
+
+ hmac_md5 = crypto_alloc_hash(kctx->gk5e->cksum_name, 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(hmac_md5)) {
+ crypto_free_hash(md5);
+ return GSS_S_FAILURE;
+ }
+
+ desc.tfm = md5;
+ desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ err = crypto_hash_init(&desc);
+ if (err)
+ goto out;
+ sg_init_one(sg, rc4salt, 4);
+ err = crypto_hash_update(&desc, sg, 4);
+ if (err)
+ goto out;
+
+ sg_init_one(sg, header, hdrlen);
+ err = crypto_hash_update(&desc, sg, hdrlen);
+ if (err)
+ goto out;
+ err = xdr_process_buf(body, body_offset, body->len - body_offset,
+ checksummer, &desc);
+ if (err)
+ goto out;
+ err = crypto_hash_final(&desc, checksumdata);
+ if (err)
+ goto out;
+
+ desc.tfm = hmac_md5;
+ desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ err = crypto_hash_init(&desc);
+ if (err)
+ goto out;
+ err = crypto_hash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength);
+ if (err)
+ goto out;
+
+ sg_init_one(sg, checksumdata, crypto_hash_digestsize(md5));
+ err = crypto_hash_digest(&desc, sg, crypto_hash_digestsize(md5),
+ checksumdata);
+ if (err)
+ goto out;
+
+ memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
+ cksumout->len = kctx->gk5e->cksumlength;
+out:
+ crypto_free_hash(md5);
+ crypto_free_hash(hmac_md5);
+ return err ? GSS_S_FAILURE : 0;
+}
+
+/*
+ * checksum the plaintext data and hdrlen bytes of the token header
+ * The checksum is performed over the first 8 bytes of the
+ * gss token header and then over the data body
+ */
+u32
+make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
+ struct xdr_buf *body, int body_offset, u8 *cksumkey,
+ unsigned int usage, struct xdr_netobj *cksumout)
+{
+ struct hash_desc desc;
+ struct scatterlist sg[1];
+ int err;
+ u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
+ unsigned int checksumlen;
+
+ if (kctx->gk5e->ctype == CKSUMTYPE_HMAC_MD5_ARCFOUR)
+ return make_checksum_hmac_md5(kctx, header, hdrlen,
+ body, body_offset,
+ cksumkey, usage, cksumout);
+
+ if (cksumout->len < kctx->gk5e->cksumlength) {
+ dprintk("%s: checksum buffer length, %u, too small for %s\n",
+ __func__, cksumout->len, kctx->gk5e->name);
+ return GSS_S_FAILURE;
+ }
- desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC);
+ desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(desc.tfm))
return GSS_S_FAILURE;
- cksum->len = crypto_hash_digestsize(desc.tfm);
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ checksumlen = crypto_hash_digestsize(desc.tfm);
+
+ if (cksumkey != NULL) {
+ err = crypto_hash_setkey(desc.tfm, cksumkey,
+ kctx->gk5e->keylength);
+ if (err)
+ goto out;
+ }
+
err = crypto_hash_init(&desc);
if (err)
goto out;
@@ -149,15 +284,109 @@ make_checksum(char *cksumname, char *header, int hdrlen, struct xdr_buf *body,
checksummer, &desc);
if (err)
goto out;
- err = crypto_hash_final(&desc, cksum->data);
+ err = crypto_hash_final(&desc, checksumdata);
+ if (err)
+ goto out;
+ switch (kctx->gk5e->ctype) {
+ case CKSUMTYPE_RSA_MD5:
+ err = kctx->gk5e->encrypt(kctx->seq, NULL, checksumdata,
+ checksumdata, checksumlen);
+ if (err)
+ goto out;
+ memcpy(cksumout->data,
+ checksumdata + checksumlen - kctx->gk5e->cksumlength,
+ kctx->gk5e->cksumlength);
+ break;
+ case CKSUMTYPE_HMAC_SHA1_DES3:
+ memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
+ break;
+ default:
+ BUG();
+ break;
+ }
+ cksumout->len = kctx->gk5e->cksumlength;
+out:
+ crypto_free_hash(desc.tfm);
+ return err ? GSS_S_FAILURE : 0;
+}
+
+/*
+ * checksum the plaintext data and hdrlen bytes of the token header
+ * Per rfc4121, sec. 4.2.4, the checksum is performed over the data
+ * body then over the first 16 octets of the MIC token
+ * Inclusion of the header data in the calculation of the
+ * checksum is optional.
+ */
+u32
+make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
+ struct xdr_buf *body, int body_offset, u8 *cksumkey,
+ unsigned int usage, struct xdr_netobj *cksumout)
+{
+ struct hash_desc desc;
+ struct scatterlist sg[1];
+ int err;
+ u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
+ unsigned int checksumlen;
+
+ if (kctx->gk5e->keyed_cksum == 0) {
+ dprintk("%s: expected keyed hash for %s\n",
+ __func__, kctx->gk5e->name);
+ return GSS_S_FAILURE;
+ }
+ if (cksumkey == NULL) {
+ dprintk("%s: no key supplied for %s\n",
+ __func__, kctx->gk5e->name);
+ return GSS_S_FAILURE;
+ }
+
+ desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(desc.tfm))
+ return GSS_S_FAILURE;
+ checksumlen = crypto_hash_digestsize(desc.tfm);
+ desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ err = crypto_hash_setkey(desc.tfm, cksumkey, kctx->gk5e->keylength);
+ if (err)
+ goto out;
+
+ err = crypto_hash_init(&desc);
+ if (err)
+ goto out;
+ err = xdr_process_buf(body, body_offset, body->len - body_offset,
+ checksummer, &desc);
+ if (err)
+ goto out;
+ if (header != NULL) {
+ sg_init_one(sg, header, hdrlen);
+ err = crypto_hash_update(&desc, sg, hdrlen);
+ if (err)
+ goto out;
+ }
+ err = crypto_hash_final(&desc, checksumdata);
+ if (err)
+ goto out;
+
+ cksumout->len = kctx->gk5e->cksumlength;
+
+ switch (kctx->gk5e->ctype) {
+ case CKSUMTYPE_HMAC_SHA1_96_AES128:
+ case CKSUMTYPE_HMAC_SHA1_96_AES256:
+ /* note that this truncates the hash */
+ memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
+ break;
+ default:
+ BUG();
+ break;
+ }
out:
crypto_free_hash(desc.tfm);
return err ? GSS_S_FAILURE : 0;
}
struct encryptor_desc {
- u8 iv[8]; /* XXX hard-coded blocksize */
+ u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
struct blkcipher_desc desc;
int pos;
struct xdr_buf *outbuf;
@@ -198,7 +427,7 @@ encryptor(struct scatterlist *sg, void *data)
desc->fraglen += sg->length;
desc->pos += sg->length;
- fraglen = thislen & 7; /* XXX hardcoded blocksize */
+ fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1);
thislen -= fraglen;
if (thislen == 0)
@@ -256,7 +485,7 @@ gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
}
struct decryptor_desc {
- u8 iv[8]; /* XXX hard-coded blocksize */
+ u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
struct blkcipher_desc desc;
struct scatterlist frags[4];
int fragno;
@@ -278,7 +507,7 @@ decryptor(struct scatterlist *sg, void *data)
desc->fragno++;
desc->fraglen += sg->length;
- fraglen = thislen & 7; /* XXX hardcoded blocksize */
+ fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1);
thislen -= fraglen;
if (thislen == 0)
@@ -325,3 +554,437 @@ gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
}
+
+/*
+ * This function makes the assumption that it was ultimately called
+ * from gss_wrap().
+ *
+ * The client auth_gss code moves any existing tail data into a
+ * separate page before calling gss_wrap.
+ * The server svcauth_gss code ensures that both the head and the
+ * tail have slack space of RPC_MAX_AUTH_SIZE before calling gss_wrap.
+ *
+ * Even with that guarantee, this function may be called more than
+ * once in the processing of gss_wrap(). The best we can do is
+ * verify at compile-time (see GSS_KRB5_SLACK_CHECK) that the
+ * largest expected shift will fit within RPC_MAX_AUTH_SIZE.
+ * At run-time we can verify that a single invocation of this
+ * function doesn't attempt to use more than RPC_MAX_AUTH_SIZE.
+ */
+
+int
+xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
+{
+ u8 *p;
+
+ if (shiftlen == 0)
+ return 0;
+
+ BUILD_BUG_ON(GSS_KRB5_MAX_SLACK_NEEDED > RPC_MAX_AUTH_SIZE);
+ BUG_ON(shiftlen > RPC_MAX_AUTH_SIZE);
+
+ p = buf->head[0].iov_base + base;
+
+ memmove(p + shiftlen, p, buf->head[0].iov_len - base);
+
+ buf->head[0].iov_len += shiftlen;
+ buf->len += shiftlen;
+
+ return 0;
+}
+
+static u32
+gss_krb5_cts_crypt(struct crypto_blkcipher *cipher, struct xdr_buf *buf,
+ u32 offset, u8 *iv, struct page **pages, int encrypt)
+{
+ u32 ret;
+ struct scatterlist sg[1];
+ struct blkcipher_desc desc = { .tfm = cipher, .info = iv };
+ u8 data[crypto_blkcipher_blocksize(cipher) * 2];
+ struct page **save_pages;
+ u32 len = buf->len - offset;
+
+ BUG_ON(len > crypto_blkcipher_blocksize(cipher) * 2);
+
+ /*
+ * For encryption, we want to read from the cleartext
+ * page cache pages, and write the encrypted data to
+ * the supplied xdr_buf pages.
+ */
+ save_pages = buf->pages;
+ if (encrypt)
+ buf->pages = pages;
+
+ ret = read_bytes_from_xdr_buf(buf, offset, data, len);
+ buf->pages = save_pages;
+ if (ret)
+ goto out;
+
+ sg_init_one(sg, data, len);
+
+ if (encrypt)
+ ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, len);
+ else
+ ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, len);
+
+ if (ret)
+ goto out;
+
+ ret = write_bytes_to_xdr_buf(buf, offset, data, len);
+
+out:
+ return ret;
+}
+
+u32
+gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
+ struct xdr_buf *buf, int ec, struct page **pages)
+{
+ u32 err;
+ struct xdr_netobj hmac;
+ u8 *cksumkey;
+ u8 *ecptr;
+ struct crypto_blkcipher *cipher, *aux_cipher;
+ int blocksize;
+ struct page **save_pages;
+ int nblocks, nbytes;
+ struct encryptor_desc desc;
+ u32 cbcbytes;
+ unsigned int usage;
+
+ if (kctx->initiate) {
+ cipher = kctx->initiator_enc;
+ aux_cipher = kctx->initiator_enc_aux;
+ cksumkey = kctx->initiator_integ;
+ usage = KG_USAGE_INITIATOR_SEAL;
+ } else {
+ cipher = kctx->acceptor_enc;
+ aux_cipher = kctx->acceptor_enc_aux;
+ cksumkey = kctx->acceptor_integ;
+ usage = KG_USAGE_ACCEPTOR_SEAL;
+ }
+ blocksize = crypto_blkcipher_blocksize(cipher);
+
+ /* hide the gss token header and insert the confounder */
+ offset += GSS_KRB5_TOK_HDR_LEN;
+ if (xdr_extend_head(buf, offset, kctx->gk5e->conflen))
+ return GSS_S_FAILURE;
+ gss_krb5_make_confounder(buf->head[0].iov_base + offset, kctx->gk5e->conflen);
+ offset -= GSS_KRB5_TOK_HDR_LEN;
+
+ if (buf->tail[0].iov_base != NULL) {
+ ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len;
+ } else {
+ buf->tail[0].iov_base = buf->head[0].iov_base
+ + buf->head[0].iov_len;
+ buf->tail[0].iov_len = 0;
+ ecptr = buf->tail[0].iov_base;
+ }
+
+ memset(ecptr, 'X', ec);
+ buf->tail[0].iov_len += ec;
+ buf->len += ec;
+
+ /* copy plaintext gss token header after filler (if any) */
+ memcpy(ecptr + ec, buf->head[0].iov_base + offset,
+ GSS_KRB5_TOK_HDR_LEN);
+ buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN;
+ buf->len += GSS_KRB5_TOK_HDR_LEN;
+
+ /* Do the HMAC */
+ hmac.len = GSS_KRB5_MAX_CKSUM_LEN;
+ hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len;
+
+ /*
+ * When we are called, pages points to the real page cache
+ * data -- which we can't go and encrypt! buf->pages points
+ * to scratch pages which we are going to send off to the
+ * client/server. Swap in the plaintext pages to calculate
+ * the hmac.
+ */
+ save_pages = buf->pages;
+ buf->pages = pages;
+
+ err = make_checksum_v2(kctx, NULL, 0, buf,
+ offset + GSS_KRB5_TOK_HDR_LEN,
+ cksumkey, usage, &hmac);
+ buf->pages = save_pages;
+ if (err)
+ return GSS_S_FAILURE;
+
+ nbytes = buf->len - offset - GSS_KRB5_TOK_HDR_LEN;
+ nblocks = (nbytes + blocksize - 1) / blocksize;
+ cbcbytes = 0;
+ if (nblocks > 2)
+ cbcbytes = (nblocks - 2) * blocksize;
+
+ memset(desc.iv, 0, sizeof(desc.iv));
+
+ if (cbcbytes) {
+ desc.pos = offset + GSS_KRB5_TOK_HDR_LEN;
+ desc.fragno = 0;
+ desc.fraglen = 0;
+ desc.pages = pages;
+ desc.outbuf = buf;
+ desc.desc.info = desc.iv;
+ desc.desc.flags = 0;
+ desc.desc.tfm = aux_cipher;
+
+ sg_init_table(desc.infrags, 4);
+ sg_init_table(desc.outfrags, 4);
+
+ err = xdr_process_buf(buf, offset + GSS_KRB5_TOK_HDR_LEN,
+ cbcbytes, encryptor, &desc);
+ if (err)
+ goto out_err;
+ }
+
+ /* Make sure IV carries forward from any CBC results. */
+ err = gss_krb5_cts_crypt(cipher, buf,
+ offset + GSS_KRB5_TOK_HDR_LEN + cbcbytes,
+ desc.iv, pages, 1);
+ if (err) {
+ err = GSS_S_FAILURE;
+ goto out_err;
+ }
+
+ /* Now update buf to account for HMAC */
+ buf->tail[0].iov_len += kctx->gk5e->cksumlength;
+ buf->len += kctx->gk5e->cksumlength;
+
+out_err:
+ if (err)
+ err = GSS_S_FAILURE;
+ return err;
+}
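The arithmetic above splits the ciphertext between the two modes: everything up to the last two blocks goes through plain CBC (the aux_cipher), and the remainder is handed to gss_krb5_cts_crypt so the final partial block can be ciphertext-stolen. A quick userspace check of that split (AES block size assumed, byte counts hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned int blocksize = 16;	/* AES */
	unsigned int nbytes, nblocks, cbcbytes, ctsbytes;

	for (nbytes = 1; nbytes <= 80; nbytes += 13) {
		nblocks = (nbytes + blocksize - 1) / blocksize;
		cbcbytes = nblocks > 2 ? (nblocks - 2) * blocksize : 0;
		ctsbytes = nbytes - cbcbytes;
		printf("%2u bytes -> %u CBC + %u CTS\n",
		       nbytes, cbcbytes, ctsbytes);
	}
	return 0;
}

The CTS leftover always lands between one byte and two full blocks, which is exactly the bound the BUG_ON in gss_krb5_cts_crypt enforces.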
+
+u32
+gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
+ u32 *headskip, u32 *tailskip)
+{
+ struct xdr_buf subbuf;
+ u32 ret = 0;
+ u8 *cksum_key;
+ struct crypto_blkcipher *cipher, *aux_cipher;
+ struct xdr_netobj our_hmac_obj;
+ u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
+ u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
+ int nblocks, blocksize, cbcbytes;
+ struct decryptor_desc desc;
+ unsigned int usage;
+
+ if (kctx->initiate) {
+ cipher = kctx->acceptor_enc;
+ aux_cipher = kctx->acceptor_enc_aux;
+ cksum_key = kctx->acceptor_integ;
+ usage = KG_USAGE_ACCEPTOR_SEAL;
+ } else {
+ cipher = kctx->initiator_enc;
+ aux_cipher = kctx->initiator_enc_aux;
+ cksum_key = kctx->initiator_integ;
+ usage = KG_USAGE_INITIATOR_SEAL;
+ }
+ blocksize = crypto_blkcipher_blocksize(cipher);
+
+
+ /* create a segment skipping the header and leaving out the checksum */
+ xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
+ (buf->len - offset - GSS_KRB5_TOK_HDR_LEN -
+ kctx->gk5e->cksumlength));
+
+ nblocks = (subbuf.len + blocksize - 1) / blocksize;
+
+ cbcbytes = 0;
+ if (nblocks > 2)
+ cbcbytes = (nblocks - 2) * blocksize;
+
+ memset(desc.iv, 0, sizeof(desc.iv));
+
+ if (cbcbytes) {
+ desc.fragno = 0;
+ desc.fraglen = 0;
+ desc.desc.info = desc.iv;
+ desc.desc.flags = 0;
+ desc.desc.tfm = aux_cipher;
+
+ sg_init_table(desc.frags, 4);
+
+ ret = xdr_process_buf(&subbuf, 0, cbcbytes, decryptor, &desc);
+ if (ret)
+ goto out_err;
+ }
+
+ /* Make sure IV carries forward from any CBC results. */
+ ret = gss_krb5_cts_crypt(cipher, &subbuf, cbcbytes, desc.iv, NULL, 0);
+ if (ret)
+ goto out_err;
+
+
+ /* Calculate our hmac over the plaintext data */
+ our_hmac_obj.len = sizeof(our_hmac);
+ our_hmac_obj.data = our_hmac;
+
+ ret = make_checksum_v2(kctx, NULL, 0, &subbuf, 0,
+ cksum_key, usage, &our_hmac_obj);
+ if (ret)
+ goto out_err;
+
+ /* Get the packet's hmac value */
+ ret = read_bytes_from_xdr_buf(buf, buf->len - kctx->gk5e->cksumlength,
+ pkt_hmac, kctx->gk5e->cksumlength);
+ if (ret)
+ goto out_err;
+
+ if (memcmp(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
+ ret = GSS_S_BAD_SIG;
+ goto out_err;
+ }
+ *headskip = kctx->gk5e->conflen;
+ *tailskip = kctx->gk5e->cksumlength;
+out_err:
+ if (ret && ret != GSS_S_BAD_SIG)
+ ret = GSS_S_FAILURE;
+ return ret;
+}
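The final memcmp above compares the computed and received HMACs byte-by-byte and returns on the first mismatch. A constant-time comparison, in the spirit of a crypto_memneq-style accumulator, avoids leaking how many leading checksum bytes matched; a userspace sketch (not the kernel API):

#include <stdio.h>
#include <string.h>

/* Constant-time inequality test: nonzero iff a != b over n bytes. */
static int ct_memneq(const void *a, const void *b, size_t n)
{
	const unsigned char *pa = a, *pb = b;
	unsigned char acc = 0;
	size_t i;

	for (i = 0; i < n; i++)
		acc |= pa[i] ^ pb[i];
	return acc;
}

int main(void)
{
	unsigned char x[12] = "hmac-result", y[12] = "hmac-resulT";

	printf("%s\n", ct_memneq(x, y, 12) ? "mismatch" : "match");
	return 0;
}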
+
+/*
+ * Compute Kseq given the initial session key and the checksum.
+ * Set the key of the given cipher.
+ */
+int
+krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher,
+ unsigned char *cksum)
+{
+ struct crypto_hash *hmac;
+ struct hash_desc desc;
+ struct scatterlist sg[1];
+ u8 Kseq[GSS_KRB5_MAX_KEYLEN];
+ u32 zeroconstant = 0;
+ int err;
+
+ dprintk("%s: entered\n", __func__);
+
+ hmac = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(hmac)) {
+ dprintk("%s: error %ld, allocating hash '%s'\n",
+ __func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
+ return PTR_ERR(hmac);
+ }
+
+ desc.tfm = hmac;
+ desc.flags = 0;
+
+ err = crypto_hash_init(&desc);
+ if (err)
+ goto out_err;
+
+ /* Compute intermediate Kseq from session key */
+ err = crypto_hash_setkey(hmac, kctx->Ksess, kctx->gk5e->keylength);
+ if (err)
+ goto out_err;
+
+ sg_init_table(sg, 1);
+ sg_set_buf(sg, &zeroconstant, 4);
+
+ err = crypto_hash_digest(&desc, sg, 4, Kseq);
+ if (err)
+ goto out_err;
+
+ /* Compute final Kseq from the checksum and intermediate Kseq */
+ err = crypto_hash_setkey(hmac, Kseq, kctx->gk5e->keylength);
+ if (err)
+ goto out_err;
+
+ sg_set_buf(sg, cksum, 8);
+
+ err = crypto_hash_digest(&desc, sg, 8, Kseq);
+ if (err)
+ goto out_err;
+
+ err = crypto_blkcipher_setkey(cipher, Kseq, kctx->gk5e->keylength);
+ if (err)
+ goto out_err;
+
+ err = 0;
+
+out_err:
+ crypto_free_hash(hmac);
+ dprintk("%s: returning %d\n", __func__, err);
+ return err;
+}
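Both halves of the derivation above are one-shot HMAC-MD5 operations, so the same Kseq can be reproduced outside the kernel; a sketch using OpenSSL's HMAC() one-shot interface (dummy key and checksum; rc4-hmac's 16-byte key and 8-byte token checksum assumed, matching the kernel path above):

#include <stdio.h>
#include <openssl/hmac.h>
#include <openssl/evp.h>

int main(void)
{
	unsigned char ksess[16] = { 0 };	/* session key (dummy) */
	unsigned char cksum[8]  = { 0 };	/* token checksum (dummy) */
	unsigned char zero[4]   = { 0 };	/* the 32-bit zero constant */
	unsigned char t[16], kseq[16];
	unsigned int len, i;

	/* intermediate Kseq = HMAC-MD5(Ksess, 0x00000000) */
	HMAC(EVP_md5(), ksess, sizeof(ksess), zero, sizeof(zero), t, &len);
	/* final Kseq = HMAC-MD5(intermediate Kseq, checksum) */
	HMAC(EVP_md5(), t, sizeof(t), cksum, sizeof(cksum), kseq, &len);

	for (i = 0; i < sizeof(kseq); i++)
		printf("%02x", kseq[i]);
	printf("\n");
	return 0;
}

krb5_rc4_setup_enc_key below follows the same two-stage pattern, except that the session key is first XORed with 0xf0 and the second stage keys on the plaintext sequence number instead of the checksum.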
+
+/*
+ * Compute Kcrypt given the initial session key and the plaintext seqnum.
+ * Set the key of cipher kctx->enc.
+ */
+int
+krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher,
+ s32 seqnum)
+{
+ struct crypto_hash *hmac;
+ struct hash_desc desc;
+ struct scatterlist sg[1];
+ u8 Kcrypt[GSS_KRB5_MAX_KEYLEN];
+ u8 zeroconstant[4] = {0};
+ u8 seqnumarray[4];
+ int err, i;
+
+	dprintk("%s: entered, seqnum %d\n", __func__, seqnum);

+
+ hmac = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(hmac)) {
+ dprintk("%s: error %ld, allocating hash '%s'\n",
+ __func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
+ return PTR_ERR(hmac);
+ }
+
+ desc.tfm = hmac;
+ desc.flags = 0;
+
+ err = crypto_hash_init(&desc);
+ if (err)
+ goto out_err;
+
+ /* Compute intermediate Kcrypt from session key */
+ for (i = 0; i < kctx->gk5e->keylength; i++)
+ Kcrypt[i] = kctx->Ksess[i] ^ 0xf0;
+
+ err = crypto_hash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
+ if (err)
+ goto out_err;
+
+ sg_init_table(sg, 1);
+ sg_set_buf(sg, zeroconstant, 4);
+
+ err = crypto_hash_digest(&desc, sg, 4, Kcrypt);
+ if (err)
+ goto out_err;
+
+ /* Compute final Kcrypt from the seqnum and intermediate Kcrypt */
+ err = crypto_hash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
+ if (err)
+ goto out_err;
+
+ seqnumarray[0] = (unsigned char) ((seqnum >> 24) & 0xff);
+ seqnumarray[1] = (unsigned char) ((seqnum >> 16) & 0xff);
+ seqnumarray[2] = (unsigned char) ((seqnum >> 8) & 0xff);
+ seqnumarray[3] = (unsigned char) ((seqnum >> 0) & 0xff);
+
+ sg_set_buf(sg, seqnumarray, 4);
+
+ err = crypto_hash_digest(&desc, sg, 4, Kcrypt);
+ if (err)
+ goto out_err;
+
+ err = crypto_blkcipher_setkey(cipher, Kcrypt, kctx->gk5e->keylength);
+ if (err)
+ goto out_err;
+
+ err = 0;
+
+out_err:
+ crypto_free_hash(hmac);
+ dprintk("%s: returning %d\n", __func__, err);
+ return err;
+}
+
diff --git a/net/sunrpc/auth_gss/gss_krb5_keys.c b/net/sunrpc/auth_gss/gss_krb5_keys.c
new file mode 100644
index 000000000000..76e42e6be755
--- /dev/null
+++ b/net/sunrpc/auth_gss/gss_krb5_keys.c
@@ -0,0 +1,336 @@
+/*
+ * COPYRIGHT (c) 2008
+ * The Regents of the University of Michigan
+ * ALL RIGHTS RESERVED
+ *
+ * Permission is granted to use, copy, create derivative works
+ * and redistribute this software and such derivative works
+ * for any purpose, so long as the name of The University of
+ * Michigan is not used in any advertising or publicity
+ * pertaining to the use of distribution of this software
+ * without specific, written prior authorization. If the
+ * above copyright notice or any other identification of the
+ * University of Michigan is included in any copy of any
+ * portion of this software, then the disclaimer below must
+ * also be included.
+ *
+ * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
+ * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
+ * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
+ * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
+ * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
+ * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE
+ * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR
+ * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING
+ * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
+ * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGES.
+ */
+
+/*
+ * Copyright (C) 1998 by the FundsXpress, INC.
+ *
+ * All rights reserved.
+ *
+ * Export of this software from the United States of America may require
+ * a specific license from the United States Government. It is the
+ * responsibility of any person or organization contemplating export to
+ * obtain such a license before exporting.
+ *
+ * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
+ * distribute this software and its documentation for any purpose and
+ * without fee is hereby granted, provided that the above copyright
+ * notice appear in all copies and that both that copyright notice and
+ * this permission notice appear in supporting documentation, and that
+ * the name of FundsXpress. not be used in advertising or publicity pertaining
+ * to distribution of the software without specific, written prior
+ * permission. FundsXpress makes no representations about the suitability of
+ * this software for any purpose. It is provided "as is" without express
+ * or implied warranty.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <linux/sunrpc/gss_krb5.h>
+#include <linux/sunrpc/xdr.h>
+
+#ifdef RPC_DEBUG
+# define RPCDBG_FACILITY RPCDBG_AUTH
+#endif
+
+/*
+ * This is the n-fold function as described in rfc3961, sec 5.1
+ * Taken from MIT Kerberos and modified.
+ */
+
+static void krb5_nfold(u32 inbits, const u8 *in,
+ u32 outbits, u8 *out)
+{
+ int a, b, c, lcm;
+ int byte, i, msbit;
+
+ /* the code below is more readable if I make these bytes
+ instead of bits */
+
+ inbits >>= 3;
+ outbits >>= 3;
+
+ /* first compute lcm(n,k) */
+
+ a = outbits;
+ b = inbits;
+
+ while (b != 0) {
+ c = b;
+ b = a%b;
+ a = c;
+ }
+
+ lcm = outbits*inbits/a;
+
+ /* now do the real work */
+
+ memset(out, 0, outbits);
+ byte = 0;
+
+ /* this will end up cycling through k lcm(k,n)/k times, which
+ is correct */
+ for (i = lcm-1; i >= 0; i--) {
+ /* compute the msbit in k which gets added into this byte */
+ msbit = (
+ /* first, start with the msbit in the first,
+ * unrotated byte */
+ ((inbits << 3) - 1)
+ /* then, for each byte, shift to the right
+ * for each repetition */
+ + (((inbits << 3) + 13) * (i/inbits))
+ /* last, pick out the correct byte within
+ * that shifted repetition */
+ + ((inbits - (i % inbits)) << 3)
+ ) % (inbits << 3);
+
+ /* pull out the byte value itself */
+ byte += (((in[((inbits - 1) - (msbit >> 3)) % inbits] << 8)|
+ (in[((inbits) - (msbit >> 3)) % inbits]))
+ >> ((msbit & 7) + 1)) & 0xff;
+
+ /* do the addition */
+ byte += out[i % outbits];
+ out[i % outbits] = byte & 0xff;
+
+ /* keep around the carry bit, if any */
+ byte >>= 8;
+
+ }
+
+ /* if there's a carry bit left over, add it back in */
+ if (byte) {
+ for (i = outbits - 1; i >= 0; i--) {
+ /* do the addition */
+ byte += out[i];
+ out[i] = byte & 0xff;
+
+ /* keep around the carry bit, if any */
+ byte >>= 8;
+ }
+ }
+}
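The gcd loop at the top of krb5_nfold grounds the "cycling through k lcm(k,n)/k times" comment; for the common case in this file, a 5-octet usage constant folded to a 16-byte AES block, the numbers work out as below (a standalone check, sizes hypothetical):

#include <stdio.h>

int main(void)
{
	int inbytes = 5, outbytes = 16;	/* 5-octet constant -> AES block */
	int a = outbytes, b = inbytes, c, lcm;

	while (b != 0) {		/* Euclid: a ends up as the gcd */
		c = b;
		b = a % b;
		a = c;
	}
	lcm = outbytes * inbytes / a;
	printf("lcm = %d bytes; input repeats %d times\n", lcm, lcm / inbytes);
	return 0;
}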
+
+/*
+ * This is the DK (derive_key) function as described in rfc3961, sec 5.1
+ * Taken from MIT Kerberos and modified.
+ */
+
+u32 krb5_derive_key(const struct gss_krb5_enctype *gk5e,
+ const struct xdr_netobj *inkey,
+ struct xdr_netobj *outkey,
+ const struct xdr_netobj *in_constant,
+ gfp_t gfp_mask)
+{
+ size_t blocksize, keybytes, keylength, n;
+ unsigned char *inblockdata, *outblockdata, *rawkey;
+ struct xdr_netobj inblock, outblock;
+ struct crypto_blkcipher *cipher;
+ u32 ret = EINVAL;
+
+ blocksize = gk5e->blocksize;
+ keybytes = gk5e->keybytes;
+ keylength = gk5e->keylength;
+
+ if ((inkey->len != keylength) || (outkey->len != keylength))
+ goto err_return;
+
+ cipher = crypto_alloc_blkcipher(gk5e->encrypt_name, 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(cipher))
+ goto err_return;
+ if (crypto_blkcipher_setkey(cipher, inkey->data, inkey->len))
+ goto err_return;
+
+ /* allocate and set up buffers */
+
+ ret = ENOMEM;
+ inblockdata = kmalloc(blocksize, gfp_mask);
+ if (inblockdata == NULL)
+ goto err_free_cipher;
+
+ outblockdata = kmalloc(blocksize, gfp_mask);
+ if (outblockdata == NULL)
+ goto err_free_in;
+
+ rawkey = kmalloc(keybytes, gfp_mask);
+ if (rawkey == NULL)
+ goto err_free_out;
+
+ inblock.data = (char *) inblockdata;
+ inblock.len = blocksize;
+
+ outblock.data = (char *) outblockdata;
+ outblock.len = blocksize;
+
+ /* initialize the input block */
+
+ if (in_constant->len == inblock.len) {
+ memcpy(inblock.data, in_constant->data, inblock.len);
+ } else {
+ krb5_nfold(in_constant->len * 8, in_constant->data,
+ inblock.len * 8, inblock.data);
+ }
+
+ /* loop encrypting the blocks until enough key bytes are generated */
+
+ n = 0;
+ while (n < keybytes) {
+ (*(gk5e->encrypt))(cipher, NULL, inblock.data,
+ outblock.data, inblock.len);
+
+ if ((keybytes - n) <= outblock.len) {
+ memcpy(rawkey + n, outblock.data, (keybytes - n));
+ break;
+ }
+
+ memcpy(rawkey + n, outblock.data, outblock.len);
+ memcpy(inblock.data, outblock.data, outblock.len);
+ n += outblock.len;
+ }
+
+ /* postprocess the key */
+
+ inblock.data = (char *) rawkey;
+ inblock.len = keybytes;
+
+ BUG_ON(gk5e->mk_key == NULL);
+ ret = (*(gk5e->mk_key))(gk5e, &inblock, outkey);
+ if (ret) {
+ dprintk("%s: got %d from mk_key function for '%s'\n",
+ __func__, ret, gk5e->encrypt_name);
+ goto err_free_raw;
+ }
+
+ /* clean memory, free resources and exit */
+
+ ret = 0;
+
+err_free_raw:
+ memset(rawkey, 0, keybytes);
+ kfree(rawkey);
+err_free_out:
+ memset(outblockdata, 0, blocksize);
+ kfree(outblockdata);
+err_free_in:
+ memset(inblockdata, 0, blocksize);
+ kfree(inblockdata);
+err_free_cipher:
+ crypto_free_blkcipher(cipher);
+err_return:
+ return ret;
+}
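Stripped of the crypto API plumbing, the derivation loop above is iterated encryption of the (n-folded) constant, with each ciphertext block fed back as the next input and the outputs concatenated until keybytes have accumulated. A toy-cipher sketch of just that loop (the XOR "cipher" is a stand-in, not a real enctype; sizes chosen so the final partial copy is exercised):

#include <stdio.h>
#include <string.h>

#define BLK 16

/* stand-in for the enctype's raw encrypt: NOT a real cipher */
static void toy_encrypt(const unsigned char *in, unsigned char *out)
{
	int i;
	for (i = 0; i < BLK; i++)
		out[i] = in[i] ^ 0x5a;
}

int main(void)
{
	unsigned char inblock[BLK] = "constant-nfold!";
	unsigned char outblock[BLK], rawkey[21];	/* hypothetical keybytes */
	size_t n = 0;

	while (n < sizeof(rawkey)) {
		toy_encrypt(inblock, outblock);
		if (sizeof(rawkey) - n <= BLK) {
			memcpy(rawkey + n, outblock, sizeof(rawkey) - n);
			break;
		}
		memcpy(rawkey + n, outblock, BLK);
		memcpy(inblock, outblock, BLK);	/* feed back for next block */
		n += BLK;
	}
	printf("derived %zu raw key bytes\n", sizeof(rawkey));
	return 0;
}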
+
+#define smask(step) ((1<<step)-1)
+#define pstep(x, step) (((x)&smask(step))^(((x)>>step)&smask(step)))
+#define parity_char(x) pstep(pstep(pstep((x), 4), 2), 1)
+
+static void mit_des_fixup_key_parity(u8 key[8])
+{
+ int i;
+ for (i = 0; i < 8; i++) {
+ key[i] &= 0xfe;
+ key[i] |= 1^parity_char(key[i]);
+ }
+}
+
+/*
+ * This is the des3 key derivation postprocess function
+ */
+u32 gss_krb5_des3_make_key(const struct gss_krb5_enctype *gk5e,
+ struct xdr_netobj *randombits,
+ struct xdr_netobj *key)
+{
+ int i;
+ u32 ret = EINVAL;
+
+ if (key->len != 24) {
+ dprintk("%s: key->len is %d\n", __func__, key->len);
+ goto err_out;
+ }
+ if (randombits->len != 21) {
+ dprintk("%s: randombits->len is %d\n",
+ __func__, randombits->len);
+ goto err_out;
+ }
+
+ /* take the seven bytes, move them around into the top 7 bits of the
+ 8 key bytes, then compute the parity bits. Do this three times. */
+
+ for (i = 0; i < 3; i++) {
+ memcpy(key->data + i*8, randombits->data + i*7, 7);
+ key->data[i*8+7] = (((key->data[i*8]&1)<<1) |
+ ((key->data[i*8+1]&1)<<2) |
+ ((key->data[i*8+2]&1)<<3) |
+ ((key->data[i*8+3]&1)<<4) |
+ ((key->data[i*8+4]&1)<<5) |
+ ((key->data[i*8+5]&1)<<6) |
+ ((key->data[i*8+6]&1)<<7));
+
+ mit_des_fixup_key_parity(key->data + i*8);
+ }
+ ret = 0;
+err_out:
+ return ret;
+}
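The expansion above packs the stray low bits of each 7-byte group into an eighth byte and then forces odd parity on all eight; a userspace sketch of a single group, reusing the parity macros from above (input bytes hypothetical):

#include <stdio.h>
#include <string.h>

#define smask(step) ((1<<step)-1)
#define pstep(x, step) (((x)&smask(step))^(((x)>>step)&smask(step)))
#define parity_char(x) pstep(pstep(pstep((x), 4), 2), 1)

int main(void)
{
	unsigned char rnd[7] = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd };
	unsigned char key[8];
	int i;

	memcpy(key, rnd, 7);
	key[7] = ((key[0] & 1) << 1) | ((key[1] & 1) << 2) |
		 ((key[2] & 1) << 3) | ((key[3] & 1) << 4) |
		 ((key[4] & 1) << 5) | ((key[5] & 1) << 6) |
		 ((key[6] & 1) << 7);

	for (i = 0; i < 8; i++) {		/* force odd parity */
		key[i] &= 0xfe;
		key[i] |= 1 ^ parity_char(key[i]);
	}
	for (i = 0; i < 8; i++)
		printf("%02x", key[i]);
	printf("\n");
	return 0;
}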
+
+/*
+ * This is the aes key derivation postprocess function
+ */
+u32 gss_krb5_aes_make_key(const struct gss_krb5_enctype *gk5e,
+ struct xdr_netobj *randombits,
+ struct xdr_netobj *key)
+{
+ u32 ret = EINVAL;
+
+ if (key->len != 16 && key->len != 32) {
+ dprintk("%s: key->len is %d\n", __func__, key->len);
+ goto err_out;
+ }
+ if (randombits->len != 16 && randombits->len != 32) {
+ dprintk("%s: randombits->len is %d\n",
+ __func__, randombits->len);
+ goto err_out;
+ }
+ if (randombits->len != key->len) {
+ dprintk("%s: randombits->len is %d, key->len is %d\n",
+ __func__, randombits->len, key->len);
+ goto err_out;
+ }
+ memcpy(key->data, randombits->data, key->len);
+ ret = 0;
+err_out:
+ return ret;
+}
+
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 2deb0ed72ff4..032644610524 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -1,7 +1,7 @@
/*
* linux/net/sunrpc/gss_krb5_mech.c
*
- * Copyright (c) 2001 The Regents of the University of Michigan.
+ * Copyright (c) 2001-2008 The Regents of the University of Michigan.
* All rights reserved.
*
* Andy Adamson <andros@umich.edu>
@@ -48,6 +48,143 @@
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif
+static struct gss_api_mech gss_kerberos_mech; /* forward declaration */
+
+static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = {
+ /*
+ * DES (All DES enctypes are mapped to the same gss functionality)
+ */
+ {
+ .etype = ENCTYPE_DES_CBC_RAW,
+ .ctype = CKSUMTYPE_RSA_MD5,
+ .name = "des-cbc-crc",
+ .encrypt_name = "cbc(des)",
+ .cksum_name = "md5",
+ .encrypt = krb5_encrypt,
+ .decrypt = krb5_decrypt,
+ .mk_key = NULL,
+ .signalg = SGN_ALG_DES_MAC_MD5,
+ .sealalg = SEAL_ALG_DES,
+ .keybytes = 7,
+ .keylength = 8,
+ .blocksize = 8,
+ .conflen = 8,
+ .cksumlength = 8,
+ .keyed_cksum = 0,
+ },
+ /*
+ * RC4-HMAC
+ */
+ {
+ .etype = ENCTYPE_ARCFOUR_HMAC,
+ .ctype = CKSUMTYPE_HMAC_MD5_ARCFOUR,
+ .name = "rc4-hmac",
+ .encrypt_name = "ecb(arc4)",
+ .cksum_name = "hmac(md5)",
+ .encrypt = krb5_encrypt,
+ .decrypt = krb5_decrypt,
+ .mk_key = NULL,
+ .signalg = SGN_ALG_HMAC_MD5,
+ .sealalg = SEAL_ALG_MICROSOFT_RC4,
+ .keybytes = 16,
+ .keylength = 16,
+ .blocksize = 1,
+ .conflen = 8,
+ .cksumlength = 8,
+ .keyed_cksum = 1,
+ },
+ /*
+ * 3DES
+ */
+ {
+ .etype = ENCTYPE_DES3_CBC_RAW,
+ .ctype = CKSUMTYPE_HMAC_SHA1_DES3,
+ .name = "des3-hmac-sha1",
+ .encrypt_name = "cbc(des3_ede)",
+ .cksum_name = "hmac(sha1)",
+ .encrypt = krb5_encrypt,
+ .decrypt = krb5_decrypt,
+ .mk_key = gss_krb5_des3_make_key,
+ .signalg = SGN_ALG_HMAC_SHA1_DES3_KD,
+ .sealalg = SEAL_ALG_DES3KD,
+ .keybytes = 21,
+ .keylength = 24,
+ .blocksize = 8,
+ .conflen = 8,
+ .cksumlength = 20,
+ .keyed_cksum = 1,
+ },
+ /*
+ * AES128
+ */
+ {
+ .etype = ENCTYPE_AES128_CTS_HMAC_SHA1_96,
+ .ctype = CKSUMTYPE_HMAC_SHA1_96_AES128,
+ .name = "aes128-cts",
+ .encrypt_name = "cts(cbc(aes))",
+ .cksum_name = "hmac(sha1)",
+ .encrypt = krb5_encrypt,
+ .decrypt = krb5_decrypt,
+ .mk_key = gss_krb5_aes_make_key,
+ .encrypt_v2 = gss_krb5_aes_encrypt,
+ .decrypt_v2 = gss_krb5_aes_decrypt,
+ .signalg = -1,
+ .sealalg = -1,
+ .keybytes = 16,
+ .keylength = 16,
+ .blocksize = 16,
+ .conflen = 16,
+ .cksumlength = 12,
+ .keyed_cksum = 1,
+ },
+ /*
+ * AES256
+ */
+ {
+ .etype = ENCTYPE_AES256_CTS_HMAC_SHA1_96,
+ .ctype = CKSUMTYPE_HMAC_SHA1_96_AES256,
+ .name = "aes256-cts",
+ .encrypt_name = "cts(cbc(aes))",
+ .cksum_name = "hmac(sha1)",
+ .encrypt = krb5_encrypt,
+ .decrypt = krb5_decrypt,
+ .mk_key = gss_krb5_aes_make_key,
+ .encrypt_v2 = gss_krb5_aes_encrypt,
+ .decrypt_v2 = gss_krb5_aes_decrypt,
+ .signalg = -1,
+ .sealalg = -1,
+ .keybytes = 32,
+ .keylength = 32,
+ .blocksize = 16,
+ .conflen = 16,
+ .cksumlength = 12,
+ .keyed_cksum = 1,
+ },
+};
+
+static const int num_supported_enctypes =
+ ARRAY_SIZE(supported_gss_krb5_enctypes);
+
+static int
+supported_gss_krb5_enctype(int etype)
+{
+ int i;
+ for (i = 0; i < num_supported_enctypes; i++)
+ if (supported_gss_krb5_enctypes[i].etype == etype)
+ return 1;
+ return 0;
+}
+
+static const struct gss_krb5_enctype *
+get_gss_krb5_enctype(int etype)
+{
+ int i;
+ for (i = 0; i < num_supported_enctypes; i++)
+ if (supported_gss_krb5_enctypes[i].etype == etype)
+ return &supported_gss_krb5_enctypes[i];
+ return NULL;
+}
+
static const void *
simple_get_bytes(const void *p, const void *end, void *res, int len)
{
@@ -78,35 +215,45 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
}
static inline const void *
-get_key(const void *p, const void *end, struct crypto_blkcipher **res)
+get_key(const void *p, const void *end,
+ struct krb5_ctx *ctx, struct crypto_blkcipher **res)
{
struct xdr_netobj key;
int alg;
- char *alg_name;
p = simple_get_bytes(p, end, &alg, sizeof(alg));
if (IS_ERR(p))
goto out_err;
+
+ switch (alg) {
+ case ENCTYPE_DES_CBC_CRC:
+ case ENCTYPE_DES_CBC_MD4:
+ case ENCTYPE_DES_CBC_MD5:
+ /* Map all these key types to ENCTYPE_DES_CBC_RAW */
+ alg = ENCTYPE_DES_CBC_RAW;
+ break;
+ }
+
+ if (!supported_gss_krb5_enctype(alg)) {
+ printk(KERN_WARNING "gss_kerberos_mech: unsupported "
+ "encryption key algorithm %d\n", alg);
+ goto out_err;
+ }
p = simple_get_netobj(p, end, &key);
if (IS_ERR(p))
goto out_err;
- switch (alg) {
- case ENCTYPE_DES_CBC_RAW:
- alg_name = "cbc(des)";
- break;
- default:
- printk("gss_kerberos_mech: unsupported algorithm %d\n", alg);
- goto out_err_free_key;
- }
- *res = crypto_alloc_blkcipher(alg_name, 0, CRYPTO_ALG_ASYNC);
+ *res = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0,
+ CRYPTO_ALG_ASYNC);
if (IS_ERR(*res)) {
- printk("gss_kerberos_mech: unable to initialize crypto algorithm %s\n", alg_name);
+ printk(KERN_WARNING "gss_kerberos_mech: unable to initialize "
+ "crypto algorithm %s\n", ctx->gk5e->encrypt_name);
*res = NULL;
goto out_err_free_key;
}
if (crypto_blkcipher_setkey(*res, key.data, key.len)) {
- printk("gss_kerberos_mech: error setting key for crypto algorithm %s\n", alg_name);
+ printk(KERN_WARNING "gss_kerberos_mech: error setting key for "
+ "crypto algorithm %s\n", ctx->gk5e->encrypt_name);
goto out_err_free_tfm;
}
@@ -123,56 +270,55 @@ out_err:
}
static int
-gss_import_sec_context_kerberos(const void *p,
- size_t len,
- struct gss_ctx *ctx_id)
+gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx)
{
- const void *end = (const void *)((const char *)p + len);
- struct krb5_ctx *ctx;
int tmp;
- if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS))) {
- p = ERR_PTR(-ENOMEM);
- goto out_err;
- }
-
p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate));
if (IS_ERR(p))
- goto out_err_free_ctx;
+ goto out_err;
+
+ /* Old format supports only DES! Any other enctype uses new format */
+ ctx->enctype = ENCTYPE_DES_CBC_RAW;
+
+ ctx->gk5e = get_gss_krb5_enctype(ctx->enctype);
+ if (ctx->gk5e == NULL)
+ goto out_err;
+
/* The downcall format was designed before we completely understood
* the uses of the context fields; so it includes some stuff we
* just give some minimal sanity-checking, and some we ignore
* completely (like the next twenty bytes): */
if (unlikely(p + 20 > end || p + 20 < p))
- goto out_err_free_ctx;
+ goto out_err;
p += 20;
p = simple_get_bytes(p, end, &tmp, sizeof(tmp));
if (IS_ERR(p))
- goto out_err_free_ctx;
+ goto out_err;
if (tmp != SGN_ALG_DES_MAC_MD5) {
p = ERR_PTR(-ENOSYS);
- goto out_err_free_ctx;
+ goto out_err;
}
p = simple_get_bytes(p, end, &tmp, sizeof(tmp));
if (IS_ERR(p))
- goto out_err_free_ctx;
+ goto out_err;
if (tmp != SEAL_ALG_DES) {
p = ERR_PTR(-ENOSYS);
- goto out_err_free_ctx;
+ goto out_err;
}
p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));
if (IS_ERR(p))
- goto out_err_free_ctx;
+ goto out_err;
p = simple_get_bytes(p, end, &ctx->seq_send, sizeof(ctx->seq_send));
if (IS_ERR(p))
- goto out_err_free_ctx;
+ goto out_err;
p = simple_get_netobj(p, end, &ctx->mech_used);
if (IS_ERR(p))
- goto out_err_free_ctx;
- p = get_key(p, end, &ctx->enc);
+ goto out_err;
+ p = get_key(p, end, ctx, &ctx->enc);
if (IS_ERR(p))
goto out_err_free_mech;
- p = get_key(p, end, &ctx->seq);
+ p = get_key(p, end, ctx, &ctx->seq);
if (IS_ERR(p))
goto out_err_free_key1;
if (p != end) {
@@ -180,9 +326,6 @@ gss_import_sec_context_kerberos(const void *p,
goto out_err_free_key2;
}
- ctx_id->internal_ctx_id = ctx;
-
- dprintk("RPC: Successfully imported new context.\n");
return 0;
out_err_free_key2:
@@ -191,18 +334,378 @@ out_err_free_key1:
crypto_free_blkcipher(ctx->enc);
out_err_free_mech:
kfree(ctx->mech_used.data);
-out_err_free_ctx:
- kfree(ctx);
out_err:
return PTR_ERR(p);
}
+struct crypto_blkcipher *
+context_v2_alloc_cipher(struct krb5_ctx *ctx, const char *cname, u8 *key)
+{
+ struct crypto_blkcipher *cp;
+
+ cp = crypto_alloc_blkcipher(cname, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(cp)) {
+ dprintk("gss_kerberos_mech: unable to initialize "
+ "crypto algorithm %s\n", cname);
+ return NULL;
+ }
+ if (crypto_blkcipher_setkey(cp, key, ctx->gk5e->keylength)) {
+ dprintk("gss_kerberos_mech: error setting key for "
+ "crypto algorithm %s\n", cname);
+ crypto_free_blkcipher(cp);
+ return NULL;
+ }
+ return cp;
+}
+
+static inline void
+set_cdata(u8 cdata[GSS_KRB5_K5CLENGTH], u32 usage, u8 seed)
+{
+ cdata[0] = (usage>>24)&0xff;
+ cdata[1] = (usage>>16)&0xff;
+ cdata[2] = (usage>>8)&0xff;
+ cdata[3] = usage&0xff;
+ cdata[4] = seed;
+}
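Per RFC 3961, the derivation constant built here is the 32-bit key-usage number in big-endian order followed by a single seed octet (0x99 for checksum, 0xAA for encryption, 0x55 for integrity); RFC 4121 assigns usage numbers 22 through 25 to the acceptor/initiator seal and sign keys. A sketch printing the constant for initiator seal (usage 24) with the encryption seed, assuming those RFC values stand behind the KG_USAGE_* and KEY_USAGE_SEED_* macros:

#include <stdio.h>

static void set_cdata(unsigned char cdata[5], unsigned int usage,
		      unsigned char seed)
{
	cdata[0] = (usage >> 24) & 0xff;
	cdata[1] = (usage >> 16) & 0xff;
	cdata[2] = (usage >> 8) & 0xff;
	cdata[3] = usage & 0xff;
	cdata[4] = seed;
}

int main(void)
{
	unsigned char c[5];
	int i;

	set_cdata(c, 24, 0xAA);		/* initiator seal, encryption seed */
	for (i = 0; i < 5; i++)
		printf("%02x", c[i]);	/* 00000018aa */
	printf("\n");
	return 0;
}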
+
+static int
+context_derive_keys_des3(struct krb5_ctx *ctx, gfp_t gfp_mask)
+{
+ struct xdr_netobj c, keyin, keyout;
+ u8 cdata[GSS_KRB5_K5CLENGTH];
+ u32 err;
+
+ c.len = GSS_KRB5_K5CLENGTH;
+ c.data = cdata;
+
+ keyin.data = ctx->Ksess;
+ keyin.len = ctx->gk5e->keylength;
+ keyout.len = ctx->gk5e->keylength;
+
+ /* seq uses the raw key */
+ ctx->seq = context_v2_alloc_cipher(ctx, ctx->gk5e->encrypt_name,
+ ctx->Ksess);
+ if (ctx->seq == NULL)
+ goto out_err;
+
+ ctx->enc = context_v2_alloc_cipher(ctx, ctx->gk5e->encrypt_name,
+ ctx->Ksess);
+ if (ctx->enc == NULL)
+ goto out_free_seq;
+
+ /* derive cksum */
+ set_cdata(cdata, KG_USAGE_SIGN, KEY_USAGE_SEED_CHECKSUM);
+ keyout.data = ctx->cksum;
+ err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
+ if (err) {
+ dprintk("%s: Error %d deriving cksum key\n",
+ __func__, err);
+ goto out_free_enc;
+ }
+
+ return 0;
+
+out_free_enc:
+ crypto_free_blkcipher(ctx->enc);
+out_free_seq:
+ crypto_free_blkcipher(ctx->seq);
+out_err:
+ return -EINVAL;
+}
+
+/*
+ * Note that RC4 depends on deriving keys using the sequence
+ * number or the checksum of a token. Therefore, the final keys
+ * cannot be calculated until the token is being constructed!
+ */
+static int
+context_derive_keys_rc4(struct krb5_ctx *ctx)
+{
+ struct crypto_hash *hmac;
+ char sigkeyconstant[] = "signaturekey";
+ int slen = strlen(sigkeyconstant) + 1; /* include null terminator */
+ struct hash_desc desc;
+ struct scatterlist sg[1];
+ int err;
+
+ dprintk("RPC: %s: entered\n", __func__);
+ /*
+ * derive cksum (aka Ksign) key
+ */
+ hmac = crypto_alloc_hash(ctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(hmac)) {
+ dprintk("%s: error %ld allocating hash '%s'\n",
+ __func__, PTR_ERR(hmac), ctx->gk5e->cksum_name);
+ err = PTR_ERR(hmac);
+ goto out_err;
+ }
+
+ err = crypto_hash_setkey(hmac, ctx->Ksess, ctx->gk5e->keylength);
+ if (err)
+ goto out_err_free_hmac;
+
+ sg_init_table(sg, 1);
+ sg_set_buf(sg, sigkeyconstant, slen);
+
+ desc.tfm = hmac;
+ desc.flags = 0;
+
+ err = crypto_hash_init(&desc);
+ if (err)
+ goto out_err_free_hmac;
+
+ err = crypto_hash_digest(&desc, sg, slen, ctx->cksum);
+ if (err)
+ goto out_err_free_hmac;
+ /*
+ * allocate hash, and blkciphers for data and seqnum encryption
+ */
+ ctx->enc = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ctx->enc)) {
+ err = PTR_ERR(ctx->enc);
+ goto out_err_free_hmac;
+ }
+
+ ctx->seq = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ctx->seq)) {
+ crypto_free_blkcipher(ctx->enc);
+ err = PTR_ERR(ctx->seq);
+ goto out_err_free_hmac;
+ }
+
+ dprintk("RPC: %s: returning success\n", __func__);
+
+ err = 0;
+
+out_err_free_hmac:
+ crypto_free_hash(hmac);
+out_err:
+ dprintk("RPC: %s: returning %d\n", __func__, err);
+ return err;
+}
+
+static int
+context_derive_keys_new(struct krb5_ctx *ctx, gfp_t gfp_mask)
+{
+ struct xdr_netobj c, keyin, keyout;
+ u8 cdata[GSS_KRB5_K5CLENGTH];
+ u32 err;
+
+ c.len = GSS_KRB5_K5CLENGTH;
+ c.data = cdata;
+
+ keyin.data = ctx->Ksess;
+ keyin.len = ctx->gk5e->keylength;
+ keyout.len = ctx->gk5e->keylength;
+
+ /* initiator seal encryption */
+ set_cdata(cdata, KG_USAGE_INITIATOR_SEAL, KEY_USAGE_SEED_ENCRYPTION);
+ keyout.data = ctx->initiator_seal;
+ err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
+ if (err) {
+ dprintk("%s: Error %d deriving initiator_seal key\n",
+ __func__, err);
+ goto out_err;
+ }
+ ctx->initiator_enc = context_v2_alloc_cipher(ctx,
+ ctx->gk5e->encrypt_name,
+ ctx->initiator_seal);
+ if (ctx->initiator_enc == NULL)
+ goto out_err;
+
+ /* acceptor seal encryption */
+ set_cdata(cdata, KG_USAGE_ACCEPTOR_SEAL, KEY_USAGE_SEED_ENCRYPTION);
+ keyout.data = ctx->acceptor_seal;
+ err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
+ if (err) {
+ dprintk("%s: Error %d deriving acceptor_seal key\n",
+ __func__, err);
+ goto out_free_initiator_enc;
+ }
+ ctx->acceptor_enc = context_v2_alloc_cipher(ctx,
+ ctx->gk5e->encrypt_name,
+ ctx->acceptor_seal);
+ if (ctx->acceptor_enc == NULL)
+ goto out_free_initiator_enc;
+
+ /* initiator sign checksum */
+ set_cdata(cdata, KG_USAGE_INITIATOR_SIGN, KEY_USAGE_SEED_CHECKSUM);
+ keyout.data = ctx->initiator_sign;
+ err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
+ if (err) {
+ dprintk("%s: Error %d deriving initiator_sign key\n",
+ __func__, err);
+ goto out_free_acceptor_enc;
+ }
+
+ /* acceptor sign checksum */
+ set_cdata(cdata, KG_USAGE_ACCEPTOR_SIGN, KEY_USAGE_SEED_CHECKSUM);
+ keyout.data = ctx->acceptor_sign;
+ err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
+ if (err) {
+ dprintk("%s: Error %d deriving acceptor_sign key\n",
+ __func__, err);
+ goto out_free_acceptor_enc;
+ }
+
+ /* initiator seal integrity */
+ set_cdata(cdata, KG_USAGE_INITIATOR_SEAL, KEY_USAGE_SEED_INTEGRITY);
+ keyout.data = ctx->initiator_integ;
+ err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
+ if (err) {
+ dprintk("%s: Error %d deriving initiator_integ key\n",
+ __func__, err);
+ goto out_free_acceptor_enc;
+ }
+
+ /* acceptor seal integrity */
+ set_cdata(cdata, KG_USAGE_ACCEPTOR_SEAL, KEY_USAGE_SEED_INTEGRITY);
+ keyout.data = ctx->acceptor_integ;
+ err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
+ if (err) {
+ dprintk("%s: Error %d deriving acceptor_integ key\n",
+ __func__, err);
+ goto out_free_acceptor_enc;
+ }
+
+ switch (ctx->enctype) {
+ case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
+ case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
+ ctx->initiator_enc_aux =
+ context_v2_alloc_cipher(ctx, "cbc(aes)",
+ ctx->initiator_seal);
+ if (ctx->initiator_enc_aux == NULL)
+ goto out_free_acceptor_enc;
+ ctx->acceptor_enc_aux =
+ context_v2_alloc_cipher(ctx, "cbc(aes)",
+ ctx->acceptor_seal);
+ if (ctx->acceptor_enc_aux == NULL) {
+ crypto_free_blkcipher(ctx->initiator_enc_aux);
+ goto out_free_acceptor_enc;
+ }
+ }
+
+ return 0;
+
+out_free_acceptor_enc:
+ crypto_free_blkcipher(ctx->acceptor_enc);
+out_free_initiator_enc:
+ crypto_free_blkcipher(ctx->initiator_enc);
+out_err:
+ return -EINVAL;
+}
+
+static int
+gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx,
+ gfp_t gfp_mask)
+{
+ int keylen;
+
+ p = simple_get_bytes(p, end, &ctx->flags, sizeof(ctx->flags));
+ if (IS_ERR(p))
+ goto out_err;
+ ctx->initiate = ctx->flags & KRB5_CTX_FLAG_INITIATOR;
+
+ p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));
+ if (IS_ERR(p))
+ goto out_err;
+ p = simple_get_bytes(p, end, &ctx->seq_send64, sizeof(ctx->seq_send64));
+ if (IS_ERR(p))
+ goto out_err;
+ /* set seq_send for use by "older" enctypes */
+ ctx->seq_send = ctx->seq_send64;
+ if (ctx->seq_send64 != ctx->seq_send) {
+		dprintk("%s: seq_send64 %lx, seq_send %x overflow?\n", __func__,
+			(unsigned long)ctx->seq_send64, ctx->seq_send);
+ goto out_err;
+ }
+ p = simple_get_bytes(p, end, &ctx->enctype, sizeof(ctx->enctype));
+ if (IS_ERR(p))
+ goto out_err;
+ /* Map ENCTYPE_DES3_CBC_SHA1 to ENCTYPE_DES3_CBC_RAW */
+ if (ctx->enctype == ENCTYPE_DES3_CBC_SHA1)
+ ctx->enctype = ENCTYPE_DES3_CBC_RAW;
+ ctx->gk5e = get_gss_krb5_enctype(ctx->enctype);
+ if (ctx->gk5e == NULL) {
+ dprintk("gss_kerberos_mech: unsupported krb5 enctype %u\n",
+ ctx->enctype);
+ p = ERR_PTR(-EINVAL);
+ goto out_err;
+ }
+ keylen = ctx->gk5e->keylength;
+
+ p = simple_get_bytes(p, end, ctx->Ksess, keylen);
+ if (IS_ERR(p))
+ goto out_err;
+
+ if (p != end) {
+ p = ERR_PTR(-EINVAL);
+ goto out_err;
+ }
+
+ ctx->mech_used.data = kmemdup(gss_kerberos_mech.gm_oid.data,
+ gss_kerberos_mech.gm_oid.len, gfp_mask);
+ if (unlikely(ctx->mech_used.data == NULL)) {
+ p = ERR_PTR(-ENOMEM);
+ goto out_err;
+ }
+ ctx->mech_used.len = gss_kerberos_mech.gm_oid.len;
+
+ switch (ctx->enctype) {
+ case ENCTYPE_DES3_CBC_RAW:
+ return context_derive_keys_des3(ctx, gfp_mask);
+ case ENCTYPE_ARCFOUR_HMAC:
+ return context_derive_keys_rc4(ctx);
+ case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
+ case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
+ return context_derive_keys_new(ctx, gfp_mask);
+ default:
+ return -EINVAL;
+ }
+
+out_err:
+ return PTR_ERR(p);
+}
+
+static int
+gss_import_sec_context_kerberos(const void *p, size_t len,
+ struct gss_ctx *ctx_id,
+ gfp_t gfp_mask)
+{
+ const void *end = (const void *)((const char *)p + len);
+ struct krb5_ctx *ctx;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), gfp_mask);
+ if (ctx == NULL)
+ return -ENOMEM;
+
+ if (len == 85)
+ ret = gss_import_v1_context(p, end, ctx);
+ else
+ ret = gss_import_v2_context(p, end, ctx, gfp_mask);
+
+ if (ret == 0)
+ ctx_id->internal_ctx_id = ctx;
+ else
+ kfree(ctx);
+
+ dprintk("RPC: %s: returning %d\n", __func__, ret);
+ return ret;
+}
+
static void
gss_delete_sec_context_kerberos(void *internal_ctx) {
struct krb5_ctx *kctx = internal_ctx;
crypto_free_blkcipher(kctx->seq);
crypto_free_blkcipher(kctx->enc);
+ crypto_free_blkcipher(kctx->acceptor_enc);
+ crypto_free_blkcipher(kctx->initiator_enc);
+ crypto_free_blkcipher(kctx->acceptor_enc_aux);
+ crypto_free_blkcipher(kctx->initiator_enc_aux);
kfree(kctx->mech_used.data);
kfree(kctx);
}
@@ -241,6 +744,7 @@ static struct gss_api_mech gss_kerberos_mech = {
.gm_ops = &gss_kerberos_ops,
.gm_pf_num = ARRAY_SIZE(gss_kerberos_pfs),
.gm_pfs = gss_kerberos_pfs,
+ .gm_upcall_enctypes = "enctypes=18,17,16,23,3,1,2 ",
};
static int __init init_kerberos_module(void)
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c
index 88fe6e75ed7e..d7941eab7796 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seal.c
@@ -3,7 +3,7 @@
*
* Adapted from MIT Kerberos 5-1.2.1 lib/gssapi/krb5/k5seal.c
*
- * Copyright (c) 2000 The Regents of the University of Michigan.
+ * Copyright (c) 2000-2008 The Regents of the University of Michigan.
* All rights reserved.
*
* Andy Adamson <andros@umich.edu>
@@ -70,53 +70,154 @@
DEFINE_SPINLOCK(krb5_seq_lock);
-u32
-gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
+static char *
+setup_token(struct krb5_ctx *ctx, struct xdr_netobj *token)
+{
+ __be16 *ptr, *krb5_hdr;
+ int body_size = GSS_KRB5_TOK_HDR_LEN + ctx->gk5e->cksumlength;
+
+ token->len = g_token_size(&ctx->mech_used, body_size);
+
+ ptr = (__be16 *)token->data;
+ g_make_token_header(&ctx->mech_used, body_size, (unsigned char **)&ptr);
+
+ /* ptr now at start of header described in rfc 1964, section 1.2.1: */
+ krb5_hdr = ptr;
+ *ptr++ = KG_TOK_MIC_MSG;
+ *ptr++ = cpu_to_le16(ctx->gk5e->signalg);
+ *ptr++ = SEAL_ALG_NONE;
+ *ptr++ = 0xffff;
+
+ return (char *)krb5_hdr;
+}
+
+static void *
+setup_token_v2(struct krb5_ctx *ctx, struct xdr_netobj *token)
+{
+ __be16 *ptr, *krb5_hdr;
+ u8 *p, flags = 0x00;
+
+ if ((ctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0)
+ flags |= 0x01;
+ if (ctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY)
+ flags |= 0x04;
+
+	/* Per rfc 4121, sec 4.2.6.1, there is no krb5 header;
+	 * the token begins immediately */
+ krb5_hdr = ptr = (__be16 *)token->data;
+
+ *ptr++ = KG2_TOK_MIC;
+ p = (u8 *)ptr;
+ *p++ = flags;
+ *p++ = 0xff;
+ ptr = (__be16 *)p;
+ *ptr++ = 0xffff;
+ *ptr++ = 0xffff;
+
+ token->len = GSS_KRB5_TOK_HDR_LEN + ctx->gk5e->cksumlength;
+ return krb5_hdr;
+}
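The 16 bytes written above follow the RFC 4121 MIC token layout: a 2-byte TOK_ID (0x0404), one flags byte, five 0xff filler bytes, and an 8-byte big-endian sequence number that gss_get_mic_v2 fills in afterwards. A byte-level sketch (flags value hypothetical):

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char hdr[16];
	unsigned long long seq = 7;	/* example sequence number */
	int i;

	hdr[0] = 0x04; hdr[1] = 0x04;	/* TOK_ID: MIC token (RFC 4121) */
	hdr[2] = 0x01;			/* flags: SentByAcceptor */
	memset(hdr + 3, 0xff, 5);	/* filler */
	for (i = 0; i < 8; i++)		/* SND_SEQ, big-endian */
		hdr[8 + i] = (seq >> (56 - 8 * i)) & 0xff;

	for (i = 0; i < 16; i++)
		printf("%02x", hdr[i]);
	printf("\n");	/* 040401ffffffffff0000000000000007 */
	return 0;
}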
+
+static u32
+gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text,
struct xdr_netobj *token)
{
- struct krb5_ctx *ctx = gss_ctx->internal_ctx_id;
- char cksumdata[16];
- struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
- unsigned char *ptr, *msg_start;
+ char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
+ struct xdr_netobj md5cksum = {.len = sizeof(cksumdata),
+ .data = cksumdata};
+ void *ptr;
s32 now;
u32 seq_send;
+ u8 *cksumkey;
- dprintk("RPC: gss_krb5_seal\n");
+ dprintk("RPC: %s\n", __func__);
BUG_ON(ctx == NULL);
now = get_seconds();
- token->len = g_token_size(&ctx->mech_used, GSS_KRB5_TOK_HDR_LEN + 8);
+ ptr = setup_token(ctx, token);
- ptr = token->data;
- g_make_token_header(&ctx->mech_used, GSS_KRB5_TOK_HDR_LEN + 8, &ptr);
+ if (ctx->gk5e->keyed_cksum)
+ cksumkey = ctx->cksum;
+ else
+ cksumkey = NULL;
- /* ptr now at header described in rfc 1964, section 1.2.1: */
- ptr[0] = (unsigned char) ((KG_TOK_MIC_MSG >> 8) & 0xff);
- ptr[1] = (unsigned char) (KG_TOK_MIC_MSG & 0xff);
+ if (make_checksum(ctx, ptr, 8, text, 0, cksumkey,
+ KG_USAGE_SIGN, &md5cksum))
+ return GSS_S_FAILURE;
- msg_start = ptr + GSS_KRB5_TOK_HDR_LEN + 8;
+ memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);
- *(__be16 *)(ptr + 2) = htons(SGN_ALG_DES_MAC_MD5);
- memset(ptr + 4, 0xff, 4);
+ spin_lock(&krb5_seq_lock);
+ seq_send = ctx->seq_send++;
+ spin_unlock(&krb5_seq_lock);
- if (make_checksum("md5", ptr, 8, text, 0, &md5cksum))
+ if (krb5_make_seq_num(ctx, ctx->seq, ctx->initiate ? 0 : 0xff,
+ seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8))
return GSS_S_FAILURE;
- if (krb5_encrypt(ctx->seq, NULL, md5cksum.data,
- md5cksum.data, md5cksum.len))
- return GSS_S_FAILURE;
+ return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
+}
+
+u32
+gss_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text,
+ struct xdr_netobj *token)
+{
+ char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
+ struct xdr_netobj cksumobj = { .len = sizeof(cksumdata),
+ .data = cksumdata};
+ void *krb5_hdr;
+ s32 now;
+ u64 seq_send;
+ u8 *cksumkey;
+ unsigned int cksum_usage;
- memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data + md5cksum.len - 8, 8);
+ dprintk("RPC: %s\n", __func__);
+ krb5_hdr = setup_token_v2(ctx, token);
+
+	/* Set up the sequence number; it is now 64 bits, sent in
+	 * clear text, and carries no direction indicator */
spin_lock(&krb5_seq_lock);
- seq_send = ctx->seq_send++;
+ seq_send = ctx->seq_send64++;
spin_unlock(&krb5_seq_lock);
-
- if (krb5_make_seq_num(ctx->seq, ctx->initiate ? 0 : 0xff,
- seq_send, ptr + GSS_KRB5_TOK_HDR_LEN,
- ptr + 8))
+ *((u64 *)(krb5_hdr + 8)) = cpu_to_be64(seq_send);
+
+ if (ctx->initiate) {
+ cksumkey = ctx->initiator_sign;
+ cksum_usage = KG_USAGE_INITIATOR_SIGN;
+ } else {
+ cksumkey = ctx->acceptor_sign;
+ cksum_usage = KG_USAGE_ACCEPTOR_SIGN;
+ }
+
+ if (make_checksum_v2(ctx, krb5_hdr, GSS_KRB5_TOK_HDR_LEN,
+ text, 0, cksumkey, cksum_usage, &cksumobj))
return GSS_S_FAILURE;
+ memcpy(krb5_hdr + GSS_KRB5_TOK_HDR_LEN, cksumobj.data, cksumobj.len);
+
+ now = get_seconds();
+
return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}
+
+u32
+gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
+ struct xdr_netobj *token)
+{
+ struct krb5_ctx *ctx = gss_ctx->internal_ctx_id;
+
+ switch (ctx->enctype) {
+ default:
+ BUG();
+ case ENCTYPE_DES_CBC_RAW:
+ case ENCTYPE_DES3_CBC_RAW:
+ case ENCTYPE_ARCFOUR_HMAC:
+ return gss_get_mic_v1(ctx, text, token);
+ case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
+ case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
+ return gss_get_mic_v2(ctx, text, token);
+ }
+}
+
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
index 6331cd6866ec..415c013ba382 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
@@ -39,14 +39,51 @@
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif
+static s32
+krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
+ unsigned char *cksum, unsigned char *buf)
+{
+ struct crypto_blkcipher *cipher;
+ unsigned char plain[8];
+ s32 code;
+
+ dprintk("RPC: %s:\n", __func__);
+ cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(cipher))
+ return PTR_ERR(cipher);
+
+ plain[0] = (unsigned char) ((seqnum >> 24) & 0xff);
+ plain[1] = (unsigned char) ((seqnum >> 16) & 0xff);
+ plain[2] = (unsigned char) ((seqnum >> 8) & 0xff);
+ plain[3] = (unsigned char) ((seqnum >> 0) & 0xff);
+ plain[4] = direction;
+ plain[5] = direction;
+ plain[6] = direction;
+ plain[7] = direction;
+
+ code = krb5_rc4_setup_seq_key(kctx, cipher, cksum);
+ if (code)
+ goto out;
+
+ code = krb5_encrypt(cipher, cksum, plain, buf, 8);
+out:
+ crypto_free_blkcipher(cipher);
+ return code;
+}
s32
-krb5_make_seq_num(struct crypto_blkcipher *key,
+krb5_make_seq_num(struct krb5_ctx *kctx,
+ struct crypto_blkcipher *key,
int direction,
u32 seqnum,
unsigned char *cksum, unsigned char *buf)
{
unsigned char plain[8];
+ if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC)
+ return krb5_make_rc4_seq_num(kctx, direction, seqnum,
+ cksum, buf);
+
plain[0] = (unsigned char) (seqnum & 0xff);
plain[1] = (unsigned char) ((seqnum >> 8) & 0xff);
plain[2] = (unsigned char) ((seqnum >> 16) & 0xff);
@@ -60,17 +97,59 @@ krb5_make_seq_num(struct crypto_blkcipher *key,
return krb5_encrypt(key, cksum, plain, buf, 8);
}
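Note the byte-order asymmetry between the two packings above: the legacy path stores the 32-bit seqnum least-significant byte first, while the rc4 path stores it most-significant byte first; both then repeat the direction byte four times. Side by side (userspace sketch, values hypothetical):

#include <stdio.h>

static void pack_v1(unsigned char p[8], unsigned int seq, unsigned char dir)
{
	int i;
	for (i = 0; i < 4; i++)
		p[i] = (seq >> (8 * i)) & 0xff;		/* LSB first */
	for (i = 4; i < 8; i++)
		p[i] = dir;
}

static void pack_rc4(unsigned char p[8], unsigned int seq, unsigned char dir)
{
	int i;
	for (i = 0; i < 4; i++)
		p[i] = (seq >> (24 - 8 * i)) & 0xff;	/* MSB first */
	for (i = 4; i < 8; i++)
		p[i] = dir;
}

int main(void)
{
	unsigned char a[8], b[8];
	int i;

	pack_v1(a, 0x01020304, 0xff);
	pack_rc4(b, 0x01020304, 0xff);
	for (i = 0; i < 8; i++)
		printf("%02x", a[i]);
	printf(" vs ");
	for (i = 0; i < 8; i++)
		printf("%02x", b[i]);
	printf("\n");	/* 04030201ffffffff vs 01020304ffffffff */
	return 0;
}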
+static s32
+krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum,
+ unsigned char *buf, int *direction, s32 *seqnum)
+{
+ struct crypto_blkcipher *cipher;
+ unsigned char plain[8];
+ s32 code;
+
+ dprintk("RPC: %s:\n", __func__);
+ cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(cipher))
+ return PTR_ERR(cipher);
+
+ code = krb5_rc4_setup_seq_key(kctx, cipher, cksum);
+ if (code)
+ goto out;
+
+ code = krb5_decrypt(cipher, cksum, buf, plain, 8);
+ if (code)
+ goto out;
+
+ if ((plain[4] != plain[5]) || (plain[4] != plain[6])
+ || (plain[4] != plain[7])) {
+ code = (s32)KG_BAD_SEQ;
+ goto out;
+ }
+
+ *direction = plain[4];
+
+ *seqnum = ((plain[0] << 24) | (plain[1] << 16) |
+ (plain[2] << 8) | (plain[3]));
+out:
+ crypto_free_blkcipher(cipher);
+ return code;
+}
+
s32
-krb5_get_seq_num(struct crypto_blkcipher *key,
+krb5_get_seq_num(struct krb5_ctx *kctx,
unsigned char *cksum,
unsigned char *buf,
int *direction, u32 *seqnum)
{
s32 code;
unsigned char plain[8];
+ struct crypto_blkcipher *key = kctx->seq;
dprintk("RPC: krb5_get_seq_num:\n");
+ if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC)
+ return krb5_get_rc4_seq_num(kctx, cksum, buf,
+ direction, seqnum);
+
if ((code = krb5_decrypt(key, cksum, buf, plain, 8)))
return code;
diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c
index ce6c247edad0..6cd930f3678f 100644
--- a/net/sunrpc/auth_gss/gss_krb5_unseal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c
@@ -3,7 +3,7 @@
*
* Adapted from MIT Kerberos 5-1.2.1 lib/gssapi/krb5/k5unseal.c
*
- * Copyright (c) 2000 The Regents of the University of Michigan.
+ * Copyright (c) 2000-2008 The Regents of the University of Michigan.
* All rights reserved.
*
* Andy Adamson <andros@umich.edu>
@@ -70,20 +70,21 @@
/* read_token is a mic token, and message_buffer is the data that the mic was
* supposedly taken over. */
-u32
-gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
+static u32
+gss_verify_mic_v1(struct krb5_ctx *ctx,
struct xdr_buf *message_buffer, struct xdr_netobj *read_token)
{
- struct krb5_ctx *ctx = gss_ctx->internal_ctx_id;
int signalg;
int sealalg;
- char cksumdata[16];
- struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
+ char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
+ struct xdr_netobj md5cksum = {.len = sizeof(cksumdata),
+ .data = cksumdata};
s32 now;
int direction;
u32 seqnum;
unsigned char *ptr = (unsigned char *)read_token->data;
int bodysize;
+ u8 *cksumkey;
dprintk("RPC: krb5_read_token\n");
@@ -98,7 +99,7 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
/* XXX sanity-check bodysize?? */
signalg = ptr[2] + (ptr[3] << 8);
- if (signalg != SGN_ALG_DES_MAC_MD5)
+ if (signalg != ctx->gk5e->signalg)
return GSS_S_DEFECTIVE_TOKEN;
sealalg = ptr[4] + (ptr[5] << 8);
@@ -108,13 +109,17 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
if ((ptr[6] != 0xff) || (ptr[7] != 0xff))
return GSS_S_DEFECTIVE_TOKEN;
- if (make_checksum("md5", ptr, 8, message_buffer, 0, &md5cksum))
- return GSS_S_FAILURE;
+ if (ctx->gk5e->keyed_cksum)
+ cksumkey = ctx->cksum;
+ else
+ cksumkey = NULL;
- if (krb5_encrypt(ctx->seq, NULL, md5cksum.data, md5cksum.data, 16))
+ if (make_checksum(ctx, ptr, 8, message_buffer, 0,
+ cksumkey, KG_USAGE_SIGN, &md5cksum))
return GSS_S_FAILURE;
- if (memcmp(md5cksum.data + 8, ptr + GSS_KRB5_TOK_HDR_LEN, 8))
+ if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN,
+ ctx->gk5e->cksumlength))
return GSS_S_BAD_SIG;
/* it got through unscathed. Make sure the context is unexpired */
@@ -126,7 +131,8 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
/* do sequencing checks */
- if (krb5_get_seq_num(ctx->seq, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8, &direction, &seqnum))
+ if (krb5_get_seq_num(ctx, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8,
+ &direction, &seqnum))
return GSS_S_FAILURE;
if ((ctx->initiate && direction != 0xff) ||
@@ -135,3 +141,86 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
return GSS_S_COMPLETE;
}
+
+static u32
+gss_verify_mic_v2(struct krb5_ctx *ctx,
+ struct xdr_buf *message_buffer, struct xdr_netobj *read_token)
+{
+ char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
+ struct xdr_netobj cksumobj = {.len = sizeof(cksumdata),
+ .data = cksumdata};
+ s32 now;
+ u64 seqnum;
+ u8 *ptr = read_token->data;
+ u8 *cksumkey;
+ u8 flags;
+ int i;
+ unsigned int cksum_usage;
+
+ dprintk("RPC: %s\n", __func__);
+
+ if (be16_to_cpu(*((__be16 *)ptr)) != KG2_TOK_MIC)
+ return GSS_S_DEFECTIVE_TOKEN;
+
+ flags = ptr[2];
+ if ((!ctx->initiate && (flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)) ||
+ (ctx->initiate && !(flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)))
+ return GSS_S_BAD_SIG;
+
+ if (flags & KG2_TOKEN_FLAG_SEALED) {
+ dprintk("%s: token has unexpected sealed flag\n", __func__);
+ return GSS_S_FAILURE;
+ }
+
+ for (i = 3; i < 8; i++)
+ if (ptr[i] != 0xff)
+ return GSS_S_DEFECTIVE_TOKEN;
+
+ if (ctx->initiate) {
+ cksumkey = ctx->acceptor_sign;
+ cksum_usage = KG_USAGE_ACCEPTOR_SIGN;
+ } else {
+ cksumkey = ctx->initiator_sign;
+ cksum_usage = KG_USAGE_INITIATOR_SIGN;
+ }
+
+ if (make_checksum_v2(ctx, ptr, GSS_KRB5_TOK_HDR_LEN, message_buffer, 0,
+ cksumkey, cksum_usage, &cksumobj))
+ return GSS_S_FAILURE;
+
+ if (memcmp(cksumobj.data, ptr + GSS_KRB5_TOK_HDR_LEN,
+ ctx->gk5e->cksumlength))
+ return GSS_S_BAD_SIG;
+
+ /* it got through unscathed. Make sure the context is unexpired */
+ now = get_seconds();
+ if (now > ctx->endtime)
+ return GSS_S_CONTEXT_EXPIRED;
+
+ /* do sequencing checks */
+
+	seqnum = be64_to_cpup((__be64 *)(ptr + 8));
+
+ return GSS_S_COMPLETE;
+}
+
+u32
+gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
+ struct xdr_buf *message_buffer,
+ struct xdr_netobj *read_token)
+{
+ struct krb5_ctx *ctx = gss_ctx->internal_ctx_id;
+
+ switch (ctx->enctype) {
+ default:
+ BUG();
+ case ENCTYPE_DES_CBC_RAW:
+ case ENCTYPE_DES3_CBC_RAW:
+ case ENCTYPE_ARCFOUR_HMAC:
+ return gss_verify_mic_v1(ctx, message_buffer, read_token);
+ case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
+ case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
+ return gss_verify_mic_v2(ctx, message_buffer, read_token);
+ }
+}
+
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index a6e905637e03..2763e3e48db4 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -1,3 +1,33 @@
+/*
+ * COPYRIGHT (c) 2008
+ * The Regents of the University of Michigan
+ * ALL RIGHTS RESERVED
+ *
+ * Permission is granted to use, copy, create derivative works
+ * and redistribute this software and such derivative works
+ * for any purpose, so long as the name of The University of
+ * Michigan is not used in any advertising or publicity
+ * pertaining to the use of distribution of this software
+ * without specific, written prior authorization. If the
+ * above copyright notice or any other identification of the
+ * University of Michigan is included in any copy of any
+ * portion of this software, then the disclaimer below must
+ * also be included.
+ *
+ * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
+ * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
+ * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
+ * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
+ * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
+ * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE
+ * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR
+ * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING
+ * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
+ * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGES.
+ */
+
#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/sunrpc/gss_krb5.h>
@@ -12,10 +42,7 @@
static inline int
gss_krb5_padding(int blocksize, int length)
{
- /* Most of the code is block-size independent but currently we
- * use only 8: */
- BUG_ON(blocksize != 8);
- return 8 - (length & 7);
+ return blocksize - (length % blocksize);
}
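The generalized padding now adds between 1 and blocksize bytes, with a length already on a block boundary getting a full extra block, so the pad count stays recoverable from the last byte. A quick check of the edge cases (blocksize 8 assumed):

#include <stdio.h>

static int gss_krb5_padding(int blocksize, int length)
{
	return blocksize - (length % blocksize);
}

int main(void)
{
	int len;

	for (len = 13; len <= 16; len++)
		printf("len %2d -> pad %d\n", len, gss_krb5_padding(8, len));
	return 0;
}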
static inline void
@@ -86,8 +113,8 @@ out:
return 0;
}
-static void
-make_confounder(char *p, u32 conflen)
+void
+gss_krb5_make_confounder(char *p, u32 conflen)
{
static u64 i = 0;
u64 *q = (u64 *)p;
@@ -127,69 +154,73 @@ make_confounder(char *p, u32 conflen)
/* XXX factor out common code with seal/unseal. */
-u32
-gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
+static u32
+gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
struct xdr_buf *buf, struct page **pages)
{
- struct krb5_ctx *kctx = ctx->internal_ctx_id;
- char cksumdata[16];
- struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
+ char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
+ struct xdr_netobj md5cksum = {.len = sizeof(cksumdata),
+ .data = cksumdata};
int blocksize = 0, plainlen;
unsigned char *ptr, *msg_start;
s32 now;
int headlen;
struct page **tmp_pages;
u32 seq_send;
+ u8 *cksumkey;
+ u32 conflen = kctx->gk5e->conflen;
- dprintk("RPC: gss_wrap_kerberos\n");
+ dprintk("RPC: %s\n", __func__);
now = get_seconds();
blocksize = crypto_blkcipher_blocksize(kctx->enc);
gss_krb5_add_padding(buf, offset, blocksize);
BUG_ON((buf->len - offset) % blocksize);
- plainlen = blocksize + buf->len - offset;
+ plainlen = conflen + buf->len - offset;
- headlen = g_token_size(&kctx->mech_used, 24 + plainlen) -
- (buf->len - offset);
+ headlen = g_token_size(&kctx->mech_used,
+ GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength + plainlen) -
+ (buf->len - offset);
ptr = buf->head[0].iov_base + offset;
/* shift data to make room for header. */
+ xdr_extend_head(buf, offset, headlen);
+
/* XXX Would be cleverer to encrypt while copying. */
- /* XXX bounds checking, slack, etc. */
- memmove(ptr + headlen, ptr, buf->head[0].iov_len - offset);
- buf->head[0].iov_len += headlen;
- buf->len += headlen;
BUG_ON((buf->len - offset - headlen) % blocksize);
g_make_token_header(&kctx->mech_used,
- GSS_KRB5_TOK_HDR_LEN + 8 + plainlen, &ptr);
+ GSS_KRB5_TOK_HDR_LEN +
+ kctx->gk5e->cksumlength + plainlen, &ptr);
/* ptr now at header described in rfc 1964, section 1.2.1: */
ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff);
ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff);
- msg_start = ptr + 24;
+ msg_start = ptr + GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength;
- *(__be16 *)(ptr + 2) = htons(SGN_ALG_DES_MAC_MD5);
+ *(__be16 *)(ptr + 2) = cpu_to_le16(kctx->gk5e->signalg);
memset(ptr + 4, 0xff, 4);
- *(__be16 *)(ptr + 4) = htons(SEAL_ALG_DES);
+ *(__be16 *)(ptr + 4) = cpu_to_le16(kctx->gk5e->sealalg);
- make_confounder(msg_start, blocksize);
+ gss_krb5_make_confounder(msg_start, conflen);
+
+ if (kctx->gk5e->keyed_cksum)
+ cksumkey = kctx->cksum;
+ else
+ cksumkey = NULL;
/* XXXJBF: UGH!: */
tmp_pages = buf->pages;
buf->pages = pages;
- if (make_checksum("md5", ptr, 8, buf,
- offset + headlen - blocksize, &md5cksum))
+ if (make_checksum(kctx, ptr, 8, buf, offset + headlen - conflen,
+ cksumkey, KG_USAGE_SEAL, &md5cksum))
return GSS_S_FAILURE;
buf->pages = tmp_pages;
- if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
- md5cksum.data, md5cksum.len))
- return GSS_S_FAILURE;
- memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data + md5cksum.len - 8, 8);
+ memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);
spin_lock(&krb5_seq_lock);
seq_send = kctx->seq_send++;
@@ -197,25 +228,42 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
/* XXX would probably be more efficient to compute checksum
* and encrypt at the same time: */
- if ((krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff,
+ if ((krb5_make_seq_num(kctx, kctx->seq, kctx->initiate ? 0 : 0xff,
seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8)))
return GSS_S_FAILURE;
- if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize,
- pages))
- return GSS_S_FAILURE;
+ if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
+ struct crypto_blkcipher *cipher;
+ int err;
+ cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(cipher))
+ return GSS_S_FAILURE;
+
+ krb5_rc4_setup_enc_key(kctx, cipher, seq_send);
+
+ err = gss_encrypt_xdr_buf(cipher, buf,
+ offset + headlen - conflen, pages);
+ crypto_free_blkcipher(cipher);
+ if (err)
+ return GSS_S_FAILURE;
+ } else {
+ if (gss_encrypt_xdr_buf(kctx->enc, buf,
+ offset + headlen - conflen, pages))
+ return GSS_S_FAILURE;
+ }
return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}
-u32
-gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
+static u32
+gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
{
- struct krb5_ctx *kctx = ctx->internal_ctx_id;
int signalg;
int sealalg;
- char cksumdata[16];
- struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
+ char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
+ struct xdr_netobj md5cksum = {.len = sizeof(cksumdata),
+ .data = cksumdata};
s32 now;
int direction;
s32 seqnum;
@@ -224,6 +272,9 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
void *data_start, *orig_start;
int data_len;
int blocksize;
+ u32 conflen = kctx->gk5e->conflen;
+ int crypt_offset;
+ u8 *cksumkey;
dprintk("RPC: gss_unwrap_kerberos\n");
@@ -241,29 +292,65 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
/* get the sign and seal algorithms */
signalg = ptr[2] + (ptr[3] << 8);
- if (signalg != SGN_ALG_DES_MAC_MD5)
+ if (signalg != kctx->gk5e->signalg)
return GSS_S_DEFECTIVE_TOKEN;
sealalg = ptr[4] + (ptr[5] << 8);
- if (sealalg != SEAL_ALG_DES)
+ if (sealalg != kctx->gk5e->sealalg)
return GSS_S_DEFECTIVE_TOKEN;
if ((ptr[6] != 0xff) || (ptr[7] != 0xff))
return GSS_S_DEFECTIVE_TOKEN;
- if (gss_decrypt_xdr_buf(kctx->enc, buf,
- ptr + GSS_KRB5_TOK_HDR_LEN + 8 - (unsigned char *)buf->head[0].iov_base))
- return GSS_S_DEFECTIVE_TOKEN;
+ /*
+ * Data starts after token header and checksum. ptr points
+ * to the beginning of the token header
+ */
+ crypt_offset = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) -
+ (unsigned char *)buf->head[0].iov_base;
+
+ /*
+ * Need plaintext seqnum to derive encryption key for arcfour-hmac
+ */
+ if (krb5_get_seq_num(kctx, ptr + GSS_KRB5_TOK_HDR_LEN,
+ ptr + 8, &direction, &seqnum))
+ return GSS_S_BAD_SIG;
- if (make_checksum("md5", ptr, 8, buf,
- ptr + GSS_KRB5_TOK_HDR_LEN + 8 - (unsigned char *)buf->head[0].iov_base, &md5cksum))
- return GSS_S_FAILURE;
+ if ((kctx->initiate && direction != 0xff) ||
+ (!kctx->initiate && direction != 0))
+ return GSS_S_BAD_SIG;
+
+ if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
+ struct crypto_blkcipher *cipher;
+ int err;
+
+ cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(cipher))
+ return GSS_S_FAILURE;
+
+ krb5_rc4_setup_enc_key(kctx, cipher, seqnum);
- if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
- md5cksum.data, md5cksum.len))
+ err = gss_decrypt_xdr_buf(cipher, buf, crypt_offset);
+ crypto_free_blkcipher(cipher);
+ if (err)
+ return GSS_S_DEFECTIVE_TOKEN;
+ } else {
+ if (gss_decrypt_xdr_buf(kctx->enc, buf, crypt_offset))
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+
+ if (kctx->gk5e->keyed_cksum)
+ cksumkey = kctx->cksum;
+ else
+ cksumkey = NULL;
+
+ if (make_checksum(kctx, ptr, 8, buf, crypt_offset,
+ cksumkey, KG_USAGE_SEAL, &md5cksum))
return GSS_S_FAILURE;
- if (memcmp(md5cksum.data + 8, ptr + GSS_KRB5_TOK_HDR_LEN, 8))
+ if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN,
+ kctx->gk5e->cksumlength))
return GSS_S_BAD_SIG;
/* it got through unscathed. Make sure the context is unexpired */
@@ -275,19 +362,12 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
/* do sequencing checks */
- if (krb5_get_seq_num(kctx->seq, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8,
- &direction, &seqnum))
- return GSS_S_BAD_SIG;
-
- if ((kctx->initiate && direction != 0xff) ||
- (!kctx->initiate && direction != 0))
- return GSS_S_BAD_SIG;
-
/* Copy the data back to the right position. XXX: Would probably be
* better to copy and encrypt at the same time. */
blocksize = crypto_blkcipher_blocksize(kctx->enc);
- data_start = ptr + GSS_KRB5_TOK_HDR_LEN + 8 + blocksize;
+ data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) +
+ conflen;
orig_start = buf->head[0].iov_base + offset;
data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
memmove(orig_start, data_start, data_len);
@@ -299,3 +379,209 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
return GSS_S_COMPLETE;
}
+
+/*
+ * We cannot currently handle tokens with rotated data. We need a
+ * generalized routine to rotate the data in place. It is anticipated
+ * that we won't encounter rotated data in the general case.
+ */
+static u32
+rotate_left(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, u16 rrc)
+{
+ unsigned int realrrc = rrc % (buf->len - offset - GSS_KRB5_TOK_HDR_LEN);
+
+ if (realrrc == 0)
+ return 0;
+
+ dprintk("%s: cannot process token with rotated data: "
+ "rrc %u, realrrc %u\n", __func__, rrc, realrrc);
+ return 1;
+}
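
A generalized in-place rotation, should RRC handling ever be needed, could use the classic three-reversal scheme. The following is a flat-buffer sketch only (hypothetical helpers, not part of this patch; a real version would have to walk the xdr_buf head/pages/tail rather than assume contiguous memory):

static void byte_reverse(unsigned char *p, size_t len)
{
	size_t i;

	for (i = 0; i < len / 2; i++) {
		unsigned char t = p[i];

		p[i] = p[len - 1 - i];
		p[len - 1 - i] = t;
	}
}

/* rotate left by rrc: reverse both halves, then reverse the whole */
static void rotate_left_flat(unsigned char *p, size_t len, size_t rrc)
{
	if (len == 0)
		return;
	rrc %= len;
	byte_reverse(p, rrc);
	byte_reverse(p + rrc, len - rrc);
	byte_reverse(p, len);
}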
+
+static u32
+gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
+ struct xdr_buf *buf, struct page **pages)
+{
+ int blocksize;
+ u8 *ptr, *plainhdr;
+ s32 now;
+ u8 flags = 0x00;
+ __be16 *be16ptr, ec = 0;
+ __be64 *be64ptr;
+ u32 err;
+
+ dprintk("RPC: %s\n", __func__);
+
+ if (kctx->gk5e->encrypt_v2 == NULL)
+ return GSS_S_FAILURE;
+
+ /* make room for gss token header */
+ if (xdr_extend_head(buf, offset, GSS_KRB5_TOK_HDR_LEN))
+ return GSS_S_FAILURE;
+
+ /* construct gss token header */
+ ptr = plainhdr = buf->head[0].iov_base + offset;
+ *ptr++ = (unsigned char) ((KG2_TOK_WRAP>>8) & 0xff);
+ *ptr++ = (unsigned char) (KG2_TOK_WRAP & 0xff);
+
+ if ((kctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0)
+ flags |= KG2_TOKEN_FLAG_SENTBYACCEPTOR;
+ if ((kctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY) != 0)
+ flags |= KG2_TOKEN_FLAG_ACCEPTORSUBKEY;
+ /* We always do confidentiality in wrap tokens */
+ flags |= KG2_TOKEN_FLAG_SEALED;
+
+ *ptr++ = flags;
+ *ptr++ = 0xff;
+ be16ptr = (__be16 *)ptr;
+
+ blocksize = crypto_blkcipher_blocksize(kctx->acceptor_enc);
+ *be16ptr++ = cpu_to_be16(ec);
+ /* "inner" token header always uses 0 for RRC */
+ *be16ptr++ = cpu_to_be16(0);
+
+ be64ptr = (__be64 *)be16ptr;
+ spin_lock(&krb5_seq_lock);
+ *be64ptr = cpu_to_be64(kctx->seq_send64++);
+ spin_unlock(&krb5_seq_lock);
+
+ err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, ec, pages);
+ if (err)
+ return err;
+
+ now = get_seconds();
+ return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
+}
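
The sixteen header bytes built above follow the RFC 4121 wrap-token layout. As an illustration (the struct and field names here are ours, not the kernel's):

/* RFC 4121 wrap token header as constructed above (illustrative): */
struct krb5_v2_wrap_hdr {
	unsigned char	tok_id[2];	/* 0x05 0x04 == KG2_TOK_WRAP */
	unsigned char	flags;		/* sent-by-acceptor, sealed, acceptor-subkey */
	unsigned char	filler;		/* always 0xff */
	__be16		ec;		/* "extra count": pad octets in the plaintext */
	__be16		rrc;		/* right rotation count; 0 in the inner header */
	__be64		snd_seq;	/* 64-bit big-endian send sequence number */
};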
+
+static u32
+gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
+{
+ s32 now;
+ u64 seqnum;
+ u8 *ptr;
+ u8 flags = 0x00;
+ u16 ec, rrc;
+ int err;
+ u32 headskip, tailskip;
+ u8 decrypted_hdr[GSS_KRB5_TOK_HDR_LEN];
+ unsigned int movelen;
+
+ dprintk("RPC: %s\n", __func__);
+
+ if (kctx->gk5e->decrypt_v2 == NULL)
+ return GSS_S_FAILURE;
+
+ ptr = buf->head[0].iov_base + offset;
+
+ if (be16_to_cpu(*((__be16 *)ptr)) != KG2_TOK_WRAP)
+ return GSS_S_DEFECTIVE_TOKEN;
+
+ flags = ptr[2];
+ if ((!kctx->initiate && (flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)) ||
+ (kctx->initiate && !(flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)))
+ return GSS_S_BAD_SIG;
+
+ if ((flags & KG2_TOKEN_FLAG_SEALED) == 0) {
+ dprintk("%s: token missing expected sealed flag\n", __func__);
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+
+ if (ptr[3] != 0xff)
+ return GSS_S_DEFECTIVE_TOKEN;
+
+ ec = be16_to_cpup((__be16 *)(ptr + 4));
+ rrc = be16_to_cpup((__be16 *)(ptr + 6));
+
+ seqnum = be64_to_cpup((__be64 *)(ptr + 8));
+
+ if (rrc != 0) {
+ err = rotate_left(kctx, offset, buf, rrc);
+ if (err)
+ return GSS_S_FAILURE;
+ }
+
+ err = (*kctx->gk5e->decrypt_v2)(kctx, offset, buf,
+ &headskip, &tailskip);
+ if (err)
+ return GSS_S_FAILURE;
+
+ /*
+ * Retrieve the decrypted gss token header and verify
+ * it against the original
+ */
+ err = read_bytes_from_xdr_buf(buf,
+ buf->len - GSS_KRB5_TOK_HDR_LEN - tailskip,
+ decrypted_hdr, GSS_KRB5_TOK_HDR_LEN);
+ if (err) {
+ dprintk("%s: error %u getting decrypted_hdr\n", __func__, err);
+ return GSS_S_FAILURE;
+ }
+ if (memcmp(ptr, decrypted_hdr, 6)
+ || memcmp(ptr + 8, decrypted_hdr + 8, 8)) {
+ dprintk("%s: token hdr, plaintext hdr mismatch!\n", __func__);
+ return GSS_S_FAILURE;
+ }
+
+ /* do sequencing checks */
+
+ /* it got through unscathed. Make sure the context is unexpired */
+ now = get_seconds();
+ if (now > kctx->endtime)
+ return GSS_S_CONTEXT_EXPIRED;
+
+ /*
+ * Move the head data back to the right position in xdr_buf.
+ * We ignore any "ec" data since it might be in the head or
+ * the tail, and we really don't need to deal with it.
+ * Note that buf->head[0].iov_len may indicate the available
+ * head buffer space rather than that actually occupied.
+ */
+ movelen = min_t(unsigned int, buf->head[0].iov_len, buf->len);
+ movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip;
+ BUG_ON(offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
+ buf->head[0].iov_len);
+ memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen);
+ buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
+ buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip;
+
+ return GSS_S_COMPLETE;
+}
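
The head fix-up arithmetic above is easier to follow laid out; roughly (offsets illustrative, names as in this patch):

/*
 * head[0].iov_base
 * |<- offset ->|<- 16-byte token hdr ->|<- headskip ->|<- plaintext ...
 *              ^ptr                                    ^memmove source
 *
 * The plaintext at ptr + GSS_KRB5_TOK_HDR_LEN + headskip slides back to
 * ptr; pages[] and the tail already sit where the caller expects them,
 * so only the head portion (movelen bytes) has to move.
 */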
+
+u32
+gss_wrap_kerberos(struct gss_ctx *gctx, int offset,
+ struct xdr_buf *buf, struct page **pages)
+{
+ struct krb5_ctx *kctx = gctx->internal_ctx_id;
+
+ switch (kctx->enctype) {
+ default:
+ BUG();
+ case ENCTYPE_DES_CBC_RAW:
+ case ENCTYPE_DES3_CBC_RAW:
+ case ENCTYPE_ARCFOUR_HMAC:
+ return gss_wrap_kerberos_v1(kctx, offset, buf, pages);
+ case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
+ case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
+ return gss_wrap_kerberos_v2(kctx, offset, buf, pages);
+ }
+}
+
+u32
+gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf)
+{
+ struct krb5_ctx *kctx = gctx->internal_ctx_id;
+
+ switch (kctx->enctype) {
+ default:
+ BUG();
+ case ENCTYPE_DES_CBC_RAW:
+ case ENCTYPE_DES3_CBC_RAW:
+ case ENCTYPE_ARCFOUR_HMAC:
+ return gss_unwrap_kerberos_v1(kctx, offset, buf);
+ case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
+ case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
+ return gss_unwrap_kerberos_v2(kctx, offset, buf);
+ }
+}
+
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index 76e4c6f4ac3c..2689de39dc78 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -249,14 +249,15 @@ EXPORT_SYMBOL_GPL(gss_mech_put);
int
gss_import_sec_context(const void *input_token, size_t bufsize,
struct gss_api_mech *mech,
- struct gss_ctx **ctx_id)
+ struct gss_ctx **ctx_id,
+ gfp_t gfp_mask)
{
- if (!(*ctx_id = kzalloc(sizeof(**ctx_id), GFP_KERNEL)))
+ if (!(*ctx_id = kzalloc(sizeof(**ctx_id), gfp_mask)))
return -ENOMEM;
(*ctx_id)->mech_type = gss_mech_get(mech);
return mech->gm_ops
- ->gss_import_sec_context(input_token, bufsize, *ctx_id);
+ ->gss_import_sec_context(input_token, bufsize, *ctx_id, gfp_mask);
}
/* gss_get_mic: compute a mic over message and return mic_token. */
@@ -285,6 +286,20 @@ gss_verify_mic(struct gss_ctx *context_handle,
mic_token);
}
+/*
+ * This function is called from both the client and server code.
+ * Each makes guarantees about how much "slack" space is available
+ * for the underlying function in "buf"'s head and tail while
+ * performing the wrap.
+ *
+ * The client and server code allocate RPC_MAX_AUTH_SIZE extra
+ * space in both the head and tail which is available for use by
+ * the wrap function.
+ *
+ * Underlying functions should verify they do not use more than
+ * RPC_MAX_AUTH_SIZE of extra space in either the head or tail
+ * when performing the wrap.
+ */
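
A caller-side sanity check of that contract might look like the following sketch (saved_head_len is a hypothetical value captured before the call; this is not code from the patch):

/* Sketch only: assert gss_wrap() stayed within its head slack. */
unsigned int saved_head_len = buf->head[0].iov_len;	/* before wrap */
u32 maj_stat = gss_wrap(ctx_id, offset, buf, inpages);

WARN_ON(maj_stat == GSS_S_COMPLETE &&
	buf->head[0].iov_len > saved_head_len + RPC_MAX_AUTH_SIZE);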
u32
gss_wrap(struct gss_ctx *ctx_id,
int offset,
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c
index 035e1dd6af1b..dc3f1f5ed865 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_mech.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c
@@ -84,13 +84,14 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
static int
gss_import_sec_context_spkm3(const void *p, size_t len,
- struct gss_ctx *ctx_id)
+ struct gss_ctx *ctx_id,
+ gfp_t gfp_mask)
{
const void *end = (const void *)((const char *)p + len);
struct spkm3_ctx *ctx;
int version;
- if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS)))
+ if (!(ctx = kzalloc(sizeof(*ctx), gfp_mask)))
goto out_err;
p = simple_get_bytes(p, end, &version, sizeof(version));
diff --git a/net/sunrpc/auth_gss/gss_spkm3_token.c b/net/sunrpc/auth_gss/gss_spkm3_token.c
index 3308157436d2..a99825d7caa0 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_token.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_token.c
@@ -223,7 +223,7 @@ spkm3_verify_mic_token(unsigned char **tokp, int *mic_hdrlen, unsigned char **ck
/* only support SPKM_MIC_TOK */
if((ptr[6] != 0x01) || (ptr[7] != 0x01)) {
- dprintk("RPC: ERROR unsupported SPKM3 token \n");
+ dprintk("RPC: ERROR unsupported SPKM3 token\n");
goto out;
}
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index b81e790ef9f4..cc385b3a59c2 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -494,7 +494,7 @@ static int rsc_parse(struct cache_detail *cd,
len = qword_get(&mesg, buf, mlen);
if (len < 0)
goto out;
- status = gss_import_sec_context(buf, len, gm, &rsci.mechctx);
+ status = gss_import_sec_context(buf, len, gm, &rsci.mechctx, GFP_KERNEL);
if (status)
goto out;
@@ -1315,6 +1315,14 @@ svcauth_gss_wrap_resp_priv(struct svc_rqst *rqstp)
inpages = resbuf->pages;
/* XXX: Would be better to write some xdr helper functions for
* nfs{2,3,4}xdr.c that place the data right, instead of copying: */
+
+ /*
+ * If there is currently tail data, make sure there is
+ * room for the head, tail, and 2 * RPC_MAX_AUTH_SIZE in
+ * the page, and move the current tail data such that
+ * there is RPC_MAX_AUTH_SIZE slack space available in
+ * both the head and tail.
+ */
if (resbuf->tail[0].iov_base) {
BUG_ON(resbuf->tail[0].iov_base >= resbuf->head[0].iov_base
+ PAGE_SIZE);
@@ -1327,6 +1335,13 @@ svcauth_gss_wrap_resp_priv(struct svc_rqst *rqstp)
resbuf->tail[0].iov_len);
resbuf->tail[0].iov_base += RPC_MAX_AUTH_SIZE;
}
+ /*
+ * If there is no current tail data, make sure there is
+ * room for the head data, and 2 * RPC_MAX_AUTH_SIZE in the
+ * allotted page, and set up tail information such that there
+ * is RPC_MAX_AUTH_SIZE slack space available in both the
+ * head and tail.
+ */
if (resbuf->tail[0].iov_base == NULL) {
if (resbuf->head[0].iov_len + 2*RPC_MAX_AUTH_SIZE > PAGE_SIZE)
return -ENOMEM;
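
The setup this comment describes is truncated from the hunk; presumably it carves a zero-length tail out of the head page, leaving the promised slack, along these lines (sketch under that assumption):

resbuf->tail[0].iov_base = resbuf->head[0].iov_base
	+ resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE;
resbuf->tail[0].iov_len = 0;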
diff --git a/net/sunrpc/bc_svc.c b/net/sunrpc/bc_svc.c
index f0c05d3311c1..7dcfe0cc3500 100644
--- a/net/sunrpc/bc_svc.c
+++ b/net/sunrpc/bc_svc.c
@@ -60,7 +60,7 @@ int bc_send(struct rpc_rqst *req)
rpc_put_task(task);
}
return ret;
- dprintk("RPC: bc_send ret= %d \n", ret);
+ dprintk("RPC: bc_send ret= %d\n", ret);
}
#endif /* CONFIG_NFS_V4_1 */
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 39bddba53ba1..c2173ebdb33c 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -28,6 +28,7 @@
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
+#include <linux/smp_lock.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
@@ -49,11 +50,17 @@ static void cache_init(struct cache_head *h)
h->last_refresh = now;
}
+static inline int cache_is_expired(struct cache_detail *detail, struct cache_head *h)
+{
+ return (h->expiry_time < get_seconds()) ||
+ (detail->flush_time > h->last_refresh);
+}
+
struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
struct cache_head *key, int hash)
{
struct cache_head **head, **hp;
- struct cache_head *new = NULL;
+ struct cache_head *new = NULL, *freeme = NULL;
head = &detail->hash_table[hash];
@@ -62,6 +69,9 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
for (hp=head; *hp != NULL ; hp = &(*hp)->next) {
struct cache_head *tmp = *hp;
if (detail->match(tmp, key)) {
+ if (cache_is_expired(detail, tmp))
+ /* This entry is expired, we will discard it. */
+ break;
cache_get(tmp);
read_unlock(&detail->hash_lock);
return tmp;
@@ -86,6 +96,13 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
for (hp=head; *hp != NULL ; hp = &(*hp)->next) {
struct cache_head *tmp = *hp;
if (detail->match(tmp, key)) {
+ if (cache_is_expired(detail, tmp)) {
+ *hp = tmp->next;
+ tmp->next = NULL;
+ detail->entries--;
+ freeme = tmp;
+ break;
+ }
cache_get(tmp);
write_unlock(&detail->hash_lock);
cache_put(new, detail);
@@ -98,6 +115,8 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
cache_get(new);
write_unlock(&detail->hash_lock);
+ if (freeme)
+ cache_put(freeme, detail);
return new;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);
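
The deferred release via freeme is deliberate: the expired entry is unlinked while holding hash_lock, but cache_put() may run the detail's destructor and free the entry, so it must happen only after the lock is dropped. The pattern in isolation (sketch):

/* Pattern sketch: never call cache_put() under detail->hash_lock. */
write_lock(&detail->hash_lock);
/* ... detach "freeme" from the hash chain ... */
write_unlock(&detail->hash_lock);
if (freeme)
	cache_put(freeme, detail);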
@@ -183,10 +202,7 @@ static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
static inline int cache_is_valid(struct cache_detail *detail, struct cache_head *h)
{
- if (!test_bit(CACHE_VALID, &h->flags) ||
- h->expiry_time < get_seconds())
- return -EAGAIN;
- else if (detail->flush_time > h->last_refresh)
+ if (!test_bit(CACHE_VALID, &h->flags))
return -EAGAIN;
else {
/* entry is valid */
@@ -397,31 +413,27 @@ static int cache_clean(void)
/* Ok, now to clean this strand */
cp = & current_detail->hash_table[current_index];
- ch = *cp;
- for (; ch; cp= & ch->next, ch= *cp) {
+ for (ch = *cp; ch; cp = &ch->next, ch = *cp) {
if (current_detail->nextcheck > ch->expiry_time)
current_detail->nextcheck = ch->expiry_time+1;
- if (ch->expiry_time >= get_seconds() &&
- ch->last_refresh >= current_detail->flush_time)
+ if (!cache_is_expired(current_detail, ch))
continue;
- if (test_and_clear_bit(CACHE_PENDING, &ch->flags))
- cache_dequeue(current_detail, ch);
- if (atomic_read(&ch->ref.refcount) == 1)
- break;
- }
- if (ch) {
*cp = ch->next;
ch->next = NULL;
current_detail->entries--;
rv = 1;
+ break;
}
+
write_unlock(&current_detail->hash_lock);
d = current_detail;
if (!ch)
current_index ++;
spin_unlock(&cache_list_lock);
if (ch) {
+ if (test_and_clear_bit(CACHE_PENDING, &ch->flags))
+ cache_dequeue(current_detail, ch);
cache_revisit_request(ch);
cache_put(ch, d);
}
@@ -1233,8 +1245,10 @@ static int content_open(struct inode *inode, struct file *file,
if (!cd || !try_module_get(cd->owner))
return -EACCES;
han = __seq_open_private(file, &cache_content_op, sizeof(*han));
- if (han == NULL)
+ if (han == NULL) {
+ module_put(cd->owner);
return -ENOMEM;
+ }
han->cd = cd;
return 0;
@@ -1331,12 +1345,18 @@ static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait)
return cache_poll(filp, wait, cd);
}
-static int cache_ioctl_procfs(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+static long cache_ioctl_procfs(struct file *filp,
+ unsigned int cmd, unsigned long arg)
{
+ long ret;
+ struct inode *inode = filp->f_path.dentry->d_inode;
struct cache_detail *cd = PDE(inode)->data;
- return cache_ioctl(inode, filp, cmd, arg, cd);
+ lock_kernel();
+ ret = cache_ioctl(inode, filp, cmd, arg, cd);
+ unlock_kernel();
+
+ return ret;
}
static int cache_open_procfs(struct inode *inode, struct file *filp)
@@ -1359,7 +1379,7 @@ static const struct file_operations cache_file_operations_procfs = {
.read = cache_read_procfs,
.write = cache_write_procfs,
.poll = cache_poll_procfs,
- .ioctl = cache_ioctl_procfs, /* for FIONREAD */
+ .unlocked_ioctl = cache_ioctl_procfs, /* for FIONREAD */
.open = cache_open_procfs,
.release = cache_release_procfs,
};
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 19c9983d5360..756fc324db9e 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -556,26 +556,16 @@ static const struct rpc_call_ops rpc_default_ops = {
*/
struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
{
- struct rpc_task *task, *ret;
+ struct rpc_task *task;
task = rpc_new_task(task_setup_data);
- if (task == NULL) {
- rpc_release_calldata(task_setup_data->callback_ops,
- task_setup_data->callback_data);
- ret = ERR_PTR(-ENOMEM);
+ if (IS_ERR(task))
goto out;
- }
- if (task->tk_status != 0) {
- ret = ERR_PTR(task->tk_status);
- rpc_put_task(task);
- goto out;
- }
atomic_inc(&task->tk_count);
rpc_execute(task);
- ret = task;
out:
- return ret;
+ return task;
}
EXPORT_SYMBOL_GPL(rpc_run_task);
@@ -657,9 +647,8 @@ struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
* Create an rpc_task to send the data
*/
task = rpc_new_task(&task_setup_data);
- if (!task) {
+ if (IS_ERR(task)) {
xprt_free_bc_request(req);
- task = ERR_PTR(-ENOMEM);
goto out;
}
task->tk_rqstp = req;
@@ -1518,7 +1507,6 @@ call_refreshresult(struct rpc_task *task)
task->tk_action = call_refresh;
if (status != -ETIMEDOUT)
rpc_delay(task, 3*HZ);
- return;
}
static __be32 *
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index aae6907fd546..4a843b883b89 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -25,7 +25,6 @@
#ifdef RPC_DEBUG
#define RPCDBG_FACILITY RPCDBG_SCHED
-#define RPC_TASK_MAGIC_ID 0xf00baa
#endif
/*
@@ -237,7 +236,6 @@ static void rpc_task_set_debuginfo(struct rpc_task *task)
{
static atomic_t rpc_pid;
- task->tk_magic = RPC_TASK_MAGIC_ID;
task->tk_pid = atomic_inc_return(&rpc_pid);
}
#else
@@ -360,9 +358,6 @@ static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task
dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
task->tk_pid, jiffies);
-#ifdef RPC_DEBUG
- BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
-#endif
/* Has the task been executed yet? If not, we cannot wake it up! */
if (!RPC_IS_ACTIVATED(task)) {
printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
@@ -834,7 +829,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
}
/* starting timestamp */
- task->tk_start = jiffies;
+ task->tk_start = ktime_get();
dprintk("RPC: new task initialized, procpid %u\n",
task_pid_nr(current));
@@ -856,16 +851,23 @@ struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
if (task == NULL) {
task = rpc_alloc_task();
- if (task == NULL)
- goto out;
+ if (task == NULL) {
+ rpc_release_calldata(setup_data->callback_ops,
+ setup_data->callback_data);
+ return ERR_PTR(-ENOMEM);
+ }
flags = RPC_TASK_DYNAMIC;
}
rpc_init_task(task, setup_data);
+ if (task->tk_status < 0) {
+ int err = task->tk_status;
+ rpc_put_task(task);
+ return ERR_PTR(err);
+ }
task->tk_flags |= flags;
dprintk("RPC: allocated task %p\n", task);
-out:
return task;
}
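
With this change rpc_new_task() never returns NULL, so callers switch from NULL checks to the ERR_PTR convention, e.g. (sketch of the caller pattern):

task = rpc_new_task(&task_setup_data);
if (IS_ERR(task))
	return PTR_ERR(task);	/* -ENOMEM, or the failed task's tk_status */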
@@ -909,9 +911,6 @@ EXPORT_SYMBOL_GPL(rpc_put_task);
static void rpc_release_task(struct rpc_task *task)
{
-#ifdef RPC_DEBUG
- BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
-#endif
dprintk("RPC: %5u release task\n", task->tk_pid);
if (!list_empty(&task->tk_task)) {
@@ -923,9 +922,6 @@ static void rpc_release_task(struct rpc_task *task)
}
BUG_ON (RPC_IS_QUEUED(task));
-#ifdef RPC_DEBUG
- task->tk_magic = 0;
-#endif
/* Wake up anyone who is waiting for task completion */
rpc_mark_complete_task(task);
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
index 5785d2037f45..ea1046f3f9a3 100644
--- a/net/sunrpc/stats.c
+++ b/net/sunrpc/stats.c
@@ -144,7 +144,7 @@ void rpc_count_iostats(struct rpc_task *task)
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_iostats *stats;
struct rpc_iostats *op_metrics;
- long rtt, execute, queue;
+ ktime_t delta;
if (!task->tk_client || !task->tk_client->cl_metrics || !req)
return;
@@ -156,23 +156,16 @@ void rpc_count_iostats(struct rpc_task *task)
op_metrics->om_ntrans += req->rq_ntrans;
op_metrics->om_timeouts += task->tk_timeouts;
- op_metrics->om_bytes_sent += task->tk_bytes_sent;
+ op_metrics->om_bytes_sent += req->rq_xmit_bytes_sent;
op_metrics->om_bytes_recv += req->rq_reply_bytes_recvd;
- queue = (long)req->rq_xtime - task->tk_start;
- if (queue < 0)
- queue = -queue;
- op_metrics->om_queue += queue;
+ delta = ktime_sub(req->rq_xtime, task->tk_start);
+ op_metrics->om_queue = ktime_add(op_metrics->om_queue, delta);
- rtt = task->tk_rtt;
- if (rtt < 0)
- rtt = -rtt;
- op_metrics->om_rtt += rtt;
+ op_metrics->om_rtt = ktime_add(op_metrics->om_rtt, req->rq_rtt);
- execute = (long)jiffies - task->tk_start;
- if (execute < 0)
- execute = -execute;
- op_metrics->om_execute += execute;
+ delta = ktime_sub(ktime_get(), task->tk_start);
+ op_metrics->om_execute = ktime_add(op_metrics->om_execute, delta);
}
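
The jiffies-difference-plus-absolute-value dance is replaced by monotonic ktime arithmetic throughout; the general pattern (sketch, not patch code):

ktime_t start, total = ktime_set(0, 0);

start = ktime_get();
/* ... operation being timed ... */
total = ktime_add(total, ktime_sub(ktime_get(), start));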
static void _print_name(struct seq_file *seq, unsigned int op,
@@ -186,8 +179,6 @@ static void _print_name(struct seq_file *seq, unsigned int op,
seq_printf(seq, "\t%12u: ", op);
}
-#define MILLISECS_PER_JIFFY (1000 / HZ)
-
void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt)
{
struct rpc_iostats *stats = clnt->cl_metrics;
@@ -214,9 +205,9 @@ void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt)
metrics->om_timeouts,
metrics->om_bytes_sent,
metrics->om_bytes_recv,
- metrics->om_queue * MILLISECS_PER_JIFFY,
- metrics->om_rtt * MILLISECS_PER_JIFFY,
- metrics->om_execute * MILLISECS_PER_JIFFY);
+ ktime_to_ms(metrics->om_queue),
+ ktime_to_ms(metrics->om_rtt),
+ ktime_to_ms(metrics->om_execute));
}
}
EXPORT_SYMBOL_GPL(rpc_print_iostats);
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 061b2e0f9118..cbc084939dd8 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -744,8 +744,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
if (rqstp->rq_deferred) {
svc_xprt_received(xprt);
len = svc_deferred_recv(rqstp);
- } else
+ } else {
len = xprt->xpt_ops->xpo_recvfrom(rqstp);
+ svc_xprt_received(xprt);
+ }
dprintk("svc: got len=%d\n", len);
}
@@ -893,12 +895,12 @@ void svc_delete_xprt(struct svc_xprt *xprt)
*/
if (test_bit(XPT_TEMP, &xprt->xpt_flags))
serv->sv_tmpcnt--;
+ spin_unlock_bh(&serv->sv_lock);
while ((dr = svc_deferred_dequeue(xprt)) != NULL)
kfree(dr);
svc_xprt_put(xprt);
- spin_unlock_bh(&serv->sv_lock);
}
void svc_close_xprt(struct svc_xprt *xprt)
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index a29f259204e6..7e534dd09077 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -150,7 +150,6 @@ static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh)
}
break;
}
- return;
}
/*
@@ -419,8 +418,8 @@ static void svc_udp_data_ready(struct sock *sk, int count)
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
svc_xprt_enqueue(&svsk->sk_xprt);
}
- if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible(sk->sk_sleep);
+ if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
+ wake_up_interruptible(sk_sleep(sk));
}
/*
@@ -436,10 +435,10 @@ static void svc_write_space(struct sock *sk)
svc_xprt_enqueue(&svsk->sk_xprt);
}
- if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) {
+ if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk))) {
dprintk("RPC svc_write_space: someone sleeping on %p\n",
svsk);
- wake_up_interruptible(sk->sk_sleep);
+ wake_up_interruptible(sk_sleep(sk));
}
}
@@ -547,7 +546,6 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
dprintk("svc: recvfrom returned error %d\n", -err);
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
}
- svc_xprt_received(&svsk->sk_xprt);
return -EAGAIN;
}
len = svc_addr_len(svc_addr(rqstp));
@@ -562,11 +560,6 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
svsk->sk_sk->sk_stamp = skb->tstamp;
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */
- /*
- * Maybe more packets - kick another thread ASAP.
- */
- svc_xprt_received(&svsk->sk_xprt);
-
len = skb->len - sizeof(struct udphdr);
rqstp->rq_arg.len = len;
@@ -757,8 +750,8 @@ static void svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
printk("svc: socket %p: no user data\n", sk);
}
- if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible_all(sk->sk_sleep);
+ if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
+ wake_up_interruptible_all(sk_sleep(sk));
}
/*
@@ -777,8 +770,8 @@ static void svc_tcp_state_change(struct sock *sk)
set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
svc_xprt_enqueue(&svsk->sk_xprt);
}
- if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible_all(sk->sk_sleep);
+ if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
+ wake_up_interruptible_all(sk_sleep(sk));
}
static void svc_tcp_data_ready(struct sock *sk, int count)
@@ -791,8 +784,8 @@ static void svc_tcp_data_ready(struct sock *sk, int count)
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
svc_xprt_enqueue(&svsk->sk_xprt);
}
- if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible(sk->sk_sleep);
+ if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
+ wake_up_interruptible(sk_sleep(sk));
}
/*
@@ -917,7 +910,6 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
if (len < want) {
dprintk("svc: short recvfrom while reading record "
"length (%d of %d)\n", len, want);
- svc_xprt_received(&svsk->sk_xprt);
goto err_again; /* record header not complete */
}
@@ -953,7 +945,6 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
if (len < svsk->sk_reclen) {
dprintk("svc: incomplete TCP record (%d of %d)\n",
len, svsk->sk_reclen);
- svc_xprt_received(&svsk->sk_xprt);
goto err_again; /* record not complete */
}
len = svsk->sk_reclen;
@@ -961,14 +952,11 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
return len;
error:
- if (len == -EAGAIN) {
+ if (len == -EAGAIN)
dprintk("RPC: TCP recv_record got EAGAIN\n");
- svc_xprt_received(&svsk->sk_xprt);
- }
return len;
err_delete:
set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
- svc_xprt_received(&svsk->sk_xprt);
err_again:
return -EAGAIN;
}
@@ -1110,7 +1098,6 @@ out:
svsk->sk_tcplen = 0;
svc_xprt_copy_addrs(rqstp, &svsk->sk_xprt);
- svc_xprt_received(&svsk->sk_xprt);
if (serv->sv_stats)
serv->sv_stats->nettcpcnt++;
@@ -1119,7 +1106,6 @@ out:
err_again:
if (len == -EAGAIN) {
dprintk("RPC: TCP recvfrom got EAGAIN\n");
- svc_xprt_received(&svsk->sk_xprt);
return len;
}
error:
@@ -1494,8 +1480,8 @@ static void svc_sock_detach(struct svc_xprt *xprt)
sk->sk_data_ready = svsk->sk_odata;
sk->sk_write_space = svsk->sk_owspace;
- if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible(sk->sk_sleep);
+ if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
+ wake_up_interruptible(sk_sleep(sk));
}
/*
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 2763fde88499..a1f82a87d34d 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -762,6 +762,7 @@ int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, un
__write_bytes_to_xdr_buf(&subbuf, obj, len);
return 0;
}
+EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);
int
xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 42f09ade0044..3fc325399ee4 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -43,6 +43,7 @@
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
+#include <linux/ktime.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
@@ -62,7 +63,6 @@
* Local functions
*/
static void xprt_request_init(struct rpc_task *, struct rpc_xprt *);
-static inline void do_xprt_reserve(struct rpc_task *);
static void xprt_connect_status(struct rpc_task *task);
static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
@@ -711,12 +711,16 @@ void xprt_connect(struct rpc_task *task)
if (task->tk_rqstp)
task->tk_rqstp->rq_bytes_sent = 0;
- task->tk_timeout = xprt->connect_timeout;
+ task->tk_timeout = task->tk_rqstp->rq_timeout;
rpc_sleep_on(&xprt->pending, task, xprt_connect_status);
+
+ if (test_bit(XPRT_CLOSING, &xprt->state))
+ return;
+ if (xprt_test_and_set_connecting(xprt))
+ return;
xprt->stat.connect_start = jiffies;
xprt->ops->connect(task);
}
- return;
}
static void xprt_connect_status(struct rpc_task *task)
@@ -771,25 +775,19 @@ struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);
-/**
- * xprt_update_rtt - update an RPC client's RTT state after receiving a reply
- * @task: RPC request that recently completed
- *
- */
-void xprt_update_rtt(struct rpc_task *task)
+static void xprt_update_rtt(struct rpc_task *task)
{
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_rtt *rtt = task->tk_client->cl_rtt;
unsigned timer = task->tk_msg.rpc_proc->p_timer;
+ long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));
if (timer) {
if (req->rq_ntrans == 1)
- rpc_update_rtt(rtt, timer,
- (long)jiffies - req->rq_xtime);
+ rpc_update_rtt(rtt, timer, m);
rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
}
}
-EXPORT_SYMBOL_GPL(xprt_update_rtt);
/**
* xprt_complete_rqst - called when reply processing is complete
@@ -807,7 +805,9 @@ void xprt_complete_rqst(struct rpc_task *task, int copied)
task->tk_pid, ntohl(req->rq_xid), copied);
xprt->stat.recvs++;
- task->tk_rtt = (long)jiffies - req->rq_xtime;
+ req->rq_rtt = ktime_sub(ktime_get(), req->rq_xtime);
+ if (xprt->ops->timer != NULL)
+ xprt_update_rtt(task);
list_del_init(&req->rq_list);
req->rq_private_buf.len = copied;
@@ -906,7 +906,7 @@ void xprt_transmit(struct rpc_task *task)
return;
req->rq_connect_cookie = xprt->connect_cookie;
- req->rq_xtime = jiffies;
+ req->rq_xtime = ktime_get();
status = xprt->ops->send_request(task);
if (status != 0) {
task->tk_status = status;
@@ -935,7 +935,7 @@ void xprt_transmit(struct rpc_task *task)
spin_unlock_bh(&xprt->transport_lock);
}
-static inline void do_xprt_reserve(struct rpc_task *task)
+static void xprt_alloc_slot(struct rpc_task *task)
{
struct rpc_xprt *xprt = task->tk_xprt;
@@ -955,6 +955,16 @@ static inline void do_xprt_reserve(struct rpc_task *task)
rpc_sleep_on(&xprt->backlog, task, NULL);
}
+static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
+{
+ memset(req, 0, sizeof(*req)); /* mark unused */
+
+ spin_lock(&xprt->reserve_lock);
+ list_add(&req->rq_list, &xprt->free);
+ rpc_wake_up_next(&xprt->backlog);
+ spin_unlock(&xprt->reserve_lock);
+}
+
/**
* xprt_reserve - allocate an RPC request slot
* @task: RPC task requesting a slot allocation
@@ -968,13 +978,13 @@ void xprt_reserve(struct rpc_task *task)
task->tk_status = -EIO;
spin_lock(&xprt->reserve_lock);
- do_xprt_reserve(task);
+ xprt_alloc_slot(task);
spin_unlock(&xprt->reserve_lock);
}
static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
{
- return xprt->xid++;
+ return (__force __be32)xprt->xid++;
}
static inline void xprt_init_xid(struct rpc_xprt *xprt)
@@ -1006,14 +1016,10 @@ void xprt_release(struct rpc_task *task)
{
struct rpc_xprt *xprt;
struct rpc_rqst *req;
- int is_bc_request;
if (!(req = task->tk_rqstp))
return;
- /* Preallocated backchannel request? */
- is_bc_request = bc_prealloc(req);
-
xprt = req->rq_xprt;
rpc_count_iostats(task);
spin_lock_bh(&xprt->transport_lock);
@@ -1027,21 +1033,16 @@ void xprt_release(struct rpc_task *task)
mod_timer(&xprt->timer,
xprt->last_used + xprt->idle_timeout);
spin_unlock_bh(&xprt->transport_lock);
- if (!bc_prealloc(req))
+ if (req->rq_buffer)
xprt->ops->buf_free(req->rq_buffer);
task->tk_rqstp = NULL;
if (req->rq_release_snd_buf)
req->rq_release_snd_buf(req);
dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
- if (likely(!is_bc_request)) {
- memset(req, 0, sizeof(*req)); /* mark unused */
-
- spin_lock(&xprt->reserve_lock);
- list_add(&req->rq_list, &xprt->free);
- rpc_wake_up_next(&xprt->backlog);
- spin_unlock(&xprt->reserve_lock);
- } else
+ if (likely(!bc_prealloc(req)))
+ xprt_free_slot(xprt, req);
+ else
xprt_free_bc_request(req);
}
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index f92e37eb413c..0194de814933 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -566,7 +566,6 @@ static int rdma_read_complete(struct svc_rqst *rqstp,
ret, rqstp->rq_arg.len, rqstp->rq_arg.head[0].iov_base,
rqstp->rq_arg.head[0].iov_len);
- svc_xprt_received(rqstp->rq_xprt);
return ret;
}
@@ -665,7 +664,6 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
rqstp->rq_arg.head[0].iov_len);
rqstp->rq_prot = IPPROTO_MAX;
svc_xprt_copy_addrs(rqstp, xprt);
- svc_xprt_received(xprt);
return ret;
close_out:
@@ -678,6 +676,5 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
*/
set_bit(XPT_CLOSE, &xprt->xpt_flags);
defer:
- svc_xprt_received(xprt);
return 0;
}
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 187257b1d880..a85e866a77f7 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -305,7 +305,6 @@ xprt_setup_rdma(struct xprt_create *args)
/* 60 second timeout, no retries */
xprt->timeout = &xprt_rdma_default_timeout;
xprt->bind_timeout = (60U * HZ);
- xprt->connect_timeout = (60U * HZ);
xprt->reestablish_timeout = (5U * HZ);
xprt->idle_timeout = (5U * 60 * HZ);
@@ -449,21 +448,19 @@ xprt_rdma_connect(struct rpc_task *task)
struct rpc_xprt *xprt = (struct rpc_xprt *)task->tk_xprt;
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
- if (!xprt_test_and_set_connecting(xprt)) {
- if (r_xprt->rx_ep.rep_connected != 0) {
- /* Reconnect */
- schedule_delayed_work(&r_xprt->rdma_connect,
- xprt->reestablish_timeout);
- xprt->reestablish_timeout <<= 1;
- if (xprt->reestablish_timeout > (30 * HZ))
- xprt->reestablish_timeout = (30 * HZ);
- else if (xprt->reestablish_timeout < (5 * HZ))
- xprt->reestablish_timeout = (5 * HZ);
- } else {
- schedule_delayed_work(&r_xprt->rdma_connect, 0);
- if (!RPC_IS_ASYNC(task))
- flush_scheduled_work();
- }
+ if (r_xprt->rx_ep.rep_connected != 0) {
+ /* Reconnect */
+ schedule_delayed_work(&r_xprt->rdma_connect,
+ xprt->reestablish_timeout);
+ xprt->reestablish_timeout <<= 1;
+ if (xprt->reestablish_timeout > (30 * HZ))
+ xprt->reestablish_timeout = (30 * HZ);
+ else if (xprt->reestablish_timeout < (5 * HZ))
+ xprt->reestablish_timeout = (5 * HZ);
+ } else {
+ schedule_delayed_work(&r_xprt->rdma_connect, 0);
+ if (!RPC_IS_ASYNC(task))
+ flush_scheduled_work();
}
}
@@ -677,7 +674,7 @@ xprt_rdma_send_request(struct rpc_task *task)
if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
goto drop_connection;
- task->tk_bytes_sent += rqst->rq_snd_buf.len;
+ rqst->rq_xmit_bytes_sent += rqst->rq_snd_buf.len;
rqst->rq_bytes_sent = 0;
return 0;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 9847c30b5001..b7cd8cccbe72 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -138,20 +138,6 @@ static ctl_table sunrpc_table[] = {
#endif
/*
- * Time out for an RPC UDP socket connect. UDP socket connects are
- * synchronous, but we set a timeout anyway in case of resource
- * exhaustion on the local host.
- */
-#define XS_UDP_CONN_TO (5U * HZ)
-
-/*
- * Wait duration for an RPC TCP connection to be established. Solaris
- * NFS over TCP uses 60 seconds, for example, which is in line with how
- * long a server takes to reboot.
- */
-#define XS_TCP_CONN_TO (60U * HZ)
-
-/*
* Wait duration for a reply from the RPC portmapper.
*/
#define XS_BIND_TO (60U * HZ)
@@ -542,7 +528,7 @@ static int xs_udp_send_request(struct rpc_task *task)
xdr->len - req->rq_bytes_sent, status);
if (status >= 0) {
- task->tk_bytes_sent += status;
+ req->rq_xmit_bytes_sent += status;
if (status >= req->rq_slen)
return 0;
/* Still some bytes left; set up for a retry later. */
@@ -638,7 +624,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
/* If we've sent the entire packet, immediately
* reset the count of bytes sent. */
req->rq_bytes_sent += status;
- task->tk_bytes_sent += status;
+ req->rq_xmit_bytes_sent += status;
if (likely(req->rq_bytes_sent >= req->rq_slen)) {
req->rq_bytes_sent = 0;
return 0;
@@ -858,7 +844,6 @@ static void xs_udp_data_ready(struct sock *sk, int len)
dst_confirm(skb_dst(skb));
xprt_adjust_cwnd(task, copied);
- xprt_update_rtt(task);
xprt_complete_rqst(task, copied);
out_unlock:
@@ -1050,8 +1035,6 @@ static inline void xs_tcp_read_common(struct rpc_xprt *xprt,
if (transport->tcp_flags & TCP_RCV_LAST_FRAG)
transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
}
-
- return;
}
/*
@@ -2016,9 +1999,6 @@ static void xs_connect(struct rpc_task *task)
struct rpc_xprt *xprt = task->tk_xprt;
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
- if (xprt_test_and_set_connecting(xprt))
- return;
-
if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) {
dprintk("RPC: xs_connect delayed xprt %p for %lu "
"seconds\n",
@@ -2038,16 +2018,6 @@ static void xs_connect(struct rpc_task *task)
}
}
-static void xs_tcp_connect(struct rpc_task *task)
-{
- struct rpc_xprt *xprt = task->tk_xprt;
-
- /* Exit if we need to wait for socket shutdown to complete */
- if (test_bit(XPRT_CLOSING, &xprt->state))
- return;
- xs_connect(task);
-}
-
/**
 * xs_udp_print_stats - display UDP socket-specific stats
* @xprt: rpc_xprt struct containing statistics
@@ -2210,7 +2180,6 @@ static int bc_send_request(struct rpc_task *task)
static void bc_close(struct rpc_xprt *xprt)
{
- return;
}
/*
@@ -2220,7 +2189,6 @@ static void bc_close(struct rpc_xprt *xprt)
static void bc_destroy(struct rpc_xprt *xprt)
{
- return;
}
static struct rpc_xprt_ops xs_udp_ops = {
@@ -2246,7 +2214,7 @@ static struct rpc_xprt_ops xs_tcp_ops = {
.release_xprt = xs_tcp_release_xprt,
.rpcbind = rpcb_getport_async,
.set_port = xs_set_port,
- .connect = xs_tcp_connect,
+ .connect = xs_connect,
.buf_alloc = rpc_malloc,
.buf_free = rpc_free,
.send_request = xs_tcp_send_request,
@@ -2337,7 +2305,6 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
xprt->bind_timeout = XS_BIND_TO;
- xprt->connect_timeout = XS_UDP_CONN_TO;
xprt->reestablish_timeout = XS_UDP_REEST_TO;
xprt->idle_timeout = XS_IDLE_DISC_TO;
@@ -2412,7 +2379,6 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
xprt->bind_timeout = XS_BIND_TO;
- xprt->connect_timeout = XS_TCP_CONN_TO;
xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
xprt->idle_timeout = XS_IDLE_DISC_TO;
@@ -2472,9 +2438,6 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
struct sock_xprt *transport;
struct svc_sock *bc_sock;
- if (!args->bc_xprt)
- ERR_PTR(-EINVAL);
-
xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
if (IS_ERR(xprt))
return xprt;
@@ -2488,7 +2451,6 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
/* backchannel */
xprt_set_bound(xprt);
xprt->bind_timeout = 0;
- xprt->connect_timeout = 0;
xprt->reestablish_timeout = 0;
xprt->idle_timeout = 0;
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index 53196009160a..ca84212cfbfe 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -82,7 +82,6 @@ static int __net_init sysctl_net_init(struct net *net)
static void __net_exit sysctl_net_exit(struct net *net)
{
WARN_ON(!list_empty(&net->sysctls.list));
- return;
}
static struct pernet_operations sysctl_pernet_ops = {
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
index e5207a11edf6..c048543ffbeb 100644
--- a/net/tipc/addr.c
+++ b/net/tipc/addr.c
@@ -92,3 +92,35 @@ int tipc_addr_node_valid(u32 addr)
return (tipc_addr_domain_valid(addr) && tipc_node(addr));
}
+int tipc_in_scope(u32 domain, u32 addr)
+{
+ if (!domain || (domain == addr))
+ return 1;
+ if (domain == (addr & 0xfffff000u)) /* domain <Z.C.0> */
+ return 1;
+ if (domain == (addr & 0xff000000u)) /* domain <Z.0.0> */
+ return 1;
+ return 0;
+}
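
TIPC packs <Z.C.N> as zone << 24 | cluster << 12 | node, which is exactly what the two masks test. A standalone worked example (illustrative values):

#include <assert.h>
#include <stdint.h>

static uint32_t addr(uint32_t z, uint32_t c, uint32_t n)
{
	return (z << 24) | (c << 12) | n;	/* <Z.C.N> */
}

int main(void)
{
	uint32_t node = addr(1, 2, 3);			/* <1.2.3> */

	assert((node & 0xfffff000u) == addr(1, 2, 0));	/* in domain <1.2.0> */
	assert((node & 0xff000000u) == addr(1, 0, 0));	/* in domain <1.0.0> */
	return 0;
}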
+
+/**
+ * tipc_addr_scope - convert message lookup domain to a 2-bit scope value
+ */
+
+int tipc_addr_scope(u32 domain)
+{
+ if (likely(!domain))
+ return TIPC_ZONE_SCOPE;
+ if (tipc_node(domain))
+ return TIPC_NODE_SCOPE;
+ if (tipc_cluster(domain))
+ return TIPC_CLUSTER_SCOPE;
+ return TIPC_ZONE_SCOPE;
+}
+
+char *tipc_addr_string_fill(char *string, u32 addr)
+{
+ snprintf(string, 16, "<%u.%u.%u>",
+ tipc_zone(addr), tipc_cluster(addr), tipc_node(addr));
+ return string;
+}
diff --git a/net/tipc/addr.h b/net/tipc/addr.h
index 3ba67e6ce03e..c1cc5724d8cc 100644
--- a/net/tipc/addr.h
+++ b/net/tipc/addr.h
@@ -67,32 +67,6 @@ static inline int may_route(u32 addr)
return(addr ^ tipc_own_addr) >> 11;
}
-static inline int in_scope(u32 domain, u32 addr)
-{
- if (!domain || (domain == addr))
- return 1;
- if (domain == (addr & 0xfffff000u)) /* domain <Z.C.0> */
- return 1;
- if (domain == (addr & 0xff000000u)) /* domain <Z.0.0> */
- return 1;
- return 0;
-}
-
-/**
- * addr_scope - convert message lookup domain to equivalent 2-bit scope value
- */
-
-static inline int addr_scope(u32 domain)
-{
- if (likely(!domain))
- return TIPC_ZONE_SCOPE;
- if (tipc_node(domain))
- return TIPC_NODE_SCOPE;
- if (tipc_cluster(domain))
- return TIPC_CLUSTER_SCOPE;
- return TIPC_ZONE_SCOPE;
-}
-
/**
* addr_domain - convert 2-bit scope value to equivalent message lookup domain
*
@@ -110,14 +84,9 @@ static inline int addr_domain(int sc)
return tipc_addr(tipc_zone(tipc_own_addr), 0, 0);
}
-static inline char *addr_string_fill(char *string, u32 addr)
-{
- snprintf(string, 16, "<%u.%u.%u>",
- tipc_zone(addr), tipc_cluster(addr), tipc_node(addr));
- return string;
-}
-
int tipc_addr_domain_valid(u32);
int tipc_addr_node_valid(u32 addr);
-
+int tipc_in_scope(u32 domain, u32 addr);
+int tipc_addr_scope(u32 domain);
+char *tipc_addr_string_fill(char *string, u32 addr);
#endif
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index a3bfd4064912..a008c6689305 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -119,7 +119,7 @@ static struct bclink *bclink = NULL;
static struct link *bcl = NULL;
static DEFINE_SPINLOCK(bc_lock);
-const char tipc_bclink_name[] = "multicast-link";
+const char tipc_bclink_name[] = "broadcast-link";
static u32 buf_seqno(struct sk_buff *buf)
@@ -275,7 +275,7 @@ static void bclink_send_nack(struct tipc_node *n_ptr)
buf = buf_acquire(INT_H_SIZE);
if (buf) {
msg = buf_msg(buf);
- msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
+ tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
INT_H_SIZE, n_ptr->addr);
msg_set_mc_netid(msg, tipc_net_id);
msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in));
@@ -558,10 +558,7 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
struct tipc_bearer *unused1,
struct tipc_media_addr *unused2)
{
- static int send_count = 0;
-
int bp_index;
- int swap_time;
/* Prepare buffer for broadcasting (if first time trying to send it) */
@@ -575,11 +572,6 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
msg_set_mc_netid(msg, tipc_net_id);
}
- /* Determine if bearer pairs should be swapped following this attempt */
-
- if ((swap_time = (++send_count >= 10)))
- send_count = 0;
-
/* Send buffer over bearers until all targets reached */
bcbearer->remains = tipc_cltr_bcast_nodes;
@@ -595,21 +587,22 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
if (bcbearer->remains_new.count == bcbearer->remains.count)
continue; /* bearer pair doesn't add anything */
- if (!p->publ.blocked &&
- !p->media->send_msg(buf, &p->publ, &p->media->bcast_addr)) {
- if (swap_time && s && !s->publ.blocked)
- goto swap;
- else
- goto update;
+ if (p->publ.blocked ||
+ p->media->send_msg(buf, &p->publ, &p->media->bcast_addr)) {
+ /* unable to send on primary bearer */
+ if (!s || s->publ.blocked ||
+ s->media->send_msg(buf, &s->publ,
+ &s->media->bcast_addr)) {
+ /* unable to send on either bearer */
+ continue;
+ }
+ }
+
+ if (s) {
+ bcbearer->bpairs[bp_index].primary = s;
+ bcbearer->bpairs[bp_index].secondary = p;
}
- if (!s || s->publ.blocked ||
- s->media->send_msg(buf, &s->publ, &s->media->bcast_addr))
- continue; /* unable to send using bearer pair */
-swap:
- bcbearer->bpairs[bp_index].primary = s;
- bcbearer->bpairs[bp_index].secondary = p;
-update:
if (bcbearer->remains_new.count == 0)
return 0;
@@ -829,3 +822,113 @@ void tipc_bclink_stop(void)
spin_unlock_bh(&bc_lock);
}
+
+/**
+ * tipc_nmap_add - add a node to a node map
+ */
+
+void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
+{
+ int n = tipc_node(node);
+ int w = n / WSIZE;
+ u32 mask = (1 << (n % WSIZE));
+
+ if ((nm_ptr->map[w] & mask) == 0) {
+ nm_ptr->count++;
+ nm_ptr->map[w] |= mask;
+ }
+}
+
+/**
+ * tipc_nmap_remove - remove a node from a node map
+ */
+
+void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
+{
+ int n = tipc_node(node);
+ int w = n / WSIZE;
+ u32 mask = (1 << (n % WSIZE));
+
+ if ((nm_ptr->map[w] & mask) != 0) {
+ nm_ptr->map[w] &= ~mask;
+ nm_ptr->count--;
+ }
+}
+
+/**
+ * tipc_nmap_diff - find differences between node maps
+ * @nm_a: input node map A
+ * @nm_b: input node map B
+ * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
+ */
+
+void tipc_nmap_diff(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b,
+ struct tipc_node_map *nm_diff)
+{
+ int stop = ARRAY_SIZE(nm_a->map);
+ int w;
+ int b;
+ u32 map;
+
+ memset(nm_diff, 0, sizeof(*nm_diff));
+ for (w = 0; w < stop; w++) {
+ map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
+ nm_diff->map[w] = map;
+ if (map != 0) {
+ for (b = 0; b < WSIZE; b++) {
+ if (map & (1 << b))
+ nm_diff->count++;
+ }
+ }
+ }
+}
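
The per-word expression nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]) is just A & ~B; with hweight32() the bit-counting loop collapses as well. An equivalent form (sketch; not what the patch uses):

map = nm_a->map[w] & ~nm_b->map[w];	/* A \ B */
nm_diff->map[w] = map;
nm_diff->count += hweight32(map);	/* <linux/bitops.h> */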
+
+/**
+ * tipc_port_list_add - add a port to a port list, ensuring no duplicates
+ */
+
+void tipc_port_list_add(struct port_list *pl_ptr, u32 port)
+{
+ struct port_list *item = pl_ptr;
+ int i;
+ int item_sz = PLSIZE;
+ int cnt = pl_ptr->count;
+
+ for (; ; cnt -= item_sz, item = item->next) {
+ if (cnt < PLSIZE)
+ item_sz = cnt;
+ for (i = 0; i < item_sz; i++)
+ if (item->ports[i] == port)
+ return;
+ if (i < PLSIZE) {
+ item->ports[i] = port;
+ pl_ptr->count++;
+ return;
+ }
+ if (!item->next) {
+ item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
+ if (!item->next) {
+ warn("Incomplete multicast delivery, no memory\n");
+ return;
+ }
+ item->next->next = NULL;
+ }
+ }
+}
+
+/**
+ * tipc_port_list_free - free dynamically created entries in port_list chain
+ *
+ * Note: First item is on stack, so it doesn't need to be released
+ */
+
+void tipc_port_list_free(struct port_list *pl_ptr)
+{
+ struct port_list *item;
+ struct port_list *next;
+
+ for (item = pl_ptr->next; item; item = next) {
+ next = item->next;
+ kfree(item);
+ }
+}
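
Usage sketch for the pair above (port_ref is an illustrative value): the first port_list item typically lives on the caller's stack, so only the kmalloc'd chain items need freeing:

struct port_list dports = { 0 };	/* first item on the stack */

tipc_port_list_add(&dports, port_ref);
/* ... deliver the multicast message to every collected port ... */
tipc_port_list_free(&dports);		/* frees only the chained items */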
+
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 4c1771e95c99..e8c2b81658c7 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -72,41 +72,11 @@ struct tipc_node;
extern const char tipc_bclink_name[];
+void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
+void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
/**
- * nmap_add - add a node to a node map
- */
-
-static inline void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
-{
- int n = tipc_node(node);
- int w = n / WSIZE;
- u32 mask = (1 << (n % WSIZE));
-
- if ((nm_ptr->map[w] & mask) == 0) {
- nm_ptr->count++;
- nm_ptr->map[w] |= mask;
- }
-}
-
-/**
- * nmap_remove - remove a node from a node map
- */
-
-static inline void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
-{
- int n = tipc_node(node);
- int w = n / WSIZE;
- u32 mask = (1 << (n % WSIZE));
-
- if ((nm_ptr->map[w] & mask) != 0) {
- nm_ptr->map[w] &= ~mask;
- nm_ptr->count--;
- }
-}
-
-/**
- * nmap_equal - test for equality of node maps
+ * tipc_nmap_equal - test for equality of node maps
*/
static inline int tipc_nmap_equal(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b)
@@ -114,84 +84,11 @@ static inline int tipc_nmap_equal(struct tipc_node_map *nm_a, struct tipc_node_m
return !memcmp(nm_a, nm_b, sizeof(*nm_a));
}
-/**
- * nmap_diff - find differences between node maps
- * @nm_a: input node map A
- * @nm_b: input node map B
- * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
- */
-
-static inline void tipc_nmap_diff(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b,
- struct tipc_node_map *nm_diff)
-{
- int stop = ARRAY_SIZE(nm_a->map);
- int w;
- int b;
- u32 map;
-
- memset(nm_diff, 0, sizeof(*nm_diff));
- for (w = 0; w < stop; w++) {
- map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
- nm_diff->map[w] = map;
- if (map != 0) {
- for (b = 0 ; b < WSIZE; b++) {
- if (map & (1 << b))
- nm_diff->count++;
- }
- }
- }
-}
-
-/**
- * port_list_add - add a port to a port list, ensuring no duplicates
- */
-
-static inline void tipc_port_list_add(struct port_list *pl_ptr, u32 port)
-{
- struct port_list *item = pl_ptr;
- int i;
- int item_sz = PLSIZE;
- int cnt = pl_ptr->count;
-
- for (; ; cnt -= item_sz, item = item->next) {
- if (cnt < PLSIZE)
- item_sz = cnt;
- for (i = 0; i < item_sz; i++)
- if (item->ports[i] == port)
- return;
- if (i < PLSIZE) {
- item->ports[i] = port;
- pl_ptr->count++;
- return;
- }
- if (!item->next) {
- item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
- if (!item->next) {
- warn("Incomplete multicast delivery, no memory\n");
- return;
- }
- item->next->next = NULL;
- }
- }
-}
-
-/**
- * port_list_free - free dynamically created entries in port_list chain
- *
- * Note: First item is on stack, so it doesn't need to be released
- */
-
-static inline void tipc_port_list_free(struct port_list *pl_ptr)
-{
- struct port_list *item;
- struct port_list *next;
-
- for (item = pl_ptr->next; item; item = next) {
- next = item->next;
- kfree(item);
- }
-}
+void tipc_nmap_diff(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b,
+ struct tipc_node_map *nm_diff);
+void tipc_port_list_add(struct port_list *pl_ptr, u32 port);
+void tipc_port_list_free(struct port_list *pl_ptr);
int tipc_bclink_init(void);
void tipc_bclink_stop(void);
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 78091375ca12..52ae17b2583e 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -467,6 +467,18 @@ int tipc_bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr)
return res;
}
+/**
+ * tipc_bearer_congested - determines if bearer is currently congested
+ */
+
+int tipc_bearer_congested(struct bearer *b_ptr, struct link *l_ptr)
+{
+ if (unlikely(b_ptr->publ.blocked))
+ return 1;
+ if (likely(list_empty(&b_ptr->cong_links)))
+ return 0;
+ return !tipc_bearer_resolve_congestion(b_ptr, l_ptr);
+}
/**
* tipc_enable_bearer - enable bearer with the given name
@@ -493,7 +505,7 @@ int tipc_enable_bearer(const char *name, u32 bcast_scope, u32 priority)
return -EINVAL;
}
if (!tipc_addr_domain_valid(bcast_scope) ||
- !in_scope(bcast_scope, tipc_own_addr)) {
+ !tipc_in_scope(bcast_scope, tipc_own_addr)) {
warn("Bearer <%s> rejected, illegal broadcast scope\n", name);
return -EINVAL;
}
@@ -571,7 +583,7 @@ restart:
spin_lock_init(&b_ptr->publ.lock);
write_unlock_bh(&tipc_net_lock);
info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
- name, addr_string_fill(addr_string, bcast_scope), priority);
+ name, tipc_addr_string_fill(addr_string, bcast_scope), priority);
return 0;
failed:
write_unlock_bh(&tipc_net_lock);
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 000228e93f9e..a850b389663e 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -125,6 +125,7 @@ void tipc_bearer_remove_dest(struct bearer *b_ptr, u32 dest);
void tipc_bearer_schedule(struct bearer *b_ptr, struct link *l_ptr);
struct bearer *tipc_bearer_find_interface(const char *if_name);
int tipc_bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr);
+int tipc_bearer_congested(struct bearer *b_ptr, struct link *l_ptr);
int tipc_bearer_init(void);
void tipc_bearer_stop(void);
void tipc_bearer_lock_push(struct bearer *b_ptr);
@@ -154,17 +155,4 @@ static inline int tipc_bearer_send(struct bearer *b_ptr, struct sk_buff *buf,
return !b_ptr->media->send_msg(buf, &b_ptr->publ, dest);
}
-/**
- * tipc_bearer_congested - determines if bearer is currently congested
- */
-
-static inline int tipc_bearer_congested(struct bearer *b_ptr, struct link *l_ptr)
-{
- if (unlikely(b_ptr->publ.blocked))
- return 1;
- if (likely(list_empty(&b_ptr->cong_links)))
- return 0;
- return !tipc_bearer_resolve_congestion(b_ptr, l_ptr);
-}
-
-#endif
+#endif /* _TIPC_BEARER_H */
diff --git a/net/tipc/cluster.c b/net/tipc/cluster.c
index a7eac00cd363..e68f705381bc 100644
--- a/net/tipc/cluster.c
+++ b/net/tipc/cluster.c
@@ -238,7 +238,7 @@ static struct sk_buff *tipc_cltr_prepare_routing_msg(u32 data_size, u32 dest)
if (buf) {
msg = buf_msg(buf);
memset((char *)msg, 0, size);
- msg_init(msg, ROUTE_DISTRIBUTOR, 0, INT_H_SIZE, dest);
+ tipc_msg_init(msg, ROUTE_DISTRIBUTOR, 0, INT_H_SIZE, dest);
}
return buf;
}
diff --git a/net/tipc/config.c b/net/tipc/config.c
index ca3544d030c7..961d1b097146 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -56,9 +56,6 @@ struct subscr_data {
struct manager {
u32 user_ref;
u32 port_ref;
- u32 subscr_ref;
- u32 link_subscriptions;
- struct list_head link_subscribers;
};
static struct manager mng = { 0};
@@ -70,12 +67,6 @@ static int req_tlv_space; /* request message TLV area size */
static int rep_headroom; /* reply message headroom to use */
-void tipc_cfg_link_event(u32 addr, char *name, int up)
-{
- /* TIPC DOESN'T HANDLE LINK EVENT SUBSCRIPTIONS AT THE MOMENT */
-}
-
-
struct sk_buff *tipc_cfg_reply_alloc(int payload_size)
{
struct sk_buff *buf;
@@ -130,12 +121,24 @@ struct sk_buff *tipc_cfg_reply_string_type(u16 tlv_type, char *string)
}
-
-
#if 0
/* Now obsolete code for handling commands not yet implemented in the new way */
+/*
+ * Some of this code assumed that the manager structure contains two added
+ * fields:
+ * u32 link_subscriptions;
+ * struct list_head link_subscribers;
+ * which are currently not present. These fields may need to be re-introduced
+ * if and when support for link subscriptions is added.
+ */
+
+void tipc_cfg_link_event(u32 addr, char *name, int up)
+{
+ /* TIPC DOESN'T HANDLE LINK EVENT SUBSCRIPTIONS AT THE MOMENT */
+}
+
int tipc_cfg_cmd(const struct tipc_cmd_msg * msg,
char *data,
u32 sz,
@@ -243,13 +246,48 @@ static void cfg_cmd_event(struct tipc_cmd_msg *msg,
default:
rv = tipc_cfg_cmd(msg, data, sz, (u32 *)&msg_sect[1].iov_len, orig);
}
- exit:
+exit:
rmsg.result_len = htonl(msg_sect[1].iov_len);
rmsg.retval = htonl(rv);
tipc_cfg_respond(msg_sect, 2u, orig);
}
#endif
+#define MAX_STATS_INFO 2000
+
+static struct sk_buff *tipc_show_stats(void)
+{
+ struct sk_buff *buf;
+ struct tlv_desc *rep_tlv;
+ struct print_buf pb;
+ int str_len;
+ u32 value;
+
+ if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+ return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+ value = ntohl(*(u32 *)TLV_DATA(req_tlv_area));
+ if (value != 0)
+ return tipc_cfg_reply_error_string("unsupported argument");
+
+ buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_STATS_INFO));
+ if (buf == NULL)
+ return NULL;
+
+ rep_tlv = (struct tlv_desc *)buf->data;
+ tipc_printbuf_init(&pb, (char *)TLV_DATA(rep_tlv), MAX_STATS_INFO);
+
+ tipc_printf(&pb, "TIPC version " TIPC_MOD_VER "\n");
+
+ /* Use additional tipc_printf()'s to return more info ... */
+
+ str_len = tipc_printbuf_validate(&pb);
+ skb_put(buf, TLV_SPACE(str_len));
+ TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
+
+ return buf;
+}
+
static struct sk_buff *cfg_enable_bearer(void)
{
struct tipc_bearer_config *args;
@@ -533,6 +571,9 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
case TIPC_CMD_DUMP_LOG:
rep_tlv_buf = tipc_log_dump();
break;
+ case TIPC_CMD_SHOW_STATS:
+ rep_tlv_buf = tipc_show_stats();
+ break;
case TIPC_CMD_SET_LINK_TOL:
case TIPC_CMD_SET_LINK_PRI:
case TIPC_CMD_SET_LINK_WINDOW:
@@ -667,9 +708,6 @@ int tipc_cfg_init(void)
struct tipc_name_seq seq;
int res;
- memset(&mng, 0, sizeof(mng));
- INIT_LIST_HEAD(&mng.link_subscribers);
-
res = tipc_attach(&mng.user_ref, NULL, NULL);
if (res)
goto failed;
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 52c571fedbe0..696468117985 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -49,8 +49,6 @@
#include "config.h"
-#define TIPC_MOD_VER "1.6.4"
-
#ifndef CONFIG_TIPC_ZONES
#define CONFIG_TIPC_ZONES 3
#endif
@@ -104,6 +102,30 @@ int tipc_get_mode(void)
}
/**
+ * buf_acquire - creates a TIPC message buffer
+ * @size: message size (including TIPC header)
+ *
+ * Returns a new buffer with data pointers set to the specified size.
+ *
+ * NOTE: Headroom is reserved to allow prepending of a data link header.
+ * There may also be unrequested tailroom present at the buffer's end.
+ */
+
+struct sk_buff *buf_acquire(u32 size)
+{
+ struct sk_buff *skb;
+ unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
+
+ skb = alloc_skb_fclone(buf_size, GFP_ATOMIC);
+ if (skb) {
+ skb_reserve(skb, BUF_HEADROOM);
+ skb_put(skb, size);
+ skb->next = NULL;
+ }
+ return skb;
+}
+
+/**
* tipc_core_stop_net - shut down TIPC networking sub-systems
*/
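For reference, a minimal sketch of the lifecycle buf_acquire() supports, using only calls visible in this patch (buf_acquire(), buf_msg(), tipc_msg_init(), buf_discard()); illustrative only, not part of the patch:

	/* Build a header-only protocol message for 'dest'; the caller
	 * either sends it or releases it with buf_discard(). */
	static struct sk_buff *example_empty_msg(u32 dest)
	{
		struct sk_buff *buf;
		struct tipc_msg *msg;

		buf = buf_acquire(INT_H_SIZE);
		if (!buf)
			return NULL;	/* GFP_ATOMIC allocation can fail */
		msg = buf_msg(buf);
		tipc_msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, dest);
		return buf;
	}
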
diff --git a/net/tipc/core.h b/net/tipc/core.h
index c58a1d16563a..188799017abd 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -59,6 +59,9 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
+
+#define TIPC_MOD_VER "2.0.0"
+
/*
* TIPC sanity test macros
*/
@@ -325,29 +328,7 @@ static inline struct tipc_msg *buf_msg(struct sk_buff *skb)
return (struct tipc_msg *)skb->data;
}
-/**
- * buf_acquire - creates a TIPC message buffer
- * @size: message size (including TIPC header)
- *
- * Returns a new buffer with data pointers set to the specified size.
- *
- * NOTE: Headroom is reserved to allow prepending of a data link header.
- * There may also be unrequested tailroom present at the buffer's end.
- */
-
-static inline struct sk_buff *buf_acquire(u32 size)
-{
- struct sk_buff *skb;
- unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
-
- skb = alloc_skb_fclone(buf_size, GFP_ATOMIC);
- if (skb) {
- skb_reserve(skb, BUF_HEADROOM);
- skb_put(skb, size);
- skb->next = NULL;
- }
- return skb;
-}
+extern struct sk_buff *buf_acquire(u32 size);
/**
* buf_discard - frees a TIPC message buffer
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 74b7d1e28aec..fc1fcf5e6b53 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -120,7 +120,7 @@ static struct sk_buff *tipc_disc_init_msg(u32 type,
if (buf) {
msg = buf_msg(buf);
- msg_init(msg, LINK_CONFIG, type, DSC_H_SIZE, dest_domain);
+ tipc_msg_init(msg, LINK_CONFIG, type, DSC_H_SIZE, dest_domain);
msg_set_non_seq(msg, 1);
msg_set_req_links(msg, req_links);
msg_set_dest_domain(msg, dest_domain);
@@ -144,7 +144,7 @@ static void disc_dupl_alert(struct bearer *b_ptr, u32 node_addr,
char media_addr_str[64];
struct print_buf pb;
- addr_string_fill(node_addr_str, node_addr);
+ tipc_addr_string_fill(node_addr_str, node_addr);
tipc_printbuf_init(&pb, media_addr_str, sizeof(media_addr_str));
tipc_media_addr_printf(&pb, media_addr);
tipc_printbuf_validate(&pb);
@@ -183,7 +183,7 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr)
disc_dupl_alert(b_ptr, tipc_own_addr, &media_addr);
return;
}
- if (!in_scope(dest, tipc_own_addr))
+ if (!tipc_in_scope(dest, tipc_own_addr))
return;
if (is_slave(tipc_own_addr) && is_slave(orig))
return;
@@ -224,7 +224,7 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr)
memcpy(addr, &media_addr, sizeof(*addr));
tipc_link_reset(link);
}
- link_fully_up = (link->state == WORKING_WORKING);
+ link_fully_up = link_working_working(link);
spin_unlock_bh(&n_ptr->lock);
if ((type == DSC_RESP_MSG) || link_fully_up)
return;
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 1a7e4665af80..a3616b99529b 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -202,41 +202,6 @@ static unsigned int align(unsigned int i)
return (i + 3) & ~3u;
}
-static int link_working_working(struct link *l_ptr)
-{
- return (l_ptr->state == WORKING_WORKING);
-}
-
-static int link_working_unknown(struct link *l_ptr)
-{
- return (l_ptr->state == WORKING_UNKNOWN);
-}
-
-static int link_reset_unknown(struct link *l_ptr)
-{
- return (l_ptr->state == RESET_UNKNOWN);
-}
-
-static int link_reset_reset(struct link *l_ptr)
-{
- return (l_ptr->state == RESET_RESET);
-}
-
-static int link_blocked(struct link *l_ptr)
-{
- return (l_ptr->exp_msg_count || l_ptr->blocked);
-}
-
-static int link_congested(struct link *l_ptr)
-{
- return (l_ptr->out_queue_size >= l_ptr->queue_limit[0]);
-}
-
-static u32 link_max_pkt(struct link *l_ptr)
-{
- return l_ptr->max_pkt;
-}
-
static void link_init_max_pkt(struct link *l_ptr)
{
u32 max_pkt;
@@ -468,7 +433,7 @@ struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
msg = l_ptr->pmsg;
- msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr);
+ tipc_msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr);
msg_set_size(msg, sizeof(l_ptr->proto_msg));
msg_set_session(msg, (tipc_random & 0xffff));
msg_set_bearer_id(msg, b_ptr->identity);
@@ -561,9 +526,8 @@ static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz)
goto exit;
if (!list_empty(&p_ptr->wait_list))
goto exit;
- p_ptr->congested_link = l_ptr;
p_ptr->publ.congested = 1;
- p_ptr->waiting_pkts = 1 + ((sz - 1) / link_max_pkt(l_ptr));
+ p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt);
list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
l_ptr->stats.link_congs++;
exit:
@@ -592,7 +556,6 @@ void tipc_link_wakeup_ports(struct link *l_ptr, int all)
if (win <= 0)
break;
list_del_init(&p_ptr->wait_list);
- p_ptr->congested_link = NULL;
spin_lock_bh(p_ptr->publ.lock);
p_ptr->publ.congested = 0;
p_ptr->wakeup(&p_ptr->publ);
@@ -877,7 +840,7 @@ static void link_state_event(struct link *l_ptr, unsigned event)
case TIMEOUT_EVT:
dbg_link("TIM ");
if (l_ptr->next_in_no != l_ptr->checkpoint) {
- dbg_link("-> WW \n");
+ dbg_link("-> WW\n");
l_ptr->state = WORKING_WORKING;
l_ptr->fsm_msg_cnt = 0;
l_ptr->checkpoint = l_ptr->next_in_no;
@@ -934,7 +897,7 @@ static void link_state_event(struct link *l_ptr, unsigned event)
link_set_timer(l_ptr, cont_intv);
break;
case RESET_MSG:
- dbg_link("RES \n");
+ dbg_link("RES\n");
dbg_link(" -> RR\n");
l_ptr->state = RESET_RESET;
l_ptr->fsm_msg_cnt = 0;
@@ -947,7 +910,7 @@ static void link_state_event(struct link *l_ptr, unsigned event)
l_ptr->started = 1;
/* fall through */
case TIMEOUT_EVT:
- dbg_link("TIM \n");
+ dbg_link("TIM\n");
tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv);
@@ -1017,7 +980,7 @@ static int link_bundle_buf(struct link *l_ptr,
return 0;
if (skb_tailroom(bundler) < (pad + size))
return 0;
- if (link_max_pkt(l_ptr) < (to_pos + size))
+ if (l_ptr->max_pkt < (to_pos + size))
return 0;
skb_put(bundler, pad + size);
@@ -1062,9 +1025,9 @@ int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
u32 size = msg_size(msg);
u32 dsz = msg_data_sz(msg);
u32 queue_size = l_ptr->out_queue_size;
- u32 imp = msg_tot_importance(msg);
+ u32 imp = tipc_msg_tot_importance(msg);
u32 queue_limit = l_ptr->queue_limit[imp];
- u32 max_packet = link_max_pkt(l_ptr);
+ u32 max_packet = l_ptr->max_pkt;
msg_set_prevnode(msg, tipc_own_addr); /* If routed message */
@@ -1127,7 +1090,7 @@ int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
struct tipc_msg bundler_hdr;
if (bundler) {
- msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
+ tipc_msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
INT_H_SIZE, l_ptr->addr);
skb_copy_to_linear_data(bundler, &bundler_hdr,
INT_H_SIZE);
@@ -1195,7 +1158,7 @@ static int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf,
int res = msg_data_sz(msg);
if (likely(!link_congested(l_ptr))) {
- if (likely(msg_size(msg) <= link_max_pkt(l_ptr))) {
+ if (likely(msg_size(msg) <= l_ptr->max_pkt)) {
if (likely(list_empty(&l_ptr->b_ptr->cong_links))) {
link_add_to_outqueue(l_ptr, buf, msg);
if (likely(tipc_bearer_send(l_ptr->b_ptr, buf,
@@ -1212,7 +1175,7 @@ static int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf,
}
}
else
- *used_max_pkt = link_max_pkt(l_ptr);
+ *used_max_pkt = l_ptr->max_pkt;
}
return tipc_link_send_buf(l_ptr, buf); /* All other cases */
}
@@ -1280,7 +1243,7 @@ again:
* (Must not hold any locks while building message.)
*/
- res = msg_build(hdr, msg_sect, num_sect, sender->publ.max_pkt,
+ res = tipc_msg_build(hdr, msg_sect, num_sect, sender->publ.max_pkt,
!sender->user_port, &buf);
read_lock_bh(&tipc_net_lock);
@@ -1319,7 +1282,7 @@ exit:
* then re-try fast path or fragment the message
*/
- sender->publ.max_pkt = link_max_pkt(l_ptr);
+ sender->publ.max_pkt = l_ptr->max_pkt;
tipc_node_unlock(node);
read_unlock_bh(&tipc_net_lock);
@@ -1391,7 +1354,7 @@ again:
/* Prepare reusable fragment header: */
msg_dbg(hdr, ">FRAGMENTING>");
- msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
+ tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
INT_H_SIZE, msg_destnode(hdr));
msg_set_link_selector(&fragm_hdr, sender->publ.ref);
msg_set_size(&fragm_hdr, max_pkt);
@@ -1482,8 +1445,8 @@ error:
tipc_node_unlock(node);
goto reject;
}
- if (link_max_pkt(l_ptr) < max_pkt) {
- sender->publ.max_pkt = link_max_pkt(l_ptr);
+ if (l_ptr->max_pkt < max_pkt) {
+ sender->publ.max_pkt = l_ptr->max_pkt;
tipc_node_unlock(node);
for (; buf_chain; buf_chain = buf) {
buf = buf_chain->next;
@@ -1553,7 +1516,7 @@ u32 tipc_link_push_packet(struct link *l_ptr)
/* Continue retransmission now, if there is anything: */
- if (r_q_size && buf && !skb_cloned(buf)) {
+ if (r_q_size && buf) {
msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
@@ -1650,7 +1613,7 @@ static void link_reset_all(unsigned long addr)
tipc_node_lock(n_ptr);
warn("Resetting all links to %s\n",
- addr_string_fill(addr_string, n_ptr->addr));
+ tipc_addr_string_fill(addr_string, n_ptr->addr));
for (i = 0; i < MAX_BEARERS; i++) {
if (n_ptr->links[i]) {
@@ -1692,7 +1655,7 @@ static void link_retransmit_failure(struct link *l_ptr, struct sk_buff *buf)
n_ptr = l_ptr->owner->next;
tipc_node_lock(n_ptr);
- addr_string_fill(addr_string, n_ptr->addr);
+ tipc_addr_string_fill(addr_string, n_ptr->addr);
tipc_printf(TIPC_OUTPUT, "Multicast link info for %s\n", addr_string);
tipc_printf(TIPC_OUTPUT, "Supported: %d, ", n_ptr->bclink.supported);
tipc_printf(TIPC_OUTPUT, "Acked: %u\n", n_ptr->bclink.acked);
@@ -1722,15 +1685,16 @@ void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
dbg("Retransmitting %u in link %x\n", retransmits, l_ptr);
if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
- if (!skb_cloned(buf)) {
+ if (l_ptr->retransm_queue_size == 0) {
msg_dbg(msg, ">NO_RETR->BCONG>");
dbg_print_link(l_ptr, " ");
l_ptr->retransm_queue_head = msg_seqno(msg);
l_ptr->retransm_queue_size = retransmits;
- return;
} else {
- /* Don't retransmit if driver already has the buffer */
+ err("Unexpected retransmit on link %s (qsize=%d)\n",
+ l_ptr->name, l_ptr->retransm_queue_size);
}
+ return;
} else {
/* Detect repeated retransmit failures on uncongested bearer */
@@ -1745,7 +1709,7 @@ void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
}
}
- while (retransmits && (buf != l_ptr->next_out) && buf && !skb_cloned(buf)) {
+ while (retransmits && (buf != l_ptr->next_out) && buf) {
msg = buf_msg(buf);
msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
@@ -2434,7 +2398,7 @@ void tipc_link_changeover(struct link *l_ptr)
return;
}
- msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
+ tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
msg_set_msgcnt(&tunnel_hdr, msgcount);
@@ -2489,7 +2453,7 @@ void tipc_link_send_duplicate(struct link *l_ptr, struct link *tunnel)
struct sk_buff *iter;
struct tipc_msg tunnel_hdr;
- msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
+ tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
@@ -2680,7 +2644,7 @@ int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
u32 dsz = msg_data_sz(inmsg);
unchar *crs = buf->data;
u32 rest = insize;
- u32 pack_sz = link_max_pkt(l_ptr);
+ u32 pack_sz = l_ptr->max_pkt;
u32 fragm_sz = pack_sz - INT_H_SIZE;
u32 fragm_no = 1;
u32 destaddr;
@@ -2695,7 +2659,7 @@ int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
/* Prepare reusable fragment header: */
- msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
+ tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
INT_H_SIZE, destaddr);
msg_set_link_selector(&fragm_hdr, msg_link_selector(inmsg));
msg_set_long_msgno(&fragm_hdr, mod(l_ptr->long_msg_seq_no++));
@@ -3126,7 +3090,7 @@ static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
tipc_printf(&pb, "Link <%s>\n"
" %s MTU:%u Priority:%u Tolerance:%u ms"
" Window:%u packets\n",
- l_ptr->name, status, link_max_pkt(l_ptr),
+ l_ptr->name, status, l_ptr->max_pkt,
l_ptr->priority, l_ptr->tolerance, l_ptr->queue_limit[0]);
tipc_printf(&pb, " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
l_ptr->next_in_no - l_ptr->stats.recv_info,
@@ -3271,7 +3235,7 @@ u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
tipc_node_lock(n_ptr);
l_ptr = n_ptr->active_links[selector & 1];
if (l_ptr)
- res = link_max_pkt(l_ptr);
+ res = l_ptr->max_pkt;
tipc_node_unlock(n_ptr);
}
read_unlock_bh(&tipc_net_lock);
@@ -3294,7 +3258,7 @@ static void link_dump_rec_queue(struct link *l_ptr)
info("buffer %x invalid\n", crs);
return;
}
- msg_dbg(buf_msg(crs), "In rec queue: \n");
+ msg_dbg(buf_msg(crs), "In rec queue:\n");
crs = crs->next;
}
}
@@ -3329,9 +3293,7 @@ static void link_print(struct link *l_ptr, struct print_buf *buf,
if (l_ptr->next_out)
tipc_printf(buf, "%u..",
msg_seqno(buf_msg(l_ptr->next_out)));
- tipc_printf(buf, "%u]",
- msg_seqno(buf_msg
- (l_ptr->last_out)), l_ptr->out_queue_size);
+ tipc_printf(buf, "%u]", msg_seqno(buf_msg(l_ptr->last_out)));
if ((mod(msg_seqno(buf_msg(l_ptr->last_out)) -
msg_seqno(buf_msg(l_ptr->first_out)))
!= (l_ptr->out_queue_size - 1)) ||
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 6a51e38ad25c..2e5385c47d30 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -292,4 +292,39 @@ static inline u32 lesser(u32 left, u32 right)
return less_eq(left, right) ? left : right;
}
+
+/*
+ * Link status checking routines
+ */
+
+static inline int link_working_working(struct link *l_ptr)
+{
+ return (l_ptr->state == WORKING_WORKING);
+}
+
+static inline int link_working_unknown(struct link *l_ptr)
+{
+ return (l_ptr->state == WORKING_UNKNOWN);
+}
+
+static inline int link_reset_unknown(struct link *l_ptr)
+{
+ return (l_ptr->state == RESET_UNKNOWN);
+}
+
+static inline int link_reset_reset(struct link *l_ptr)
+{
+ return (l_ptr->state == RESET_RESET);
+}
+
+static inline int link_blocked(struct link *l_ptr)
+{
+ return (l_ptr->exp_msg_count || l_ptr->blocked);
+}
+
+static inline int link_congested(struct link *l_ptr)
+{
+ return (l_ptr->out_queue_size >= l_ptr->queue_limit[0]);
+}
+
#endif
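These inlines replace the file-local copies removed from link.c above, so link.c and discover.c (see the new link_working_working() call there) now share one definition. A sketch of how a send path might combine them, assuming the caller holds the appropriate node lock as elsewhere in link.c; illustrative only:

	static int example_link_ready(struct link *l_ptr)
	{
		if (link_blocked(l_ptr))	/* changeover in progress */
			return 0;
		if (link_congested(l_ptr))	/* outbound queue at limit */
			return 0;
		return link_working_working(l_ptr);
	}
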
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 73dcd00d674e..381063817b41 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -40,6 +40,100 @@
#include "msg.h"
#include "bearer.h"
+u32 tipc_msg_tot_importance(struct tipc_msg *m)
+{
+ if (likely(msg_isdata(m))) {
+ if (likely(msg_orignode(m) == tipc_own_addr))
+ return msg_importance(m);
+ return msg_importance(m) + 4;
+ }
+ if ((msg_user(m) == MSG_FRAGMENTER) &&
+ (msg_type(m) == FIRST_FRAGMENT))
+ return msg_importance(msg_get_wrapped(m));
+ return msg_importance(m);
+}
+
+
+void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type,
+ u32 hsize, u32 destnode)
+{
+ memset(m, 0, hsize);
+ msg_set_version(m);
+ msg_set_user(m, user);
+ msg_set_hdr_sz(m, hsize);
+ msg_set_size(m, hsize);
+ msg_set_prevnode(m, tipc_own_addr);
+ msg_set_type(m, type);
+ if (!msg_short(m)) {
+ msg_set_orignode(m, tipc_own_addr);
+ msg_set_destnode(m, destnode);
+ }
+}
+
+/**
+ * tipc_msg_calc_data_size - determine total data size for message
+ */
+
+int tipc_msg_calc_data_size(struct iovec const *msg_sect, u32 num_sect)
+{
+ int dsz = 0;
+ int i;
+
+ for (i = 0; i < num_sect; i++)
+ dsz += msg_sect[i].iov_len;
+ return dsz;
+}
+
+/**
+ * tipc_msg_build - create message using specified header and data
+ *
+ * Note: Caller must not hold any locks in case copy_from_user() is interrupted!
+ *
+ * Returns message data size or errno
+ */
+
+int tipc_msg_build(struct tipc_msg *hdr,
+ struct iovec const *msg_sect, u32 num_sect,
+ int max_size, int usrmem, struct sk_buff **buf)
+{
+ int dsz, sz, hsz, pos, res, cnt;
+
+ dsz = tipc_msg_calc_data_size(msg_sect, num_sect);
+ if (unlikely(dsz > TIPC_MAX_USER_MSG_SIZE)) {
+ *buf = NULL;
+ return -EINVAL;
+ }
+
+ pos = hsz = msg_hdr_sz(hdr);
+ sz = hsz + dsz;
+ msg_set_size(hdr, sz);
+ if (unlikely(sz > max_size)) {
+ *buf = NULL;
+ return dsz;
+ }
+
+ *buf = buf_acquire(sz);
+ if (!(*buf))
+ return -ENOMEM;
+ skb_copy_to_linear_data(*buf, hdr, hsz);
+ for (res = 1, cnt = 0; res && (cnt < num_sect); cnt++) {
+ if (likely(usrmem))
+ res = !copy_from_user((*buf)->data + pos,
+ msg_sect[cnt].iov_base,
+ msg_sect[cnt].iov_len);
+ else
+ skb_copy_to_linear_data_offset(*buf, pos,
+ msg_sect[cnt].iov_base,
+ msg_sect[cnt].iov_len);
+ pos += msg_sect[cnt].iov_len;
+ }
+ if (likely(res))
+ return dsz;
+
+ buf_discard(*buf);
+ *buf = NULL;
+ return -EFAULT;
+}
#ifdef CONFIG_TIPC_DEBUG
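Moving these out of line leaves msg.h with declarations only (next file). Note the return contract: tipc_msg_build() returns the data size on success, the data size with *buf == NULL when the message exceeds max_size (letting the caller fall back to fragmentation), or a negative errno. A sketch of a single-section, kernel-memory build, with MAX_MSG_SIZE as used by the port code later in this patch; illustrative only:

	static int example_build(struct tipc_msg *hdr, void *data, size_t len,
				 struct sk_buff **bufp)
	{
		struct iovec iov = {
			.iov_base = data,
			.iov_len  = len,
		};

		/* usrmem == 0: the section is kernel memory, so no
		 * copy_from_user() and no sleeping-with-locks concern */
		return tipc_msg_build(hdr, &iov, 1, MAX_MSG_SIZE, 0, bufp);
	}
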
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 7ee6ae238147..995d2da35b01 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -708,100 +708,13 @@ static inline void msg_set_dataoctet(struct tipc_msg *m, u32 pos)
#define DSC_REQ_MSG 0
#define DSC_RESP_MSG 1
-static inline u32 msg_tot_importance(struct tipc_msg *m)
-{
- if (likely(msg_isdata(m))) {
- if (likely(msg_orignode(m) == tipc_own_addr))
- return msg_importance(m);
- return msg_importance(m) + 4;
- }
- if ((msg_user(m) == MSG_FRAGMENTER) &&
- (msg_type(m) == FIRST_FRAGMENT))
- return msg_importance(msg_get_wrapped(m));
- return msg_importance(m);
-}
-
-
-static inline void msg_init(struct tipc_msg *m, u32 user, u32 type,
- u32 hsize, u32 destnode)
-{
- memset(m, 0, hsize);
- msg_set_version(m);
- msg_set_user(m, user);
- msg_set_hdr_sz(m, hsize);
- msg_set_size(m, hsize);
- msg_set_prevnode(m, tipc_own_addr);
- msg_set_type(m, type);
- if (!msg_short(m)) {
- msg_set_orignode(m, tipc_own_addr);
- msg_set_destnode(m, destnode);
- }
-}
-
-/**
- * msg_calc_data_size - determine total data size for message
- */
-
-static inline int msg_calc_data_size(struct iovec const *msg_sect, u32 num_sect)
-{
- int dsz = 0;
- int i;
-
- for (i = 0; i < num_sect; i++)
- dsz += msg_sect[i].iov_len;
- return dsz;
-}
-
-/**
- * msg_build - create message using specified header and data
- *
- * Note: Caller must not hold any locks in case copy_from_user() is interrupted!
- *
- * Returns message data size or errno
- */
-
-static inline int msg_build(struct tipc_msg *hdr,
+u32 tipc_msg_tot_importance(struct tipc_msg *m);
+void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type,
+ u32 hsize, u32 destnode);
+int tipc_msg_calc_data_size(struct iovec const *msg_sect, u32 num_sect);
+int tipc_msg_build(struct tipc_msg *hdr,
struct iovec const *msg_sect, u32 num_sect,
- int max_size, int usrmem, struct sk_buff** buf)
-{
- int dsz, sz, hsz, pos, res, cnt;
-
- dsz = msg_calc_data_size(msg_sect, num_sect);
- if (unlikely(dsz > TIPC_MAX_USER_MSG_SIZE)) {
- *buf = NULL;
- return -EINVAL;
- }
-
- pos = hsz = msg_hdr_sz(hdr);
- sz = hsz + dsz;
- msg_set_size(hdr, sz);
- if (unlikely(sz > max_size)) {
- *buf = NULL;
- return dsz;
- }
-
- *buf = buf_acquire(sz);
- if (!(*buf))
- return -ENOMEM;
- skb_copy_to_linear_data(*buf, hdr, hsz);
- for (res = 1, cnt = 0; res && (cnt < num_sect); cnt++) {
- if (likely(usrmem))
- res = !copy_from_user((*buf)->data + pos,
- msg_sect[cnt].iov_base,
- msg_sect[cnt].iov_len);
- else
- skb_copy_to_linear_data_offset(*buf, pos,
- msg_sect[cnt].iov_base,
- msg_sect[cnt].iov_len);
- pos += msg_sect[cnt].iov_len;
- }
- if (likely(res))
- return dsz;
-
- buf_discard(*buf);
- *buf = NULL;
- return -EFAULT;
-}
+ int max_size, int usrmem, struct sk_buff **buf);
static inline void msg_set_media_addr(struct tipc_msg *m, struct tipc_media_addr *a)
{
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 10a69894e2fd..6ac3c543250b 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -103,7 +103,7 @@ static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
if (buf != NULL) {
msg = buf_msg(buf);
- msg_init(msg, NAME_DISTRIBUTOR, type, LONG_H_SIZE, dest);
+ tipc_msg_init(msg, NAME_DISTRIBUTOR, type, LONG_H_SIZE, dest);
msg_set_size(msg, LONG_H_SIZE + size);
}
return buf;
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index acab41a48d67..8ba79620db3f 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -627,7 +627,7 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
struct name_seq *seq;
u32 ref;
- if (!in_scope(*destnode, tipc_own_addr))
+ if (!tipc_in_scope(*destnode, tipc_own_addr))
return 0;
read_lock_bh(&tipc_nametbl_lock);
diff --git a/net/tipc/net.c b/net/tipc/net.c
index f25b1cdb64eb..f61b7694138b 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -116,7 +116,7 @@
*/
DEFINE_RWLOCK(tipc_net_lock);
-struct _zone *tipc_zones[256] = { NULL, };
+static struct _zone *tipc_zones[256] = { NULL, };
struct network tipc_net = { tipc_zones };
struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref)
@@ -219,7 +219,7 @@ void tipc_net_route_msg(struct sk_buff *buf)
/* Handle message for this node */
dnode = msg_short(msg) ? tipc_own_addr : msg_destnode(msg);
- if (in_scope(dnode, tipc_own_addr)) {
+ if (tipc_in_scope(dnode, tipc_own_addr)) {
if (msg_isdata(msg)) {
if (msg_mcast(msg))
tipc_port_recv_mcast(buf, NULL);
@@ -277,7 +277,7 @@ int tipc_net_start(u32 addr)
info("Started in network mode\n");
info("Own node address %s, network identity %u\n",
- addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
+ tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
return 0;
}
@@ -291,6 +291,6 @@ void tipc_net_stop(void)
tipc_bclink_stop();
net_stop();
write_unlock_bh(&tipc_net_lock);
- info("Left network mode \n");
+ info("Left network mode\n");
}
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 2c24e7d6d950..b634942caba5 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -268,7 +268,7 @@ struct tipc_node *tipc_node_attach_link(struct link *l_ptr)
if (n_ptr->link_cnt >= 2) {
err("Attempt to create third link to %s\n",
- addr_string_fill(addr_string, n_ptr->addr));
+ tipc_addr_string_fill(addr_string, n_ptr->addr));
return NULL;
}
@@ -278,9 +278,9 @@ struct tipc_node *tipc_node_attach_link(struct link *l_ptr)
n_ptr->link_cnt++;
return n_ptr;
}
- err("Attempt to establish second link on <%s> to %s \n",
+ err("Attempt to establish second link on <%s> to %s\n",
l_ptr->b_ptr->publ.name,
- addr_string_fill(addr_string, l_ptr->addr));
+ tipc_addr_string_fill(addr_string, l_ptr->addr));
}
return NULL;
}
@@ -439,7 +439,7 @@ static void node_lost_contact(struct tipc_node *n_ptr)
return;
info("Lost contact with %s\n",
- addr_string_fill(addr_string, n_ptr->addr));
+ tipc_addr_string_fill(addr_string, n_ptr->addr));
/* Abort link changeover */
for (i = 0; i < MAX_BEARERS; i++) {
@@ -602,7 +602,7 @@ u32 tipc_available_nodes(const u32 domain)
read_lock_bh(&tipc_net_lock);
for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
- if (!in_scope(domain, n_ptr->addr))
+ if (!tipc_in_scope(domain, n_ptr->addr))
continue;
if (tipc_node_is_up(n_ptr))
cnt++;
@@ -651,7 +651,7 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
/* Add TLVs for all nodes in scope */
for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
- if (!in_scope(domain, n_ptr->addr))
+ if (!tipc_in_scope(domain, n_ptr->addr))
continue;
node_info.addr = htonl(n_ptr->addr);
node_info.up = htonl(tipc_node_is_up(n_ptr));
@@ -711,7 +711,7 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
u32 i;
- if (!in_scope(domain, n_ptr->addr))
+ if (!tipc_in_scope(domain, n_ptr->addr))
continue;
tipc_node_lock(n_ptr);
for (i = 0; i < MAX_BEARERS; i++) {
diff --git a/net/tipc/port.c b/net/tipc/port.c
index e70d27ea6578..0737680e9266 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -116,7 +116,7 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, u32 domain,
msg_set_namelower(hdr, seq->lower);
msg_set_nameupper(hdr, seq->upper);
msg_set_hdr_sz(hdr, MCAST_H_SIZE);
- res = msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE,
+ res = tipc_msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE,
!oport->user_port, &buf);
if (unlikely(!buf))
return res;
@@ -241,13 +241,12 @@ struct tipc_port *tipc_createport_raw(void *usr_handle,
p_ptr->publ.max_pkt = MAX_PKT_DEFAULT;
p_ptr->publ.ref = ref;
msg = &p_ptr->publ.phdr;
- msg_init(msg, importance, TIPC_NAMED_MSG, LONG_H_SIZE, 0);
+ tipc_msg_init(msg, importance, TIPC_NAMED_MSG, LONG_H_SIZE, 0);
msg_set_origport(msg, ref);
p_ptr->last_in_seqno = 41;
p_ptr->sent = 1;
INIT_LIST_HEAD(&p_ptr->wait_list);
INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list);
- p_ptr->congested_link = NULL;
p_ptr->dispatcher = dispatcher;
p_ptr->wakeup = wakeup;
p_ptr->user_port = NULL;
@@ -396,7 +395,7 @@ static struct sk_buff *port_build_proto_msg(u32 destport, u32 destnode,
buf = buf_acquire(LONG_H_SIZE);
if (buf) {
msg = buf_msg(buf);
- msg_init(msg, usr, type, LONG_H_SIZE, destnode);
+ tipc_msg_init(msg, usr, type, LONG_H_SIZE, destnode);
msg_set_errcode(msg, err);
msg_set_destport(msg, destport);
msg_set_origport(msg, origport);
@@ -440,7 +439,7 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
return data_sz;
}
rmsg = buf_msg(rbuf);
- msg_init(rmsg, imp, msg_type(msg), hdr_sz, msg_orignode(msg));
+ tipc_msg_init(rmsg, imp, msg_type(msg), hdr_sz, msg_orignode(msg));
msg_set_errcode(rmsg, err);
msg_set_destport(rmsg, msg_origport(msg));
msg_set_origport(rmsg, msg_destport(msg));
@@ -481,7 +480,7 @@ int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
struct sk_buff *buf;
int res;
- res = msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE,
+ res = tipc_msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE,
!p_ptr->user_port, &buf);
if (!buf)
return res;
@@ -1344,7 +1343,7 @@ int tipc_port_recv_sections(struct port *sender, unsigned int num_sect,
struct sk_buff *buf;
int res;
- res = msg_build(&sender->publ.phdr, msg_sect, num_sect,
+ res = tipc_msg_build(&sender->publ.phdr, msg_sect, num_sect,
MAX_MSG_SIZE, !sender->user_port, &buf);
if (likely(buf))
tipc_port_recv_msg(buf);
@@ -1384,7 +1383,7 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect)
if (port_unreliable(p_ptr)) {
p_ptr->publ.congested = 0;
/* Just calculate msg length and return */
- return msg_calc_data_size(msg_sect, num_sect);
+ return tipc_msg_calc_data_size(msg_sect, num_sect);
}
return -ELINKCONG;
}
@@ -1453,7 +1452,7 @@ int tipc_forward2name(u32 ref,
struct port *p_ptr;
struct tipc_msg *msg;
u32 destnode = domain;
- u32 destport = 0;
+ u32 destport;
int res;
p_ptr = tipc_port_deref(ref);
@@ -1467,7 +1466,7 @@ int tipc_forward2name(u32 ref,
msg_set_hdr_sz(msg, LONG_H_SIZE);
msg_set_nametype(msg, name->type);
msg_set_nameinst(msg, name->instance);
- msg_set_lookup_scope(msg, addr_scope(domain));
+ msg_set_lookup_scope(msg, tipc_addr_scope(domain));
if (importance <= TIPC_CRITICAL_IMPORTANCE)
msg_set_importance(msg,importance);
destport = tipc_nametbl_translate(name->type, name->instance, &destnode);
@@ -1484,7 +1483,7 @@ int tipc_forward2name(u32 ref,
return res;
if (port_unreliable(p_ptr)) {
/* Just calculate msg length and return */
- return msg_calc_data_size(msg_sect, num_sect);
+ return tipc_msg_calc_data_size(msg_sect, num_sect);
}
return -ELINKCONG;
}
@@ -1525,7 +1524,7 @@ int tipc_forward_buf2name(u32 ref,
struct port *p_ptr;
struct tipc_msg *msg;
u32 destnode = domain;
- u32 destport = 0;
+ u32 destport;
int res;
p_ptr = (struct port *)tipc_ref_deref(ref);
@@ -1540,7 +1539,7 @@ int tipc_forward_buf2name(u32 ref,
msg_set_origport(msg, orig->ref);
msg_set_nametype(msg, name->type);
msg_set_nameinst(msg, name->instance);
- msg_set_lookup_scope(msg, addr_scope(domain));
+ msg_set_lookup_scope(msg, tipc_addr_scope(domain));
msg_set_hdr_sz(msg, LONG_H_SIZE);
msg_set_size(msg, LONG_H_SIZE + dsz);
destport = tipc_nametbl_translate(name->type, name->instance, &destnode);
@@ -1620,7 +1619,7 @@ int tipc_forward2port(u32 ref,
return res;
if (port_unreliable(p_ptr)) {
/* Just calculate msg length and return */
- return msg_calc_data_size(msg_sect, num_sect);
+ return tipc_msg_calc_data_size(msg_sect, num_sect);
}
return -ELINKCONG;
}
diff --git a/net/tipc/port.h b/net/tipc/port.h
index ff31ee4a1dc3..8d1652aab298 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -75,7 +75,6 @@ struct user_port {
* @wakeup: ptr to routine to call when port is no longer congested
* @user_port: ptr to user port associated with port (if any)
* @wait_list: adjacent ports in list of ports waiting on link congestion
- * @congested_link: ptr to congested link port is waiting on
* @waiting_pkts:
* @sent:
* @acked:
@@ -95,7 +94,6 @@ struct port {
void (*wakeup)(struct tipc_port *);
struct user_port *user_port;
struct list_head wait_list;
- struct link *congested_link;
u32 waiting_pkts;
u32 sent;
u32 acked;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index cfb20b80b3a1..66e889ba48fd 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -446,7 +446,7 @@ static unsigned int poll(struct file *file, struct socket *sock,
struct sock *sk = sock->sk;
u32 mask;
- poll_wait(file, sk->sk_sleep, wait);
+ poll_wait(file, sk_sleep(sk), wait);
if (!skb_queue_empty(&sk->sk_receive_queue) ||
(sock->state == SS_UNCONNECTED) ||
@@ -591,7 +591,7 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
break;
}
release_sock(sk);
- res = wait_event_interruptible(*sk->sk_sleep,
+ res = wait_event_interruptible(*sk_sleep(sk),
!tport->congested);
lock_sock(sk);
if (res)
@@ -650,7 +650,7 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
break;
}
release_sock(sk);
- res = wait_event_interruptible(*sk->sk_sleep,
+ res = wait_event_interruptible(*sk_sleep(sk),
(!tport->congested || !tport->connected));
lock_sock(sk);
if (res)
@@ -931,7 +931,7 @@ restart:
goto exit;
}
release_sock(sk);
- res = wait_event_interruptible(*sk->sk_sleep,
+ res = wait_event_interruptible(*sk_sleep(sk),
(!skb_queue_empty(&sk->sk_receive_queue) ||
(sock->state == SS_DISCONNECTING)));
lock_sock(sk);
@@ -1064,7 +1064,7 @@ restart:
goto exit;
}
release_sock(sk);
- res = wait_event_interruptible(*sk->sk_sleep,
+ res = wait_event_interruptible(*sk_sleep(sk),
(!skb_queue_empty(&sk->sk_receive_queue) ||
(sock->state == SS_DISCONNECTING)));
lock_sock(sk);
@@ -1271,8 +1271,8 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
tipc_disconnect_port(tipc_sk_port(sk));
}
- if (waitqueue_active(sk->sk_sleep))
- wake_up_interruptible(sk->sk_sleep);
+ if (waitqueue_active(sk_sleep(sk)))
+ wake_up_interruptible(sk_sleep(sk));
return TIPC_OK;
}
@@ -1343,8 +1343,8 @@ static void wakeupdispatch(struct tipc_port *tport)
{
struct sock *sk = (struct sock *)tport->usr_handle;
- if (waitqueue_active(sk->sk_sleep))
- wake_up_interruptible(sk->sk_sleep);
+ if (waitqueue_active(sk_sleep(sk)))
+ wake_up_interruptible(sk_sleep(sk));
}
/**
@@ -1426,7 +1426,7 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
release_sock(sk);
- res = wait_event_interruptible_timeout(*sk->sk_sleep,
+ res = wait_event_interruptible_timeout(*sk_sleep(sk),
(!skb_queue_empty(&sk->sk_receive_queue) ||
(sock->state != SS_CONNECTING)),
sk->sk_rcvtimeo);
@@ -1521,7 +1521,7 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
goto exit;
}
release_sock(sk);
- res = wait_event_interruptible(*sk->sk_sleep,
+ res = wait_event_interruptible(*sk_sleep(sk),
(!skb_queue_empty(&sk->sk_receive_queue)));
lock_sock(sk);
if (res)
@@ -1632,8 +1632,8 @@ restart:
/* Discard any unreceived messages; wake up sleeping tasks */
discard_rx_queue(sk);
- if (waitqueue_active(sk->sk_sleep))
- wake_up_interruptible(sk->sk_sleep);
+ if (waitqueue_active(sk_sleep(sk)))
+ wake_up_interruptible(sk_sleep(sk));
res = 0;
break;
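Every wait/wake site in socket.c now goes through the sk_sleep() accessor instead of dereferencing sk->sk_sleep directly, decoupling callers from where the wait queue is stored (the af_unix changes below move it into an RCU-managed struct socket_wq). The recurring pattern, distilled from the hunks above; illustrative only:

	/* sleeper side: drop the socket lock before sleeping */
	release_sock(sk);
	res = wait_event_interruptible(*sk_sleep(sk), !tport->congested);
	lock_sock(sk);

	/* waker side */
	if (waitqueue_active(sk_sleep(sk)))
		wake_up_interruptible(sk_sleep(sk));
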
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index ff123e56114a..ab6eab4c45e2 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -274,7 +274,7 @@ static void subscr_cancel(struct tipc_subscr *s,
{
struct subscription *sub;
struct subscription *sub_temp;
- __u32 type, lower, upper;
+ __u32 type, lower, upper, timeout, filter;
int found = 0;
/* Find first matching subscription, exit if not found */
@@ -282,12 +282,18 @@ static void subscr_cancel(struct tipc_subscr *s,
type = ntohl(s->seq.type);
lower = ntohl(s->seq.lower);
upper = ntohl(s->seq.upper);
+ timeout = ntohl(s->timeout);
+ filter = ntohl(s->filter) & ~TIPC_SUB_CANCEL;
list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
subscription_list) {
if ((type == sub->seq.type) &&
(lower == sub->seq.lower) &&
- (upper == sub->seq.upper)) {
+ (upper == sub->seq.upper) &&
+ (timeout == sub->timeout) &&
+ (filter == sub->filter) &&
+ !memcmp(s->usr_handle, sub->evt.s.usr_handle,
+ sizeof(s->usr_handle))) {
found = 1;
break;
}
@@ -304,7 +310,7 @@ static void subscr_cancel(struct tipc_subscr *s,
k_term_timer(&sub->timer);
spin_lock_bh(subscriber->lock);
}
- dbg("Cancel: removing sub %u,%u,%u from subscriber %x list\n",
+ dbg("Cancel: removing sub %u,%u,%u from subscriber %p list\n",
sub->seq.type, sub->seq.lower, sub->seq.upper, subscriber);
subscr_del(sub);
}
@@ -352,8 +358,7 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s,
sub->seq.upper = ntohl(s->seq.upper);
sub->timeout = ntohl(s->timeout);
sub->filter = ntohl(s->filter);
- if ((!(sub->filter & TIPC_SUB_PORTS) ==
- !(sub->filter & TIPC_SUB_SERVICE)) ||
+ if ((sub->filter && (sub->filter != TIPC_SUB_PORTS)) ||
(sub->seq.lower > sub->seq.upper)) {
warn("Subscription rejected, illegal request\n");
kfree(sub);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 3d9122e78f41..fef2cc5e9d2b 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -313,13 +313,16 @@ static inline int unix_writable(struct sock *sk)
static void unix_write_space(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
+ struct socket_wq *wq;
+
+ rcu_read_lock();
if (unix_writable(sk)) {
- if (sk_has_sleeper(sk))
- wake_up_interruptible_sync(sk->sk_sleep);
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_sync(&wq->wait);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
}
- read_unlock(&sk->sk_callback_lock);
+ rcu_read_unlock();
}
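unix_write_space() now reaches the wait queue through an RCU-protected struct socket_wq instead of holding sk_callback_lock, and wq_has_sleeper() supplies the memory barrier that orders the writability test against the sleeper's queue check. For context, a sketch of the pairing sleeper side (cf. unix_poll() later in this file); illustrative only:

	/* sock_poll_wait() issues the barrier that matches the one
	 * inside wq_has_sleeper() in the waker above */
	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;
	if (unix_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
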
/* When dgram socket disconnects (or changes its peer), we clear its receive
@@ -406,9 +409,7 @@ static int unix_release_sock(struct sock *sk, int embrion)
skpair->sk_err = ECONNRESET;
unix_state_unlock(skpair);
skpair->sk_state_change(skpair);
- read_lock(&skpair->sk_callback_lock);
sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
- read_unlock(&skpair->sk_callback_lock);
}
sock_put(skpair); /* It may now die */
unix_peer(sk) = NULL;
@@ -1142,7 +1143,7 @@ restart:
newsk->sk_peercred.pid = task_tgid_vnr(current);
current_euid_egid(&newsk->sk_peercred.uid, &newsk->sk_peercred.gid);
newu = unix_sk(newsk);
- newsk->sk_sleep = &newu->peer_wait;
+ newsk->sk_wq = &newu->peer_wq;
otheru = unix_sk(other);
/* copy address information from listening to new sock*/
@@ -1736,7 +1737,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo)
unix_state_lock(sk);
for (;;) {
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
if (!skb_queue_empty(&sk->sk_receive_queue) ||
sk->sk_err ||
@@ -1752,7 +1753,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo)
clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
unix_state_unlock(sk);
return timeo;
}
@@ -1931,12 +1932,10 @@ static int unix_shutdown(struct socket *sock, int mode)
other->sk_shutdown |= peer_mode;
unix_state_unlock(other);
other->sk_state_change(other);
- read_lock(&other->sk_callback_lock);
if (peer_mode == SHUTDOWN_MASK)
sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
else if (peer_mode & RCV_SHUTDOWN)
sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
- read_unlock(&other->sk_callback_lock);
}
if (other)
sock_put(other);
@@ -1991,7 +1990,7 @@ static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table
struct sock *sk = sock->sk;
unsigned int mask;
- sock_poll_wait(file, sk->sk_sleep, wait);
+ sock_poll_wait(file, sk_sleep(sk), wait);
mask = 0;
/* exceptional events? */
@@ -2028,7 +2027,7 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
struct sock *sk = sock->sk, *other;
unsigned int mask, writable;
- sock_poll_wait(file, sk->sk_sleep, wait);
+ sock_poll_wait(file, sk_sleep(sk), wait);
mask = 0;
/* exceptional events? */
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 14c22c3768da..c8df6fda0b1f 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -153,15 +153,6 @@ void unix_notinflight(struct file *fp)
}
}
-static inline struct sk_buff *sock_queue_head(struct sock *sk)
-{
- return (struct sk_buff *)&sk->sk_receive_queue;
-}
-
-#define receive_queue_for_each_skb(sk, next, skb) \
- for (skb = sock_queue_head(sk)->next, next = skb->next; \
- skb != sock_queue_head(sk); skb = next, next = skb->next)
-
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
struct sk_buff_head *hitlist)
{
@@ -169,7 +160,7 @@ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
struct sk_buff *next;
spin_lock(&x->sk_receive_queue.lock);
- receive_queue_for_each_skb(x, next, skb) {
+ skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
/*
* Do we have file descriptors ?
*/
@@ -225,7 +216,7 @@ static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
* and perform a scan on them as well.
*/
spin_lock(&x->sk_receive_queue.lock);
- receive_queue_for_each_skb(x, next, skb) {
+ skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
u = unix_sk(skb->sk);
/*
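The ad-hoc receive_queue_for_each_skb() macro is dropped in favor of the generic skb_queue_walk_safe() from <linux/skbuff.h>, which keeps a lookahead pointer so the current skb may be unlinked mid-walk. A sketch of that idiom under the queue lock, as at the call sites above (the drop predicate is hypothetical):

	struct sk_buff *skb, *next;

	spin_lock(&sk->sk_receive_queue.lock);
	skb_queue_walk_safe(&sk->sk_receive_queue, skb, next) {
		if (example_should_drop(skb)) {	/* hypothetical predicate */
			__skb_unlink(skb, &sk->sk_receive_queue);
			kfree_skb(skb);
		}
	}
	spin_unlock(&sk->sk_receive_queue.lock);
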
diff --git a/net/wimax/op-reset.c b/net/wimax/op-reset.c
index 4dc82a54ba30..68bedf3e5443 100644
--- a/net/wimax/op-reset.c
+++ b/net/wimax/op-reset.c
@@ -110,7 +110,6 @@ int wimax_gnl_doit_reset(struct sk_buff *skb, struct genl_info *info)
{
int result, ifindex;
struct wimax_dev *wimax_dev;
- struct device *dev;
d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
result = -ENODEV;
@@ -123,7 +122,6 @@ int wimax_gnl_doit_reset(struct sk_buff *skb, struct genl_info *info)
wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
if (wimax_dev == NULL)
goto error_no_wimax_dev;
- dev = wimax_dev_to_dev(wimax_dev);
/* Execute the operation and send the result back to user space */
result = wimax_reset(wimax_dev);
dev_put(wimax_dev->net_dev);
diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c
index e978c7136c97..2609e445fe7d 100644
--- a/net/wimax/op-rfkill.c
+++ b/net/wimax/op-rfkill.c
@@ -43,7 +43,7 @@
* wimax_rfkill() Kernel calling wimax_rfkill()
* __wimax_rf_toggle_radio()
*
- * wimax_rfkill_set_radio_block() RF-Kill subsytem calling
+ * wimax_rfkill_set_radio_block() RF-Kill subsystem calling
* __wimax_rf_toggle_radio()
*
* __wimax_rf_toggle_radio()
diff --git a/net/wimax/op-state-get.c b/net/wimax/op-state-get.c
index 11ad3356eb56..aff8776e2d41 100644
--- a/net/wimax/op-state-get.c
+++ b/net/wimax/op-state-get.c
@@ -53,7 +53,6 @@ int wimax_gnl_doit_state_get(struct sk_buff *skb, struct genl_info *info)
{
int result, ifindex;
struct wimax_dev *wimax_dev;
- struct device *dev;
d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
result = -ENODEV;
@@ -66,7 +65,6 @@ int wimax_gnl_doit_state_get(struct sk_buff *skb, struct genl_info *info)
wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
if (wimax_dev == NULL)
goto error_no_wimax_dev;
- dev = wimax_dev_to_dev(wimax_dev);
/* Execute the operation and send the result back to user space */
result = wimax_state_get(wimax_dev);
dev_put(wimax_dev->net_dev);
diff --git a/net/wimax/stack.c b/net/wimax/stack.c
index 1ed65dbdab03..ee99e7dfcdba 100644
--- a/net/wimax/stack.c
+++ b/net/wimax/stack.c
@@ -315,12 +315,11 @@ void __wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state)
BUG();
}
__wimax_state_set(wimax_dev, new_state);
- if (stch_skb)
+ if (!IS_ERR(stch_skb))
wimax_gnl_re_state_change_send(wimax_dev, stch_skb, header);
out:
d_fnend(3, dev, "(wimax_dev %p new_state %u [old %u]) = void\n",
wimax_dev, new_state, old_state);
- return;
}
@@ -362,7 +361,6 @@ void wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state)
if (wimax_dev->state > __WIMAX_ST_NULL)
__wimax_state_change(wimax_dev, new_state);
mutex_unlock(&wimax_dev->mutex);
- return;
}
EXPORT_SYMBOL_GPL(wimax_state_change);
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index bf1737fc9a7e..d92d088026bf 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -10,38 +10,6 @@
#include "core.h"
struct ieee80211_channel *
-rdev_fixed_channel(struct cfg80211_registered_device *rdev,
- struct wireless_dev *for_wdev)
-{
- struct wireless_dev *wdev;
- struct ieee80211_channel *result = NULL;
-
- WARN_ON(!mutex_is_locked(&rdev->devlist_mtx));
-
- list_for_each_entry(wdev, &rdev->netdev_list, list) {
- if (wdev == for_wdev)
- continue;
-
- /*
- * Lock manually to tell lockdep about allowed
- * nesting here if for_wdev->mtx is held already.
- * This is ok as it's all under the rdev devlist
- * mutex and as such can only be done once at any
- * given time.
- */
- mutex_lock_nested(&wdev->mtx, SINGLE_DEPTH_NESTING);
- if (wdev->current_bss)
- result = wdev->current_bss->pub.channel;
- wdev_unlock(wdev);
-
- if (result)
- break;
- }
-
- return result;
-}
-
-struct ieee80211_channel *
rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
int freq, enum nl80211_channel_type channel_type)
{
@@ -75,15 +43,22 @@ rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
return chan;
}
-int rdev_set_freq(struct cfg80211_registered_device *rdev,
- struct wireless_dev *for_wdev,
- int freq, enum nl80211_channel_type channel_type)
+int cfg80211_set_freq(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev, int freq,
+ enum nl80211_channel_type channel_type)
{
struct ieee80211_channel *chan;
int result;
- if (rdev_fixed_channel(rdev, for_wdev))
- return -EBUSY;
+ if (wdev->iftype == NL80211_IFTYPE_MONITOR)
+ wdev = NULL;
+
+ if (wdev) {
+ ASSERT_WDEV_LOCK(wdev);
+
+ if (!netif_running(wdev->netdev))
+ return -ENETDOWN;
+ }
if (!rdev->ops->set_channel)
return -EOPNOTSUPP;
@@ -92,11 +67,14 @@ int rdev_set_freq(struct cfg80211_registered_device *rdev,
if (!chan)
return -EINVAL;
- result = rdev->ops->set_channel(&rdev->wiphy, chan, channel_type);
+ result = rdev->ops->set_channel(&rdev->wiphy,
+ wdev ? wdev->netdev : NULL,
+ chan, channel_type);
if (result)
return result;
- rdev->channel = chan;
+ if (wdev)
+ wdev->channel = chan;
return 0;
}
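With this change the set_channel driver op is invoked with the target net_device, or NULL when the channel applies to the wiphy as a whole (the monitor case handled above). The shape a driver implementation now takes, inferred from this call site; a sketch, not any specific driver's code:

	static int example_set_channel(struct wiphy *wiphy,
				       struct net_device *netdev,
				       struct ieee80211_channel *chan,
				       enum nl80211_channel_type channel_type)
	{
		/* netdev == NULL: configure the PHY itself */
		/* program hardware for chan->center_freq / channel_type */
		return 0;
	}
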
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 6ac70c101523..37d0e0ab4432 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -705,7 +705,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
wdev->ps = true;
else
wdev->ps = false;
- wdev->ps_timeout = 100;
+ /* allow mac80211 to determine the timeout */
+ wdev->ps_timeout = -1;
if (rdev->ops->set_power_mgmt)
if (rdev->ops->set_power_mgmt(wdev->wiphy, dev,
wdev->ps,
diff --git a/net/wireless/core.h b/net/wireless/core.h
index d52da913145a..ae930acf75e9 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -70,9 +70,6 @@ struct cfg80211_registered_device {
struct work_struct conn_work;
struct work_struct event_work;
- /* current channel */
- struct ieee80211_channel *channel;
-
/* must be last because of the way we do wiphy_priv(),
* and it should at least be aligned to NETDEV_ALIGN */
struct wiphy wiphy __attribute__((__aligned__(NETDEV_ALIGN)));
@@ -293,13 +290,15 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
const u8 *bssid,
const u8 *ssid, int ssid_len,
const u8 *ie, int ie_len,
- const u8 *key, int key_len, int key_idx);
+ const u8 *key, int key_len, int key_idx,
+ bool local_state_change);
int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
struct net_device *dev, struct ieee80211_channel *chan,
enum nl80211_auth_type auth_type, const u8 *bssid,
const u8 *ssid, int ssid_len,
const u8 *ie, int ie_len,
- const u8 *key, int key_len, int key_idx);
+ const u8 *key, int key_len, int key_idx,
+ bool local_state_change);
int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
struct net_device *dev,
struct ieee80211_channel *chan,
@@ -315,13 +314,16 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
struct cfg80211_crypto_settings *crypt);
int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
struct net_device *dev, const u8 *bssid,
- const u8 *ie, int ie_len, u16 reason);
+ const u8 *ie, int ie_len, u16 reason,
+ bool local_state_change);
int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
struct net_device *dev, const u8 *bssid,
- const u8 *ie, int ie_len, u16 reason);
+ const u8 *ie, int ie_len, u16 reason,
+ bool local_state_change);
int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
struct net_device *dev, const u8 *bssid,
- const u8 *ie, int ie_len, u16 reason);
+ const u8 *ie, int ie_len, u16 reason,
+ bool local_state_change);
void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
struct net_device *dev);
void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
@@ -383,14 +385,11 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
struct ieee80211_channel *
-rdev_fixed_channel(struct cfg80211_registered_device *rdev,
- struct wireless_dev *for_wdev);
-struct ieee80211_channel *
rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
int freq, enum nl80211_channel_type channel_type);
-int rdev_set_freq(struct cfg80211_registered_device *rdev,
- struct wireless_dev *for_wdev,
- int freq, enum nl80211_channel_type channel_type);
+int cfg80211_set_freq(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev, int freq,
+ enum nl80211_channel_type channel_type);
u16 cfg80211_calculate_bitrate(struct rate_info *rate);
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 6a5acf750174..adcabba02e20 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -81,15 +81,10 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
struct cfg80211_cached_keys *connkeys)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
- struct ieee80211_channel *chan;
int err;
ASSERT_WDEV_LOCK(wdev);
- chan = rdev_fixed_channel(rdev, wdev);
- if (chan && chan != params->channel)
- return -EBUSY;
-
if (wdev->ssid_len)
return -EALREADY;
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 22139fa46115..48ead6f0426d 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -378,7 +378,8 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
const u8 *bssid,
const u8 *ssid, int ssid_len,
const u8 *ie, int ie_len,
- const u8 *key, int key_len, int key_idx)
+ const u8 *key, int key_len, int key_idx,
+ bool local_state_change)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_auth_request req;
@@ -408,6 +409,7 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
memset(&req, 0, sizeof(req));
+ req.local_state_change = local_state_change;
req.ie = ie;
req.ie_len = ie_len;
req.auth_type = auth_type;
@@ -434,12 +436,18 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
goto out;
}
- wdev->authtry_bsses[slot] = bss;
+ if (local_state_change)
+ wdev->auth_bsses[slot] = bss;
+ else
+ wdev->authtry_bsses[slot] = bss;
cfg80211_hold_bss(bss);
err = rdev->ops->auth(&rdev->wiphy, dev, &req);
if (err) {
- wdev->authtry_bsses[slot] = NULL;
+ if (local_state_change)
+ wdev->auth_bsses[slot] = NULL;
+ else
+ wdev->authtry_bsses[slot] = NULL;
cfg80211_unhold_bss(bss);
}
@@ -454,14 +462,15 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
enum nl80211_auth_type auth_type, const u8 *bssid,
const u8 *ssid, int ssid_len,
const u8 *ie, int ie_len,
- const u8 *key, int key_len, int key_idx)
+ const u8 *key, int key_len, int key_idx,
+ bool local_state_change)
{
int err;
wdev_lock(dev->ieee80211_ptr);
err = __cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid,
ssid, ssid_len, ie, ie_len,
- key, key_len, key_idx);
+ key, key_len, key_idx, local_state_change);
wdev_unlock(dev->ieee80211_ptr);
return err;
@@ -555,7 +564,8 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
struct net_device *dev, const u8 *bssid,
- const u8 *ie, int ie_len, u16 reason)
+ const u8 *ie, int ie_len, u16 reason,
+ bool local_state_change)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_deauth_request req;
@@ -565,6 +575,7 @@ int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
memset(&req, 0, sizeof(req));
req.reason_code = reason;
+ req.local_state_change = local_state_change;
req.ie = ie;
req.ie_len = ie_len;
if (wdev->current_bss &&
@@ -591,13 +602,15 @@ int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
struct net_device *dev, const u8 *bssid,
- const u8 *ie, int ie_len, u16 reason)
+ const u8 *ie, int ie_len, u16 reason,
+ bool local_state_change)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
int err;
wdev_lock(wdev);
- err = __cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason);
+ err = __cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason,
+ local_state_change);
wdev_unlock(wdev);
return err;
@@ -605,7 +618,8 @@ int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
static int __cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
struct net_device *dev, const u8 *bssid,
- const u8 *ie, int ie_len, u16 reason)
+ const u8 *ie, int ie_len, u16 reason,
+ bool local_state_change)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_disassoc_request req;
@@ -620,6 +634,7 @@ static int __cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
memset(&req, 0, sizeof(req));
req.reason_code = reason;
+ req.local_state_change = local_state_change;
req.ie = ie;
req.ie_len = ie_len;
if (memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0)
@@ -632,13 +647,15 @@ static int __cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
struct net_device *dev, const u8 *bssid,
- const u8 *ie, int ie_len, u16 reason)
+ const u8 *ie, int ie_len, u16 reason,
+ bool local_state_change)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
int err;
wdev_lock(wdev);
- err = __cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason);
+ err = __cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason,
+ local_state_change);
wdev_unlock(wdev);
return err;
@@ -895,3 +912,16 @@ void cfg80211_action_tx_status(struct net_device *dev, u64 cookie,
nl80211_send_action_tx_status(rdev, dev, cookie, buf, len, ack, gfp);
}
EXPORT_SYMBOL(cfg80211_action_tx_status);
+
+void cfg80211_cqm_rssi_notify(struct net_device *dev,
+ enum nl80211_cqm_rssi_threshold_event rssi_event,
+ gfp_t gfp)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct wiphy *wiphy = wdev->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+
+ /* Indicate roaming trigger event to user space */
+ nl80211_send_cqm_rssi_notify(rdev, dev, rssi_event, gfp);
+}
+EXPORT_SYMBOL(cfg80211_cqm_rssi_notify);
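This is the driver-facing half of the new connection-quality-monitor support: a driver asked (via the NL80211_ATTR_CQM attributes added below) to watch RSSI reports threshold crossings with this call. A hedged example call site, assuming process context and the threshold-event enum from the matching nl80211.h change:

	/* signal fell below the configured threshold */
	cfg80211_cqm_rssi_notify(dev,
				 NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
				 GFP_KERNEL);
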
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 030cf153bea2..aaa1aad566cd 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -150,6 +150,9 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
.len = IEEE80211_MAX_DATA_LEN },
[NL80211_ATTR_FRAME_MATCH] = { .type = NLA_BINARY, },
[NL80211_ATTR_PS_STATE] = { .type = NLA_U32 },
+ [NL80211_ATTR_CQM] = { .type = NLA_NESTED, },
+ [NL80211_ATTR_LOCAL_STATE_CHANGE] = { .type = NLA_FLAG },
+ [NL80211_ATTR_AP_ISOLATE] = { .type = NLA_U8 },
};
/* policy for the attributes */
@@ -586,6 +589,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
i++;
NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS);
}
+ CMD(set_channel, SET_CHANNEL);
#undef CMD
@@ -686,10 +690,90 @@ static int parse_txq_params(struct nlattr *tb[],
return 0;
}
+static bool nl80211_can_set_dev_channel(struct wireless_dev *wdev)
+{
+ /*
+ * You can only set the channel explicitly for AP, mesh
+ * and WDS type interfaces; all others have their channel
+ * managed via their respective "establish a connection"
+ * command (connect, join, ...)
+ *
+ * Monitors are special as they are normally slaved to
+ * whatever else is going on, so they behave as though
+ * you tried setting the wiphy channel itself.
+ */
+ return !wdev ||
+ wdev->iftype == NL80211_IFTYPE_AP ||
+ wdev->iftype == NL80211_IFTYPE_WDS ||
+ wdev->iftype == NL80211_IFTYPE_MESH_POINT ||
+ wdev->iftype == NL80211_IFTYPE_MONITOR;
+}
+
+static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev,
+ struct genl_info *info)
+{
+ enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
+ u32 freq;
+ int result;
+
+ if (!info->attrs[NL80211_ATTR_WIPHY_FREQ])
+ return -EINVAL;
+
+ if (!nl80211_can_set_dev_channel(wdev))
+ return -EOPNOTSUPP;
+
+ if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
+ channel_type = nla_get_u32(info->attrs[
+ NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
+ if (channel_type != NL80211_CHAN_NO_HT &&
+ channel_type != NL80211_CHAN_HT20 &&
+ channel_type != NL80211_CHAN_HT40PLUS &&
+ channel_type != NL80211_CHAN_HT40MINUS)
+ return -EINVAL;
+ }
+
+ freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
+
+ mutex_lock(&rdev->devlist_mtx);
+ if (wdev) {
+ wdev_lock(wdev);
+ result = cfg80211_set_freq(rdev, wdev, freq, channel_type);
+ wdev_unlock(wdev);
+ } else {
+ result = cfg80211_set_freq(rdev, NULL, freq, channel_type);
+ }
+ mutex_unlock(&rdev->devlist_mtx);
+
+ return result;
+}
+
+static int nl80211_set_channel(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev;
+ struct net_device *netdev;
+ int result;
+
+ rtnl_lock();
+
+ result = get_rdev_dev_by_info_ifindex(info, &rdev, &netdev);
+ if (result)
+ goto unlock;
+
+ result = __nl80211_set_channel(rdev, netdev->ieee80211_ptr, info);
+
+ unlock:
+ rtnl_unlock();
+
+ return result;
+}
+
static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev;
- int result = 0, rem_txq_params = 0;
+ struct net_device *netdev = NULL;
+ struct wireless_dev *wdev;
+ int result, rem_txq_params = 0;
struct nlattr *nl_txq_params;
u32 changed;
u8 retry_short = 0, retry_long = 0;
@@ -698,16 +782,50 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
rtnl_lock();
+ /*
+ * Try to find the wiphy and netdev. Normally this
+ * function shouldn't need the netdev, but this is
+ * done for backward compatibility -- previously
+ * setting the channel was done per wiphy, but now
+ * it is per netdev. Previous userland like hostapd
+ * also passed a netdev to set_wiphy, so that it is
+ * possible to let that go to the right netdev!
+ */
mutex_lock(&cfg80211_mutex);
- rdev = __cfg80211_rdev_from_info(info);
- if (IS_ERR(rdev)) {
- mutex_unlock(&cfg80211_mutex);
- result = PTR_ERR(rdev);
- goto unlock;
+ if (info->attrs[NL80211_ATTR_IFINDEX]) {
+ int ifindex = nla_get_u32(info->attrs[NL80211_ATTR_IFINDEX]);
+
+ netdev = dev_get_by_index(genl_info_net(info), ifindex);
+ if (netdev && netdev->ieee80211_ptr) {
+ rdev = wiphy_to_dev(netdev->ieee80211_ptr->wiphy);
+ mutex_lock(&rdev->mtx);
+ } else
+ netdev = NULL;
}
- mutex_lock(&rdev->mtx);
+ if (!netdev) {
+ rdev = __cfg80211_rdev_from_info(info);
+ if (IS_ERR(rdev)) {
+ mutex_unlock(&cfg80211_mutex);
+ result = PTR_ERR(rdev);
+ goto unlock;
+ }
+ wdev = NULL;
+ netdev = NULL;
+ result = 0;
+
+ mutex_lock(&rdev->mtx);
+ } else if (netif_running(netdev) &&
+ nl80211_can_set_dev_channel(netdev->ieee80211_ptr))
+ wdev = netdev->ieee80211_ptr;
+ else
+ wdev = NULL;
+
+	/*
+	 * End of the workaround code: by now the rdev is
+	 * available and locked, and wdev may or may not be NULL.
+	 */
if (info->attrs[NL80211_ATTR_WIPHY_NAME])
result = cfg80211_dev_rename(
@@ -746,26 +864,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
}
if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
- enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
- u32 freq;
-
- result = -EINVAL;
-
- if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
- channel_type = nla_get_u32(info->attrs[
- NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
- if (channel_type != NL80211_CHAN_NO_HT &&
- channel_type != NL80211_CHAN_HT20 &&
- channel_type != NL80211_CHAN_HT40PLUS &&
- channel_type != NL80211_CHAN_HT40MINUS)
- goto bad_res;
- }
-
- freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
-
- mutex_lock(&rdev->devlist_mtx);
- result = rdev_set_freq(rdev, NULL, freq, channel_type);
- mutex_unlock(&rdev->devlist_mtx);
+ result = __nl80211_set_channel(rdev, wdev, info);
if (result)
goto bad_res;
}
@@ -862,6 +961,8 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
bad_res:
mutex_unlock(&rdev->mtx);
+ if (netdev)
+ dev_put(netdev);
unlock:
rtnl_unlock();
return result;
@@ -2096,7 +2197,8 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
goto out_rtnl;
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) {
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) {
err = -EINVAL;
goto out;
}
@@ -2439,6 +2541,7 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
params.use_cts_prot = -1;
params.use_short_preamble = -1;
params.use_short_slot_time = -1;
+ params.ap_isolate = -1;
if (info->attrs[NL80211_ATTR_BSS_CTS_PROT])
params.use_cts_prot =
@@ -2455,6 +2558,8 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
params.basic_rates_len =
nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]);
}
+ if (info->attrs[NL80211_ATTR_AP_ISOLATE])
+ params.ap_isolate = !!nla_get_u8(info->attrs[NL80211_ATTR_AP_ISOLATE]);
rtnl_lock();
@@ -3392,6 +3497,7 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
int err, ssid_len, ie_len = 0;
enum nl80211_auth_type auth_type;
struct key_parse key;
+ bool local_state_change;
if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
return -EINVAL;
@@ -3470,9 +3576,12 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
goto out;
}
+ local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE];
+
err = cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid,
ssid, ssid_len, ie, ie_len,
- key.p.key, key.p.key_len, key.idx);
+ key.p.key, key.p.key_len, key.idx,
+ local_state_change);
out:
cfg80211_unlock_rdev(rdev);
@@ -3551,9 +3660,8 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev;
struct net_device *dev;
- struct wireless_dev *wdev;
struct cfg80211_crypto_settings crypto;
- struct ieee80211_channel *chan, *fixedchan;
+ struct ieee80211_channel *chan;
const u8 *bssid, *ssid, *ie = NULL, *prev_bssid = NULL;
int err, ssid_len, ie_len = 0;
bool use_mfp = false;
@@ -3596,16 +3704,6 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- mutex_lock(&rdev->devlist_mtx);
- wdev = dev->ieee80211_ptr;
- fixedchan = rdev_fixed_channel(rdev, wdev);
- if (fixedchan && chan != fixedchan) {
- err = -EBUSY;
- mutex_unlock(&rdev->devlist_mtx);
- goto out;
- }
- mutex_unlock(&rdev->devlist_mtx);
-
ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
@@ -3649,6 +3747,7 @@ static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info)
const u8 *ie = NULL, *bssid;
int err, ie_len = 0;
u16 reason_code;
+ bool local_state_change;
if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
return -EINVAL;
@@ -3694,7 +3793,10 @@ static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info)
ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
}
- err = cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason_code);
+ local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE];
+
+ err = cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason_code,
+ local_state_change);
out:
cfg80211_unlock_rdev(rdev);
@@ -3711,6 +3813,7 @@ static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info)
const u8 *ie = NULL, *bssid;
int err, ie_len = 0;
u16 reason_code;
+ bool local_state_change;
if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
return -EINVAL;
@@ -3756,7 +3859,10 @@ static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info)
ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
}
- err = cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason_code);
+ local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE];
+
+ err = cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason_code,
+ local_state_change);
out:
cfg80211_unlock_rdev(rdev);
@@ -4779,6 +4885,84 @@ unlock_rtnl:
return err;
}
+static struct nla_policy
+nl80211_attr_cqm_policy[NL80211_ATTR_CQM_MAX + 1] __read_mostly = {
+ [NL80211_ATTR_CQM_RSSI_THOLD] = { .type = NLA_U32 },
+ [NL80211_ATTR_CQM_RSSI_HYST] = { .type = NLA_U32 },
+ [NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT] = { .type = NLA_U32 },
+};
+
+static int nl80211_set_cqm_rssi(struct genl_info *info,
+ s32 threshold, u32 hysteresis)
+{
+ struct cfg80211_registered_device *rdev;
+ struct wireless_dev *wdev;
+ struct net_device *dev;
+ int err;
+
+ if (threshold > 0)
+ return -EINVAL;
+
+ rtnl_lock();
+
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
+ if (err)
+ goto unlock_rdev;
+
+ wdev = dev->ieee80211_ptr;
+
+ if (!rdev->ops->set_cqm_rssi_config) {
+ err = -EOPNOTSUPP;
+ goto unlock_rdev;
+ }
+
+ if (wdev->iftype != NL80211_IFTYPE_STATION) {
+ err = -EOPNOTSUPP;
+ goto unlock_rdev;
+ }
+
+ err = rdev->ops->set_cqm_rssi_config(wdev->wiphy, dev,
+ threshold, hysteresis);
+
+unlock_rdev:
+ cfg80211_unlock_rdev(rdev);
+ dev_put(dev);
+ rtnl_unlock();
+
+ return err;
+}
+
+static int nl80211_set_cqm(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attrs[NL80211_ATTR_CQM_MAX + 1];
+ struct nlattr *cqm;
+ int err;
+
+ cqm = info->attrs[NL80211_ATTR_CQM];
+ if (!cqm) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = nla_parse_nested(attrs, NL80211_ATTR_CQM_MAX, cqm,
+ nl80211_attr_cqm_policy);
+ if (err)
+ goto out;
+
+ if (attrs[NL80211_ATTR_CQM_RSSI_THOLD] &&
+ attrs[NL80211_ATTR_CQM_RSSI_HYST]) {
+ s32 threshold;
+ u32 hysteresis;
+ threshold = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_THOLD]);
+ hysteresis = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_HYST]);
+ err = nl80211_set_cqm_rssi(info, threshold, hysteresis);
+ } else
+ err = -EINVAL;
+
+out:
+ return err;
+}
+
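
On the wire, NL80211_ATTR_CQM is a nested attribute that the handler above unpacks with nla_parse_nested() against nl80211_attr_cqm_policy. Building the nest from userspace with libnl-3 might look like the fragment below (it assumes a `msg` handle as in the earlier sketch; the values are examples):

/* ask for CQM events when RSSI crosses -70 dBm, with 2 dB hysteresis;
 * the threshold is a signed value carried in an NLA_U32 attribute */
struct nlattr *cqm = nla_nest_start(msg, NL80211_ATTR_CQM);

nla_put_u32(msg, NL80211_ATTR_CQM_RSSI_THOLD, (uint32_t)-70);
nla_put_u32(msg, NL80211_ATTR_CQM_RSSI_HYST, 2);
nla_nest_end(msg, cqm);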
static struct genl_ops nl80211_ops[] = {
{
.cmd = NL80211_CMD_GET_WIPHY,
@@ -5083,6 +5267,18 @@ static struct genl_ops nl80211_ops[] = {
.policy = nl80211_policy,
/* can be retrieved by unprivileged users */
},
+ {
+ .cmd = NL80211_CMD_SET_CQM,
+ .doit = nl80211_set_cqm,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = NL80211_CMD_SET_CHANNEL,
+ .doit = nl80211_set_channel,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
};
static struct genl_multicast_group nl80211_mlme_mcgrp = {
@@ -5833,6 +6029,52 @@ void nl80211_send_action_tx_status(struct cfg80211_registered_device *rdev,
nlmsg_free(msg);
}
+void
+nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev,
+ enum nl80211_cqm_rssi_threshold_event rssi_event,
+ gfp_t gfp)
+{
+ struct sk_buff *msg;
+ struct nlattr *pinfoattr;
+ void *hdr;
+
+ msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+ if (!msg)
+ return;
+
+ hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NOTIFY_CQM);
+ if (!hdr) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+ NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
+
+ pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM);
+ if (!pinfoattr)
+ goto nla_put_failure;
+
+ NLA_PUT_U32(msg, NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT,
+ rssi_event);
+
+ nla_nest_end(msg, pinfoattr);
+
+ if (genlmsg_end(msg, hdr) < 0) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+ nl80211_mlme_mcgrp.id, gfp);
+ return;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ nlmsg_free(msg);
+}
+
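
nl80211_send_cqm_rssi_notify() leans on the kernel's NLA_PUT family of macros, which bail to the local nla_put_failure label when the message runs out of room. For readers unfamiliar with the idiom, the era's definitions from include/net/netlink.h look roughly like this (quoted from memory, so treat as approximate):

#define NLA_PUT(skb, attrtype, attrlen, data)			\
	do {							\
		if (unlikely(nla_put(skb, attrtype, attrlen, data) < 0)) \
			goto nla_put_failure;			\
	} while (0)

#define NLA_PUT_TYPE(skb, type, attrtype, value)		\
	do {							\
		type __tmp = value;				\
		NLA_PUT(skb, attrtype, sizeof(type), &__tmp);	\
	} while (0)

#define NLA_PUT_U32(skb, attrtype, value)			\
	NLA_PUT_TYPE(skb, u32, attrtype, value)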
static int nl80211_netlink_notify(struct notifier_block * nb,
unsigned long state,
void *_notify)
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 4ca511102c6c..2ad7fbc7d9f1 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -82,4 +82,10 @@ void nl80211_send_action_tx_status(struct cfg80211_registered_device *rdev,
const u8 *buf, size_t len, bool ack,
gfp_t gfp);
+void
+nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev,
+ enum nl80211_cqm_rssi_threshold_event rssi_event,
+ gfp_t gfp);
+
#endif /* __NET_WIRELESS_NL80211_H */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 422da20d1e5b..8f0d97dd3109 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2356,10 +2356,10 @@ static void print_regdomain(const struct ieee80211_regdomain *rd)
rdev->country_ie_alpha2[1]);
} else
printk(KERN_INFO "cfg80211: Current regulatory "
- "domain intersected: \n");
+ "domain intersected:\n");
} else
- printk(KERN_INFO "cfg80211: Current regulatory "
- "domain intersected: \n");
+ printk(KERN_INFO "cfg80211: Current regulatory "
+ "domain intersected:\n");
} else if (is_world_regdom(rd->alpha2))
printk(KERN_INFO "cfg80211: World regulatory "
"domain updated:\n");
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index f4dfd5f5f2ea..72222f0074db 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -171,7 +171,7 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
params->ssid, params->ssid_len,
NULL, 0,
params->key, params->key_len,
- params->key_idx);
+ params->key_idx, false);
case CFG80211_CONN_ASSOCIATE_NEXT:
BUG_ON(!rdev->ops->assoc);
wdev->conn->state = CFG80211_CONN_ASSOCIATING;
@@ -186,12 +186,13 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
if (err)
__cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
NULL, 0,
- WLAN_REASON_DEAUTH_LEAVING);
+ WLAN_REASON_DEAUTH_LEAVING,
+ false);
return err;
case CFG80211_CONN_DEAUTH_ASSOC_FAIL:
__cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
NULL, 0,
- WLAN_REASON_DEAUTH_LEAVING);
+ WLAN_REASON_DEAUTH_LEAVING, false);
/* return an error so that we call __cfg80211_connect_result() */
return -EINVAL;
default:
@@ -517,12 +518,16 @@ void cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
ev->type = EVENT_CONNECT_RESULT;
if (bssid)
memcpy(ev->cr.bssid, bssid, ETH_ALEN);
- ev->cr.req_ie = ((u8 *)ev) + sizeof(*ev);
- ev->cr.req_ie_len = req_ie_len;
- memcpy((void *)ev->cr.req_ie, req_ie, req_ie_len);
- ev->cr.resp_ie = ((u8 *)ev) + sizeof(*ev) + req_ie_len;
- ev->cr.resp_ie_len = resp_ie_len;
- memcpy((void *)ev->cr.resp_ie, resp_ie, resp_ie_len);
+ if (req_ie_len) {
+ ev->cr.req_ie = ((u8 *)ev) + sizeof(*ev);
+ ev->cr.req_ie_len = req_ie_len;
+ memcpy((void *)ev->cr.req_ie, req_ie, req_ie_len);
+ }
+ if (resp_ie_len) {
+ ev->cr.resp_ie = ((u8 *)ev) + sizeof(*ev) + req_ie_len;
+ ev->cr.resp_ie_len = resp_ie_len;
+ memcpy((void *)ev->cr.resp_ie, resp_ie, resp_ie_len);
+ }
ev->cr.status = status;
spin_lock_irqsave(&wdev->event_lock, flags);
@@ -676,7 +681,8 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
continue;
bssid = wdev->auth_bsses[i]->pub.bssid;
ret = __cfg80211_mlme_deauth(rdev, dev, bssid, NULL, 0,
- WLAN_REASON_DEAUTH_LEAVING);
+ WLAN_REASON_DEAUTH_LEAVING,
+ false);
WARN(ret, "deauth failed: %d\n", ret);
}
}
@@ -735,7 +741,6 @@ int __cfg80211_connect(struct cfg80211_registered_device *rdev,
const u8 *prev_bssid)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
- struct ieee80211_channel *chan;
struct cfg80211_bss *bss = NULL;
int err;
@@ -744,10 +749,6 @@ int __cfg80211_connect(struct cfg80211_registered_device *rdev,
if (wdev->sme_state != CFG80211_SME_IDLE)
return -EALREADY;
- chan = rdev_fixed_channel(rdev, wdev);
- if (chan && chan != connect->channel)
- return -EBUSY;
-
if (WARN_ON(wdev->connect_keys)) {
kfree(wdev->connect_keys);
wdev->connect_keys = NULL;
@@ -935,7 +936,7 @@ int __cfg80211_disconnect(struct cfg80211_registered_device *rdev,
/* wdev->conn->params.bssid must be set if > SCANNING */
err = __cfg80211_mlme_deauth(rdev, dev,
wdev->conn->params.bssid,
- NULL, 0, reason);
+ NULL, 0, reason, false);
if (err)
return err;
} else {
@@ -991,7 +992,8 @@ void cfg80211_sme_disassoc(struct net_device *dev, int idx)
memcpy(bssid, wdev->auth_bsses[idx]->pub.bssid, ETH_ALEN);
if (__cfg80211_mlme_deauth(rdev, dev, bssid,
- NULL, 0, WLAN_REASON_DEAUTH_LEAVING)) {
+ NULL, 0, WLAN_REASON_DEAUTH_LEAVING,
+ false)) {
/* whatever -- assume gone anyway */
cfg80211_unhold_bss(wdev->auth_bsses[idx]);
cfg80211_put_bss(&wdev->auth_bsses[idx]->pub);
diff --git a/net/wireless/util.c b/net/wireless/util.c
index d3574a4eb3ba..3416373a9c0c 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -331,11 +331,18 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
if (iftype == NL80211_IFTYPE_MESH_POINT) {
struct ieee80211s_hdr *meshdr =
(struct ieee80211s_hdr *) (skb->data + hdrlen);
- hdrlen += ieee80211_get_mesh_hdrlen(meshdr);
+ /* make sure meshdr->flags is on the linear part */
+ if (!pskb_may_pull(skb, hdrlen + 1))
+ return -1;
if (meshdr->flags & MESH_FLAGS_AE_A5_A6) {
- memcpy(dst, meshdr->eaddr1, ETH_ALEN);
- memcpy(src, meshdr->eaddr2, ETH_ALEN);
+ skb_copy_bits(skb, hdrlen +
+ offsetof(struct ieee80211s_hdr, eaddr1),
+ dst, ETH_ALEN);
+ skb_copy_bits(skb, hdrlen +
+ offsetof(struct ieee80211s_hdr, eaddr2),
+ src, ETH_ALEN);
}
+ hdrlen += ieee80211_get_mesh_hdrlen(meshdr);
}
break;
case cpu_to_le16(IEEE80211_FCTL_FROMDS):
@@ -347,9 +354,14 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
if (iftype == NL80211_IFTYPE_MESH_POINT) {
struct ieee80211s_hdr *meshdr =
(struct ieee80211s_hdr *) (skb->data + hdrlen);
- hdrlen += ieee80211_get_mesh_hdrlen(meshdr);
+ /* make sure meshdr->flags is on the linear part */
+ if (!pskb_may_pull(skb, hdrlen + 1))
+ return -1;
if (meshdr->flags & MESH_FLAGS_AE_A4)
- memcpy(src, meshdr->eaddr1, ETH_ALEN);
+ skb_copy_bits(skb, hdrlen +
+ offsetof(struct ieee80211s_hdr, eaddr1),
+ src, ETH_ALEN);
+ hdrlen += ieee80211_get_mesh_hdrlen(meshdr);
}
break;
case cpu_to_le16(0):
@@ -358,7 +370,7 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
break;
}
- if (unlikely(skb->len - hdrlen < 8))
+ if (!pskb_may_pull(skb, hdrlen + 8))
return -1;
payload = skb->data + hdrlen;
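
The util.c hunks above replace direct memcpy() from the mesh header with pskb_may_pull() plus skb_copy_bits(), because only the linear head of an skb may be dereferenced through skb->data. A short, hedged illustration of the invariant the fix enforces (a kernel-context fragment, not code from the patch):

/* only skb->data .. skb->data + skb_headlen(skb) is directly addressable;
 * pskb_may_pull(skb, n) linearizes the first n bytes or fails, while
 * skb_copy_bits() can read any offset, paged fragments included */
u8 flags, addr[ETH_ALEN];

if (!pskb_may_pull(skb, hdrlen + 1))	/* need the flags byte linear */
	return -1;
flags = skb->data[hdrlen];		/* now safe to dereference */

if (skb_copy_bits(skb, hdrlen + 1, addr, ETH_ALEN) < 0)
	return -1;			/* copy instead of dereferencing */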
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index a60a2773b497..96342993cf93 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -782,16 +782,22 @@ int cfg80211_wext_siwfreq(struct net_device *dev,
return cfg80211_mgd_wext_siwfreq(dev, info, wextfreq, extra);
case NL80211_IFTYPE_ADHOC:
return cfg80211_ibss_wext_siwfreq(dev, info, wextfreq, extra);
- default:
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_WDS:
+ case NL80211_IFTYPE_MESH_POINT:
freq = cfg80211_wext_freq(wdev->wiphy, wextfreq);
if (freq < 0)
return freq;
if (freq == 0)
return -EINVAL;
+ wdev_lock(wdev);
mutex_lock(&rdev->devlist_mtx);
- err = rdev_set_freq(rdev, NULL, freq, NL80211_CHAN_NO_HT);
+ err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT);
mutex_unlock(&rdev->devlist_mtx);
+ wdev_unlock(wdev);
return err;
+ default:
+ return -EOPNOTSUPP;
}
}
EXPORT_SYMBOL_GPL(cfg80211_wext_siwfreq);
@@ -801,7 +807,6 @@ int cfg80211_wext_giwfreq(struct net_device *dev,
struct iw_freq *freq, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
- struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
@@ -809,9 +814,9 @@ int cfg80211_wext_giwfreq(struct net_device *dev,
case NL80211_IFTYPE_ADHOC:
return cfg80211_ibss_wext_giwfreq(dev, info, freq, extra);
default:
- if (!rdev->channel)
+ if (!wdev->channel)
return -EINVAL;
- freq->m = rdev->channel->center_freq;
+ freq->m = wdev->channel->center_freq;
freq->e = 6;
return 0;
}
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index 4f5a47091fde..0ef17bc42bac 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -29,226 +29,226 @@ typedef int (*wext_ioctl_func)(struct net_device *, struct iwreq *,
* know about.
*/
static const struct iw_ioctl_description standard_ioctl[] = {
- [SIOCSIWCOMMIT - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWCOMMIT)] = {
.header_type = IW_HEADER_TYPE_NULL,
},
- [SIOCGIWNAME - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWNAME)] = {
.header_type = IW_HEADER_TYPE_CHAR,
.flags = IW_DESCR_FLAG_DUMP,
},
- [SIOCSIWNWID - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWNWID)] = {
.header_type = IW_HEADER_TYPE_PARAM,
.flags = IW_DESCR_FLAG_EVENT,
},
- [SIOCGIWNWID - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWNWID)] = {
.header_type = IW_HEADER_TYPE_PARAM,
.flags = IW_DESCR_FLAG_DUMP,
},
- [SIOCSIWFREQ - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWFREQ)] = {
.header_type = IW_HEADER_TYPE_FREQ,
.flags = IW_DESCR_FLAG_EVENT,
},
- [SIOCGIWFREQ - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWFREQ)] = {
.header_type = IW_HEADER_TYPE_FREQ,
.flags = IW_DESCR_FLAG_DUMP,
},
- [SIOCSIWMODE - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWMODE)] = {
.header_type = IW_HEADER_TYPE_UINT,
.flags = IW_DESCR_FLAG_EVENT,
},
- [SIOCGIWMODE - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWMODE)] = {
.header_type = IW_HEADER_TYPE_UINT,
.flags = IW_DESCR_FLAG_DUMP,
},
- [SIOCSIWSENS - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWSENS)] = {
.header_type = IW_HEADER_TYPE_PARAM,
},
- [SIOCGIWSENS - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWSENS)] = {
.header_type = IW_HEADER_TYPE_PARAM,
},
- [SIOCSIWRANGE - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWRANGE)] = {
.header_type = IW_HEADER_TYPE_NULL,
},
- [SIOCGIWRANGE - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWRANGE)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.max_tokens = sizeof(struct iw_range),
.flags = IW_DESCR_FLAG_DUMP,
},
- [SIOCSIWPRIV - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWPRIV)] = {
.header_type = IW_HEADER_TYPE_NULL,
},
- [SIOCGIWPRIV - SIOCIWFIRST] = { /* (handled directly by us) */
+ [IW_IOCTL_IDX(SIOCGIWPRIV)] = { /* (handled directly by us) */
.header_type = IW_HEADER_TYPE_POINT,
.token_size = sizeof(struct iw_priv_args),
.max_tokens = 16,
.flags = IW_DESCR_FLAG_NOMAX,
},
- [SIOCSIWSTATS - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWSTATS)] = {
.header_type = IW_HEADER_TYPE_NULL,
},
- [SIOCGIWSTATS - SIOCIWFIRST] = { /* (handled directly by us) */
+ [IW_IOCTL_IDX(SIOCGIWSTATS)] = { /* (handled directly by us) */
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.max_tokens = sizeof(struct iw_statistics),
.flags = IW_DESCR_FLAG_DUMP,
},
- [SIOCSIWSPY - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWSPY)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = sizeof(struct sockaddr),
.max_tokens = IW_MAX_SPY,
},
- [SIOCGIWSPY - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWSPY)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = sizeof(struct sockaddr) +
sizeof(struct iw_quality),
.max_tokens = IW_MAX_SPY,
},
- [SIOCSIWTHRSPY - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWTHRSPY)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = sizeof(struct iw_thrspy),
.min_tokens = 1,
.max_tokens = 1,
},
- [SIOCGIWTHRSPY - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWTHRSPY)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = sizeof(struct iw_thrspy),
.min_tokens = 1,
.max_tokens = 1,
},
- [SIOCSIWAP - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWAP)] = {
.header_type = IW_HEADER_TYPE_ADDR,
},
- [SIOCGIWAP - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWAP)] = {
.header_type = IW_HEADER_TYPE_ADDR,
.flags = IW_DESCR_FLAG_DUMP,
},
- [SIOCSIWMLME - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWMLME)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.min_tokens = sizeof(struct iw_mlme),
.max_tokens = sizeof(struct iw_mlme),
},
- [SIOCGIWAPLIST - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWAPLIST)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = sizeof(struct sockaddr) +
sizeof(struct iw_quality),
.max_tokens = IW_MAX_AP,
.flags = IW_DESCR_FLAG_NOMAX,
},
- [SIOCSIWSCAN - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWSCAN)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.min_tokens = 0,
.max_tokens = sizeof(struct iw_scan_req),
},
- [SIOCGIWSCAN - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWSCAN)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.max_tokens = IW_SCAN_MAX_DATA,
.flags = IW_DESCR_FLAG_NOMAX,
},
- [SIOCSIWESSID - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWESSID)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.max_tokens = IW_ESSID_MAX_SIZE,
.flags = IW_DESCR_FLAG_EVENT,
},
- [SIOCGIWESSID - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWESSID)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.max_tokens = IW_ESSID_MAX_SIZE,
.flags = IW_DESCR_FLAG_DUMP,
},
- [SIOCSIWNICKN - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWNICKN)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.max_tokens = IW_ESSID_MAX_SIZE,
},
- [SIOCGIWNICKN - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWNICKN)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.max_tokens = IW_ESSID_MAX_SIZE,
},
- [SIOCSIWRATE - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWRATE)] = {
.header_type = IW_HEADER_TYPE_PARAM,
},
- [SIOCGIWRATE - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWRATE)] = {
.header_type = IW_HEADER_TYPE_PARAM,
},
- [SIOCSIWRTS - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWRTS)] = {
.header_type = IW_HEADER_TYPE_PARAM,
},
- [SIOCGIWRTS - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWRTS)] = {
.header_type = IW_HEADER_TYPE_PARAM,
},
- [SIOCSIWFRAG - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWFRAG)] = {
.header_type = IW_HEADER_TYPE_PARAM,
},
- [SIOCGIWFRAG - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWFRAG)] = {
.header_type = IW_HEADER_TYPE_PARAM,
},
- [SIOCSIWTXPOW - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWTXPOW)] = {
.header_type = IW_HEADER_TYPE_PARAM,
},
- [SIOCGIWTXPOW - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWTXPOW)] = {
.header_type = IW_HEADER_TYPE_PARAM,
},
- [SIOCSIWRETRY - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWRETRY)] = {
.header_type = IW_HEADER_TYPE_PARAM,
},
- [SIOCGIWRETRY - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWRETRY)] = {
.header_type = IW_HEADER_TYPE_PARAM,
},
- [SIOCSIWENCODE - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWENCODE)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.max_tokens = IW_ENCODING_TOKEN_MAX,
.flags = IW_DESCR_FLAG_EVENT | IW_DESCR_FLAG_RESTRICT,
},
- [SIOCGIWENCODE - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWENCODE)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.max_tokens = IW_ENCODING_TOKEN_MAX,
.flags = IW_DESCR_FLAG_DUMP | IW_DESCR_FLAG_RESTRICT,
},
- [SIOCSIWPOWER - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWPOWER)] = {
.header_type = IW_HEADER_TYPE_PARAM,
},
- [SIOCGIWPOWER - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWPOWER)] = {
.header_type = IW_HEADER_TYPE_PARAM,
},
- [SIOCSIWGENIE - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWGENIE)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.max_tokens = IW_GENERIC_IE_MAX,
},
- [SIOCGIWGENIE - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWGENIE)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.max_tokens = IW_GENERIC_IE_MAX,
},
- [SIOCSIWAUTH - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWAUTH)] = {
.header_type = IW_HEADER_TYPE_PARAM,
},
- [SIOCGIWAUTH - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWAUTH)] = {
.header_type = IW_HEADER_TYPE_PARAM,
},
- [SIOCSIWENCODEEXT - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWENCODEEXT)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.min_tokens = sizeof(struct iw_encode_ext),
.max_tokens = sizeof(struct iw_encode_ext) +
IW_ENCODING_TOKEN_MAX,
},
- [SIOCGIWENCODEEXT - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWENCODEEXT)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.min_tokens = sizeof(struct iw_encode_ext),
.max_tokens = sizeof(struct iw_encode_ext) +
IW_ENCODING_TOKEN_MAX,
},
- [SIOCSIWPMKSA - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWPMKSA)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.min_tokens = sizeof(struct iw_pmksa),
@@ -262,44 +262,44 @@ static const unsigned standard_ioctl_num = ARRAY_SIZE(standard_ioctl);
* we know about.
*/
static const struct iw_ioctl_description standard_event[] = {
- [IWEVTXDROP - IWEVFIRST] = {
+ [IW_EVENT_IDX(IWEVTXDROP)] = {
.header_type = IW_HEADER_TYPE_ADDR,
},
- [IWEVQUAL - IWEVFIRST] = {
+ [IW_EVENT_IDX(IWEVQUAL)] = {
.header_type = IW_HEADER_TYPE_QUAL,
},
- [IWEVCUSTOM - IWEVFIRST] = {
+ [IW_EVENT_IDX(IWEVCUSTOM)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.max_tokens = IW_CUSTOM_MAX,
},
- [IWEVREGISTERED - IWEVFIRST] = {
+ [IW_EVENT_IDX(IWEVREGISTERED)] = {
.header_type = IW_HEADER_TYPE_ADDR,
},
- [IWEVEXPIRED - IWEVFIRST] = {
+ [IW_EVENT_IDX(IWEVEXPIRED)] = {
.header_type = IW_HEADER_TYPE_ADDR,
},
- [IWEVGENIE - IWEVFIRST] = {
+ [IW_EVENT_IDX(IWEVGENIE)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.max_tokens = IW_GENERIC_IE_MAX,
},
- [IWEVMICHAELMICFAILURE - IWEVFIRST] = {
+ [IW_EVENT_IDX(IWEVMICHAELMICFAILURE)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.max_tokens = sizeof(struct iw_michaelmicfailure),
},
- [IWEVASSOCREQIE - IWEVFIRST] = {
+ [IW_EVENT_IDX(IWEVASSOCREQIE)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.max_tokens = IW_GENERIC_IE_MAX,
},
- [IWEVASSOCRESPIE - IWEVFIRST] = {
+ [IW_EVENT_IDX(IWEVASSOCRESPIE)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.max_tokens = IW_GENERIC_IE_MAX,
},
- [IWEVPMKIDCAND - IWEVFIRST] = {
+ [IW_EVENT_IDX(IWEVPMKIDCAND)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.max_tokens = sizeof(struct iw_pmkid_cand),
@@ -450,11 +450,11 @@ void wireless_send_event(struct net_device * dev,
/* Get the description of the Event */
if (cmd <= SIOCIWLAST) {
- cmd_index = cmd - SIOCIWFIRST;
+ cmd_index = IW_IOCTL_IDX(cmd);
if (cmd_index < standard_ioctl_num)
descr = &(standard_ioctl[cmd_index]);
} else {
- cmd_index = cmd - IWEVFIRST;
+ cmd_index = IW_EVENT_IDX(cmd);
if (cmd_index < standard_event_num)
descr = &(standard_event[cmd_index]);
}
@@ -663,7 +663,7 @@ static iw_handler get_handler(struct net_device *dev, unsigned int cmd)
return NULL;
/* Try as a standard command */
- index = cmd - SIOCIWFIRST;
+ index = IW_IOCTL_IDX(cmd);
if (index < handlers->num_standard)
return handlers->standard[index];
@@ -955,9 +955,9 @@ static int ioctl_standard_call(struct net_device * dev,
int ret = -EINVAL;
/* Get the description of the IOCTL */
- if ((cmd - SIOCIWFIRST) >= standard_ioctl_num)
+ if (IW_IOCTL_IDX(cmd) >= standard_ioctl_num)
return -EOPNOTSUPP;
- descr = &(standard_ioctl[cmd - SIOCIWFIRST]);
+ descr = &(standard_ioctl[IW_IOCTL_IDX(cmd)]);
/* Check if we have a pointer to user space data or not */
if (descr->header_type != IW_HEADER_TYPE_POINT) {
@@ -1013,7 +1013,7 @@ static int compat_standard_call(struct net_device *dev,
struct iw_point iwp;
int err;
- descr = standard_ioctl + (cmd - SIOCIWFIRST);
+ descr = standard_ioctl + IW_IOCTL_IDX(cmd);
if (descr->header_type != IW_HEADER_TYPE_POINT)
return ioctl_standard_call(dev, iwr, cmd, info, handler);
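
The wext-core conversion replaces the open-coded `cmd - SIOCIWFIRST` and `cmd - IWEVFIRST` index arithmetic with named helpers. Presumably (this series adds them to include/net/iw_handler.h) they are thin wrappers along these lines:

#define IW_IOCTL_IDX(cmd)	((cmd) - SIOCIWFIRST)
#define IW_EVENT_IDX(cmd)	((cmd) - IWEVFIRST)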
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index d5c6140f4cb8..9818198add8a 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -108,7 +108,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
/* SSID is not set, we just want to switch channel */
if (chan && !wdev->wext.connect.ssid_len) {
- err = rdev_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT);
+ err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT);
goto out;
}
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 36e84e13c6aa..5e86d4e97dce 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -453,7 +453,6 @@ static int x25_setsockopt(struct socket *sock, int level, int optname,
struct sock *sk = sock->sk;
int rc = -ENOPROTOOPT;
- lock_kernel();
if (level != SOL_X25 || optname != X25_QBITINCL)
goto out;
@@ -465,10 +464,12 @@ static int x25_setsockopt(struct socket *sock, int level, int optname,
if (get_user(opt, (int __user *)optval))
goto out;
- x25_sk(sk)->qbitincl = !!opt;
+ if (opt)
+ set_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags);
+ else
+ clear_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags);
rc = 0;
out:
- unlock_kernel();
return rc;
}
@@ -478,7 +479,6 @@ static int x25_getsockopt(struct socket *sock, int level, int optname,
struct sock *sk = sock->sk;
int val, len, rc = -ENOPROTOOPT;
- lock_kernel();
if (level != SOL_X25 || optname != X25_QBITINCL)
goto out;
@@ -496,10 +496,9 @@ static int x25_getsockopt(struct socket *sock, int level, int optname,
if (put_user(len, optlen))
goto out;
- val = x25_sk(sk)->qbitincl;
+ val = test_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags);
rc = copy_to_user(optval, &val, len) ? -EFAULT : 0;
out:
- unlock_kernel();
return rc;
}
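
The af_x25.c changes fold three separate int fields (qbitincl, intflag, accptapprv) into named bits of a single unsigned long flags word, manipulated with the atomic bitops API. A sketch of the assumed include/net/x25.h side of the conversion (the bit numbers are illustrative):

/* assumed flag bits in struct x25_sock, replacing the old int fields */
#define X25_Q_BIT_FLAG		0	/* was x25->qbitincl */
#define X25_INTERRUPT_FLAG	1	/* was x25->intflag */
#define X25_ACCPT_APPRV_FLAG	2	/* was x25->accptapprv */

struct x25_sock {
	/* ... existing members ... */
	unsigned long flags;	/* operated on with set_bit()/test_bit() */
};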
@@ -583,7 +582,7 @@ static int x25_create(struct net *net, struct socket *sock, int protocol,
x25->t2 = sysctl_x25_ack_holdback_timeout;
x25->state = X25_STATE_0;
x25->cudmatchlength = 0;
- x25->accptapprv = X25_DENY_ACCPT_APPRV; /* normally no cud */
+ set_bit(X25_ACCPT_APPRV_FLAG, &x25->flags); /* normally no cud */
/* on call accept */
x25->facilities.winsize_in = X25_DEFAULT_WINDOW_SIZE;
@@ -632,12 +631,12 @@ static struct sock *x25_make_new(struct sock *osk)
x25->t22 = ox25->t22;
x25->t23 = ox25->t23;
x25->t2 = ox25->t2;
+ x25->flags = ox25->flags;
x25->facilities = ox25->facilities;
- x25->qbitincl = ox25->qbitincl;
x25->dte_facilities = ox25->dte_facilities;
x25->cudmatchlength = ox25->cudmatchlength;
- x25->accptapprv = ox25->accptapprv;
+ clear_bit(X25_INTERRUPT_FLAG, &x25->flags);
x25_init_timers(sk);
out:
return sk;
@@ -719,7 +718,7 @@ static int x25_wait_for_connection_establishment(struct sock *sk)
DECLARE_WAITQUEUE(wait, current);
int rc;
- add_wait_queue_exclusive(sk->sk_sleep, &wait);
+ add_wait_queue_exclusive(sk_sleep(sk), &wait);
for (;;) {
__set_current_state(TASK_INTERRUPTIBLE);
rc = -ERESTARTSYS;
@@ -739,7 +738,7 @@ static int x25_wait_for_connection_establishment(struct sock *sk)
break;
}
__set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sk_sleep, &wait);
+ remove_wait_queue(sk_sleep(sk), &wait);
return rc;
}
@@ -839,7 +838,7 @@ static int x25_wait_for_data(struct sock *sk, long timeout)
DECLARE_WAITQUEUE(wait, current);
int rc = 0;
- add_wait_queue_exclusive(sk->sk_sleep, &wait);
+ add_wait_queue_exclusive(sk_sleep(sk), &wait);
for (;;) {
__set_current_state(TASK_INTERRUPTIBLE);
if (sk->sk_shutdown & RCV_SHUTDOWN)
@@ -859,7 +858,7 @@ static int x25_wait_for_data(struct sock *sk, long timeout)
break;
}
__set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sk_sleep, &wait);
+ remove_wait_queue(sk_sleep(sk), &wait);
return rc;
}
@@ -1053,8 +1052,8 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
makex25->vc_facil_mask &= ~X25_MASK_CALLING_AE;
makex25->cudmatchlength = x25_sk(sk)->cudmatchlength;
- /* Normally all calls are accepted immediatly */
- if(makex25->accptapprv & X25_DENY_ACCPT_APPRV) {
+ /* Normally all calls are accepted immediately */
+ if (test_bit(X25_ACCPT_APPRV_FLAG, &makex25->flags)) {
x25_write_internal(make, X25_CALL_ACCEPTED);
makex25->state = X25_STATE_3;
}
@@ -1186,7 +1185,7 @@ static int x25_sendmsg(struct kiocb *iocb, struct socket *sock,
* If the Q BIT Include socket option is in force, the first
* byte of the user data is the logical value of the Q Bit.
*/
- if (x25->qbitincl) {
+ if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) {
qbit = skb->data[0];
skb_pull(skb, 1);
}
@@ -1242,7 +1241,7 @@ static int x25_sendmsg(struct kiocb *iocb, struct socket *sock,
len = rc;
if (rc < 0)
kfree_skb(skb);
- else if (x25->qbitincl)
+ else if (test_bit(X25_Q_BIT_FLAG, &x25->flags))
len++;
}
@@ -1307,7 +1306,7 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
/*
* No Q bit information on Interrupt data.
*/
- if (x25->qbitincl) {
+ if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) {
asmptr = skb_push(skb, 1);
*asmptr = 0x00;
}
@@ -1325,7 +1324,7 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
skb_pull(skb, x25->neighbour->extended ?
X25_EXT_MIN_LEN : X25_STD_MIN_LEN);
- if (x25->qbitincl) {
+ if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) {
asmptr = skb_push(skb, 1);
*asmptr = qbit;
}
@@ -1576,7 +1575,7 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
rc = -EINVAL;
if (sk->sk_state != TCP_CLOSE)
break;
- x25->accptapprv = X25_ALLOW_ACCPT_APPRV;
+ clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags);
rc = 0;
break;
}
@@ -1585,7 +1584,8 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
rc = -EINVAL;
if (sk->sk_state != TCP_ESTABLISHED)
break;
- if (x25->accptapprv) /* must call accptapprv above */
+ /* must call accptapprv above */
+ if (test_bit(X25_ACCPT_APPRV_FLAG, &x25->flags))
break;
x25_write_internal(sk, X25_CALL_ACCEPTED);
x25->state = X25_STATE_3;
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index b9ef682230a0..9005f6daeab5 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -24,6 +24,7 @@
#include <net/sock.h>
#include <linux/if_arp.h>
#include <net/x25.h>
+#include <net/x25device.h>
static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
{
@@ -115,19 +116,22 @@ int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev,
}
switch (skb->data[0]) {
- case 0x00:
- skb_pull(skb, 1);
- if (x25_receive_data(skb, nb)) {
- x25_neigh_put(nb);
- goto out;
- }
- break;
- case 0x01:
- x25_link_established(nb);
- break;
- case 0x02:
- x25_link_terminated(nb);
- break;
+
+ case X25_IFACE_DATA:
+ skb_pull(skb, 1);
+ if (x25_receive_data(skb, nb)) {
+ x25_neigh_put(nb);
+ goto out;
+ }
+ break;
+
+ case X25_IFACE_CONNECT:
+ x25_link_established(nb);
+ break;
+
+ case X25_IFACE_DISCONNECT:
+ x25_link_terminated(nb);
+ break;
}
x25_neigh_put(nb);
drop:
@@ -148,7 +152,7 @@ void x25_establish_link(struct x25_neigh *nb)
return;
}
ptr = skb_put(skb, 1);
- *ptr = 0x01;
+ *ptr = X25_IFACE_CONNECT;
break;
#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
@@ -184,7 +188,7 @@ void x25_terminate_link(struct x25_neigh *nb)
}
ptr = skb_put(skb, 1);
- *ptr = 0x02;
+ *ptr = X25_IFACE_DISCONNECT;
skb->protocol = htons(ETH_P_X25);
skb->dev = nb->dev;
@@ -200,7 +204,7 @@ void x25_send_frame(struct sk_buff *skb, struct x25_neigh *nb)
switch (nb->dev->type) {
case ARPHRD_X25:
dptr = skb_push(skb, 1);
- *dptr = 0x00;
+ *dptr = X25_IFACE_DATA;
break;
#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index 372ac226e648..63178961efac 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -273,7 +273,7 @@ static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametyp
break;
case X25_INTERRUPT_CONFIRMATION:
- x25->intflag = 0;
+ clear_bit(X25_INTERRUPT_FLAG, &x25->flags);
break;
case X25_INTERRUPT:
diff --git a/net/x25/x25_out.c b/net/x25/x25_out.c
index 52351a26b6fc..d00649fb251d 100644
--- a/net/x25/x25_out.c
+++ b/net/x25/x25_out.c
@@ -148,8 +148,9 @@ void x25_kick(struct sock *sk)
/*
* Transmit interrupt data.
*/
- if (!x25->intflag && skb_peek(&x25->interrupt_out_queue) != NULL) {
- x25->intflag = 1;
+ if (skb_peek(&x25->interrupt_out_queue) != NULL &&
+ !test_and_set_bit(X25_INTERRUPT_FLAG, &x25->flags)) {
+
skb = skb_dequeue(&x25->interrupt_out_queue);
x25_transmit_link(skb, x25->neighbour);
}
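
x25_kick() now relies on test_and_set_bit(), which atomically sets the bit and returns its previous value; the interrupt frame is therefore dequeued at most once until the X25_INTERRUPT_CONFIRMATION handler (see the x25_in.c hunk above) clears the flag. The idiom in isolation, with illustrative names:

/* of any concurrent callers, only the one that flips the bit 0 -> 1
 * proceeds; clear_bit() re-arms it later */
if (!test_and_set_bit(MY_FLAG, &flags)) {
	/* critical section runs at most once until the flag is cleared */
}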
diff --git a/net/xfrm/xfrm_hash.h b/net/xfrm/xfrm_hash.h
index e5195c99f71e..8e69533d2313 100644
--- a/net/xfrm/xfrm_hash.h
+++ b/net/xfrm/xfrm_hash.h
@@ -16,7 +16,8 @@ static inline unsigned int __xfrm6_addr_hash(xfrm_address_t *addr)
static inline unsigned int __xfrm4_daddr_saddr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr)
{
- return ntohl(daddr->a4 + saddr->a4);
+ u32 sum = (__force u32)daddr->a4 + (__force u32)saddr->a4;
+ return ntohl((__force __be32)sum);
}
static inline unsigned int __xfrm6_daddr_saddr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr)
@@ -54,7 +55,7 @@ static inline unsigned __xfrm_src_hash(xfrm_address_t *daddr,
case AF_INET6:
h ^= __xfrm6_daddr_saddr_hash(daddr, saddr);
break;
- };
+ }
return (h ^ (h >> 16)) & hmask;
}
@@ -101,7 +102,7 @@ static inline unsigned int __sel_hash(struct xfrm_selector *sel, unsigned short
h = __xfrm6_daddr_saddr_hash(daddr, saddr);
break;
- };
+ }
h ^= (h >> 16);
return h & hmask;
}
@@ -118,7 +119,7 @@ static inline unsigned int __addr_hash(xfrm_address_t *daddr, xfrm_address_t *sa
case AF_INET6:
h = __xfrm6_daddr_saddr_hash(daddr, saddr);
break;
- };
+ }
h ^= (h >> 16);
return h & hmask;
}
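
The xfrm_hash.h change is about sparse's endianness checking: the a4 fields are __be32, and adding two of them mixes byte-order types. The __force casts assert that the mixing is intentional. Restated as a self-contained helper (a sketch, not the patch itself):

#include <linux/types.h>

/* the sum of two big-endian words has no byte order of its own: do the
 * addition in plain u32, then cast back so ntohl() is well-typed */
static inline unsigned int addr_pair_hash(__be32 daddr, __be32 saddr)
{
	u32 sum = (__force u32)daddr + (__force u32)saddr;

	return ntohl((__force __be32)sum);
}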
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 843e066649cb..d965a2bad8d3 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -37,6 +37,8 @@
DEFINE_MUTEX(xfrm_cfg_mutex);
EXPORT_SYMBOL(xfrm_cfg_mutex);
+static DEFINE_SPINLOCK(xfrm_policy_sk_bundle_lock);
+static struct dst_entry *xfrm_policy_sk_bundles;
static DEFINE_RWLOCK(xfrm_policy_lock);
static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
@@ -44,12 +46,10 @@ static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];
static struct kmem_cache *xfrm_dst_cache __read_mostly;
-static HLIST_HEAD(xfrm_policy_gc_list);
-static DEFINE_SPINLOCK(xfrm_policy_gc_lock);
-
static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
static void xfrm_init_pmtu(struct dst_entry *dst);
+static int stale_bundle(struct dst_entry *dst);
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
int dir);
@@ -156,7 +156,7 @@ static void xfrm_policy_timer(unsigned long data)
read_lock(&xp->lock);
- if (xp->walk.dead)
+ if (unlikely(xp->walk.dead))
goto out;
dir = xfrm_policy_id2dir(xp->index);
@@ -216,6 +216,35 @@ expired:
xfrm_pol_put(xp);
}
+static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo)
+{
+ struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
+
+ if (unlikely(pol->walk.dead))
+ flo = NULL;
+ else
+ xfrm_pol_hold(pol);
+
+ return flo;
+}
+
+static int xfrm_policy_flo_check(struct flow_cache_object *flo)
+{
+ struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
+
+ return !pol->walk.dead;
+}
+
+static void xfrm_policy_flo_delete(struct flow_cache_object *flo)
+{
+ xfrm_pol_put(container_of(flo, struct xfrm_policy, flo));
+}
+
+static const struct flow_cache_ops xfrm_policy_fc_ops = {
+ .get = xfrm_policy_flo_get,
+ .check = xfrm_policy_flo_check,
+ .delete = xfrm_policy_flo_delete,
+};
/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
* SPD calls.
@@ -236,6 +265,7 @@ struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
atomic_set(&policy->refcnt, 1);
setup_timer(&policy->timer, xfrm_policy_timer,
(unsigned long)policy);
+ policy->flo.ops = &xfrm_policy_fc_ops;
}
return policy;
}
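
Policies (and, further down, bundles) now embed a flow_cache_object and register get/check/delete callbacks with the rewritten flow cache. The client contract, as this series defines it in include/net/flow.h (quoted from memory, so treat as approximate):

struct flow_cache_object {
	const struct flow_cache_ops *ops;
};

struct flow_cache_ops {
	/* take a reference for the lookup; NULL forces a re-resolve */
	struct flow_cache_object *(*get)(struct flow_cache_object *flo);
	/* nonzero if the cached entry is still usable */
	int (*check)(struct flow_cache_object *flo);
	/* the cache is dropping its entry; release its reference */
	void (*delete)(struct flow_cache_object *flo);
};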
@@ -247,8 +277,6 @@ void xfrm_policy_destroy(struct xfrm_policy *policy)
{
BUG_ON(!policy->walk.dead);
- BUG_ON(policy->bundles);
-
if (del_timer(&policy->timer))
BUG();
@@ -257,63 +285,20 @@ void xfrm_policy_destroy(struct xfrm_policy *policy)
}
EXPORT_SYMBOL(xfrm_policy_destroy);
-static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
-{
- struct dst_entry *dst;
-
- while ((dst = policy->bundles) != NULL) {
- policy->bundles = dst->next;
- dst_free(dst);
- }
-
- if (del_timer(&policy->timer))
- atomic_dec(&policy->refcnt);
-
- if (atomic_read(&policy->refcnt) > 1)
- flow_cache_flush();
-
- xfrm_pol_put(policy);
-}
-
-static void xfrm_policy_gc_task(struct work_struct *work)
-{
- struct xfrm_policy *policy;
- struct hlist_node *entry, *tmp;
- struct hlist_head gc_list;
-
- spin_lock_bh(&xfrm_policy_gc_lock);
- gc_list.first = xfrm_policy_gc_list.first;
- INIT_HLIST_HEAD(&xfrm_policy_gc_list);
- spin_unlock_bh(&xfrm_policy_gc_lock);
-
- hlist_for_each_entry_safe(policy, entry, tmp, &gc_list, bydst)
- xfrm_policy_gc_kill(policy);
-}
-static DECLARE_WORK(xfrm_policy_gc_work, xfrm_policy_gc_task);
-
/* Rule must be locked. Release descendant resources, announce
 * the entry dead. The rule must already be unlinked from lists.
*/
static void xfrm_policy_kill(struct xfrm_policy *policy)
{
- int dead;
-
- write_lock_bh(&policy->lock);
- dead = policy->walk.dead;
policy->walk.dead = 1;
- write_unlock_bh(&policy->lock);
- if (unlikely(dead)) {
- WARN_ON(1);
- return;
- }
+ atomic_inc(&policy->genid);
- spin_lock_bh(&xfrm_policy_gc_lock);
- hlist_add_head(&policy->bydst, &xfrm_policy_gc_list);
- spin_unlock_bh(&xfrm_policy_gc_lock);
+ if (del_timer(&policy->timer))
+ xfrm_pol_put(policy);
- schedule_work(&xfrm_policy_gc_work);
+ xfrm_pol_put(policy);
}
static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
@@ -555,7 +540,6 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
struct xfrm_policy *delpol;
struct hlist_head *chain;
struct hlist_node *entry, *newpos;
- struct dst_entry *gc_list;
u32 mark = policy->mark.v & policy->mark.m;
write_lock_bh(&xfrm_policy_lock);
@@ -605,34 +589,6 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
else if (xfrm_bydst_should_resize(net, dir, NULL))
schedule_work(&net->xfrm.policy_hash_work);
- read_lock_bh(&xfrm_policy_lock);
- gc_list = NULL;
- entry = &policy->bydst;
- hlist_for_each_entry_continue(policy, entry, bydst) {
- struct dst_entry *dst;
-
- write_lock(&policy->lock);
- dst = policy->bundles;
- if (dst) {
- struct dst_entry *tail = dst;
- while (tail->next)
- tail = tail->next;
- tail->next = gc_list;
- gc_list = dst;
-
- policy->bundles = NULL;
- }
- write_unlock(&policy->lock);
- }
- read_unlock_bh(&xfrm_policy_lock);
-
- while (gc_list) {
- struct dst_entry *dst = gc_list;
-
- gc_list = dst->next;
- dst_free(dst);
- }
-
return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);
@@ -671,10 +627,8 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
}
write_unlock_bh(&xfrm_policy_lock);
- if (ret && delete) {
- atomic_inc(&flow_cache_genid);
+ if (ret && delete)
xfrm_policy_kill(ret);
- }
return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
@@ -713,10 +667,8 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
}
write_unlock_bh(&xfrm_policy_lock);
- if (ret && delete) {
- atomic_inc(&flow_cache_genid);
+ if (ret && delete)
xfrm_policy_kill(ret);
- }
return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);
@@ -776,7 +728,6 @@ xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audi
int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
int dir, err = 0, cnt = 0;
- struct xfrm_policy *dp;
write_lock_bh(&xfrm_policy_lock);
@@ -794,10 +745,9 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
&net->xfrm.policy_inexact[dir], bydst) {
if (pol->type != type)
continue;
- dp = __xfrm_policy_unlink(pol, dir);
+ __xfrm_policy_unlink(pol, dir);
write_unlock_bh(&xfrm_policy_lock);
- if (dp)
- cnt++;
+ cnt++;
xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
audit_info->sessionid,
@@ -816,10 +766,9 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
bydst) {
if (pol->type != type)
continue;
- dp = __xfrm_policy_unlink(pol, dir);
+ __xfrm_policy_unlink(pol, dir);
write_unlock_bh(&xfrm_policy_lock);
- if (dp)
- cnt++;
+ cnt++;
xfrm_audit_policy_delete(pol, 1,
audit_info->loginuid,
@@ -835,7 +784,6 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
}
if (!cnt)
err = -ESRCH;
- atomic_inc(&flow_cache_genid);
out:
write_unlock_bh(&xfrm_policy_lock);
return err;
@@ -989,32 +937,37 @@ fail:
return ret;
}
-static int xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family,
- u8 dir, void **objp, atomic_t **obj_refp)
+static struct xfrm_policy *
+__xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family, u8 dir)
{
+#ifdef CONFIG_XFRM_SUB_POLICY
struct xfrm_policy *pol;
- int err = 0;
-#ifdef CONFIG_XFRM_SUB_POLICY
pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
- if (IS_ERR(pol)) {
- err = PTR_ERR(pol);
- pol = NULL;
- }
- if (pol || err)
- goto end;
-#endif
- pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
- if (IS_ERR(pol)) {
- err = PTR_ERR(pol);
- pol = NULL;
- }
-#ifdef CONFIG_XFRM_SUB_POLICY
-end:
+ if (pol != NULL)
+ return pol;
#endif
- if ((*objp = (void *) pol) != NULL)
- *obj_refp = &pol->refcnt;
- return err;
+ return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
+}
+
+static struct flow_cache_object *
+xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family,
+ u8 dir, struct flow_cache_object *old_obj, void *ctx)
+{
+ struct xfrm_policy *pol;
+
+ if (old_obj)
+ xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo));
+
+ pol = __xfrm_policy_lookup(net, fl, family, dir);
+ if (IS_ERR_OR_NULL(pol))
+ return ERR_CAST(pol);
+
+	/* Resolver returns two references:
+	 * one for the cache and one for the caller of flow_cache_lookup() */
+ xfrm_pol_hold(pol);
+
+ return &pol->flo;
}
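
Both the policy and bundle callbacks recover their enclosing object from the embedded flow_cache_object with container_of(); spelled out:

/* given a pointer to the embedded 'flo' member, recover the enclosing
 * xfrm_policy (the same pattern serves struct xfrm_dst below) */
struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);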
static inline int policy_to_flow_dir(int dir)
@@ -1104,8 +1057,6 @@ int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
pol = __xfrm_policy_unlink(pol, dir);
write_unlock_bh(&xfrm_policy_lock);
if (pol) {
- if (dir < XFRM_POLICY_MAX)
- atomic_inc(&flow_cache_genid);
xfrm_policy_kill(pol);
return 0;
}
@@ -1132,6 +1083,9 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
}
if (old_pol)
+		/* Unlinking always succeeds. This is the only function
+		 * allowed to delete or replace a socket policy.
+		 */
__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
write_unlock_bh(&xfrm_policy_lock);
@@ -1300,18 +1254,6 @@ xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, struct flowi *fl,
* still valid.
*/
-static struct dst_entry *
-xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family)
-{
- struct dst_entry *x;
- struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
- if (unlikely(afinfo == NULL))
- return ERR_PTR(-EINVAL);
- x = afinfo->find_bundle(fl, policy);
- xfrm_policy_put_afinfo(afinfo);
- return x;
-}
-
static inline int xfrm_get_tos(struct flowi *fl, int family)
{
struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
@@ -1327,6 +1269,54 @@ static inline int xfrm_get_tos(struct flowi *fl, int family)
return tos;
}
+static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo)
+{
+ struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
+ struct dst_entry *dst = &xdst->u.dst;
+
+ if (xdst->route == NULL) {
+		/* Dummy bundle -- if it has xfrms, we could not
+		 * build a real bundle because template resolution
+		 * failed, so resolution needs to be retried. */
+ if (xdst->num_xfrms > 0)
+ return NULL;
+ } else {
+ /* Real bundle */
+ if (stale_bundle(dst))
+ return NULL;
+ }
+
+ dst_hold(dst);
+ return flo;
+}
+
+static int xfrm_bundle_flo_check(struct flow_cache_object *flo)
+{
+ struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
+ struct dst_entry *dst = &xdst->u.dst;
+
+ if (!xdst->route)
+ return 0;
+ if (stale_bundle(dst))
+ return 0;
+
+ return 1;
+}
+
+static void xfrm_bundle_flo_delete(struct flow_cache_object *flo)
+{
+ struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
+ struct dst_entry *dst = &xdst->u.dst;
+
+ dst_free(dst);
+}
+
+static const struct flow_cache_ops xfrm_bundle_fc_ops = {
+ .get = xfrm_bundle_flo_get,
+ .check = xfrm_bundle_flo_check,
+ .delete = xfrm_bundle_flo_delete,
+};
+
static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
{
struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
@@ -1349,9 +1339,10 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
BUG();
}
xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS);
-
xfrm_policy_put_afinfo(afinfo);
+ xdst->flo.ops = &xfrm_bundle_fc_ops;
+
return xdst;
}
@@ -1389,6 +1380,7 @@ static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
return err;
}
+
/* Allocate chain of dst_entry's, attach known xfrm's, calculate
* all the metrics... Shortly, bundle a bundle.
*/
@@ -1452,7 +1444,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
dst_hold(dst);
dst1->xfrm = xfrm[i];
- xdst->genid = xfrm[i]->genid;
+ xdst->xfrm_genid = xfrm[i]->genid;
dst1->obsolete = -1;
dst1->flags |= DST_HOST;
@@ -1545,7 +1537,186 @@ xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
#endif
}
-static int stale_bundle(struct dst_entry *dst);
+static int xfrm_expand_policies(struct flowi *fl, u16 family,
+ struct xfrm_policy **pols,
+ int *num_pols, int *num_xfrms)
+{
+ int i;
+
+ if (*num_pols == 0 || !pols[0]) {
+ *num_pols = 0;
+ *num_xfrms = 0;
+ return 0;
+ }
+ if (IS_ERR(pols[0]))
+ return PTR_ERR(pols[0]);
+
+ *num_xfrms = pols[0]->xfrm_nr;
+
+#ifdef CONFIG_XFRM_SUB_POLICY
+ if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
+ pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
+ pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
+ XFRM_POLICY_TYPE_MAIN,
+ fl, family,
+ XFRM_POLICY_OUT);
+ if (pols[1]) {
+ if (IS_ERR(pols[1])) {
+ xfrm_pols_put(pols, *num_pols);
+ return PTR_ERR(pols[1]);
+ }
+ (*num_pols) ++;
+ (*num_xfrms) += pols[1]->xfrm_nr;
+ }
+ }
+#endif
+ for (i = 0; i < *num_pols; i++) {
+ if (pols[i]->action != XFRM_POLICY_ALLOW) {
+ *num_xfrms = -1;
+ break;
+ }
+ }
+
+	return 0;
+}
+
+static struct xfrm_dst *
+xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
+ struct flowi *fl, u16 family,
+ struct dst_entry *dst_orig)
+{
+ struct net *net = xp_net(pols[0]);
+ struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
+ struct dst_entry *dst;
+ struct xfrm_dst *xdst;
+ int err;
+
+ /* Try to instantiate a bundle */
+ err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
+ if (err < 0) {
+ if (err != -EAGAIN)
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
+ return ERR_PTR(err);
+ }
+
+ dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
+ if (IS_ERR(dst)) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
+ return ERR_CAST(dst);
+ }
+
+ xdst = (struct xfrm_dst *)dst;
+ xdst->num_xfrms = err;
+ if (num_pols > 1)
+ err = xfrm_dst_update_parent(dst, &pols[1]->selector);
+ else
+ err = xfrm_dst_update_origin(dst, fl);
+ if (unlikely(err)) {
+ dst_free(dst);
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
+ return ERR_PTR(err);
+ }
+
+ xdst->num_pols = num_pols;
+ memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
+ xdst->policy_genid = atomic_read(&pols[0]->genid);
+
+ return xdst;
+}
+
+static struct flow_cache_object *
+xfrm_bundle_lookup(struct net *net, struct flowi *fl, u16 family, u8 dir,
+ struct flow_cache_object *oldflo, void *ctx)
+{
+ struct dst_entry *dst_orig = (struct dst_entry *)ctx;
+ struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
+ struct xfrm_dst *xdst, *new_xdst;
+ int num_pols = 0, num_xfrms = 0, i, err, pol_dead;
+
+ /* Check if the policies from old bundle are usable */
+ xdst = NULL;
+ if (oldflo) {
+ xdst = container_of(oldflo, struct xfrm_dst, flo);
+ num_pols = xdst->num_pols;
+ num_xfrms = xdst->num_xfrms;
+ pol_dead = 0;
+ for (i = 0; i < num_pols; i++) {
+ pols[i] = xdst->pols[i];
+ pol_dead |= pols[i]->walk.dead;
+ }
+ if (pol_dead) {
+ dst_free(&xdst->u.dst);
+ xdst = NULL;
+ num_pols = 0;
+ num_xfrms = 0;
+ oldflo = NULL;
+ }
+ }
+
+ /* Resolve policies to use if we couldn't get them from
+ * previous cache entry */
+ if (xdst == NULL) {
+ num_pols = 1;
+ pols[0] = __xfrm_policy_lookup(net, fl, family, dir);
+ err = xfrm_expand_policies(fl, family, pols,
+ &num_pols, &num_xfrms);
+ if (err < 0)
+ goto inc_error;
+ if (num_pols == 0)
+ return NULL;
+ if (num_xfrms <= 0)
+ goto make_dummy_bundle;
+ }
+
+ new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig);
+ if (IS_ERR(new_xdst)) {
+ err = PTR_ERR(new_xdst);
+ if (err != -EAGAIN)
+ goto error;
+ if (oldflo == NULL)
+ goto make_dummy_bundle;
+ dst_hold(&xdst->u.dst);
+ return oldflo;
+ }
+
+ /* Kill the previous bundle */
+ if (xdst) {
+ /* The policies were stolen for newly generated bundle */
+ xdst->num_pols = 0;
+ dst_free(&xdst->u.dst);
+ }
+
+	/* The flow cache holds no reference of its own (it dst_free()'s),
+	 * but we do need to return one reference for the original caller */
+ dst_hold(&new_xdst->u.dst);
+ return &new_xdst->flo;
+
+make_dummy_bundle:
+	/* We found policies, but there are no bundles to instantiate:
+	 * either the policy blocks, it has no transformations, or
+	 * we could not build the template (no xfrm_states). */
+ xdst = xfrm_alloc_dst(net, family);
+ if (IS_ERR(xdst)) {
+ xfrm_pols_put(pols, num_pols);
+ return ERR_CAST(xdst);
+ }
+ xdst->num_pols = num_pols;
+ xdst->num_xfrms = num_xfrms;
+ memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
+
+ dst_hold(&xdst->u.dst);
+ return &xdst->flo;
+
+inc_error:
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
+error:
+ if (xdst != NULL)
+ dst_free(&xdst->u.dst);
+ else
+ xfrm_pols_put(pols, num_pols);
+ return ERR_PTR(err);
+}
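
xfrm_bundle_lookup() (like xfrm_policy_lookup() above) is installed as the resolver passed to flow_cache_lookup(). The assumed resolver typedef and lookup signature from include/net/flow.h of this series (approximate):

typedef struct flow_cache_object *(*flow_resolve_t)(
		struct net *net, struct flowi *key, u16 family, u8 dir,
		struct flow_cache_object *old_obj, void *ctx);

struct flow_cache_object *flow_cache_lookup(struct net *net,
		struct flowi *key, u16 family, u8 dir,
		flow_resolve_t resolver, void *ctx);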
/* Main function: finds/creates a bundle for given flow.
*
@@ -1555,245 +1726,152 @@ static int stale_bundle(struct dst_entry *dst);
int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl,
struct sock *sk, int flags)
{
- struct xfrm_policy *policy;
struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
- int npols;
- int pol_dead;
- int xfrm_nr;
- int pi;
- struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
- struct dst_entry *dst, *dst_orig = *dst_p;
- int nx = 0;
- int err;
- u32 genid;
- u16 family;
+ struct flow_cache_object *flo;
+ struct xfrm_dst *xdst;
+ struct dst_entry *dst, *dst_orig = *dst_p, *route;
+ u16 family = dst_orig->ops->family;
u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
+ int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
restart:
- genid = atomic_read(&flow_cache_genid);
- policy = NULL;
- for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
- pols[pi] = NULL;
- npols = 0;
- pol_dead = 0;
- xfrm_nr = 0;
+ dst = NULL;
+ xdst = NULL;
+ route = NULL;
if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
- policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
- err = PTR_ERR(policy);
- if (IS_ERR(policy)) {
- XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
+ num_pols = 1;
+ pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
+ err = xfrm_expand_policies(fl, family, pols,
+ &num_pols, &num_xfrms);
+ if (err < 0)
goto dropdst;
+
+ if (num_pols) {
+ if (num_xfrms <= 0) {
+ drop_pols = num_pols;
+ goto no_transform;
+ }
+
+ xdst = xfrm_resolve_and_create_bundle(
+ pols, num_pols, fl,
+ family, dst_orig);
+ if (IS_ERR(xdst)) {
+ xfrm_pols_put(pols, num_pols);
+ err = PTR_ERR(xdst);
+ goto dropdst;
+ }
+
+ spin_lock_bh(&xfrm_policy_sk_bundle_lock);
+ xdst->u.dst.next = xfrm_policy_sk_bundles;
+ xfrm_policy_sk_bundles = &xdst->u.dst;
+ spin_unlock_bh(&xfrm_policy_sk_bundle_lock);
+
+ route = xdst->route;
}
}
- if (!policy) {
+ if (xdst == NULL) {
/* To accelerate a bit... */
if ((dst_orig->flags & DST_NOXFRM) ||
!net->xfrm.policy_count[XFRM_POLICY_OUT])
goto nopol;
- policy = flow_cache_lookup(net, fl, dst_orig->ops->family,
- dir, xfrm_policy_lookup);
- err = PTR_ERR(policy);
- if (IS_ERR(policy)) {
- XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
+ flo = flow_cache_lookup(net, fl, family, dir,
+ xfrm_bundle_lookup, dst_orig);
+ if (flo == NULL)
+ goto nopol;
+ if (IS_ERR(flo)) {
+ err = PTR_ERR(flo);
goto dropdst;
}
+ xdst = container_of(flo, struct xfrm_dst, flo);
+
+ num_pols = xdst->num_pols;
+ num_xfrms = xdst->num_xfrms;
+ memcpy(pols, xdst->pols, sizeof(struct xfrm_policy*) * num_pols);
+ route = xdst->route;
+ }
+
+ dst = &xdst->u.dst;
+ if (route == NULL && num_xfrms > 0) {
+		/* The only case in which xfrm_bundle_lookup() returns
+		 * a bundle with a null route is when the template could
+		 * not be resolved: the policies are there, but the
+		 * bundle could not be created because we don't yet
+		 * have the xfrm_states. Wait for the KM to negotiate
+		 * new SAs, or bail out with an error. */
+ if (net->xfrm.sysctl_larval_drop) {
+ /* EREMOTE tells the caller to generate
+ * a one-shot blackhole route. */
+ dst_release(dst);
+ xfrm_pols_put(pols, drop_pols);
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
+ return -EREMOTE;
+ }
+ if (flags & XFRM_LOOKUP_WAIT) {
+ DECLARE_WAITQUEUE(wait, current);
+
+ add_wait_queue(&net->xfrm.km_waitq, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&net->xfrm.km_waitq, &wait);
+
+ if (!signal_pending(current)) {
+ dst_release(dst);
+ goto restart;
+ }
+
+ err = -ERESTART;
+ } else
+ err = -EAGAIN;
+
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
+ goto error;
}
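The wait branch above is the classic wait-queue idiom: park on net->xfrm.km_waitq until the key manager makes progress, then retry the whole lookup; a pending signal aborts with -ERESTART instead of looping forever. In isolation (same calls as the hunk, error handling elided):

DECLARE_WAITQUEUE(wait, current);

add_wait_queue(&net->xfrm.km_waitq, &wait);
set_current_state(TASK_INTERRUPTIBLE);	/* sleep until woken or signalled */
schedule();
set_current_state(TASK_RUNNING);
remove_wait_queue(&net->xfrm.km_waitq, &wait);

if (!signal_pending(current))
	goto restart;			/* the KM moved: resolve again */
err = -ERESTART;			/* interrupted: give up */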
- if (!policy)
+no_transform:
+ if (num_pols == 0)
goto nopol;
- family = dst_orig->ops->family;
- pols[0] = policy;
- npols ++;
- xfrm_nr += pols[0]->xfrm_nr;
-
- err = -ENOENT;
- if ((flags & XFRM_LOOKUP_ICMP) && !(policy->flags & XFRM_POLICY_ICMP))
+ if ((flags & XFRM_LOOKUP_ICMP) &&
+ !(pols[0]->flags & XFRM_POLICY_ICMP)) {
+ err = -ENOENT;
goto error;
+ }
- policy->curlft.use_time = get_seconds();
+ for (i = 0; i < num_pols; i++)
+ pols[i]->curlft.use_time = get_seconds();
- switch (policy->action) {
- default:
- case XFRM_POLICY_BLOCK:
+ if (num_xfrms < 0) {
/* Prohibit the flow */
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
err = -EPERM;
goto error;
-
- case XFRM_POLICY_ALLOW:
-#ifndef CONFIG_XFRM_SUB_POLICY
- if (policy->xfrm_nr == 0) {
- /* Flow passes not transformed. */
- xfrm_pol_put(policy);
- return 0;
- }
-#endif
-
- /* Try to find matching bundle.
- *
- * LATER: help from flow cache. It is optional, this
- * is required only for output policy.
- */
- dst = xfrm_find_bundle(fl, policy, family);
- if (IS_ERR(dst)) {
- XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
- err = PTR_ERR(dst);
- goto error;
- }
-
- if (dst)
- break;
-
-#ifdef CONFIG_XFRM_SUB_POLICY
- if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
- pols[1] = xfrm_policy_lookup_bytype(net,
- XFRM_POLICY_TYPE_MAIN,
- fl, family,
- XFRM_POLICY_OUT);
- if (pols[1]) {
- if (IS_ERR(pols[1])) {
- XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
- err = PTR_ERR(pols[1]);
- goto error;
- }
- if (pols[1]->action == XFRM_POLICY_BLOCK) {
- XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
- err = -EPERM;
- goto error;
- }
- npols ++;
- xfrm_nr += pols[1]->xfrm_nr;
- }
- }
-
- /*
- * Because neither flowi nor bundle information knows about
- * transformation template size. On more than one policy usage
- * we can realize whether all of them is bypass or not after
- * they are searched. See above not-transformed bypass
- * is surrounded by non-sub policy configuration, too.
- */
- if (xfrm_nr == 0) {
- /* Flow passes not transformed. */
- xfrm_pols_put(pols, npols);
- return 0;
- }
-
-#endif
- nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);
-
- if (unlikely(nx<0)) {
- err = nx;
- if (err == -EAGAIN && net->xfrm.sysctl_larval_drop) {
- /* EREMOTE tells the caller to generate
- * a one-shot blackhole route.
- */
- XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
- xfrm_pol_put(policy);
- return -EREMOTE;
- }
- if (err == -EAGAIN && (flags & XFRM_LOOKUP_WAIT)) {
- DECLARE_WAITQUEUE(wait, current);
-
- add_wait_queue(&net->xfrm.km_waitq, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
- schedule();
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&net->xfrm.km_waitq, &wait);
-
- nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);
-
- if (nx == -EAGAIN && signal_pending(current)) {
- XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
- err = -ERESTART;
- goto error;
- }
- if (nx == -EAGAIN ||
- genid != atomic_read(&flow_cache_genid)) {
- xfrm_pols_put(pols, npols);
- goto restart;
- }
- err = nx;
- }
- if (err < 0) {
- XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
- goto error;
- }
- }
- if (nx == 0) {
- /* Flow passes not transformed. */
- xfrm_pols_put(pols, npols);
- return 0;
- }
-
- dst = xfrm_bundle_create(policy, xfrm, nx, fl, dst_orig);
- err = PTR_ERR(dst);
- if (IS_ERR(dst)) {
- XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
- goto error;
- }
-
- for (pi = 0; pi < npols; pi++) {
- read_lock_bh(&pols[pi]->lock);
- pol_dead |= pols[pi]->walk.dead;
- read_unlock_bh(&pols[pi]->lock);
- }
-
- write_lock_bh(&policy->lock);
- if (unlikely(pol_dead || stale_bundle(dst))) {
- /* Wow! While we worked on resolving, this
- * policy has gone. Retry. It is not paranoia,
- * we just cannot enlist new bundle to dead object.
- * We can't enlist stable bundles either.
- */
- write_unlock_bh(&policy->lock);
- dst_free(dst);
-
- if (pol_dead)
- XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLDEAD);
- else
- XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
- err = -EHOSTUNREACH;
- goto error;
- }
-
- if (npols > 1)
- err = xfrm_dst_update_parent(dst, &pols[1]->selector);
- else
- err = xfrm_dst_update_origin(dst, fl);
- if (unlikely(err)) {
- write_unlock_bh(&policy->lock);
- dst_free(dst);
- XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
- goto error;
- }
-
- dst->next = policy->bundles;
- policy->bundles = dst;
- dst_hold(dst);
- write_unlock_bh(&policy->lock);
+ } else if (num_xfrms > 0) {
+ /* Flow transformed */
+ *dst_p = dst;
+ dst_release(dst_orig);
+ } else {
+ /* Flow passes untransformed */
+ dst_release(dst);
}
- *dst_p = dst;
- dst_release(dst_orig);
- xfrm_pols_put(pols, npols);
+ok:
+ xfrm_pols_put(pols, drop_pols);
return 0;
+nopol:
+ if (!(flags & XFRM_LOOKUP_ICMP))
+ goto ok;
+ err = -ENOENT;
error:
- xfrm_pols_put(pols, npols);
+ dst_release(dst);
dropdst:
dst_release(dst_orig);
*dst_p = NULL;
+ xfrm_pols_put(pols, drop_pols);
return err;
-
-nopol:
- err = -ENOENT;
- if (flags & XFRM_LOOKUP_ICMP)
- goto dropdst;
- return 0;
}
EXPORT_SYMBOL(__xfrm_lookup);
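-EREMOTE only makes sense together with the caller's side of the contract: under sysctl_larval_drop, instead of queueing packets while SAs are negotiated, the caller installs a one-shot blackhole route for the flow. A hypothetical call site, assuming a make_blackhole_route() helper (not the actual ipv4/ipv6 code):

struct dst_entry *dst = dst_orig;	/* start from the plain route */
int err = __xfrm_lookup(net, &dst, fl, sk, flags);

if (err == -EREMOTE) {
	/* assumed helper: a dst that silently drops this flow's
	 * packets until the SAs come up and the route is re-looked-up */
	dst = make_blackhole_route(net, dst_orig);
	err = IS_ERR(dst) ? PTR_ERR(dst) : 0;
}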
@@ -1952,9 +2030,16 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
}
}
- if (!pol)
- pol = flow_cache_lookup(net, &fl, family, fl_dir,
- xfrm_policy_lookup);
+ if (!pol) {
+ struct flow_cache_object *flo;
+
+ flo = flow_cache_lookup(net, &fl, family, fl_dir,
+ xfrm_policy_lookup, NULL);
+ if (IS_ERR_OR_NULL(flo))
+ pol = ERR_CAST(flo);
+ else
+ pol = container_of(flo, struct xfrm_policy, flo);
+ }
if (IS_ERR(pol)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
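The extra arguments to flow_cache_lookup() are the point of the rework: the cache is now generic over flow_cache_object, and the caller supplies a resolver plus an opaque ctx (dst_orig for bundles, NULL here). Assuming a resolver typedef along these lines (a sketch, not the exact header):

typedef struct flow_cache_object *(*flow_resolve_t)(
		struct net *net, struct flowi *key, u16 family, u8 dir,
		struct flow_cache_object *old_obj, void *ctx);

/* xfrm_policy_lookup() and xfrm_bundle_lookup() both fit this shape:
 * the former resolves to the flo embedded in struct xfrm_policy, the
 * latter to the flo embedded in struct xfrm_dst. */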
@@ -2124,7 +2209,6 @@ EXPORT_SYMBOL(xfrm_dst_ifdown);
static void xfrm_link_failure(struct sk_buff *skb)
{
/* Impossible. Such dst must be popped before it reaches the point of failure. */
- return;
}
static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
@@ -2138,71 +2222,24 @@ static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
return dst;
}
-static void prune_one_bundle(struct xfrm_policy *pol, int (*func)(struct dst_entry *), struct dst_entry **gc_list_p)
-{
- struct dst_entry *dst, **dstp;
-
- write_lock(&pol->lock);
- dstp = &pol->bundles;
- while ((dst=*dstp) != NULL) {
- if (func(dst)) {
- *dstp = dst->next;
- dst->next = *gc_list_p;
- *gc_list_p = dst;
- } else {
- dstp = &dst->next;
- }
- }
- write_unlock(&pol->lock);
-}
-
-static void xfrm_prune_bundles(struct net *net, int (*func)(struct dst_entry *))
+static void __xfrm_garbage_collect(struct net *net)
{
- struct dst_entry *gc_list = NULL;
- int dir;
+ struct dst_entry *head, *next;
- read_lock_bh(&xfrm_policy_lock);
- for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
- struct xfrm_policy *pol;
- struct hlist_node *entry;
- struct hlist_head *table;
- int i;
+ flow_cache_flush();
- hlist_for_each_entry(pol, entry,
- &net->xfrm.policy_inexact[dir], bydst)
- prune_one_bundle(pol, func, &gc_list);
+ spin_lock_bh(&xfrm_policy_sk_bundle_lock);
+ head = xfrm_policy_sk_bundles;
+ xfrm_policy_sk_bundles = NULL;
+ spin_unlock_bh(&xfrm_policy_sk_bundle_lock);
- table = net->xfrm.policy_bydst[dir].table;
- for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
- hlist_for_each_entry(pol, entry, table + i, bydst)
- prune_one_bundle(pol, func, &gc_list);
- }
- }
- read_unlock_bh(&xfrm_policy_lock);
-
- while (gc_list) {
- struct dst_entry *dst = gc_list;
- gc_list = dst->next;
- dst_free(dst);
+ while (head) {
+ next = head->next;
+ dst_free(head);
+ head = next;
}
}
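With the per-policy bundle lists gone, only socket-policy bundles need explicit collection; everything else is owned by the flow cache and handled by flow_cache_flush(). The remaining pass is the standard detach-then-free idiom, so the spinlock is never held across dst_free(); in the abstract (placeholder names):

spin_lock_bh(&list_lock);		/* steal the whole list under the lock */
head = global_list;
global_list = NULL;
spin_unlock_bh(&list_lock);		/* ... then free with no lock held */

while (head) {
	next = head->next;
	free_entry(head);		/* dst_free() in the hunk above */
	head = next;
}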
-static int unused_bundle(struct dst_entry *dst)
-{
- return !atomic_read(&dst->__refcnt);
-}
-
-static void __xfrm_garbage_collect(struct net *net)
-{
- xfrm_prune_bundles(net, unused_bundle);
-}
-
-static int xfrm_flush_bundles(struct net *net)
-{
- xfrm_prune_bundles(net, stale_bundle);
- return 0;
-}
-
static void xfrm_init_pmtu(struct dst_entry *dst)
{
do {
@@ -2260,7 +2297,9 @@ int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
return 0;
if (dst->xfrm->km.state != XFRM_STATE_VALID)
return 0;
- if (xdst->genid != dst->xfrm->genid)
+ if (xdst->xfrm_genid != dst->xfrm->genid)
+ return 0;
+ if (xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
return 0;
if (strict && fl &&
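A cached bundle is now valid only while two generation counters stand still: the per-state genid snapshotted at creation (xdst->xfrm_genid) and the top policy's genid (xdst->policy_genid). Producers invalidate by bumping a counter; stale bundles are rejected here on their next use rather than hunted down and freed. The pattern in miniature (illustrative names):

static atomic_t cfg_genid = ATOMIC_INIT(0);

struct cached {
	int genid;			/* snapshot taken at creation */
};

static int cached_is_valid(const struct cached *c)
{
	return c->genid == atomic_read(&cfg_genid);
}

/* Any producer that changes the configuration just does
 * atomic_inc(&cfg_genid); entries failing the check are
 * rebuilt on demand. */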
@@ -2425,7 +2464,7 @@ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void
switch (event) {
case NETDEV_DOWN:
- xfrm_flush_bundles(dev_net(dev));
+ __xfrm_garbage_collect(dev_net(dev));
}
return NOTIFY_DONE;
}
@@ -2531,7 +2570,6 @@ static void xfrm_policy_fini(struct net *net)
audit_info.sessionid = -1;
audit_info.secid = 0;
xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);
- flush_work(&xfrm_policy_gc_work);
WARN_ON(!list_empty(&net->xfrm.policy_all));
@@ -2757,7 +2795,6 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
struct xfrm_migrate *m, int num_migrate)
{
struct xfrm_migrate *mp;
- struct dst_entry *dst;
int i, j, n = 0;
write_lock_bh(&pol->lock);
@@ -2782,10 +2819,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
sizeof(pol->xfrm_vec[i].saddr));
pol->xfrm_vec[i].encap_family = mp->new_family;
/* flush bundles */
- while ((dst = pol->bundles) != NULL) {
- pol->bundles = dst->next;
- dst_free(dst);
- }
+ atomic_inc(&pol->genid);
}
}
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index add77ecb8ac4..5208b12fbfb4 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -38,7 +38,6 @@
static DEFINE_SPINLOCK(xfrm_state_lock);
static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
-static unsigned int xfrm_state_genid;
static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
@@ -924,8 +923,6 @@ static void __xfrm_state_insert(struct xfrm_state *x)
struct net *net = xs_net(x);
unsigned int h;
- x->genid = ++xfrm_state_genid;
-
list_add(&x->km.all, &net->xfrm.state_all);
h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr,
@@ -971,7 +968,7 @@ static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
(mark & x->mark.m) == x->mark.v &&
!xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
!xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
- x->genid = xfrm_state_genid;
+ x->genid++;
}
}
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 6106b72826d3..ba59983aaffe 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1741,6 +1741,10 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
if (err)
return err;
+ err = verify_policy_dir(p->dir);
+ if (err)
+ return err;
+
if (p->index)
xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, 0, &err);
else {
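The added check closes a hole where a userspace-supplied p->dir could reach the policy lookups unvalidated. verify_policy_dir() is the helper the other xfrm_user handlers already use, essentially a whitelist switch (paraphrased, not quoted from this diff):

static int verify_policy_dir(u8 dir)
{
	switch (dir) {
	case XFRM_POLICY_IN:
	case XFRM_POLICY_OUT:
	case XFRM_POLICY_FWD:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}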
@@ -1766,13 +1770,9 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
if (xp == NULL)
return -ENOENT;
- read_lock(&xp->lock);
- if (xp->walk.dead) {
- read_unlock(&xp->lock);
+ if (unlikely(xp->walk.dead))
goto out;
- }
- read_unlock(&xp->lock);
err = 0;
if (up->hard) {
uid_t loginuid = NETLINK_CB(skb).loginuid;
@@ -1783,7 +1783,7 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
} else {
// reset the timers here?
- printk("Dont know what to do with soft policy expire\n");
+ WARN(1, "Don't know what to do with soft policy expire\n");
}
km_policy_expired(xp, p->dir, up->hard, current->pid);
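WARN() keeps the message but adds a stack dump, which is what a should-never-happen branch like this wants; it also returns its condition, so it can guard a branch directly. For example (illustrative, unrelated to this hunk):

if (WARN(len > buf_len, "oversized reply (%d > %d), truncating\n",
	 len, buf_len))
	len = buf_len;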
@@ -1883,7 +1883,7 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
return 0;
bad_policy:
- printk("BAD policy passed\n");
+ WARN(1, "BAD policy passed\n");
free_state:
kfree(x);
nomem:
@@ -2385,8 +2385,9 @@ static int xfrm_send_state_notify(struct xfrm_state *x, struct km_event *c)
case XFRM_MSG_FLUSHSA:
return xfrm_notify_sa_flush(c);
default:
- printk("xfrm_user: Unknown SA event %d\n", c->event);
- break;
+ printk(KERN_NOTICE "xfrm_user: Unknown SA event %d\n",
+ c->event);
+ break;
}
return 0;
@@ -2676,7 +2677,8 @@ static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, struct km_ev
case XFRM_MSG_POLEXPIRE:
return xfrm_exp_policy_notify(xp, dir, c);
default:
- printk("xfrm_user: Unknown Policy event %d\n", c->event);
+ printk(KERN_NOTICE "xfrm_user: Unknown Policy event %d\n",
+ c->event);
}
return 0;
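The KERN_* markers are string literals pasted in front of the format; a printk() without one falls back to the build-time default loglevel, so tagging these messages KERN_NOTICE makes their severity explicit and greppable. Concretely:

printk(KERN_NOTICE "xfrm_user: Unknown Policy event %d\n", c->event);
/* which, in this kernel era, preprocesses to
 * printk("<5>" "xfrm_user: Unknown Policy event %d\n", c->event) */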