path: root/openwrt/package/linux/kernel-patches/302-ebtables
Diffstat (limited to 'openwrt/package/linux/kernel-patches/302-ebtables')
-rw-r--r--  openwrt/package/linux/kernel-patches/302-ebtables  9355
1 file changed, 9355 insertions, 0 deletions
diff --git a/openwrt/package/linux/kernel-patches/302-ebtables b/openwrt/package/linux/kernel-patches/302-ebtables
new file mode 100644
index 0000000000..27c5cb54c7
--- /dev/null
+++ b/openwrt/package/linux/kernel-patches/302-ebtables
@@ -0,0 +1,9355 @@
+diff -Nur linux-mips-cvs/include/linux/if_bridge.h linux-ebtables/include/linux/if_bridge.h
+--- linux-mips-cvs/include/linux/if_bridge.h 2000-02-24 01:13:20.000000000 +0100
++++ linux-ebtables/include/linux/if_bridge.h 2005-02-07 05:52:50.000000000 +0100
+@@ -102,7 +102,8 @@
+ struct net_bridge_port;
+
+ extern int (*br_ioctl_hook)(unsigned long arg);
+-extern void (*br_handle_frame_hook)(struct sk_buff *skb);
++extern int (*br_handle_frame_hook)(struct sk_buff *skb);
++extern int (*br_should_route_hook)(struct sk_buff **pskb);
+
+ #endif
+
+diff -Nur linux-mips-cvs/include/linux/netfilter.h linux-ebtables/include/linux/netfilter.h
+--- linux-mips-cvs/include/linux/netfilter.h 2005-01-20 03:19:24.000000000 +0100
++++ linux-ebtables/include/linux/netfilter.h 2005-02-07 05:52:50.000000000 +0100
+@@ -118,17 +118,23 @@
+ /* This is gross, but inline doesn't cut it for avoiding the function
+ call in fast path: gcc doesn't inline (needs value tracking?). --RR */
+ #ifdef CONFIG_NETFILTER_DEBUG
+-#define NF_HOOK nf_hook_slow
++#define NF_HOOK(pf, hook, skb, indev, outdev, okfn) \
++nf_hook_slow((pf), (hook), (skb), (indev), (outdev), (okfn), INT_MIN)
++#define NF_HOOK_THRESH nf_hook_slow
+ #else
+ #define NF_HOOK(pf, hook, skb, indev, outdev, okfn) \
+ (list_empty(&nf_hooks[(pf)][(hook)]) \
+ ? (okfn)(skb) \
+- : nf_hook_slow((pf), (hook), (skb), (indev), (outdev), (okfn)))
++ : nf_hook_slow((pf), (hook), (skb), (indev), (outdev), (okfn), INT_MIN))
++#define NF_HOOK_THRESH(pf, hook, skb, indev, outdev, okfn, thresh) \
++(list_empty(&nf_hooks[(pf)][(hook)]) \
++ ? (okfn)(skb) \
++ : nf_hook_slow((pf), (hook), (skb), (indev), (outdev), (okfn), (thresh)))
+ #endif
+
+ int nf_hook_slow(int pf, unsigned int hook, struct sk_buff *skb,
+ struct net_device *indev, struct net_device *outdev,
+- int (*okfn)(struct sk_buff *));
++ int (*okfn)(struct sk_buff *), int thresh);
+
+ /* Call setsockopt() */
+ int nf_setsockopt(struct sock *sk, int pf, int optval, char *opt,
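The extra 'thresh' argument added above lets a caller resume traversal of a hook chain from a given priority upward instead of re-running every registered hook; this is what the bridge-netfilter code relies on to re-inject a packet after its own hook has run. An illustrative call, not taken from the patch (the finish callback is hypothetical):

#include <linux/netfilter.h>
#include <linux/netfilter_bridge.h>

/* Re-run NF_BR_PRE_ROUTING, but only the hooks registered at a
 * priority above NF_BR_PRI_BRNF. */
static int example_resume_hooks(struct sk_buff *skb,
				struct net_device *in,
				struct net_device *out,
				int (*finish)(struct sk_buff *))
{
	return NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, in, out,
			      finish, NF_BR_PRI_BRNF + 1);
}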
+diff -Nur linux-mips-cvs/include/linux/netfilter_bridge/ebt_802_3.h linux-ebtables/include/linux/netfilter_bridge/ebt_802_3.h
+--- linux-mips-cvs/include/linux/netfilter_bridge/ebt_802_3.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/include/linux/netfilter_bridge/ebt_802_3.h 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,60 @@
++#ifndef __LINUX_BRIDGE_EBT_802_3_H
++#define __LINUX_BRIDGE_EBT_802_3_H
++
++#define EBT_802_3_SAP 0x01
++#define EBT_802_3_TYPE 0x02
++
++#define EBT_802_3_MATCH "802_3"
++
++/*
++ * If frame has DSAP/SSAP value 0xaa you must check the SNAP type
++ * to discover what kind of packet we're carrying.
++ */
++#define CHECK_TYPE 0xaa
++
++/*
++ * Control field may be one or two bytes. If the first byte has
++ * the value 0x03 then the entire length is one byte, otherwise it is two.
++ * One byte controls are used in Unnumbered Information frames.
++ * Two byte controls are used in Numbered Information frames.
++ */
++#define IS_UI 0x03
++
++#define EBT_802_3_MASK (EBT_802_3_SAP | EBT_802_3_TYPE | EBT_802_3)
++
++/* ui has one byte ctrl, ni has two */
++struct hdr_ui {
++ uint8_t dsap;
++ uint8_t ssap;
++ uint8_t ctrl;
++ uint8_t orig[3];
++ uint16_t type;
++};
++
++struct hdr_ni {
++ uint8_t dsap;
++ uint8_t ssap;
++ uint16_t ctrl;
++ uint8_t orig[3];
++ uint16_t type;
++};
++
++struct ebt_802_3_hdr {
++ uint8_t daddr[6];
++ uint8_t saddr[6];
++ uint16_t len;
++ union {
++ struct hdr_ui ui;
++ struct hdr_ni ni;
++ } llc;
++};
++
++struct ebt_802_3_info
++{
++ uint8_t sap;
++ uint16_t type;
++ uint8_t bitmask;
++ uint8_t invflags;
++};
++
++#endif
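For illustration only, a sketch based on the comments above (not code from the patch): extracting the SNAP type means checking that both SAPs equal CHECK_TYPE and then picking the one-byte (UI) or two-byte (NI) control layout.

static uint16_t example_snap_type(const struct ebt_802_3_hdr *hdr)
{
	/* SNAP is only present when both DSAP and SSAP are 0xaa */
	if (hdr->llc.ui.dsap != CHECK_TYPE || hdr->llc.ui.ssap != CHECK_TYPE)
		return 0;
	/* a 0x03 control byte means a one-byte control field (UI frame) */
	if (hdr->llc.ui.ctrl == IS_UI)
		return hdr->llc.ui.type;
	return hdr->llc.ni.type;
}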
+diff -Nur linux-mips-cvs/include/linux/netfilter_bridge/ebt_among.h linux-ebtables/include/linux/netfilter_bridge/ebt_among.h
+--- linux-mips-cvs/include/linux/netfilter_bridge/ebt_among.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/include/linux/netfilter_bridge/ebt_among.h 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,65 @@
++#ifndef __LINUX_BRIDGE_EBT_AMONG_H
++#define __LINUX_BRIDGE_EBT_AMONG_H
++
++#define EBT_AMONG_DST 0x01
++#define EBT_AMONG_SRC 0x02
++
++/* Grzegorz Borowiak <grzes@gnu.univ.gda.pl> 2003
++ *
++ * Write-once-read-many hash table, used for checking if a given
++ * MAC address belongs to a set or not and possibly for checking
++ * if it is related with a given IPv4 address.
++ *
++ * The hash value of an address is its last byte.
++ *
++ * In real-world ethernet addresses, values of the last byte are
++ * evenly distributed and there is no need to consider other bytes.
++ * It would only slow the routines down.
++ *
++ * For MAC address comparison speedup reasons, we introduce a trick.
++ * MAC address is mapped onto an array of two 32-bit integers.
++ * This pair of integers is compared with MAC addresses in the
++ * hash table, which are stored also in form of pairs of integers
++ * (in `cmp' array). This is quick as it requires only two elementary
++ * number comparisons in worst case. Further, we take advantage of
++ * fact that entropy of 3 last bytes of address is larger than entropy
++ * of 3 first bytes. So first we compare 4 last bytes of addresses and
++ * if they are the same we compare 2 first.
++ *
++ * Yes, it is a memory overhead, but in 2003 AD, who cares?
++ */
++
++struct ebt_mac_wormhash_tuple
++{
++ uint32_t cmp[2];
++ uint32_t ip;
++};
++
++struct ebt_mac_wormhash
++{
++ int table[257];
++ int poolsize;
++ struct ebt_mac_wormhash_tuple pool[0];
++};
++
++#define ebt_mac_wormhash_size(x) ((x) ? sizeof(struct ebt_mac_wormhash) \
++ + (x)->poolsize * sizeof(struct ebt_mac_wormhash_tuple) : 0)
++
++struct ebt_among_info
++{
++ int wh_dst_ofs;
++ int wh_src_ofs;
++ int bitmask;
++};
++
++#define EBT_AMONG_DST_NEG 0x1
++#define EBT_AMONG_SRC_NEG 0x2
++
++#define ebt_among_wh_dst(x) ((x)->wh_dst_ofs ? \
++ (struct ebt_mac_wormhash*)((char*)(x) + (x)->wh_dst_ofs) : NULL)
++#define ebt_among_wh_src(x) ((x)->wh_src_ofs ? \
++ (struct ebt_mac_wormhash*)((char*)(x) + (x)->wh_src_ofs) : NULL)
++
++#define EBT_AMONG_MATCH "among"
++
++#endif
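A sketch of the lookup the comment above describes (illustrative only: it assumes table[] holds bucket boundaries into pool[], which is why it has 257 entries, and it leaves out the MAC/IP pairing rules):

#include <linux/string.h>

static int example_wormhash_contains(const struct ebt_mac_wormhash *wh,
				     const unsigned char *mac)
{
	uint32_t cmp[2] = { 0, 0 };
	int key = mac[5];	/* the hash value is the last byte */
	int i;

	/* map the 6-byte MAC onto the upper bytes of the two integers */
	memcpy(((char *)cmp) + 2, mac, 6);

	for (i = wh->table[key]; i < wh->table[key + 1]; i++) {
		const struct ebt_mac_wormhash_tuple *p = &wh->pool[i];
		/* compare the 4 last bytes first, then the 2 first */
		if (cmp[1] == p->cmp[1] && cmp[0] == p->cmp[0])
			return 1;
	}
	return 0;
}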
+diff -Nur linux-mips-cvs/include/linux/netfilter_bridge/ebt_arp.h linux-ebtables/include/linux/netfilter_bridge/ebt_arp.h
+--- linux-mips-cvs/include/linux/netfilter_bridge/ebt_arp.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/include/linux/netfilter_bridge/ebt_arp.h 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,32 @@
++#ifndef __LINUX_BRIDGE_EBT_ARP_H
++#define __LINUX_BRIDGE_EBT_ARP_H
++
++#define EBT_ARP_OPCODE 0x01
++#define EBT_ARP_HTYPE 0x02
++#define EBT_ARP_PTYPE 0x04
++#define EBT_ARP_SRC_IP 0x08
++#define EBT_ARP_DST_IP 0x10
++#define EBT_ARP_SRC_MAC 0x20
++#define EBT_ARP_DST_MAC 0x40
++#define EBT_ARP_MASK (EBT_ARP_OPCODE | EBT_ARP_HTYPE | EBT_ARP_PTYPE | \
++ EBT_ARP_SRC_IP | EBT_ARP_DST_IP | EBT_ARP_SRC_MAC | EBT_ARP_DST_MAC)
++#define EBT_ARP_MATCH "arp"
++
++struct ebt_arp_info
++{
++ uint16_t htype;
++ uint16_t ptype;
++ uint16_t opcode;
++ uint32_t saddr;
++ uint32_t smsk;
++ uint32_t daddr;
++ uint32_t dmsk;
++ unsigned char smaddr[ETH_ALEN];
++ unsigned char smmsk[ETH_ALEN];
++ unsigned char dmaddr[ETH_ALEN];
++ unsigned char dmmsk[ETH_ALEN];
++ uint8_t bitmask;
++ uint8_t invflags;
++};
++
++#endif
+diff -Nur linux-mips-cvs/include/linux/netfilter_bridge/ebt_arpreply.h linux-ebtables/include/linux/netfilter_bridge/ebt_arpreply.h
+--- linux-mips-cvs/include/linux/netfilter_bridge/ebt_arpreply.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/include/linux/netfilter_bridge/ebt_arpreply.h 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,11 @@
++#ifndef __LINUX_BRIDGE_EBT_ARPREPLY_H
++#define __LINUX_BRIDGE_EBT_ARPREPLY_H
++
++struct ebt_arpreply_info
++{
++ unsigned char mac[ETH_ALEN];
++ int target;
++};
++#define EBT_ARPREPLY_TARGET "arpreply"
++
++#endif
+diff -Nur linux-mips-cvs/include/linux/netfilter_bridge/ebt_ip.h linux-ebtables/include/linux/netfilter_bridge/ebt_ip.h
+--- linux-mips-cvs/include/linux/netfilter_bridge/ebt_ip.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/include/linux/netfilter_bridge/ebt_ip.h 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,43 @@
++/*
++ * ebt_ip
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * April, 2002
++ *
++ * Changes:
++ * added ip-sport and ip-dport
++ * Innominate Security Technologies AG <mhopf@innominate.com>
++ * September, 2002
++ */
++
++#ifndef __LINUX_BRIDGE_EBT_IP_H
++#define __LINUX_BRIDGE_EBT_IP_H
++
++#define EBT_IP_SOURCE 0x01
++#define EBT_IP_DEST 0x02
++#define EBT_IP_TOS 0x04
++#define EBT_IP_PROTO 0x08
++#define EBT_IP_SPORT 0x10
++#define EBT_IP_DPORT 0x20
++#define EBT_IP_MASK (EBT_IP_SOURCE | EBT_IP_DEST | EBT_IP_TOS | EBT_IP_PROTO |\
++ EBT_IP_SPORT | EBT_IP_DPORT )
++#define EBT_IP_MATCH "ip"
++
++// the same values are used for the invflags
++struct ebt_ip_info
++{
++ uint32_t saddr;
++ uint32_t daddr;
++ uint32_t smsk;
++ uint32_t dmsk;
++ uint8_t tos;
++ uint8_t protocol;
++ uint8_t bitmask;
++ uint8_t invflags;
++ uint16_t sport[2];
++ uint16_t dport[2];
++};
++
++#endif
+diff -Nur linux-mips-cvs/include/linux/netfilter_bridge/ebt_limit.h linux-ebtables/include/linux/netfilter_bridge/ebt_limit.h
+--- linux-mips-cvs/include/linux/netfilter_bridge/ebt_limit.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/include/linux/netfilter_bridge/ebt_limit.h 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,23 @@
++#ifndef __LINUX_BRIDGE_EBT_LIMIT_H
++#define __LINUX_BRIDGE_EBT_LIMIT_H
++
++#define EBT_LIMIT_MATCH "limit"
++
++/* timings are in milliseconds. */
++#define EBT_LIMIT_SCALE 10000
++
++/* 1/10,000 sec period => max of 10,000/sec. Min rate is then 429490
++ seconds, or one every 119 hours. */
++
++struct ebt_limit_info
++{
++ u_int32_t avg; /* Average secs between packets * scale */
++ u_int32_t burst; /* Period multiplier for upper limit. */
++
++ /* Used internally by the kernel */
++ unsigned long prev;
++ u_int32_t credit;
++ u_int32_t credit_cap, cost;
++};
++
++#endif
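The fields above implement a classic token bucket: credit refills over time, is capped at credit_cap (the burst), and each matching packet spends cost credits. A sketch, illustrative only (the credits-per-jiffy conversion, which the real module derives from avg and EBT_LIMIT_SCALE, is left as a parameter here):

static int example_limit_match(struct ebt_limit_info *info,
			       unsigned long now,
			       u_int32_t credits_per_jiffy)
{
	info->credit += (now - info->prev) * credits_per_jiffy;
	info->prev = now;
	if (info->credit > info->credit_cap)
		info->credit = info->credit_cap;	/* cap the burst */

	if (info->credit >= info->cost) {
		info->credit -= info->cost;		/* charge this packet */
		return 1;				/* under the limit */
	}
	return 0;					/* rate exceeded */
}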
+diff -Nur linux-mips-cvs/include/linux/netfilter_bridge/ebt_log.h linux-ebtables/include/linux/netfilter_bridge/ebt_log.h
+--- linux-mips-cvs/include/linux/netfilter_bridge/ebt_log.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/include/linux/netfilter_bridge/ebt_log.h 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,17 @@
++#ifndef __LINUX_BRIDGE_EBT_LOG_H
++#define __LINUX_BRIDGE_EBT_LOG_H
++
++#define EBT_LOG_IP 0x01 // if the frame is made by ip, log the ip information
++#define EBT_LOG_ARP 0x02
++#define EBT_LOG_MASK (EBT_LOG_IP | EBT_LOG_ARP)
++#define EBT_LOG_PREFIX_SIZE 30
++#define EBT_LOG_WATCHER "log"
++
++struct ebt_log_info
++{
++ uint8_t loglevel;
++ uint8_t prefix[EBT_LOG_PREFIX_SIZE];
++ uint32_t bitmask;
++};
++
++#endif
+diff -Nur linux-mips-cvs/include/linux/netfilter_bridge/ebt_mark_m.h linux-ebtables/include/linux/netfilter_bridge/ebt_mark_m.h
+--- linux-mips-cvs/include/linux/netfilter_bridge/ebt_mark_m.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/include/linux/netfilter_bridge/ebt_mark_m.h 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,15 @@
++#ifndef __LINUX_BRIDGE_EBT_MARK_M_H
++#define __LINUX_BRIDGE_EBT_MARK_M_H
++
++#define EBT_MARK_AND 0x01
++#define EBT_MARK_OR 0x02
++#define EBT_MARK_MASK (EBT_MARK_AND | EBT_MARK_OR)
++struct ebt_mark_m_info
++{
++ unsigned long mark, mask;
++ uint8_t invert;
++ uint8_t bitmask;
++};
++#define EBT_MARK_MATCH "mark_m"
++
++#endif
+diff -Nur linux-mips-cvs/include/linux/netfilter_bridge/ebt_mark_t.h linux-ebtables/include/linux/netfilter_bridge/ebt_mark_t.h
+--- linux-mips-cvs/include/linux/netfilter_bridge/ebt_mark_t.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/include/linux/netfilter_bridge/ebt_mark_t.h 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,12 @@
++#ifndef __LINUX_BRIDGE_EBT_MARK_T_H
++#define __LINUX_BRIDGE_EBT_MARK_T_H
++
++struct ebt_mark_t_info
++{
++ unsigned long mark;
++ // EBT_ACCEPT, EBT_DROP or EBT_CONTINUE or EBT_RETURN
++ int target;
++};
++#define EBT_MARK_TARGET "mark"
++
++#endif
+diff -Nur linux-mips-cvs/include/linux/netfilter_bridge/ebt_nat.h linux-ebtables/include/linux/netfilter_bridge/ebt_nat.h
+--- linux-mips-cvs/include/linux/netfilter_bridge/ebt_nat.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/include/linux/netfilter_bridge/ebt_nat.h 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,13 @@
++#ifndef __LINUX_BRIDGE_EBT_NAT_H
++#define __LINUX_BRIDGE_EBT_NAT_H
++
++struct ebt_nat_info
++{
++ unsigned char mac[ETH_ALEN];
++ // EBT_ACCEPT, EBT_DROP, EBT_CONTINUE or EBT_RETURN
++ int target;
++};
++#define EBT_SNAT_TARGET "snat"
++#define EBT_DNAT_TARGET "dnat"
++
++#endif
+diff -Nur linux-mips-cvs/include/linux/netfilter_bridge/ebt_pkttype.h linux-ebtables/include/linux/netfilter_bridge/ebt_pkttype.h
+--- linux-mips-cvs/include/linux/netfilter_bridge/ebt_pkttype.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/include/linux/netfilter_bridge/ebt_pkttype.h 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,11 @@
++#ifndef __LINUX_BRIDGE_EBT_PKTTYPE_H
++#define __LINUX_BRIDGE_EBT_PKTTYPE_H
++
++struct ebt_pkttype_info
++{
++ uint8_t pkt_type;
++ uint8_t invert;
++};
++#define EBT_PKTTYPE_MATCH "pkttype"
++
++#endif
+diff -Nur linux-mips-cvs/include/linux/netfilter_bridge/ebt_redirect.h linux-ebtables/include/linux/netfilter_bridge/ebt_redirect.h
+--- linux-mips-cvs/include/linux/netfilter_bridge/ebt_redirect.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/include/linux/netfilter_bridge/ebt_redirect.h 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,11 @@
++#ifndef __LINUX_BRIDGE_EBT_REDIRECT_H
++#define __LINUX_BRIDGE_EBT_REDIRECT_H
++
++struct ebt_redirect_info
++{
++ // EBT_ACCEPT, EBT_DROP or EBT_CONTINUE or EBT_RETURN
++ int target;
++};
++#define EBT_REDIRECT_TARGET "redirect"
++
++#endif
+diff -Nur linux-mips-cvs/include/linux/netfilter_bridge/ebt_stp.h linux-ebtables/include/linux/netfilter_bridge/ebt_stp.h
+--- linux-mips-cvs/include/linux/netfilter_bridge/ebt_stp.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/include/linux/netfilter_bridge/ebt_stp.h 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,46 @@
++#ifndef __LINUX_BRIDGE_EBT_STP_H
++#define __LINUX_BRIDGE_EBT_STP_H
++
++#define EBT_STP_TYPE 0x0001
++
++#define EBT_STP_FLAGS 0x0002
++#define EBT_STP_ROOTPRIO 0x0004
++#define EBT_STP_ROOTADDR 0x0008
++#define EBT_STP_ROOTCOST 0x0010
++#define EBT_STP_SENDERPRIO 0x0020
++#define EBT_STP_SENDERADDR 0x0040
++#define EBT_STP_PORT 0x0080
++#define EBT_STP_MSGAGE 0x0100
++#define EBT_STP_MAXAGE 0x0200
++#define EBT_STP_HELLOTIME 0x0400
++#define EBT_STP_FWDD 0x0800
++
++#define EBT_STP_MASK 0x0fff
++#define EBT_STP_CONFIG_MASK 0x0ffe
++
++#define EBT_STP_MATCH "stp"
++
++struct ebt_stp_config_info
++{
++ uint8_t flags;
++ uint16_t root_priol, root_priou;
++ char root_addr[6], root_addrmsk[6];
++ uint32_t root_costl, root_costu;
++ uint16_t sender_priol, sender_priou;
++ char sender_addr[6], sender_addrmsk[6];
++ uint16_t portl, portu;
++ uint16_t msg_agel, msg_ageu;
++ uint16_t max_agel, max_ageu;
++ uint16_t hello_timel, hello_timeu;
++ uint16_t forward_delayl, forward_delayu;
++};
++
++struct ebt_stp_info
++{
++ uint8_t type;
++ struct ebt_stp_config_info config;
++ uint16_t bitmask;
++ uint16_t invflags;
++};
++
++#endif
+diff -Nur linux-mips-cvs/include/linux/netfilter_bridge/ebt_ulog.h linux-ebtables/include/linux/netfilter_bridge/ebt_ulog.h
+--- linux-mips-cvs/include/linux/netfilter_bridge/ebt_ulog.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/include/linux/netfilter_bridge/ebt_ulog.h 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,33 @@
++#ifndef _EBT_ULOG_H
++#define _EBT_ULOG_H
++
++#define EBT_ULOG_DEFAULT_NLGROUP 0
++#define EBT_ULOG_DEFAULT_QTHRESHOLD 1
++#define EBT_ULOG_MAXNLGROUPS 32 /* hardcoded netlink max */
++#define EBT_ULOG_PREFIX_LEN 32
++#define EBT_ULOG_MAX_QLEN 50
++#define EBT_ULOG_WATCHER "ulog"
++
++struct ebt_ulog_info {
++ uint32_t nlgroup;
++ unsigned int cprange;
++ unsigned int qthreshold;
++ char prefix[EBT_ULOG_PREFIX_LEN];
++};
++
++typedef struct ebt_ulog_packet_msg {
++ char indev[IFNAMSIZ];
++ char outdev[IFNAMSIZ];
++ char physindev[IFNAMSIZ];
++ char physoutdev[IFNAMSIZ];
++ char prefix[EBT_ULOG_PREFIX_LEN];
++ struct timeval stamp;
++ unsigned long mark;
++ unsigned int hook;
++ size_t data_len;
++ /* The complete packet, including Ethernet header and perhaps
++ * the VLAN header is appended */
++ unsigned char data[0] __attribute__ ((aligned (__alignof__(int))));
++} ebt_ulog_packet_msg_t;
++
++#endif /* _EBT_ULOG_H */
+diff -Nur linux-mips-cvs/include/linux/netfilter_bridge/ebt_vlan.h linux-ebtables/include/linux/netfilter_bridge/ebt_vlan.h
+--- linux-mips-cvs/include/linux/netfilter_bridge/ebt_vlan.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/include/linux/netfilter_bridge/ebt_vlan.h 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,20 @@
++#ifndef __LINUX_BRIDGE_EBT_VLAN_H
++#define __LINUX_BRIDGE_EBT_VLAN_H
++
++#define EBT_VLAN_ID 0x01
++#define EBT_VLAN_PRIO 0x02
++#define EBT_VLAN_ENCAP 0x04
++#define EBT_VLAN_MASK (EBT_VLAN_ID | EBT_VLAN_PRIO | EBT_VLAN_ENCAP)
++#define EBT_VLAN_MATCH "vlan"
++
++struct ebt_vlan_info {
++ uint16_t id; /* VLAN ID {1-4095} */
++ uint8_t prio; /* VLAN User Priority {0-7} */
++ uint16_t encap; /* VLAN Encapsulated frame code {0-65535} */
++ uint8_t bitmask; /* Args bitmask bit 1=1 - ID arg,
++ bit 2=1 User-Priority arg, bit 3=1 encap*/
++ uint8_t invflags; /* Inverse bitmask bit 1=1 - inverted ID arg,
++ bit 2=1 - inverted Priority arg */
++};
++
++#endif
+diff -Nur linux-mips-cvs/include/linux/netfilter_bridge/ebtables.h linux-ebtables/include/linux/netfilter_bridge/ebtables.h
+--- linux-mips-cvs/include/linux/netfilter_bridge/ebtables.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/include/linux/netfilter_bridge/ebtables.h 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,361 @@
++/*
++ * ebtables
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * ebtables.c,v 2.0, September, 2002
++ *
++ * This code is strongly inspired by the iptables code which is
++ * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
++ */
++
++#ifndef __LINUX_BRIDGE_EFF_H
++#define __LINUX_BRIDGE_EFF_H
++#include <linux/if.h>
++#include <linux/netfilter_bridge.h>
++#include <linux/if_ether.h>
++
++#define EBT_TABLE_MAXNAMELEN 32
++#define EBT_CHAIN_MAXNAMELEN EBT_TABLE_MAXNAMELEN
++#define EBT_FUNCTION_MAXNAMELEN EBT_TABLE_MAXNAMELEN
++
++// verdicts >0 are "branches"
++#define EBT_ACCEPT -1
++#define EBT_DROP -2
++#define EBT_CONTINUE -3
++#define EBT_RETURN -4
++#define NUM_STANDARD_TARGETS 4
++
++struct ebt_counter
++{
++ uint64_t pcnt;
++ uint64_t bcnt;
++};
++
++struct ebt_entries {
++ // this field is always set to zero
++ // See EBT_ENTRY_OR_ENTRIES.
++ // Must be same size as ebt_entry.bitmask
++ unsigned int distinguisher;
++ // the chain name
++ char name[EBT_CHAIN_MAXNAMELEN];
++ // counter offset for this chain
++ unsigned int counter_offset;
++ // one standard (accept, drop, return) per hook
++ int policy;
++ // nr. of entries
++ unsigned int nentries;
++ // entry list
++ char data[0];
++};
++
++// used for the bitmask of struct ebt_entry
++
++// This is a hack to make a difference between an ebt_entry struct and an
++// ebt_entries struct when traversing the entries from start to end.
++// Using this simplifies the code a lot, while still being able to use
++// ebt_entries.
++// By contrast, iptables doesn't use something like ebt_entries and therefore uses
++// different techniques for naming the policy and such. So, iptables doesn't
++// need a hack like this.
++#define EBT_ENTRY_OR_ENTRIES 0x01
++// these are the normal masks
++#define EBT_NOPROTO 0x02
++#define EBT_802_3 0x04
++#define EBT_SOURCEMAC 0x08
++#define EBT_DESTMAC 0x10
++#define EBT_F_MASK (EBT_NOPROTO | EBT_802_3 | EBT_SOURCEMAC | EBT_DESTMAC \
++ | EBT_ENTRY_OR_ENTRIES)
++
++#define EBT_IPROTO 0x01
++#define EBT_IIN 0x02
++#define EBT_IOUT 0x04
++#define EBT_ISOURCE 0x8
++#define EBT_IDEST 0x10
++#define EBT_ILOGICALIN 0x20
++#define EBT_ILOGICALOUT 0x40
++#define EBT_INV_MASK (EBT_IPROTO | EBT_IIN | EBT_IOUT | EBT_ILOGICALIN \
++ | EBT_ILOGICALOUT | EBT_ISOURCE | EBT_IDEST)
++
++struct ebt_entry_match
++{
++ union {
++ char name[EBT_FUNCTION_MAXNAMELEN];
++ struct ebt_match *match;
++ } u;
++ // size of data
++ unsigned int match_size;
++ unsigned char data[0];
++};
++
++struct ebt_entry_watcher
++{
++ union {
++ char name[EBT_FUNCTION_MAXNAMELEN];
++ struct ebt_watcher *watcher;
++ } u;
++ // size of data
++ unsigned int watcher_size;
++ unsigned char data[0];
++};
++
++struct ebt_entry_target
++{
++ union {
++ char name[EBT_FUNCTION_MAXNAMELEN];
++ struct ebt_target *target;
++ } u;
++ // size of data
++ unsigned int target_size;
++ unsigned char data[0];
++};
++
++#define EBT_STANDARD_TARGET "standard"
++struct ebt_standard_target
++{
++ struct ebt_entry_target target;
++ int verdict;
++};
++
++// one entry
++struct ebt_entry {
++ // this needs to be the first field
++ unsigned int bitmask;
++ unsigned int invflags;
++ uint16_t ethproto;
++ // the physical in-dev
++ char in[IFNAMSIZ];
++ // the logical in-dev
++ char logical_in[IFNAMSIZ];
++ // the physical out-dev
++ char out[IFNAMSIZ];
++ // the logical out-dev
++ char logical_out[IFNAMSIZ];
++ unsigned char sourcemac[ETH_ALEN];
++ unsigned char sourcemsk[ETH_ALEN];
++ unsigned char destmac[ETH_ALEN];
++ unsigned char destmsk[ETH_ALEN];
++ // sizeof ebt_entry + matches
++ unsigned int watchers_offset;
++ // sizeof ebt_entry + matches + watchers
++ unsigned int target_offset;
++ // sizeof ebt_entry + matches + watchers + target
++ unsigned int next_offset;
++ unsigned char elems[0];
++};
++
++struct ebt_replace
++{
++ char name[EBT_TABLE_MAXNAMELEN];
++ unsigned int valid_hooks;
++ // nr of rules in the table
++ unsigned int nentries;
++ // total size of the entries
++ unsigned int entries_size;
++ // start of the chains
++ struct ebt_entries *hook_entry[NF_BR_NUMHOOKS];
++ // nr of counters userspace expects back
++ unsigned int num_counters;
++ // where the kernel will put the old counters
++ struct ebt_counter *counters;
++ char *entries;
++};
++
++// [gs]etsockopt numbers
++#define EBT_BASE_CTL 128
++
++#define EBT_SO_SET_ENTRIES (EBT_BASE_CTL)
++#define EBT_SO_SET_COUNTERS (EBT_SO_SET_ENTRIES+1)
++#define EBT_SO_SET_MAX (EBT_SO_SET_COUNTERS+1)
++
++#define EBT_SO_GET_INFO (EBT_BASE_CTL)
++#define EBT_SO_GET_ENTRIES (EBT_SO_GET_INFO+1)
++#define EBT_SO_GET_INIT_INFO (EBT_SO_GET_ENTRIES+1)
++#define EBT_SO_GET_INIT_ENTRIES (EBT_SO_GET_INIT_INFO+1)
++#define EBT_SO_GET_MAX (EBT_SO_GET_INIT_ENTRIES+1)
++
++#ifdef __KERNEL__
++
++// return values for match() functions
++#define EBT_MATCH 0
++#define EBT_NOMATCH 1
++
++struct ebt_match
++{
++ struct list_head list;
++ const char name[EBT_FUNCTION_MAXNAMELEN];
++ // 0 == it matches
++ int (*match)(const struct sk_buff *skb, const struct net_device *in,
++ const struct net_device *out, const void *matchdata,
++ unsigned int datalen);
++ // 0 == let it in
++ int (*check)(const char *tablename, unsigned int hookmask,
++ const struct ebt_entry *e, void *matchdata, unsigned int datalen);
++ void (*destroy)(void *matchdata, unsigned int datalen);
++ struct module *me;
++};
++
++struct ebt_watcher
++{
++ struct list_head list;
++ const char name[EBT_FUNCTION_MAXNAMELEN];
++ void (*watcher)(const struct sk_buff *skb, unsigned int hooknr,
++ const struct net_device *in, const struct net_device *out,
++ const void *watcherdata, unsigned int datalen);
++ // 0 == let it in
++ int (*check)(const char *tablename, unsigned int hookmask,
++ const struct ebt_entry *e, void *watcherdata, unsigned int datalen);
++ void (*destroy)(void *watcherdata, unsigned int datalen);
++ struct module *me;
++};
++
++struct ebt_target
++{
++ struct list_head list;
++ const char name[EBT_FUNCTION_MAXNAMELEN];
++ // returns one of the standard verdicts
++ int (*target)(struct sk_buff **pskb, unsigned int hooknr,
++ const struct net_device *in, const struct net_device *out,
++ const void *targetdata, unsigned int datalen);
++ // 0 == let it in
++ int (*check)(const char *tablename, unsigned int hookmask,
++ const struct ebt_entry *e, void *targetdata, unsigned int datalen);
++ void (*destroy)(void *targetdata, unsigned int datalen);
++ struct module *me;
++};
++
++// used for jumping from and into user defined chains (udc)
++struct ebt_chainstack
++{
++ struct ebt_entries *chaininfo; // pointer to chain data
++ struct ebt_entry *e; // pointer to entry data
++ unsigned int n; // n'th entry
++};
++
++struct ebt_table_info
++{
++ // total size of the entries
++ unsigned int entries_size;
++ unsigned int nentries;
++ // pointers to the start of the chains
++ struct ebt_entries *hook_entry[NF_BR_NUMHOOKS];
++ // room to maintain the stack used for jumping from and into udc
++ struct ebt_chainstack **chainstack;
++ char *entries;
++ struct ebt_counter counters[0] ____cacheline_aligned;
++};
++
++struct ebt_table
++{
++ struct list_head list;
++ char name[EBT_TABLE_MAXNAMELEN];
++ struct ebt_replace *table;
++ unsigned int valid_hooks;
++ rwlock_t lock;
++ // e.g. could be the table explicitly only allows certain
++ // matches, targets, ... 0 == let it in
++ int (*check)(const struct ebt_table_info *info,
++ unsigned int valid_hooks);
++ // the data used by the kernel
++ struct ebt_table_info *private;
++};
++
++#define EBT_ALIGN(s) (((s) + (__alignof__(struct ebt_entry_target)-1)) & \
++ ~(__alignof__(struct ebt_entry_target)-1))
++extern int ebt_register_table(struct ebt_table *table);
++extern void ebt_unregister_table(struct ebt_table *table);
++extern int ebt_register_match(struct ebt_match *match);
++extern void ebt_unregister_match(struct ebt_match *match);
++extern int ebt_register_watcher(struct ebt_watcher *watcher);
++extern void ebt_unregister_watcher(struct ebt_watcher *watcher);
++extern int ebt_register_target(struct ebt_target *target);
++extern void ebt_unregister_target(struct ebt_target *target);
++extern unsigned int ebt_do_table(unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in, const struct net_device *out,
++ struct ebt_table *table);
++
++ // Used in the kernel match() functions
++#define FWINV(bool,invflg) ((bool) ^ !!(info->invflags & invflg))
++// True if the hook mask denotes that the rule is in a base chain,
++// used in the check() functions
++#define BASE_CHAIN (hookmask & (1 << NF_BR_NUMHOOKS))
++// Clear the bit in the hook mask that tells if the rule is on a base chain
++#define CLEAR_BASE_CHAIN_BIT (hookmask &= ~(1 << NF_BR_NUMHOOKS))
++// True if the target is not a standard target
++#define INVALID_TARGET (info->target < -NUM_STANDARD_TARGETS || info->target >= 0)
++
++#endif /* __KERNEL__ */
++
++// blatantly stolen from ip_tables.h
++// fn returns 0 to continue iteration
++#define EBT_MATCH_ITERATE(e, fn, args...) \
++({ \
++ unsigned int __i; \
++ int __ret = 0; \
++ struct ebt_entry_match *__match; \
++ \
++ for (__i = sizeof(struct ebt_entry); \
++ __i < (e)->watchers_offset; \
++ __i += __match->match_size + \
++ sizeof(struct ebt_entry_match)) { \
++ __match = (void *)(e) + __i; \
++ \
++ __ret = fn(__match , ## args); \
++ if (__ret != 0) \
++ break; \
++ } \
++ if (__ret == 0) { \
++ if (__i != (e)->watchers_offset) \
++ __ret = -EINVAL; \
++ } \
++ __ret; \
++})
++
++#define EBT_WATCHER_ITERATE(e, fn, args...) \
++({ \
++ unsigned int __i; \
++ int __ret = 0; \
++ struct ebt_entry_watcher *__watcher; \
++ \
++ for (__i = e->watchers_offset; \
++ __i < (e)->target_offset; \
++ __i += __watcher->watcher_size + \
++ sizeof(struct ebt_entry_watcher)) { \
++ __watcher = (void *)(e) + __i; \
++ \
++ __ret = fn(__watcher , ## args); \
++ if (__ret != 0) \
++ break; \
++ } \
++ if (__ret == 0) { \
++ if (__i != (e)->target_offset) \
++ __ret = -EINVAL; \
++ } \
++ __ret; \
++})
++
++#define EBT_ENTRY_ITERATE(entries, size, fn, args...) \
++({ \
++ unsigned int __i; \
++ int __ret = 0; \
++ struct ebt_entry *__entry; \
++ \
++ for (__i = 0; __i < (size);) { \
++ __entry = (void *)(entries) + __i; \
++ __ret = fn(__entry , ## args); \
++ if (__ret != 0) \
++ break; \
++ if (__entry->bitmask != 0) \
++ __i += __entry->next_offset; \
++ else \
++ __i += sizeof(struct ebt_entries); \
++ } \
++ if (__ret == 0) { \
++ if (__i != (size)) \
++ __ret = -EINVAL; \
++ } \
++ __ret; \
++})
++
++#endif
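As a usage sketch of the registration API declared above (illustrative only; the "example" names are hypothetical and error handling is minimal):

#include <linux/module.h>
#include <linux/init.h>
#include <linux/netfilter_bridge/ebtables.h>

static int example_match(const struct sk_buff *skb, const struct net_device *in,
   const struct net_device *out, const void *data, unsigned int datalen)
{
	return EBT_MATCH;		/* 0 == the rule matches */
}

static int example_check(const char *tablename, unsigned int hookmask,
   const struct ebt_entry *e, void *data, unsigned int datalen)
{
	return 0;			/* 0 == accept this rule */
}

static struct ebt_match example =
{
	.name	= "example",
	.match	= example_match,
	.check	= example_check,
	.me	= THIS_MODULE,
};

static int __init init(void)
{
	return ebt_register_match(&example);
}

static void __exit fini(void)
{
	ebt_unregister_match(&example);
}

module_init(init);
module_exit(fini);
MODULE_LICENSE("GPL");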
+diff -Nur linux-mips-cvs/include/linux/netfilter_bridge.h linux-ebtables/include/linux/netfilter_bridge.h
+--- linux-mips-cvs/include/linux/netfilter_bridge.h 2001-08-22 05:25:11.000000000 +0200
++++ linux-ebtables/include/linux/netfilter_bridge.h 2005-02-07 05:52:50.000000000 +0100
+@@ -6,6 +6,10 @@
+
+ #include <linux/config.h>
+ #include <linux/netfilter.h>
++#if defined(__KERNEL__) && defined(CONFIG_NETFILTER)
++#include <asm/atomic.h>
++#include <linux/if_ether.h>
++#endif
+
+ /* Bridge Hooks */
+ /* After promisc drops, checksum checks. */
+@@ -18,7 +22,76 @@
+ #define NF_BR_LOCAL_OUT 3
+ /* Packets about to hit the wire. */
+ #define NF_BR_POST_ROUTING 4
+-#define NF_BR_NUMHOOKS 5
++/* Not really a hook, but used for the ebtables broute table */
++#define NF_BR_BROUTING 5
++#define NF_BR_NUMHOOKS 6
++
++#ifdef __KERNEL__
++
++#define BRNF_PKT_TYPE 0x01
++#define BRNF_BRIDGED_DNAT 0x02
++#define BRNF_DONT_TAKE_PARENT 0x04
++#define BRNF_BRIDGED 0x08
++#define BRNF_NF_BRIDGE_PREROUTING 0x10
++
++enum nf_br_hook_priorities {
++ NF_BR_PRI_FIRST = INT_MIN,
++ NF_BR_PRI_NAT_DST_BRIDGED = -300,
++ NF_BR_PRI_FILTER_BRIDGED = -200,
++ NF_BR_PRI_BRNF = 0,
++ NF_BR_PRI_NAT_DST_OTHER = 100,
++ NF_BR_PRI_FILTER_OTHER = 200,
++ NF_BR_PRI_NAT_SRC = 300,
++ NF_BR_PRI_LAST = INT_MAX,
++};
++
++#ifdef CONFIG_NETFILTER
++static inline
++struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
++{
++ struct nf_bridge_info **nf_bridge = &(skb->nf_bridge);
++
++ if ((*nf_bridge = kmalloc(sizeof(**nf_bridge), GFP_ATOMIC)) != NULL) {
++ atomic_set(&(*nf_bridge)->use, 1);
++ (*nf_bridge)->mask = 0;
++ (*nf_bridge)->physindev = (*nf_bridge)->physoutdev = NULL;
++#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
++ (*nf_bridge)->netoutdev = NULL;
++#endif
++ }
++
++ return *nf_bridge;
++}
++
++/* Only used in br_forward.c */
++static inline
++void nf_bridge_maybe_copy_header(struct sk_buff *skb)
++{
++ if (skb->nf_bridge) {
++ if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
++ memcpy(skb->data - 18, skb->nf_bridge->data, 18);
++ skb_push(skb, 4);
++ } else
++ memcpy(skb->data - 16, skb->nf_bridge->data, 16);
++ }
++}
++
++static inline
++void nf_bridge_save_header(struct sk_buff *skb)
++{
++ int header_size = 16;
++
++ if (skb->protocol == __constant_htons(ETH_P_8021Q))
++ header_size = 18;
++ memcpy(skb->nf_bridge->data, skb->data - header_size, header_size);
++}
+
++struct bridge_skb_cb {
++ union {
++ __u32 ipv4;
++ } daddr;
++};
++#endif /* CONFIG_NETFILTER */
+
++#endif /* __KERNEL__ */
+ #endif
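The NF_BR_BROUTING pseudo-hook above pairs with the br_should_route_hook pointer added to if_bridge.h at the top of this patch. A sketch of the consuming side (illustrative only; the real check sits in the bridge input path):

extern int (*br_should_route_hook)(struct sk_buff **pskb);

static int example_maybe_broute(struct sk_buff **pskb)
{
	/* The ebtables broute table answers non-zero for "route it":
	 * the frame is handed to the normal protocol stack instead of
	 * being bridged. */
	if (br_should_route_hook && br_should_route_hook(pskb))
		return 1;
	return 0;
}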
+diff -Nur linux-mips-cvs/include/linux/netfilter_ipv4/ipt_physdev.h linux-ebtables/include/linux/netfilter_ipv4/ipt_physdev.h
+--- linux-mips-cvs/include/linux/netfilter_ipv4/ipt_physdev.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/include/linux/netfilter_ipv4/ipt_physdev.h 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,24 @@
++#ifndef _IPT_PHYSDEV_H
++#define _IPT_PHYSDEV_H
++
++#ifdef __KERNEL__
++#include <linux/if.h>
++#endif
++
++#define IPT_PHYSDEV_OP_IN 0x01
++#define IPT_PHYSDEV_OP_OUT 0x02
++#define IPT_PHYSDEV_OP_BRIDGED 0x04
++#define IPT_PHYSDEV_OP_ISIN 0x08
++#define IPT_PHYSDEV_OP_ISOUT 0x10
++#define IPT_PHYSDEV_OP_MASK (0x20 - 1)
++
++struct ipt_physdev_info {
++ char physindev[IFNAMSIZ];
++ char in_mask[IFNAMSIZ];
++ char physoutdev[IFNAMSIZ];
++ char out_mask[IFNAMSIZ];
++ u_int8_t invert;
++ u_int8_t bitmask;
++};
++
++#endif /*_IPT_PHYSDEV_H*/
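Illustrative only: the masked interface-name comparison this structure implies. The real ipt_physdev match compares word-by-word for speed; the byte-wise version below is just for clarity, and in_mask is what turns a trailing '+' wildcard into don't-care bytes.

static int example_physdev_in_match(const struct ipt_physdev_info *info,
				    const char indev[IFNAMSIZ])
{
	unsigned int i, ret = 0;

	for (i = 0; i < IFNAMSIZ; i++)
		ret |= (indev[i] ^ info->physindev[i]) & info->in_mask[i];

	/* the invert bit flips the verdict for --physdev-in */
	return (ret == 0) ^ !!(info->invert & IPT_PHYSDEV_OP_IN);
}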
+diff -Nur linux-mips-cvs/include/linux/netfilter_ipv4.h linux-ebtables/include/linux/netfilter_ipv4.h
+--- linux-mips-cvs/include/linux/netfilter_ipv4.h 2002-02-26 07:00:31.000000000 +0100
++++ linux-ebtables/include/linux/netfilter_ipv4.h 2005-02-07 05:52:50.000000000 +0100
+@@ -52,8 +52,10 @@
+ enum nf_ip_hook_priorities {
+ NF_IP_PRI_FIRST = INT_MIN,
+ NF_IP_PRI_CONNTRACK = -200,
++ NF_IP_PRI_BRIDGE_SABOTAGE_FORWARD = -175,
+ NF_IP_PRI_MANGLE = -150,
+ NF_IP_PRI_NAT_DST = -100,
++ NF_IP_PRI_BRIDGE_SABOTAGE_LOCAL_OUT = -50,
+ NF_IP_PRI_FILTER = 0,
+ NF_IP_PRI_NAT_SRC = 100,
+ NF_IP_PRI_LAST = INT_MAX,
+diff -Nur linux-mips-cvs/include/linux/netfilter_ipv6.h linux-ebtables/include/linux/netfilter_ipv6.h
+--- linux-mips-cvs/include/linux/netfilter_ipv6.h 2001-01-11 05:02:45.000000000 +0100
++++ linux-ebtables/include/linux/netfilter_ipv6.h 2005-02-07 05:52:50.000000000 +0100
+@@ -57,8 +57,10 @@
+ enum nf_ip6_hook_priorities {
+ NF_IP6_PRI_FIRST = INT_MIN,
+ NF_IP6_PRI_CONNTRACK = -200,
++ NF_IP6_PRI_BRIDGE_SABOTAGE_FORWARD = -175,
+ NF_IP6_PRI_MANGLE = -150,
+ NF_IP6_PRI_NAT_DST = -100,
++ NF_IP6_PRI_BRIDGE_SABOTAGE_LOCAL_OUT = -50,
+ NF_IP6_PRI_FILTER = 0,
+ NF_IP6_PRI_NAT_SRC = 100,
+ NF_IP6_PRI_LAST = INT_MAX,
+diff -Nur linux-mips-cvs/include/linux/skbuff.h linux-ebtables/include/linux/skbuff.h
+--- linux-mips-cvs/include/linux/skbuff.h 2005-01-31 12:56:47.000000000 +0100
++++ linux-ebtables/include/linux/skbuff.h 2005-02-07 05:52:50.000000000 +0100
+@@ -92,6 +92,20 @@
+ struct nf_ct_info {
+ struct nf_conntrack *master;
+ };
++
++#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
++struct nf_bridge_info {
++ atomic_t use;
++ struct net_device *physindev;
++ struct net_device *physoutdev;
++#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
++ struct net_device *netoutdev;
++#endif
++ unsigned int mask;
++ unsigned long data[32 / sizeof(unsigned long)];
++};
++#endif
++
+ #endif
+
+ struct sk_buff_head {
+@@ -208,6 +222,9 @@
+ #ifdef CONFIG_NETFILTER_DEBUG
+ unsigned int nf_debug;
+ #endif
++#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
++ struct nf_bridge_info *nf_bridge; /* Saved data about a bridged frame - see br_netfilter.c */
++#endif
+ #endif /*CONFIG_NETFILTER*/
+
+ #if defined(CONFIG_HIPPI)
+@@ -1175,6 +1192,20 @@
+ skb->nf_debug = 0;
+ #endif
+ }
++
++#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
++static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
++{
++ if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
++ kfree(nf_bridge);
++}
++static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
++{
++ if (nf_bridge)
++ atomic_inc(&nf_bridge->use);
++}
++#endif
++
+ #else /* CONFIG_NETFILTER */
+ static inline void nf_reset(struct sk_buff *skb) {}
+ #endif /* CONFIG_NETFILTER */
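The two helpers above keep the refcount straight whenever the bridge state is shared or torn down; a minimal usage sketch (illustrative only, hypothetical function names):

static void example_share_bridge_state(struct sk_buff *dst,
				       const struct sk_buff *src)
{
	dst->nf_bridge = src->nf_bridge;
	nf_bridge_get(dst->nf_bridge);		/* take another reference */
}

static void example_drop_bridge_state(struct sk_buff *skb)
{
	nf_bridge_put(skb->nf_bridge);		/* freed on last reference */
	skb->nf_bridge = NULL;
}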
+diff -Nur linux-mips-cvs/include/linux/sysctl.h linux-ebtables/include/linux/sysctl.h
+--- linux-mips-cvs/include/linux/sysctl.h 2004-11-29 18:47:18.000000000 +0100
++++ linux-ebtables/include/linux/sysctl.h 2005-02-07 05:52:50.000000000 +0100
+@@ -608,6 +608,15 @@
+ NET_DECNET_CONF_DEV_STATE = 7
+ };
+
++/* /proc/sys/net/bridge */
++enum {
++ NET_BRIDGE_NF_CALL_ARPTABLES = 1,
++ NET_BRIDGE_NF_CALL_IPTABLES = 2,
++ NET_BRIDGE_NF_CALL_IP6TABLES = 3,
++ NET_BRIDGE_NF_FILTER_VLAN_TAGGED = 4,
++};
++
++
+ /* CTL_PROC names: */
+
+ /* CTL_FS names: */
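These IDs surface as entries under /proc/sys/net/bridge/ (e.g. bridge-nf-call-iptables). A sketch of how a ctl_table tree hanging off NET_BRIDGE might be registered, along the lines of what br_netfilter does (illustrative only; variable names are hypothetical):

#include <linux/sysctl.h>

static int brnf_call_iptables = 1;

static ctl_table example_brnf_table[] = {
	{ NET_BRIDGE_NF_CALL_IPTABLES, "bridge-nf-call-iptables",
	  &brnf_call_iptables, sizeof(int), 0644, NULL, &proc_dointvec },
	{ 0 }
};

static ctl_table example_bridge_dir[] = {
	{ NET_BRIDGE, "bridge", NULL, 0, 0555, example_brnf_table },
	{ 0 }
};

static ctl_table example_net_dir[] = {
	{ CTL_NET, "net", NULL, 0, 0555, example_bridge_dir },
	{ 0 }
};

static struct ctl_table_header *example_sysctl_header;

/* at init time:
 *	example_sysctl_header = register_sysctl_table(example_net_dir, 0);
 * at cleanup:
 *	unregister_sysctl_table(example_sysctl_header);
 */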
+diff -Nur linux-mips-cvs/include/linux/sysctl.h.orig linux-ebtables/include/linux/sysctl.h.orig
+--- linux-mips-cvs/include/linux/sysctl.h.orig 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/include/linux/sysctl.h.orig 2004-11-29 18:47:18.000000000 +0100
+@@ -0,0 +1,841 @@
++/*
++ * sysctl.h: General linux system control interface
++ *
++ * Begun 24 March 1995, Stephen Tweedie
++ *
++ ****************************************************************
++ ****************************************************************
++ **
++ ** WARNING:
++ ** The values in this file are exported to user space via
++ ** the sysctl() binary interface. Do *NOT* change the
++ ** numbering of any existing values here, and do not change
++ ** any numbers within any one set of values. If you have
++ ** to redefine an existing interface, use a new number for it.
++ ** The kernel will then return ENOTDIR to any application using
++ ** the old binary interface.
++ **
++ ** --sct
++ **
++ ****************************************************************
++ ****************************************************************
++ */
++
++#ifndef _LINUX_SYSCTL_H
++#define _LINUX_SYSCTL_H
++
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <linux/list.h>
++
++struct file;
++
++#define CTL_MAXNAME 10
++
++struct __sysctl_args {
++ int *name;
++ int nlen;
++ void *oldval;
++ size_t *oldlenp;
++ void *newval;
++ size_t newlen;
++ unsigned long __unused[4];
++};
++
++/* Define sysctl names first */
++
++/* Top-level names: */
++
++/* For internal pattern-matching use only: */
++#ifdef __KERNEL__
++#define CTL_ANY -1 /* Matches any name */
++#define CTL_NONE 0
++#endif
++
++enum
++{
++ CTL_KERN=1, /* General kernel info and control */
++ CTL_VM=2, /* VM management */
++ CTL_NET=3, /* Networking */
++ CTL_PROC=4, /* Process info */
++ CTL_FS=5, /* Filesystems */
++ CTL_DEBUG=6, /* Debugging */
++ CTL_DEV=7, /* Devices */
++ CTL_BUS=8, /* Busses */
++ CTL_ABI=9, /* Binary emulation */
++ CTL_CPU=10 /* CPU stuff (speed scaling, etc) */
++};
++
++/* CTL_BUS names: */
++enum
++{
++ CTL_BUS_ISA=1 /* ISA */
++};
++
++/* CTL_KERN names: */
++enum
++{
++ KERN_OSTYPE=1, /* string: system version */
++ KERN_OSRELEASE=2, /* string: system release */
++ KERN_OSREV=3, /* int: system revision */
++ KERN_VERSION=4, /* string: compile time info */
++ KERN_SECUREMASK=5, /* struct: maximum rights mask */
++ KERN_PROF=6, /* table: profiling information */
++ KERN_NODENAME=7,
++ KERN_DOMAINNAME=8,
++
++ KERN_CAP_BSET=14, /* int: capability bounding set */
++ KERN_PANIC=15, /* int: panic timeout */
++ KERN_REALROOTDEV=16, /* real root device to mount after initrd */
++
++ KERN_SPARC_REBOOT=21, /* reboot command on Sparc */
++ KERN_CTLALTDEL=22, /* int: allow ctl-alt-del to reboot */
++ KERN_PRINTK=23, /* struct: control printk logging parameters */
++ KERN_NAMETRANS=24, /* Name translation */
++ KERN_PPC_HTABRECLAIM=25, /* turn htab reclaimation on/off on PPC */
++ KERN_PPC_ZEROPAGED=26, /* turn idle page zeroing on/off on PPC */
++ KERN_PPC_POWERSAVE_NAP=27, /* use nap mode for power saving */
++ KERN_MODPROBE=28,
++ KERN_SG_BIG_BUFF=29,
++ KERN_ACCT=30, /* BSD process accounting parameters */
++ KERN_PPC_L2CR=31, /* l2cr register on PPC */
++
++ KERN_RTSIGNR=32, /* Number of rt sigs queued */
++ KERN_RTSIGMAX=33, /* Max queuable */
++
++ KERN_SHMMAX=34, /* long: Maximum shared memory segment */
++ KERN_MSGMAX=35, /* int: Maximum size of a messege */
++ KERN_MSGMNB=36, /* int: Maximum message queue size */
++ KERN_MSGPOOL=37, /* int: Maximum system message pool size */
++ KERN_SYSRQ=38, /* int: Sysreq enable */
++ KERN_MAX_THREADS=39, /* int: Maximum nr of threads in the system */
++ KERN_RANDOM=40, /* Random driver */
++ KERN_SHMALL=41, /* int: Maximum size of shared memory */
++ KERN_MSGMNI=42, /* int: msg queue identifiers */
++ KERN_SEM=43, /* struct: sysv semaphore limits */
++ KERN_SPARC_STOP_A=44, /* int: Sparc Stop-A enable */
++ KERN_SHMMNI=45, /* int: shm array identifiers */
++ KERN_OVERFLOWUID=46, /* int: overflow UID */
++ KERN_OVERFLOWGID=47, /* int: overflow GID */
++ KERN_SHMPATH=48, /* string: path to shm fs */
++ KERN_HOTPLUG=49, /* string: path to hotplug policy agent */
++ KERN_IEEE_EMULATION_WARNINGS=50, /* int: unimplemented ieee instructions */
++ KERN_S390_USER_DEBUG_LOGGING=51, /* int: dumps of user faults */
++ KERN_CORE_USES_PID=52, /* int: use core or core.%pid */
++ KERN_TAINTED=53, /* int: various kernel tainted flags */
++ KERN_CADPID=54, /* int: PID of the process to notify on CAD */
++ KERN_CORE_PATTERN=56, /* string: pattern for core-files */
++ KERN_PPC_L3CR=57, /* l3cr register on PPC */
++ KERN_EXCEPTION_TRACE=58, /* boolean: exception trace */
++ KERN_CORE_SETUID=59, /* int: set to allow core dumps of setuid apps */
++ KERN_SPARC_SCONS_PWROFF=64, /* int: serial console power-off halt */
++};
++
++
++/* CTL_VM names: */
++enum
++{
++ VM_SWAPCTL=1, /* struct: Set vm swapping control */
++ VM_SWAPOUT=2, /* int: Linear or sqrt() swapout for hogs */
++ VM_FREEPG=3, /* struct: Set free page thresholds */
++ VM_BDFLUSH=4, /* struct: Control buffer cache flushing */
++ VM_OVERCOMMIT_MEMORY=5, /* Turn off the virtual memory safety limit */
++ VM_BUFFERMEM=6, /* struct: Set buffer memory thresholds */
++ VM_PAGECACHE=7, /* struct: Set cache memory thresholds */
++ VM_PAGERDAEMON=8, /* struct: Control kswapd behaviour */
++ VM_PGT_CACHE=9, /* struct: Set page table cache parameters */
++ VM_PAGE_CLUSTER=10, /* int: set number of pages to swap together */
++ VM_MAX_MAP_COUNT=11, /* int: Maximum number of active map areas */
++ VM_MIN_READAHEAD=12, /* Min file readahead */
++ VM_MAX_READAHEAD=13, /* Max file readahead */
++ VM_VFS_SCAN_RATIO=14, /* part of the inactive vfs lists to scan */
++ VM_LRU_BALANCE_RATIO=15,/* balance active and inactive caches */
++ VM_PASSES=16, /* number of vm passes before failing */
++ VM_PAGEBUF=17, /* struct: Control pagebuf parameters */
++ VM_GFP_DEBUG=18, /* debug GFP failures */
++ VM_CACHE_SCAN_RATIO=19, /* part of the inactive cache list to scan */
++ VM_MAPPED_RATIO=20, /* amount of unfreeable pages that triggers swapout */
++ VM_LAPTOP_MODE=21, /* kernel in laptop flush mode */
++ VM_BLOCK_DUMP=22, /* dump fs activity to log */
++ VM_ANON_LRU=23, /* immediatly insert anon pages in the vm page lru */
++};
++
++
++/* CTL_NET names: */
++enum
++{
++ NET_CORE=1,
++ NET_ETHER=2,
++ NET_802=3,
++ NET_UNIX=4,
++ NET_IPV4=5,
++ NET_IPX=6,
++ NET_ATALK=7,
++ NET_NETROM=8,
++ NET_AX25=9,
++ NET_BRIDGE=10,
++ NET_ROSE=11,
++ NET_IPV6=12,
++ NET_X25=13,
++ NET_TR=14,
++ NET_DECNET=15,
++ NET_ECONET=16,
++ NET_KHTTPD=17,
++ NET_SCTP=18
++};
++
++/* /proc/sys/kernel/random */
++enum
++{
++ RANDOM_POOLSIZE=1,
++ RANDOM_ENTROPY_COUNT=2,
++ RANDOM_READ_THRESH=3,
++ RANDOM_WRITE_THRESH=4,
++ RANDOM_BOOT_ID=5,
++ RANDOM_UUID=6
++};
++
++/* /proc/sys/bus/isa */
++enum
++{
++ BUS_ISA_MEM_BASE=1,
++ BUS_ISA_PORT_BASE=2,
++ BUS_ISA_PORT_SHIFT=3
++};
++
++/* /proc/sys/net/core */
++enum
++{
++ NET_CORE_WMEM_MAX=1,
++ NET_CORE_RMEM_MAX=2,
++ NET_CORE_WMEM_DEFAULT=3,
++ NET_CORE_RMEM_DEFAULT=4,
++/* was NET_CORE_DESTROY_DELAY */
++ NET_CORE_MAX_BACKLOG=6,
++ NET_CORE_FASTROUTE=7,
++ NET_CORE_MSG_COST=8,
++ NET_CORE_MSG_BURST=9,
++ NET_CORE_OPTMEM_MAX=10,
++ NET_CORE_HOT_LIST_LENGTH=11,
++ NET_CORE_DIVERT_VERSION=12,
++ NET_CORE_NO_CONG_THRESH=13,
++ NET_CORE_NO_CONG=14,
++ NET_CORE_LO_CONG=15,
++ NET_CORE_MOD_CONG=16,
++ NET_CORE_DEV_WEIGHT=17,
++ NET_CORE_SOMAXCONN=18,
++};
++
++/* /proc/sys/net/ethernet */
++
++/* /proc/sys/net/802 */
++
++/* /proc/sys/net/unix */
++
++enum
++{
++ NET_UNIX_DESTROY_DELAY=1,
++ NET_UNIX_DELETE_DELAY=2,
++ NET_UNIX_MAX_DGRAM_QLEN=3,
++};
++
++/* /proc/sys/net/ipv4 */
++enum
++{
++ /* v2.0 compatibile variables */
++ NET_IPV4_FORWARD=8,
++ NET_IPV4_DYNADDR=9,
++
++ NET_IPV4_CONF=16,
++ NET_IPV4_NEIGH=17,
++ NET_IPV4_ROUTE=18,
++ NET_IPV4_FIB_HASH=19,
++ NET_IPV4_NETFILTER=20,
++
++ NET_IPV4_TCP_TIMESTAMPS=33,
++ NET_IPV4_TCP_WINDOW_SCALING=34,
++ NET_IPV4_TCP_SACK=35,
++ NET_IPV4_TCP_RETRANS_COLLAPSE=36,
++ NET_IPV4_DEFAULT_TTL=37,
++ NET_IPV4_AUTOCONFIG=38,
++ NET_IPV4_NO_PMTU_DISC=39,
++ NET_IPV4_TCP_SYN_RETRIES=40,
++ NET_IPV4_IPFRAG_HIGH_THRESH=41,
++ NET_IPV4_IPFRAG_LOW_THRESH=42,
++ NET_IPV4_IPFRAG_TIME=43,
++ NET_IPV4_TCP_MAX_KA_PROBES=44,
++ NET_IPV4_TCP_KEEPALIVE_TIME=45,
++ NET_IPV4_TCP_KEEPALIVE_PROBES=46,
++ NET_IPV4_TCP_RETRIES1=47,
++ NET_IPV4_TCP_RETRIES2=48,
++ NET_IPV4_TCP_FIN_TIMEOUT=49,
++ NET_IPV4_IP_MASQ_DEBUG=50,
++ NET_TCP_SYNCOOKIES=51,
++ NET_TCP_STDURG=52,
++ NET_TCP_RFC1337=53,
++ NET_TCP_SYN_TAILDROP=54,
++ NET_TCP_MAX_SYN_BACKLOG=55,
++ NET_IPV4_LOCAL_PORT_RANGE=56,
++ NET_IPV4_ICMP_ECHO_IGNORE_ALL=57,
++ NET_IPV4_ICMP_ECHO_IGNORE_BROADCASTS=58,
++ NET_IPV4_ICMP_SOURCEQUENCH_RATE=59,
++ NET_IPV4_ICMP_DESTUNREACH_RATE=60,
++ NET_IPV4_ICMP_TIMEEXCEED_RATE=61,
++ NET_IPV4_ICMP_PARAMPROB_RATE=62,
++ NET_IPV4_ICMP_ECHOREPLY_RATE=63,
++ NET_IPV4_ICMP_IGNORE_BOGUS_ERROR_RESPONSES=64,
++ NET_IPV4_IGMP_MAX_MEMBERSHIPS=65,
++ NET_TCP_TW_RECYCLE=66,
++ NET_IPV4_ALWAYS_DEFRAG=67,
++ NET_IPV4_TCP_KEEPALIVE_INTVL=68,
++ NET_IPV4_INET_PEER_THRESHOLD=69,
++ NET_IPV4_INET_PEER_MINTTL=70,
++ NET_IPV4_INET_PEER_MAXTTL=71,
++ NET_IPV4_INET_PEER_GC_MINTIME=72,
++ NET_IPV4_INET_PEER_GC_MAXTIME=73,
++ NET_TCP_ORPHAN_RETRIES=74,
++ NET_TCP_ABORT_ON_OVERFLOW=75,
++ NET_TCP_SYNACK_RETRIES=76,
++ NET_TCP_MAX_ORPHANS=77,
++ NET_TCP_MAX_TW_BUCKETS=78,
++ NET_TCP_FACK=79,
++ NET_TCP_REORDERING=80,
++ NET_TCP_ECN=81,
++ NET_TCP_DSACK=82,
++ NET_TCP_MEM=83,
++ NET_TCP_WMEM=84,
++ NET_TCP_RMEM=85,
++ NET_TCP_APP_WIN=86,
++ NET_TCP_ADV_WIN_SCALE=87,
++ NET_IPV4_NONLOCAL_BIND=88,
++ NET_IPV4_ICMP_RATELIMIT=89,
++ NET_IPV4_ICMP_RATEMASK=90,
++ NET_TCP_TW_REUSE=91,
++ NET_TCP_FRTO=92,
++ NET_TCP_LOW_LATENCY=93,
++ NET_IPV4_IPFRAG_SECRET_INTERVAL=94,
++ NET_TCP_WESTWOOD=95,
++ NET_IPV4_IGMP_MAX_MSF=96,
++ NET_TCP_NO_METRICS_SAVE=97,
++ NET_TCP_VEGAS=98,
++ NET_TCP_VEGAS_ALPHA=99,
++ NET_TCP_VEGAS_BETA=100,
++ NET_TCP_VEGAS_GAMMA=101,
++ NET_TCP_BIC=102,
++ NET_TCP_BIC_FAST_CONVERGENCE=103,
++ NET_TCP_BIC_LOW_WINDOW=104,
++ NET_TCP_DEFAULT_WIN_SCALE=105,
++ NET_TCP_MODERATE_RCVBUF=106,
++};
++
++enum {
++ NET_IPV4_ROUTE_FLUSH=1,
++ NET_IPV4_ROUTE_MIN_DELAY=2,
++ NET_IPV4_ROUTE_MAX_DELAY=3,
++ NET_IPV4_ROUTE_GC_THRESH=4,
++ NET_IPV4_ROUTE_MAX_SIZE=5,
++ NET_IPV4_ROUTE_GC_MIN_INTERVAL=6,
++ NET_IPV4_ROUTE_GC_TIMEOUT=7,
++ NET_IPV4_ROUTE_GC_INTERVAL=8,
++ NET_IPV4_ROUTE_REDIRECT_LOAD=9,
++ NET_IPV4_ROUTE_REDIRECT_NUMBER=10,
++ NET_IPV4_ROUTE_REDIRECT_SILENCE=11,
++ NET_IPV4_ROUTE_ERROR_COST=12,
++ NET_IPV4_ROUTE_ERROR_BURST=13,
++ NET_IPV4_ROUTE_GC_ELASTICITY=14,
++ NET_IPV4_ROUTE_MTU_EXPIRES=15,
++ NET_IPV4_ROUTE_MIN_PMTU=16,
++ NET_IPV4_ROUTE_MIN_ADVMSS=17,
++ NET_IPV4_ROUTE_SECRET_INTERVAL=18,
++};
++
++enum
++{
++ NET_PROTO_CONF_ALL=-2,
++ NET_PROTO_CONF_DEFAULT=-3
++
++ /* And device ifindices ... */
++};
++
++enum
++{
++ NET_IPV4_CONF_FORWARDING=1,
++ NET_IPV4_CONF_MC_FORWARDING=2,
++ NET_IPV4_CONF_PROXY_ARP=3,
++ NET_IPV4_CONF_ACCEPT_REDIRECTS=4,
++ NET_IPV4_CONF_SECURE_REDIRECTS=5,
++ NET_IPV4_CONF_SEND_REDIRECTS=6,
++ NET_IPV4_CONF_SHARED_MEDIA=7,
++ NET_IPV4_CONF_RP_FILTER=8,
++ NET_IPV4_CONF_ACCEPT_SOURCE_ROUTE=9,
++ NET_IPV4_CONF_BOOTP_RELAY=10,
++ NET_IPV4_CONF_LOG_MARTIANS=11,
++ NET_IPV4_CONF_TAG=12,
++ NET_IPV4_CONF_ARPFILTER=13,
++ NET_IPV4_CONF_MEDIUM_ID=14,
++ NET_IPV4_CONF_FORCE_IGMP_VERSION=17,
++ NET_IPV4_CONF_ARP_ANNOUNCE=18,
++ NET_IPV4_CONF_ARP_IGNORE=19,
++};
++
++/* /proc/sys/net/ipv4/netfilter */
++enum
++{
++ NET_IPV4_NF_CONNTRACK_MAX=1,
++ NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT=2,
++ NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV=3,
++ NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED=4,
++ NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT=5,
++ NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT=6,
++ NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK=7,
++ NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT=8,
++ NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE=9,
++ NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT=10,
++ NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT_STREAM=11,
++ NET_IPV4_NF_CONNTRACK_ICMP_TIMEOUT=12,
++ NET_IPV4_NF_CONNTRACK_GENERIC_TIMEOUT=13,
++ NET_IPV4_NF_CONNTRACK_BUCKETS=14,
++};
++
++/* /proc/sys/net/ipv6 */
++enum {
++ NET_IPV6_CONF=16,
++ NET_IPV6_NEIGH=17,
++ NET_IPV6_ROUTE=18,
++ NET_IPV6_ICMP=19,
++ NET_IPV6_BINDV6ONLY=20,
++ NET_IPV6_MLD_MAX_MSF=25,
++};
++
++enum {
++ NET_IPV6_ROUTE_FLUSH=1,
++ NET_IPV6_ROUTE_GC_THRESH=2,
++ NET_IPV6_ROUTE_MAX_SIZE=3,
++ NET_IPV6_ROUTE_GC_MIN_INTERVAL=4,
++ NET_IPV6_ROUTE_GC_TIMEOUT=5,
++ NET_IPV6_ROUTE_GC_INTERVAL=6,
++ NET_IPV6_ROUTE_GC_ELASTICITY=7,
++ NET_IPV6_ROUTE_MTU_EXPIRES=8,
++ NET_IPV6_ROUTE_MIN_ADVMSS=9
++};
++
++enum {
++ NET_IPV6_FORWARDING=1,
++ NET_IPV6_HOP_LIMIT=2,
++ NET_IPV6_MTU=3,
++ NET_IPV6_ACCEPT_RA=4,
++ NET_IPV6_ACCEPT_REDIRECTS=5,
++ NET_IPV6_AUTOCONF=6,
++ NET_IPV6_DAD_TRANSMITS=7,
++ NET_IPV6_RTR_SOLICITS=8,
++ NET_IPV6_RTR_SOLICIT_INTERVAL=9,
++ NET_IPV6_RTR_SOLICIT_DELAY=10
++};
++
++/* /proc/sys/net/ipv6/icmp */
++enum {
++ NET_IPV6_ICMP_RATELIMIT=1
++};
++
++/* /proc/sys/net/<protocol>/neigh/<dev> */
++enum {
++ NET_NEIGH_MCAST_SOLICIT=1,
++ NET_NEIGH_UCAST_SOLICIT=2,
++ NET_NEIGH_APP_SOLICIT=3,
++ NET_NEIGH_RETRANS_TIME=4,
++ NET_NEIGH_REACHABLE_TIME=5,
++ NET_NEIGH_DELAY_PROBE_TIME=6,
++ NET_NEIGH_GC_STALE_TIME=7,
++ NET_NEIGH_UNRES_QLEN=8,
++ NET_NEIGH_PROXY_QLEN=9,
++ NET_NEIGH_ANYCAST_DELAY=10,
++ NET_NEIGH_PROXY_DELAY=11,
++ NET_NEIGH_LOCKTIME=12,
++ NET_NEIGH_GC_INTERVAL=13,
++ NET_NEIGH_GC_THRESH1=14,
++ NET_NEIGH_GC_THRESH2=15,
++ NET_NEIGH_GC_THRESH3=16
++};
++
++/* /proc/sys/net/ipx */
++enum {
++ NET_IPX_PPROP_BROADCASTING=1,
++ NET_IPX_FORWARDING=2
++};
++
++
++/* /proc/sys/net/appletalk */
++enum {
++ NET_ATALK_AARP_EXPIRY_TIME=1,
++ NET_ATALK_AARP_TICK_TIME=2,
++ NET_ATALK_AARP_RETRANSMIT_LIMIT=3,
++ NET_ATALK_AARP_RESOLVE_TIME=4
++};
++
++
++/* /proc/sys/net/netrom */
++enum {
++ NET_NETROM_DEFAULT_PATH_QUALITY=1,
++ NET_NETROM_OBSOLESCENCE_COUNT_INITIALISER=2,
++ NET_NETROM_NETWORK_TTL_INITIALISER=3,
++ NET_NETROM_TRANSPORT_TIMEOUT=4,
++ NET_NETROM_TRANSPORT_MAXIMUM_TRIES=5,
++ NET_NETROM_TRANSPORT_ACKNOWLEDGE_DELAY=6,
++ NET_NETROM_TRANSPORT_BUSY_DELAY=7,
++ NET_NETROM_TRANSPORT_REQUESTED_WINDOW_SIZE=8,
++ NET_NETROM_TRANSPORT_NO_ACTIVITY_TIMEOUT=9,
++ NET_NETROM_ROUTING_CONTROL=10,
++ NET_NETROM_LINK_FAILS_COUNT=11
++};
++
++/* /proc/sys/net/ax25 */
++enum {
++ NET_AX25_IP_DEFAULT_MODE=1,
++ NET_AX25_DEFAULT_MODE=2,
++ NET_AX25_BACKOFF_TYPE=3,
++ NET_AX25_CONNECT_MODE=4,
++ NET_AX25_STANDARD_WINDOW=5,
++ NET_AX25_EXTENDED_WINDOW=6,
++ NET_AX25_T1_TIMEOUT=7,
++ NET_AX25_T2_TIMEOUT=8,
++ NET_AX25_T3_TIMEOUT=9,
++ NET_AX25_IDLE_TIMEOUT=10,
++ NET_AX25_N2=11,
++ NET_AX25_PACLEN=12,
++ NET_AX25_PROTOCOL=13,
++ NET_AX25_DAMA_SLAVE_TIMEOUT=14
++};
++
++/* /proc/sys/net/rose */
++enum {
++ NET_ROSE_RESTART_REQUEST_TIMEOUT=1,
++ NET_ROSE_CALL_REQUEST_TIMEOUT=2,
++ NET_ROSE_RESET_REQUEST_TIMEOUT=3,
++ NET_ROSE_CLEAR_REQUEST_TIMEOUT=4,
++ NET_ROSE_ACK_HOLD_BACK_TIMEOUT=5,
++ NET_ROSE_ROUTING_CONTROL=6,
++ NET_ROSE_LINK_FAIL_TIMEOUT=7,
++ NET_ROSE_MAX_VCS=8,
++ NET_ROSE_WINDOW_SIZE=9,
++ NET_ROSE_NO_ACTIVITY_TIMEOUT=10
++};
++
++/* /proc/sys/net/x25 */
++enum {
++ NET_X25_RESTART_REQUEST_TIMEOUT=1,
++ NET_X25_CALL_REQUEST_TIMEOUT=2,
++ NET_X25_RESET_REQUEST_TIMEOUT=3,
++ NET_X25_CLEAR_REQUEST_TIMEOUT=4,
++ NET_X25_ACK_HOLD_BACK_TIMEOUT=5
++};
++
++/* /proc/sys/net/token-ring */
++enum
++{
++ NET_TR_RIF_TIMEOUT=1
++};
++
++/* /proc/sys/net/decnet/ */
++enum {
++ NET_DECNET_NODE_TYPE = 1,
++ NET_DECNET_NODE_ADDRESS = 2,
++ NET_DECNET_NODE_NAME = 3,
++ NET_DECNET_DEFAULT_DEVICE = 4,
++ NET_DECNET_TIME_WAIT = 5,
++ NET_DECNET_DN_COUNT = 6,
++ NET_DECNET_DI_COUNT = 7,
++ NET_DECNET_DR_COUNT = 8,
++ NET_DECNET_DST_GC_INTERVAL = 9,
++ NET_DECNET_CONF = 10,
++ NET_DECNET_NO_FC_MAX_CWND = 11,
++ NET_DECNET_DEBUG_LEVEL = 255
++};
++
++/* /proc/sys/net/sctp */
++enum {
++ NET_SCTP_RTO_INITIAL = 1,
++ NET_SCTP_RTO_MIN = 2,
++ NET_SCTP_RTO_MAX = 3,
++ NET_SCTP_RTO_ALPHA = 4,
++ NET_SCTP_RTO_BETA = 5,
++ NET_SCTP_VALID_COOKIE_LIFE = 6,
++ NET_SCTP_ASSOCIATION_MAX_RETRANS = 7,
++ NET_SCTP_PATH_MAX_RETRANS = 8,
++ NET_SCTP_MAX_INIT_RETRANSMITS = 9,
++ NET_SCTP_HB_INTERVAL = 10,
++ NET_SCTP_PRESERVE_ENABLE = 11,
++ NET_SCTP_MAX_BURST = 12,
++ NET_SCTP_ADDIP_ENABLE = 13,
++ NET_SCTP_PRSCTP_ENABLE = 14,
++};
++/* /proc/sys/net/khttpd/ */
++enum {
++ NET_KHTTPD_DOCROOT = 1,
++ NET_KHTTPD_START = 2,
++ NET_KHTTPD_STOP = 3,
++ NET_KHTTPD_UNLOAD = 4,
++ NET_KHTTPD_CLIENTPORT = 5,
++ NET_KHTTPD_PERMREQ = 6,
++ NET_KHTTPD_PERMFORBID = 7,
++ NET_KHTTPD_LOGGING = 8,
++ NET_KHTTPD_SERVERPORT = 9,
++ NET_KHTTPD_DYNAMICSTRING= 10,
++ NET_KHTTPD_SLOPPYMIME = 11,
++ NET_KHTTPD_THREADS = 12,
++ NET_KHTTPD_MAXCONNECT = 13
++};
++
++/* /proc/sys/net/decnet/conf/<dev> */
++enum {
++ NET_DECNET_CONF_LOOPBACK = -2,
++ NET_DECNET_CONF_DDCMP = -3,
++ NET_DECNET_CONF_PPP = -4,
++ NET_DECNET_CONF_X25 = -5,
++ NET_DECNET_CONF_GRE = -6,
++ NET_DECNET_CONF_ETHER = -7
++
++ /* ... and ifindex of devices */
++};
++
++/* /proc/sys/net/decnet/conf/<dev>/ */
++enum {
++ NET_DECNET_CONF_DEV_PRIORITY = 1,
++ NET_DECNET_CONF_DEV_T1 = 2,
++ NET_DECNET_CONF_DEV_T2 = 3,
++ NET_DECNET_CONF_DEV_T3 = 4,
++ NET_DECNET_CONF_DEV_FORWARDING = 5,
++ NET_DECNET_CONF_DEV_BLKSIZE = 6,
++ NET_DECNET_CONF_DEV_STATE = 7
++};
++
++/* CTL_PROC names: */
++
++/* CTL_FS names: */
++enum
++{
++ FS_NRINODE=1, /* int:current number of allocated inodes */
++ FS_STATINODE=2,
++ FS_MAXINODE=3, /* int:maximum number of inodes that can be allocated */
++ FS_NRDQUOT=4, /* int:current number of allocated dquots */
++ /* was FS_MAXDQUOT */
++ FS_NRFILE=6, /* int:current number of allocated filedescriptors */
++ FS_MAXFILE=7, /* int:maximum number of filedescriptors that can be allocated */
++ FS_DENTRY=8,
++ FS_NRSUPER=9, /* int:current number of allocated super_blocks */
++ FS_MAXSUPER=10, /* int:maximum number of super_blocks that can be allocated */
++ FS_OVERFLOWUID=11, /* int: overflow UID */
++ FS_OVERFLOWGID=12, /* int: overflow GID */
++ FS_LEASES=13, /* int: leases enabled */
++ FS_DIR_NOTIFY=14, /* int: directory notification enabled */
++ FS_LEASE_TIME=15, /* int: maximum time to wait for a lease break */
++ FS_DQSTATS=16, /* dir: disc quota usage statistics and settings */
++ FS_XFS=17, /* struct: control xfs parameters */
++};
++
++/* /proc/sys/fs/quota/ */
++enum {
++ FS_DQ_LOOKUPS = 1,
++ FS_DQ_DROPS = 2,
++ FS_DQ_READS = 3,
++ FS_DQ_WRITES = 4,
++ FS_DQ_CACHE_HITS = 5,
++ FS_DQ_ALLOCATED = 6,
++ FS_DQ_FREE = 7,
++ FS_DQ_SYNCS = 8,
++ FS_DQ_WARNINGS = 9,
++};
++
++/* CTL_DEBUG names: */
++
++/* CTL_DEV names: */
++enum {
++ DEV_CDROM=1,
++ DEV_HWMON=2,
++ DEV_PARPORT=3,
++ DEV_RAID=4,
++ DEV_MAC_HID=5
++};
++
++/* /proc/sys/dev/cdrom */
++enum {
++ DEV_CDROM_INFO=1,
++ DEV_CDROM_AUTOCLOSE=2,
++ DEV_CDROM_AUTOEJECT=3,
++ DEV_CDROM_DEBUG=4,
++ DEV_CDROM_LOCK=5,
++ DEV_CDROM_CHECK_MEDIA=6
++};
++
++/* /proc/sys/dev/parport */
++enum {
++ DEV_PARPORT_DEFAULT=-3
++};
++
++/* /proc/sys/dev/raid */
++enum {
++ DEV_RAID_SPEED_LIMIT_MIN=1,
++ DEV_RAID_SPEED_LIMIT_MAX=2
++};
++
++/* /proc/sys/dev/parport/default */
++enum {
++ DEV_PARPORT_DEFAULT_TIMESLICE=1,
++ DEV_PARPORT_DEFAULT_SPINTIME=2
++};
++
++/* /proc/sys/dev/parport/parport n */
++enum {
++ DEV_PARPORT_SPINTIME=1,
++ DEV_PARPORT_BASE_ADDR=2,
++ DEV_PARPORT_IRQ=3,
++ DEV_PARPORT_DMA=4,
++ DEV_PARPORT_MODES=5,
++ DEV_PARPORT_DEVICES=6,
++ DEV_PARPORT_AUTOPROBE=16
++};
++
++/* /proc/sys/dev/parport/parport n/devices/ */
++enum {
++ DEV_PARPORT_DEVICES_ACTIVE=-3,
++};
++
++/* /proc/sys/dev/parport/parport n/devices/device n */
++enum {
++ DEV_PARPORT_DEVICE_TIMESLICE=1,
++};
++
++/* /proc/sys/dev/mac_hid */
++enum {
++ DEV_MAC_HID_KEYBOARD_SENDS_LINUX_KEYCODES=1,
++ DEV_MAC_HID_KEYBOARD_LOCK_KEYCODES=2,
++ DEV_MAC_HID_MOUSE_BUTTON_EMULATION=3,
++ DEV_MAC_HID_MOUSE_BUTTON2_KEYCODE=4,
++ DEV_MAC_HID_MOUSE_BUTTON3_KEYCODE=5,
++ DEV_MAC_HID_ADB_MOUSE_SENDS_KEYCODES=6
++};
++
++/* /proc/sys/abi */
++enum
++{
++ ABI_DEFHANDLER_COFF=1, /* default handler for coff binaries */
++ ABI_DEFHANDLER_ELF=2, /* default handler for ELF binaries */
++ ABI_DEFHANDLER_LCALL7=3,/* default handler for procs using lcall7 */
++ ABI_DEFHANDLER_LIBCSO=4,/* default handler for an libc.so ELF interp */
++ ABI_TRACE=5, /* tracing flags */
++ ABI_FAKE_UTSNAME=6, /* fake target utsname information */
++};
++
++#ifdef __KERNEL__
++
++extern asmlinkage long sys_sysctl(struct __sysctl_args *);
++extern void sysctl_init(void);
++
++typedef struct ctl_table ctl_table;
++
++typedef int ctl_handler (ctl_table *table, int *name, int nlen,
++ void *oldval, size_t *oldlenp,
++ void *newval, size_t newlen,
++ void **context);
++
++typedef int proc_handler (ctl_table *ctl, int write, struct file * filp,
++ void *buffer, size_t *lenp);
++
++extern int proc_dostring(ctl_table *, int, struct file *,
++ void *, size_t *);
++extern int proc_dointvec(ctl_table *, int, struct file *,
++ void *, size_t *);
++extern int proc_dointvec_bset(ctl_table *, int, struct file *,
++ void *, size_t *);
++extern int proc_dointvec_minmax(ctl_table *, int, struct file *,
++ void *, size_t *);
++extern int proc_dointvec_jiffies(ctl_table *, int, struct file *,
++ void *, size_t *);
++extern int proc_doulongvec_minmax(ctl_table *, int, struct file *,
++ void *, size_t *);
++extern int proc_doulongvec_ms_jiffies_minmax(ctl_table *table, int,
++ struct file *, void *, size_t *);
++
++extern int do_sysctl (int *name, int nlen,
++ void *oldval, size_t *oldlenp,
++ void *newval, size_t newlen);
++
++extern int do_sysctl_strategy (ctl_table *table,
++ int *name, int nlen,
++ void *oldval, size_t *oldlenp,
++ void *newval, size_t newlen, void ** context);
++
++extern ctl_handler sysctl_string;
++extern ctl_handler sysctl_intvec;
++extern ctl_handler sysctl_jiffies;
++
++
++/*
++ * Register a set of sysctl names by calling register_sysctl_table
++ * with an initialised array of ctl_table's. An entry with zero
++ * ctl_name terminates the table. table->de will be set up by the
++ * registration and need not be initialised in advance.
++ *
++ * sysctl names can be mirrored automatically under /proc/sys. The
++ * procname supplied controls /proc naming.
++ *
++ * The table's mode will be honoured both for sys_sysctl(2) and
++ * proc-fs access.
++ *
++ * Leaf nodes in the sysctl tree will be represented by a single file
++ * under /proc; non-leaf nodes will be represented by directories. A
++ * null procname disables /proc mirroring at this node.
++ *
++ * sysctl(2) can automatically manage read and write requests through
++ * the sysctl table. The data and maxlen fields of the ctl_table
++ * struct enable minimal validation of the values being written to be
++ * performed, and the mode field allows minimal authentication.
++ *
++ * More sophisticated management can be enabled by the provision of a
++ * strategy routine with the table entry. This will be called before
++ * any automatic read or write of the data is performed.
++ *
++ * The strategy routine may return:
++ * <0: Error occurred (error is passed to user process)
++ * 0: OK - proceed with automatic read or write.
++ * >0: OK - read or write has been done by the strategy routine, so
++ * return immediately.
++ *
++ * There must be a proc_handler routine for any terminal nodes
++ * mirrored under /proc/sys (non-terminals are handled by a built-in
++ * directory handler). Several default handlers are available to
++ * cover common cases.
++ */
++
++/* A sysctl table is an array of struct ctl_table: */
++struct ctl_table
++{
++ int ctl_name; /* Binary ID */
++ const char *procname; /* Text ID for /proc/sys, or zero */
++ void *data;
++ int maxlen;
++ mode_t mode;
++ ctl_table *child;
++ proc_handler *proc_handler; /* Callback for text formatting */
++ ctl_handler *strategy; /* Callback function for all r/w */
++ struct proc_dir_entry *de; /* /proc control block */
++ void *extra1;
++ void *extra2;
++};
++
++/* struct ctl_table_header is used to maintain dynamic lists of
++ ctl_table trees. */
++struct ctl_table_header
++{
++ ctl_table *ctl_table;
++ struct list_head ctl_entry;
++};
++
++struct ctl_table_header * register_sysctl_table(ctl_table * table,
++ int insert_at_head);
++void unregister_sysctl_table(struct ctl_table_header * table);
++
++#else /* __KERNEL__ */
++
++#endif /* __KERNEL__ */
++
++#endif /* _LINUX_SYSCTL_H */
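The comment block above spells out the registration contract: an array of ctl_table entries terminated by a zero ctl_name, handed to register_sysctl_table(). The fragment below is not part of the patch; it is a minimal, hypothetical 2.4-style module that registers a single integer knob the same way br_netfilter.c later in this patch registers its bridge-nf-call-* entries. The ctl_name value 9999, the my_flag variable and all other names are placeholders.

/*
 * Hypothetical sketch -- not part of this patch.  Registers one integer
 * under /proc/sys/net/ with the two-argument register_sysctl_table()
 * declared in the header above.  All names and the ctl_name 9999 are
 * placeholders.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sysctl.h>

static int my_flag;                              /* the value being exported */
static struct ctl_table_header *my_header;

static ctl_table my_table[] = {
	{
		.ctl_name     = 9999,            /* placeholder binary ID */
		.procname     = "my_flag",       /* mirrored as /proc/sys/net/my_flag */
		.data         = &my_flag,
		.maxlen       = sizeof(int),
		.mode         = 0644,
		.proc_handler = &proc_dointvec,  /* default int handler declared above */
	},
	{ .ctl_name = 0 }                        /* zero ctl_name terminates the table */
};

static ctl_table my_net_table[] = {
	{
		.ctl_name = CTL_NET,             /* hang the entry under "net" */
		.procname = "net",
		.mode     = 0555,
		.child    = my_table,
	},
	{ .ctl_name = 0 }
};

static int __init my_sysctl_init(void)
{
	my_header = register_sysctl_table(my_net_table, 0);
	return my_header ? 0 : -ENOMEM;
}

static void __exit my_sysctl_exit(void)
{
	unregister_sysctl_table(my_header);
}

module_init(my_sysctl_init);
module_exit(my_sysctl_exit);
MODULE_LICENSE("GPL");

With such a module loaded, the value would show up as /proc/sys/net/my_flag, since the procname fields drive the /proc mirroring described in the header comment above.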
+diff -Nur linux-mips-cvs/net/8021q/vlan_dev.c linux-ebtables/net/8021q/vlan_dev.c
+--- linux-mips-cvs/net/8021q/vlan_dev.c 2004-11-29 18:47:19.000000000 +0100
++++ linux-ebtables/net/8021q/vlan_dev.c 2005-02-07 05:52:50.000000000 +0100
+@@ -488,6 +488,10 @@
+ stats->tx_packets++; /* for statics only */
+ stats->tx_bytes += skb->len;
+
++ skb->protocol = __constant_htons(ETH_P_8021Q);
++ skb->mac.raw -= VLAN_HLEN;
++ skb->nh.raw -= VLAN_HLEN;
++
+ skb->dev = VLAN_DEV_INFO(dev)->real_dev;
+ dev_queue_xmit(skb);
+
+diff -Nur linux-mips-cvs/net/Config.in linux-ebtables/net/Config.in
+--- linux-mips-cvs/net/Config.in 2005-01-09 20:34:04.000000000 +0100
++++ linux-ebtables/net/Config.in 2005-02-07 05:52:50.000000000 +0100
+@@ -70,6 +70,9 @@
+ source net/decnet/Config.in
+ fi
+ dep_tristate '802.1d Ethernet Bridging' CONFIG_BRIDGE $CONFIG_INET
++if [ "$CONFIG_BRIDGE" != "n" -a "$CONFIG_NETFILTER" != "n" ]; then
++ source net/bridge/netfilter/Config.in
++fi
+ if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
+ tristate 'CCITT X.25 Packet Layer (EXPERIMENTAL)' CONFIG_X25
+ tristate 'LAPB Data Link Driver (EXPERIMENTAL)' CONFIG_LAPB
+diff -Nur linux-mips-cvs/net/Config.in.orig linux-ebtables/net/Config.in.orig
+--- linux-mips-cvs/net/Config.in.orig 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/net/Config.in.orig 2005-01-09 20:34:04.000000000 +0100
+@@ -0,0 +1,107 @@
++#
++# Network configuration
++#
++mainmenu_option next_comment
++comment 'Networking options'
++tristate 'Packet socket' CONFIG_PACKET
++if [ "$CONFIG_PACKET" != "n" ]; then
++ bool ' Packet socket: mmapped IO' CONFIG_PACKET_MMAP
++fi
++
++tristate 'Netlink device emulation' CONFIG_NETLINK_DEV
++
++bool 'Network packet filtering (replaces ipchains)' CONFIG_NETFILTER
++if [ "$CONFIG_NETFILTER" = "y" ]; then
++ bool ' Network packet filtering debugging' CONFIG_NETFILTER_DEBUG
++fi
++bool 'Socket Filtering' CONFIG_FILTER
++tristate 'Unix domain sockets' CONFIG_UNIX
++bool 'TCP/IP networking' CONFIG_INET
++if [ "$CONFIG_INET" = "y" ]; then
++ source net/ipv4/Config.in
++ if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
++# IPv6 as module will cause a CRASH if you try to unload it
++ tristate ' The IPv6 protocol (EXPERIMENTAL)' CONFIG_IPV6
++ if [ "$CONFIG_IPV6" != "n" ]; then
++ source net/ipv6/Config.in
++ fi
++ fi
++ if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
++ source net/khttpd/Config.in
++ fi
++ if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
++ source net/sctp/Config.in
++ fi
++fi
++if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
++ tristate 'Asynchronous Transfer Mode (ATM) (EXPERIMENTAL)' CONFIG_ATM
++ if [ "$CONFIG_ATM" = "y" -o "$CONFIG_ATM" = "m" ]; then
++ if [ "$CONFIG_INET" = "y" ]; then
++ dep_tristate ' Classical IP over ATM' CONFIG_ATM_CLIP $CONFIG_ATM
++ if [ "$CONFIG_ATM_CLIP" != "n" ]; then
++ bool ' Do NOT send ICMP if no neighbour' CONFIG_ATM_CLIP_NO_ICMP
++ fi
++ fi
++ dep_tristate ' LAN Emulation (LANE) support' CONFIG_ATM_LANE $CONFIG_ATM
++ if [ "$CONFIG_INET" = "y" -a "$CONFIG_ATM_LANE" != "n" ]; then
++ tristate ' Multi-Protocol Over ATM (MPOA) support' CONFIG_ATM_MPOA
++ fi
++ dep_tristate ' RFC1483/2684 Bridged protocols' CONFIG_ATM_BR2684 $CONFIG_ATM
++ if [ "$CONFIG_ATM_BR2684" != "n" ]; then
++ bool ' Per-VC IP filter kludge' CONFIG_ATM_BR2684_IPFILTER
++ fi
++ fi
++fi
++tristate '802.1Q VLAN Support' CONFIG_VLAN_8021Q
++
++comment ' '
++tristate 'The IPX protocol' CONFIG_IPX
++if [ "$CONFIG_IPX" != "n" ]; then
++ source net/ipx/Config.in
++fi
++
++tristate 'Appletalk protocol support' CONFIG_ATALK
++if [ "$CONFIG_ATALK" != "n" ]; then
++ source drivers/net/appletalk/Config.in
++fi
++
++tristate 'DECnet Support' CONFIG_DECNET
++if [ "$CONFIG_DECNET" != "n" ]; then
++ source net/decnet/Config.in
++fi
++dep_tristate '802.1d Ethernet Bridging' CONFIG_BRIDGE $CONFIG_INET
++if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
++ tristate 'CCITT X.25 Packet Layer (EXPERIMENTAL)' CONFIG_X25
++ tristate 'LAPB Data Link Driver (EXPERIMENTAL)' CONFIG_LAPB
++ bool '802.2 LLC (EXPERIMENTAL)' CONFIG_LLC
++ bool 'Frame Diverter (EXPERIMENTAL)' CONFIG_NET_DIVERT
++# if [ "$CONFIG_LLC" = "y" ]; then
++# bool ' Netbeui (EXPERIMENTAL)' CONFIG_NETBEUI
++# fi
++ if [ "$CONFIG_INET" = "y" ]; then
++ tristate 'Acorn Econet/AUN protocols (EXPERIMENTAL)' CONFIG_ECONET
++ if [ "$CONFIG_ECONET" != "n" ]; then
++ bool ' AUN over UDP' CONFIG_ECONET_AUNUDP
++ bool ' Native Econet' CONFIG_ECONET_NATIVE
++ fi
++ fi
++ tristate 'WAN router' CONFIG_WAN_ROUTER
++ bool 'Fast switching (read help!)' CONFIG_NET_FASTROUTE
++ bool 'Forwarding between high speed interfaces' CONFIG_NET_HW_FLOWCONTROL
++fi
++
++mainmenu_option next_comment
++comment 'QoS and/or fair queueing'
++bool 'QoS and/or fair queueing' CONFIG_NET_SCHED
++if [ "$CONFIG_NET_SCHED" = "y" ]; then
++ source net/sched/Config.in
++fi
++#bool 'Network code profiler' CONFIG_NET_PROFILE
++endmenu
++
++mainmenu_option next_comment
++comment 'Network testing'
++dep_tristate 'Packet Generator (USE WITH CAUTION)' CONFIG_NET_PKTGEN $CONFIG_PROC_FS
++endmenu
++
++endmenu
+diff -Nur linux-mips-cvs/net/Makefile linux-ebtables/net/Makefile
+--- linux-mips-cvs/net/Makefile 2004-08-14 20:39:04.000000000 +0200
++++ linux-ebtables/net/Makefile 2005-02-07 05:52:50.000000000 +0100
+@@ -7,7 +7,8 @@
+
+ O_TARGET := network.o
+
+-mod-subdirs := ipv4/netfilter ipv6/netfilter ipx irda bluetooth atm netlink sched core sctp 802
++mod-subdirs := ipv4/netfilter ipv6/netfilter bridge/netfilter ipx irda \
++ bluetooth atm netlink sched core sctp 802
+ export-objs := netsyms.o
+
+ subdir-y := core ethernet
+@@ -27,6 +28,12 @@
+ endif
+ endif
+
++ifneq ($(CONFIG_BRIDGE),n)
++ifneq ($(CONFIG_BRIDGE),)
++subdir-$(CONFIG_BRIDGE) += bridge/netfilter
++endif
++endif
++
+ subdir-$(CONFIG_KHTTPD) += khttpd
+ subdir-$(CONFIG_PACKET) += packet
+ subdir-$(CONFIG_NET_SCHED) += sched
+diff -Nur linux-mips-cvs/net/bridge/Makefile linux-ebtables/net/bridge/Makefile
+--- linux-mips-cvs/net/bridge/Makefile 2001-01-10 18:18:10.000000000 +0100
++++ linux-ebtables/net/bridge/Makefile 2005-02-07 05:52:50.000000000 +0100
+@@ -7,10 +7,17 @@
+ #
+ # Note 2! The CFLAGS definition is now in the main makefile...
+
++export-objs := br.o
++
+ O_TARGET := bridge.o
+ obj-y := br.o br_device.o br_fdb.o br_forward.o br_if.o br_input.o \
+ br_ioctl.o br_notify.o br_stp.o br_stp_bpdu.o \
+ br_stp_if.o br_stp_timer.o
++
++ifeq ($(CONFIG_NETFILTER),y)
++obj-y += br_netfilter.o
++endif
++
+ obj-m := $(O_TARGET)
+
+ include $(TOPDIR)/Rules.make
+diff -Nur linux-mips-cvs/net/bridge/br.c linux-ebtables/net/bridge/br.c
+--- linux-mips-cvs/net/bridge/br.c 2004-08-14 20:39:04.000000000 +0200
++++ linux-ebtables/net/bridge/br.c 2005-02-07 05:52:50.000000000 +0100
+@@ -30,6 +30,8 @@
+ #include "../atm/lec.h"
+ #endif
+
++int (*br_should_route_hook) (struct sk_buff **pskb) = NULL;
++
+ void br_dec_use_count()
+ {
+ MOD_DEC_USE_COUNT;
+@@ -44,6 +46,10 @@
+ {
+ printk(KERN_INFO "NET4: Ethernet Bridge 008 for NET4.0\n");
+
++#ifdef CONFIG_NETFILTER
++ if (br_netfilter_init())
++ return 1;
++#endif
+ br_handle_frame_hook = br_handle_frame;
+ br_ioctl_hook = br_ioctl_deviceless_stub;
+ #if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
+@@ -57,6 +63,9 @@
+
+ static void __exit br_deinit(void)
+ {
++#ifdef CONFIG_NETFILTER
++ br_netfilter_fini();
++#endif
+ unregister_netdevice_notifier(&br_device_notifier);
+
+ rtnl_lock();
+@@ -73,7 +82,7 @@
+ #endif
+ }
+
+-EXPORT_NO_SYMBOLS;
++EXPORT_SYMBOL(br_should_route_hook);
+
+ module_init(br_init)
+ module_exit(br_deinit)
+diff -Nur linux-mips-cvs/net/bridge/br_forward.c linux-ebtables/net/bridge/br_forward.c
+--- linux-mips-cvs/net/bridge/br_forward.c 2003-11-17 02:07:47.000000000 +0100
++++ linux-ebtables/net/bridge/br_forward.c 2005-02-07 05:52:50.000000000 +0100
+@@ -30,18 +30,21 @@
+ return 1;
+ }
+
+-static int __dev_queue_push_xmit(struct sk_buff *skb)
++int br_dev_queue_push_xmit(struct sk_buff *skb)
+ {
++#ifdef CONFIG_NETFILTER
++ nf_bridge_maybe_copy_header(skb);
++#endif
+ skb_push(skb, ETH_HLEN);
+ dev_queue_xmit(skb);
+
+ return 0;
+ }
+
+-static int __br_forward_finish(struct sk_buff *skb)
++int br_forward_finish(struct sk_buff *skb)
+ {
+ NF_HOOK(PF_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
+- __dev_queue_push_xmit);
++ br_dev_queue_push_xmit);
+
+ return 0;
+ }
+@@ -49,8 +52,11 @@
+ static void __br_deliver(struct net_bridge_port *to, struct sk_buff *skb)
+ {
+ skb->dev = to->dev;
++#ifdef CONFIG_NETFILTER_DEBUG
++ skb->nf_debug = 0;
++#endif
+ NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
+- __br_forward_finish);
++ br_forward_finish);
+ }
+
+ static void __br_forward(struct net_bridge_port *to, struct sk_buff *skb)
+@@ -62,7 +68,7 @@
+ skb->ip_summed = CHECKSUM_NONE;
+
+ NF_HOOK(PF_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
+- __br_forward_finish);
++ br_forward_finish);
+ }
+
+ /* called under bridge lock */
+diff -Nur linux-mips-cvs/net/bridge/br_input.c linux-ebtables/net/bridge/br_input.c
+--- linux-mips-cvs/net/bridge/br_input.c 2003-08-13 19:19:30.000000000 +0200
++++ linux-ebtables/net/bridge/br_input.c 2005-02-07 05:52:50.000000000 +0100
+@@ -24,6 +24,9 @@
+
+ static int br_pass_frame_up_finish(struct sk_buff *skb)
+ {
++#ifdef CONFIG_NETFILTER_DEBUG
++ skb->nf_debug = 0;
++#endif
+ netif_rx(skb);
+
+ return 0;
+@@ -46,7 +49,7 @@
+ br_pass_frame_up_finish);
+ }
+
+-static int br_handle_frame_finish(struct sk_buff *skb)
++int br_handle_frame_finish(struct sk_buff *skb)
+ {
+ struct net_bridge *br;
+ unsigned char *dest;
+@@ -112,7 +115,7 @@
+ return 0;
+ }
+
+-void br_handle_frame(struct sk_buff *skb)
++int br_handle_frame(struct sk_buff *skb)
+ {
+ struct net_bridge *br;
+ unsigned char *dest;
+@@ -146,26 +149,35 @@
+ goto handle_special_frame;
+
+ if (p->state == BR_STATE_FORWARDING) {
++ if (br_should_route_hook && br_should_route_hook(&skb)) {
++ read_unlock(&br->lock);
++ return -1;
++ }
++
++ if (!memcmp(p->br->dev.dev_addr, dest, ETH_ALEN))
++ skb->pkt_type = PACKET_HOST;
++
+ NF_HOOK(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
+ br_handle_frame_finish);
+ read_unlock(&br->lock);
+- return;
++ return 0;
+ }
+
+ err:
+ read_unlock(&br->lock);
+ err_nolock:
+ kfree_skb(skb);
+- return;
++ return 0;
+
+ handle_special_frame:
+ if (!dest[5]) {
+ NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,NULL,
+ br_stp_handle_bpdu);
+ read_unlock(&br->lock);
+- return;
++ return 0;
+ }
+
+ read_unlock(&br->lock);
+ kfree_skb(skb);
++ return 0;
+ }
+diff -Nur linux-mips-cvs/net/bridge/br_netfilter.c linux-ebtables/net/bridge/br_netfilter.c
+--- linux-mips-cvs/net/bridge/br_netfilter.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/net/bridge/br_netfilter.c 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,1103 @@
++/*
++ * Handle firewalling
++ * Linux ethernet bridge
++ *
++ * Authors:
++ * Lennert Buytenhek <buytenh@gnu.org>
++ * Bart De Schuymer (maintainer) <bdschuym@pandora.be>
++ *
++ * Changes:
++ * Apr 29 2003: physdev module support (bdschuym)
++ * Jun 19 2003: let arptables see bridged ARP traffic (bdschuym)
++ * Oct 06 2003: filter encapsulated IP/ARP VLAN traffic on untagged bridge
++ * (bdschuym)
++ * Aug 28 2004: add IPv6 filtering (bdschuym)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ *
++ * Lennert dedicates this file to Kerstin Wurdinger.
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/ip.h>
++#include <linux/netdevice.h>
++#include <linux/skbuff.h>
++#include <linux/if_ether.h>
++#include <linux/if_vlan.h>
++#include <linux/netfilter_bridge.h>
++#include <linux/netfilter_ipv4.h>
++#include <linux/netfilter_ipv6.h>
++#include <linux/in_route.h>
++#include <net/ip.h>
++#include <net/ipv6.h>
++#include <asm/uaccess.h>
++#include <asm/checksum.h>
++#include "br_private.h"
++#ifdef CONFIG_SYSCTL
++#include <linux/sysctl.h>
++#endif
++
++#define skb_origaddr(skb) (((struct bridge_skb_cb *) \
++ (skb->nf_bridge->data))->daddr.ipv4)
++#define store_orig_dstaddr(skb) (skb_origaddr(skb) = (skb)->nh.iph->daddr)
++#define dnat_took_place(skb) (skb_origaddr(skb) != (skb)->nh.iph->daddr)
++
++#define has_bridge_parent(device) ((device)->br_port != NULL)
++#define bridge_parent(device) (&((device)->br_port->br->dev))
++
++#ifdef CONFIG_SYSCTL
++static struct ctl_table_header *brnf_sysctl_header;
++static int brnf_call_iptables = 1;
++static int brnf_call_ip6tables = 1;
++static int brnf_call_arptables = 1;
++static int brnf_filter_vlan_tagged = 1;
++#else
++#define brnf_filter_vlan_tagged 1
++#endif
++
++#define IS_VLAN_IP (skb->protocol == __constant_htons(ETH_P_8021Q) && \
++ hdr->h_vlan_encapsulated_proto == __constant_htons(ETH_P_IP) && \
++ brnf_filter_vlan_tagged)
++#define IS_VLAN_IPV6 (skb->protocol == __constant_htons(ETH_P_8021Q) && \
++ hdr->h_vlan_encapsulated_proto == __constant_htons(ETH_P_IPV6) && \
++ brnf_filter_vlan_tagged)
++/*
++#define IS_VLAN_ARP (skb->protocol == __constant_htons(ETH_P_8021Q) && \
++ hdr->h_vlan_encapsulated_proto == __constant_htons(ETH_P_ARP) && \
++ brnf_filter_vlan_tagged)
++*/
++
++/* We need these fake structures to make netfilter happy --
++ * lots of places assume that skb->dst != NULL, which isn't
++ * all that unreasonable.
++ *
++ * Currently, we fill in the PMTU entry because netfilter
++ * refragmentation needs it, and the rt_flags entry because
++ * ipt_REJECT needs it. Future netfilter modules might
++ * require us to fill additional fields.
++ */
++static struct net_device __fake_net_device = {
++ .hard_header_len = ETH_HLEN
++};
++
++static struct rtable __fake_rtable = {
++ u: {
++ dst: {
++ __refcnt: ATOMIC_INIT(1),
++ dev: &__fake_net_device,
++ pmtu: 1500
++ }
++ },
++
++ rt_flags: 0
++};
++
++
++/* PF_BRIDGE/PRE_ROUTING *********************************************/
++/* Undo the changes made for ip6tables PREROUTING and continue the
++ * bridge PRE_ROUTING hook. */
++static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
++{
++ struct nf_bridge_info *nf_bridge = skb->nf_bridge;
++
++#ifdef CONFIG_NETFILTER_DEBUG
++ skb->nf_debug ^= (1 << NF_BR_PRE_ROUTING);
++#endif
++
++ if (nf_bridge->mask & BRNF_PKT_TYPE) {
++ skb->pkt_type = PACKET_OTHERHOST;
++ nf_bridge->mask ^= BRNF_PKT_TYPE;
++ }
++ nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
++
++ skb->dst = (struct dst_entry *)&__fake_rtable;
++ dst_hold(skb->dst);
++
++ skb->dev = nf_bridge->physindev;
++ if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
++ skb_push(skb, VLAN_HLEN);
++ skb->nh.raw -= VLAN_HLEN;
++ }
++ NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
++ br_handle_frame_finish, 1);
++
++ return 0;
++}
++
++static void __br_dnat_complain(void)
++{
++ static unsigned long last_complaint;
++
++ if (jiffies - last_complaint >= 5 * HZ) {
++ printk(KERN_WARNING "Performing cross-bridge DNAT requires IP "
++ "forwarding to be enabled\n");
++ last_complaint = jiffies;
++ }
++}
++
++
++/* This requires some explaining. If DNAT has taken place,
++ * we will need to fix up the destination Ethernet address,
++ * and this is a tricky process.
++ *
++ * There are two cases to consider:
++ * 1. The packet was DNAT'ed to a device in the same bridge
++ * port group as it was received on. We can still bridge
++ * the packet.
++ * 2. The packet was DNAT'ed to a different device, either
++ * a non-bridged device or another bridge port group.
++ * The packet will need to be routed.
++ *
++ * The correct way of distinguishing between these two cases is to
++ * call ip_route_input() and to look at skb->dst->dev, which is
++ * changed to the destination device if ip_route_input() succeeds.
++ *
++ * Let us first consider the case that ip_route_input() succeeds:
++ *
++ * If skb->dst->dev equals the logical bridge device the packet
++ * came in on, we can consider this bridging. We then call
++ * skb->dst->output() which will make the packet enter br_nf_local_out()
++ * not much later. In that function it is assured that the iptables
++ * FORWARD chain is traversed for the packet.
++ *
++ * Otherwise, the packet is considered to be routed and we just
++ * change the destination MAC address so that the packet will
++ * later be passed up to the IP stack to be routed.
++ *
++ * Let us now consider the case that ip_route_input() fails:
++ *
++ * After an "echo '0' > /proc/sys/net/ipv4/ip_forward" ip_route_input()
++ * will fail, while __ip_route_output_key() will return success. The source
++ * address for __ip_route_output_key() is set to zero, so __ip_route_output_key
++ * thinks we're handling a locally generated packet and won't care
++ * if IP forwarding is allowed. We send a warning message to the user's
++ * log telling her to put IP forwarding on.
++ *
++ * ip_route_input() will also fail if there is no route available.
++ * In that case we just drop the packet.
++ *
++ * --Lennert, 20020411
++ * --Bart, 20020416 (updated)
++ * --Bart, 20021007 (updated)
++ */
++
++static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
++{
++#ifdef CONFIG_NETFILTER_DEBUG
++ skb->nf_debug |= (1 << NF_BR_PRE_ROUTING) | (1 << NF_BR_FORWARD);
++#endif
++
++ if (skb->pkt_type == PACKET_OTHERHOST) {
++ skb->pkt_type = PACKET_HOST;
++ skb->nf_bridge->mask |= BRNF_PKT_TYPE;
++ }
++ skb->nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
++
++ skb->dev = bridge_parent(skb->dev);
++ if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
++ skb_pull(skb, VLAN_HLEN);
++ skb->nh.raw += VLAN_HLEN;
++ }
++ skb->dst->output(skb);
++ return 0;
++}
++
++static int br_nf_pre_routing_finish(struct sk_buff *skb)
++{
++ struct net_device *dev = skb->dev;
++ struct iphdr *iph = skb->nh.iph;
++ struct nf_bridge_info *nf_bridge = skb->nf_bridge;
++
++#ifdef CONFIG_NETFILTER_DEBUG
++ skb->nf_debug ^= (1 << NF_BR_PRE_ROUTING);
++#endif
++
++ if (nf_bridge->mask & BRNF_PKT_TYPE) {
++ skb->pkt_type = PACKET_OTHERHOST;
++ nf_bridge->mask ^= BRNF_PKT_TYPE;
++ }
++ nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
++
++ if (dnat_took_place(skb)) {
++ if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos,
++ dev)) {
++ struct rtable *rt;
++
++ if (!ip_route_output(&rt, iph->daddr, 0, iph->tos, 0)) {
++ /* Bridged-and-DNAT'ed traffic doesn't
++ * require ip_forwarding.
++ */
++ if (((struct dst_entry *)rt)->dev == dev) {
++ skb->dst = (struct dst_entry *)rt;
++ goto bridged_dnat;
++ }
++ __br_dnat_complain();
++ dst_release((struct dst_entry *)rt);
++ }
++ kfree_skb(skb);
++ return 0;
++ } else {
++ if (skb->dst->dev == dev) {
++bridged_dnat:
++ /* Tell br_nf_local_out this is a
++ * bridged frame
++ */
++ nf_bridge->mask |= BRNF_BRIDGED_DNAT;
++ skb->dev = nf_bridge->physindev;
++ if (skb->protocol ==
++ __constant_htons(ETH_P_8021Q)) {
++ skb_push(skb, VLAN_HLEN);
++ skb->nh.raw -= VLAN_HLEN;
++ }
++ NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING,
++ skb, skb->dev, NULL,
++ br_nf_pre_routing_finish_bridge,
++ 1);
++ return 0;
++ }
++ memcpy(skb->mac.ethernet->h_dest, dev->dev_addr,
++ ETH_ALEN);
++ skb->pkt_type = PACKET_HOST;
++ }
++ } else {
++ skb->dst = (struct dst_entry *)&__fake_rtable;
++ dst_hold(skb->dst);
++ }
++
++ skb->dev = nf_bridge->physindev;
++ if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
++ skb_push(skb, VLAN_HLEN);
++ skb->nh.raw -= VLAN_HLEN;
++ }
++ NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
++ br_handle_frame_finish, 1);
++
++ return 0;
++}
++
++/* Some common code for IPv4/IPv6 */
++static void setup_pre_routing(struct sk_buff *skb)
++{
++ struct nf_bridge_info *nf_bridge = skb->nf_bridge;
++
++ if (skb->pkt_type == PACKET_OTHERHOST) {
++ skb->pkt_type = PACKET_HOST;
++ nf_bridge->mask |= BRNF_PKT_TYPE;
++ }
++
++ nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING;
++ nf_bridge->physindev = skb->dev;
++ skb->dev = bridge_parent(skb->dev);
++}
++
++/* We only check the length. A bridge shouldn't do any hop-by-hop stuff anyway */
++static int check_hbh_len(struct sk_buff *skb)
++{
++ unsigned char *raw = (u8*)(skb->nh.ipv6h+1);
++ u32 pkt_len;
++ int off = raw - skb->nh.raw;
++ int len = (raw[1]+1)<<3;
++
++ if ((raw + len) - skb->data > skb_headlen(skb))
++ goto bad;
++
++ off += 2;
++ len -= 2;
++
++ while (len > 0) {
++ int optlen = raw[off+1]+2;
++
++ switch (skb->nh.raw[off]) {
++ case IPV6_TLV_PAD0:
++ optlen = 1;
++ break;
++
++ case IPV6_TLV_PADN:
++ break;
++
++ case IPV6_TLV_JUMBO:
++ if (skb->nh.raw[off+1] != 4 || (off&3) != 2)
++ goto bad;
++
++ pkt_len = ntohl(*(u32*)(skb->nh.raw+off+2));
++
++ if (pkt_len > skb->len - sizeof(struct ipv6hdr))
++ goto bad;
++ if (pkt_len + sizeof(struct ipv6hdr) < skb->len) {
++ if (__pskb_trim(skb,
++ pkt_len + sizeof(struct ipv6hdr)))
++ goto bad;
++ if (skb->ip_summed == CHECKSUM_HW)
++ skb->ip_summed = CHECKSUM_NONE;
++ }
++ break;
++ default:
++ if (optlen > len)
++ goto bad;
++ break;
++ }
++ off += optlen;
++ len -= optlen;
++ }
++ if (len == 0)
++ return 0;
++bad:
++ return -1;
++
++}
++
++/* Replicate the checks that IPv6 does on packet reception and pass the packet
++ * to ip6tables, which doesn't support NAT, so things are fairly simple. */
++static unsigned int br_nf_pre_routing_ipv6(unsigned int hook,
++ struct sk_buff *skb, const struct net_device *in,
++ const struct net_device *out, int (*okfn)(struct sk_buff *))
++{
++ struct ipv6hdr *hdr;
++ u32 pkt_len;
++ struct nf_bridge_info *nf_bridge;
++
++ if (skb->len < sizeof(struct ipv6hdr))
++ goto inhdr_error;
++
++ if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
++ goto inhdr_error;
++
++ hdr = skb->nh.ipv6h;
++
++ if (hdr->version != 6)
++ goto inhdr_error;
++
++ pkt_len = ntohs(hdr->payload_len);
++
++ if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
++ if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
++ goto inhdr_error;
++ if (pkt_len + sizeof(struct ipv6hdr) < skb->len) {
++ if (__pskb_trim(skb, pkt_len + sizeof(struct ipv6hdr)))
++ goto inhdr_error;
++ if (skb->ip_summed == CHECKSUM_HW)
++ skb->ip_summed = CHECKSUM_NONE;
++ }
++ }
++ if (hdr->nexthdr == NEXTHDR_HOP && check_hbh_len(skb))
++ goto inhdr_error;
++
++#ifdef CONFIG_NETFILTER_DEBUG
++ skb->nf_debug ^= (1 << NF_IP6_PRE_ROUTING);
++#endif
++ if ((nf_bridge = nf_bridge_alloc(skb)) == NULL)
++ return NF_DROP;
++ setup_pre_routing(skb);
++
++ NF_HOOK(PF_INET6, NF_IP6_PRE_ROUTING, skb, skb->dev, NULL,
++ br_nf_pre_routing_finish_ipv6);
++
++ return NF_STOLEN;
++
++inhdr_error:
++ return NF_DROP;
++}
++
++/* Direct IPv6 traffic to br_nf_pre_routing_ipv6.
++ * Replicate the checks that IPv4 does on packet reception.
++ * Set skb->dev to the bridge device (i.e. parent of the
++ * receiving device) to make netfilter happy, the REDIRECT
++ * target in particular. Save the original destination IP
++ * address to be able to detect DNAT afterwards.
++ */
++static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in, const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ struct iphdr *iph;
++ __u32 len;
++ struct sk_buff *skb = *pskb;
++ struct nf_bridge_info *nf_bridge;
++ struct vlan_ethhdr *hdr = (struct vlan_ethhdr *)
++ ((*pskb)->mac.ethernet);
++
++ if (skb->protocol == __constant_htons(ETH_P_IPV6) || IS_VLAN_IPV6) {
++#ifdef CONFIG_SYSCTL
++ if (!brnf_call_ip6tables)
++ return NF_ACCEPT;
++#endif
++ if ((skb = skb_share_check(*pskb, GFP_ATOMIC)) == NULL)
++ goto out;
++
++ if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
++ skb_pull(skb, VLAN_HLEN);
++ (skb)->nh.raw += VLAN_HLEN;
++ }
++ return br_nf_pre_routing_ipv6(hook, skb, in, out, okfn);
++ }
++
++#ifdef CONFIG_SYSCTL
++ if (!brnf_call_iptables)
++ return NF_ACCEPT;
++#endif
++
++ if (skb->protocol != __constant_htons(ETH_P_IP) && !IS_VLAN_IP)
++ return NF_ACCEPT;
++ if ((skb = skb_share_check(*pskb, GFP_ATOMIC)) == NULL)
++ goto out;
++
++ if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
++ skb_pull(skb, VLAN_HLEN);
++ (skb)->nh.raw += VLAN_HLEN;
++ }
++
++ if (!pskb_may_pull(skb, sizeof(struct iphdr)))
++ goto inhdr_error;
++
++ iph = skb->nh.iph;
++ if (iph->ihl < 5 || iph->version != 4)
++ goto inhdr_error;
++
++ if (!pskb_may_pull(skb, 4*iph->ihl))
++ goto inhdr_error;
++
++ iph = skb->nh.iph;
++ if (ip_fast_csum((__u8 *)iph, iph->ihl) != 0)
++ goto inhdr_error;
++
++ len = ntohs(iph->tot_len);
++ if (skb->len < len || len < 4*iph->ihl)
++ goto inhdr_error;
++
++ if (skb->len > len) {
++ __pskb_trim(skb, len);
++ if (skb->ip_summed == CHECKSUM_HW)
++ skb->ip_summed = CHECKSUM_NONE;
++ }
++
++#ifdef CONFIG_NETFILTER_DEBUG
++ skb->nf_debug ^= (1 << NF_IP_PRE_ROUTING);
++#endif
++ if ((nf_bridge = nf_bridge_alloc(skb)) == NULL)
++ return NF_DROP;
++
++ setup_pre_routing(skb);
++ store_orig_dstaddr(skb);
++
++ NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, skb->dev, NULL,
++ br_nf_pre_routing_finish);
++
++ return NF_STOLEN;
++
++inhdr_error:
++// IP_INC_STATS_BH(IpInHdrErrors);
++out:
++ return NF_DROP;
++}
++
++
++/* PF_BRIDGE/LOCAL_IN ************************************************/
++/* The packet is locally destined, which requires a real
++ * dst_entry, so detach the fake one. On the way up, the
++ * packet would pass through PRE_ROUTING again (which already
++ * took place when the packet entered the bridge), but we
++ * register an IPv4 PRE_ROUTING 'sabotage' hook that will
++ * prevent this from happening.
++ */
++static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in, const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ struct sk_buff *skb = *pskb;
++
++ if (skb->dst == (struct dst_entry *)&__fake_rtable) {
++ dst_release(skb->dst);
++ skb->dst = NULL;
++ }
++
++ return NF_ACCEPT;
++}
++
++/* PF_BRIDGE/FORWARD *************************************************/
++static int br_nf_forward_finish(struct sk_buff *skb)
++{
++ struct nf_bridge_info *nf_bridge = skb->nf_bridge;
++ struct net_device *in;
++/* struct vlan_ethhdr *hdr = (struct vlan_ethhdr *)(skb->mac.ethernet);*/
++
++#ifdef CONFIG_NETFILTER_DEBUG
++ skb->nf_debug ^= (1 << NF_BR_FORWARD);
++#endif
++
++/* if (skb->protocol != __constant_htons(ETH_P_ARP) && !IS_VLAN_ARP) {*/
++ in = nf_bridge->physindev;
++ if (nf_bridge->mask & BRNF_PKT_TYPE) {
++ skb->pkt_type = PACKET_OTHERHOST;
++ nf_bridge->mask ^= BRNF_PKT_TYPE;
++ }
++/* } else {
++ in = *((struct net_device **)(skb->cb));
++ }*/
++ if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
++ skb_push(skb, VLAN_HLEN);
++ skb->nh.raw -= VLAN_HLEN;
++ }
++ NF_HOOK_THRESH(PF_BRIDGE, NF_BR_FORWARD, skb, in,
++ skb->dev, br_forward_finish, 1);
++ return 0;
++}
++
++/* This is the 'purely bridged' case. For IP, we pass the packet to
++ * netfilter with indev and outdev set to the bridge device,
++ * but we are still able to filter on the 'real' indev/outdev
++ * because of the ipt_physdev.c module. For ARP, indev and outdev are the
++ * bridge ports.
++ */
++static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in, const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ struct sk_buff *skb = *pskb;
++ struct nf_bridge_info *nf_bridge;
++ struct vlan_ethhdr *hdr = (struct vlan_ethhdr *)(skb->mac.ethernet);
++ int pf;
++
++ if (!skb->nf_bridge)
++ return NF_ACCEPT;
++
++ if (skb->protocol == __constant_htons(ETH_P_IP) || IS_VLAN_IP)
++ pf = PF_INET;
++ else
++ pf = PF_INET6;
++
++ if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
++ skb_pull(*pskb, VLAN_HLEN);
++ (*pskb)->nh.raw += VLAN_HLEN;
++ }
++
++#ifdef CONFIG_NETFILTER_DEBUG
++ skb->nf_debug ^= (1 << NF_BR_FORWARD);
++#endif
++ nf_bridge = skb->nf_bridge;
++ if (skb->pkt_type == PACKET_OTHERHOST) {
++ skb->pkt_type = PACKET_HOST;
++ nf_bridge->mask |= BRNF_PKT_TYPE;
++ }
++
++ /* The physdev module checks on this */
++ nf_bridge->mask |= BRNF_BRIDGED;
++ nf_bridge->physoutdev = skb->dev;
++
++ NF_HOOK(pf, NF_IP_FORWARD, skb, bridge_parent(in),
++ bridge_parent(out), br_nf_forward_finish);
++
++ return NF_STOLEN;
++}
++
++/*
++static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in, const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ struct sk_buff *skb = *pskb;
++ struct vlan_ethhdr *hdr = (struct vlan_ethhdr *)(skb->mac.ethernet);
++ struct net_device **d = (struct net_device **)(skb->cb);
++
++ if (!brnf_call_arptables)
++ return NF_ACCEPT;
++
++ if (skb->protocol != __constant_htons(ETH_P_ARP)) {
++ if (!IS_VLAN_ARP)
++ return NF_ACCEPT;
++ skb_pull(*pskb, VLAN_HLEN);
++ (*pskb)->nh.raw += VLAN_HLEN;
++ }
++
++#ifdef CONFIG_NETFILTER_DEBUG
++ skb->nf_debug ^= (1 << NF_BR_FORWARD);
++#endif
++
++ if (skb->nh.arph->ar_pln != 4) {
++ if (IS_VLAN_ARP) {
++ skb_push(*pskb, VLAN_HLEN);
++ (*pskb)->nh.raw -= VLAN_HLEN;
++ }
++ return NF_ACCEPT;
++ }
++ *d = (struct net_device *)in;
++ NF_HOOK(NF_ARP, NF_ARP_FORWARD, skb, (struct net_device *)in,
++ (struct net_device *)out, br_nf_forward_finish);
++
++ return NF_STOLEN;
++}
++*/
++
++/* PF_BRIDGE/LOCAL_OUT ***********************************************/
++static int br_nf_local_out_finish(struct sk_buff *skb)
++{
++#ifdef CONFIG_NETFILTER_DEBUG
++ skb->nf_debug &= ~(1 << NF_BR_LOCAL_OUT);
++#endif
++ if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
++ skb_push(skb, VLAN_HLEN);
++ skb->nh.raw -= VLAN_HLEN;
++ }
++
++ NF_HOOK_THRESH(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
++ br_forward_finish, NF_BR_PRI_FIRST + 1);
++
++ return 0;
++}
++
++
++/* This function sees both locally originated IP packets and forwarded
++ * IP packets (in both cases the destination device is a bridge
++ * device). It also sees bridged-and-DNAT'ed packets.
++ * To be able to filter on the physical bridge devices (with the ipt_physdev.c
++ * module), we steal packets destined to a bridge device away from the
++ * PF_INET/FORWARD and PF_INET/OUTPUT hook functions, and give them back later,
++ * when we have determined the real output device. This is done in here.
++ *
++ * If (nf_bridge->mask & BRNF_BRIDGED_DNAT) then the packet is bridged
++ * and we fake the PF_BRIDGE/FORWARD hook. The function br_nf_forward()
++ * will then fake the PF_INET/FORWARD hook. br_nf_local_out() has priority
++ * NF_BR_PRI_FIRST, so no relevant PF_BRIDGE/INPUT functions have been nor
++ * will be executed.
++ * Otherwise, if nf_bridge->physindev is NULL, the bridge-nf code never touched
++ * this packet before, and so the packet was locally originated. We fake
++ * the PF_INET/LOCAL_OUT hook.
++ * Finally, if nf_bridge->physindev isn't NULL, then the packet was IP routed,
++ * so we fake the PF_INET/FORWARD hook. ipv4_sabotage_out() makes sure
++ * even routed packets that didn't arrive on a bridge interface have their
++ * nf_bridge->physindev set.
++ */
++
++static unsigned int br_nf_local_out(unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in, const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ struct net_device *realindev, *realoutdev;
++ struct sk_buff *skb = *pskb;
++ struct nf_bridge_info *nf_bridge;
++ struct vlan_ethhdr *hdr = (struct vlan_ethhdr *)(skb->mac.ethernet);
++ int pf;
++
++ if (!skb->nf_bridge)
++ return NF_ACCEPT;
++
++ if (skb->protocol == __constant_htons(ETH_P_IP) || IS_VLAN_IP)
++ pf = PF_INET;
++ else
++ pf = PF_INET6;
++
++#ifdef CONFIG_NETFILTER_DEBUG
++ /* Sometimes we get packets with NULL ->dst here (for example,
++ * running a dhcp client daemon triggers this). This should now
++ * be fixed, but let's keep the check around. */
++ if (skb->dst == NULL) {
++ printk(KERN_CRIT "br_netfilter: skb->dst == NULL.");
++ return NF_ACCEPT;
++ }
++#endif
++
++ nf_bridge = skb->nf_bridge;
++ nf_bridge->physoutdev = skb->dev;
++ realindev = nf_bridge->physindev;
++
++ /* Bridged, take PF_BRIDGE/FORWARD.
++ * (see big note in front of br_nf_pre_routing_finish)
++ */
++ if (nf_bridge->mask & BRNF_BRIDGED_DNAT) {
++ if (nf_bridge->mask & BRNF_PKT_TYPE) {
++ skb->pkt_type = PACKET_OTHERHOST;
++ nf_bridge->mask ^= BRNF_PKT_TYPE;
++ }
++ if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
++ skb_push(skb, VLAN_HLEN);
++ skb->nh.raw -= VLAN_HLEN;
++ }
++
++ NF_HOOK(PF_BRIDGE, NF_BR_FORWARD, skb, realindev,
++ skb->dev, br_forward_finish);
++ goto out;
++ }
++ realoutdev = bridge_parent(skb->dev);
++
++#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
++ /* iptables should match -o br0.x */
++ if (nf_bridge->netoutdev)
++ realoutdev = nf_bridge->netoutdev;
++#endif
++ if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
++ skb_pull(skb, VLAN_HLEN);
++ (*pskb)->nh.raw += VLAN_HLEN;
++ }
++ /* IP forwarded traffic has a physindev, locally
++ * generated traffic hasn't.
++ */
++ if (realindev != NULL) {
++ if (((nf_bridge->mask & BRNF_DONT_TAKE_PARENT) == 0) &&
++ has_bridge_parent(realindev))
++ realindev = bridge_parent(realindev);
++ NF_HOOK_THRESH(pf, NF_IP_FORWARD, skb, realindev,
++ realoutdev, okfn,
++ NF_IP_PRI_BRIDGE_SABOTAGE_FORWARD + 1);
++ } else {
++#ifdef CONFIG_NETFILTER_DEBUG
++ skb->nf_debug ^= (1 << NF_IP_LOCAL_OUT);
++#endif
++
++ NF_HOOK_THRESH(pf, NF_IP_LOCAL_OUT, skb, realindev,
++ realoutdev, okfn,
++ NF_IP_PRI_BRIDGE_SABOTAGE_LOCAL_OUT + 1);
++ }
++
++out:
++ return NF_STOLEN;
++}
++
++
++/* PF_BRIDGE/POST_ROUTING ********************************************/
++static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in, const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ struct sk_buff *skb = *pskb;
++ struct nf_bridge_info *nf_bridge = (*pskb)->nf_bridge;
++ struct vlan_ethhdr *hdr = (struct vlan_ethhdr *)(skb->mac.ethernet);
++ struct net_device *realoutdev = bridge_parent(skb->dev);
++ int pf;
++
++#ifdef CONFIG_NETFILTER_DEBUG
++ /* Be very paranoid. This probably won't happen anymore, but let's
++ * keep the check just to be sure... */
++ if (skb->mac.raw < skb->head || skb->mac.raw + ETH_HLEN > skb->data) {
++ printk(KERN_CRIT "br_netfilter: Argh!! br_nf_post_routing: "
++ "bad mac.raw pointer.");
++ goto print_error;
++ }
++#endif
++
++ if (!nf_bridge)
++ return NF_ACCEPT;
++
++ if (skb->protocol == __constant_htons(ETH_P_IP) || IS_VLAN_IP)
++ pf = PF_INET;
++ else
++ pf = PF_INET6;
++
++ /* Sometimes we get packets with NULL ->dst here (for example,
++ * running a dhcp client daemon triggers this).
++ */
++ if (skb->dst == NULL)
++ return NF_ACCEPT;
++
++#ifdef CONFIG_NETFILTER_DEBUG
++ /* Sometimes we get packets with NULL ->dst here (for example,
++ * running a dhcp client daemon triggers this). This should now
++ * be fixed, but let's keep the check around.
++ */
++ if (skb->dst == NULL) {
++ printk(KERN_CRIT "br_netfilter: skb->dst == NULL.");
++ goto print_error;
++ }
++
++ skb->nf_debug ^= (1 << NF_IP_POST_ROUTING);
++#endif
++
++ /* We assume any code from br_dev_queue_push_xmit onwards doesn't care
++ * about the value of skb->pkt_type.
++ */
++ if (skb->pkt_type == PACKET_OTHERHOST) {
++ skb->pkt_type = PACKET_HOST;
++ nf_bridge->mask |= BRNF_PKT_TYPE;
++ }
++
++ if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
++ skb_pull(skb, VLAN_HLEN);
++ skb->nh.raw += VLAN_HLEN;
++ }
++
++ nf_bridge_save_header(skb);
++
++#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
++ if (nf_bridge->netoutdev)
++ realoutdev = nf_bridge->netoutdev;
++#endif
++ NF_HOOK(pf, NF_IP_POST_ROUTING, skb, NULL,
++ realoutdev, br_dev_queue_push_xmit);
++
++ return NF_STOLEN;
++
++#ifdef CONFIG_NETFILTER_DEBUG
++print_error:
++ if (skb->dev != NULL) {
++ printk("[%s]", skb->dev->name);
++ if (has_bridge_parent(skb->dev))
++ printk("[%s]", bridge_parent(skb->dev)->name);
++ }
++ printk(" head:%p, raw:%p, data:%p\n", skb->head, skb->mac.raw,
++ skb->data);
++ return NF_ACCEPT;
++#endif
++}
++
++
++/* IPv4/SABOTAGE *****************************************************/
++
++/* Don't hand locally destined packets to PF_INET/PRE_ROUTING
++ * for the second time.
++ */
++static unsigned int ip_sabotage_in(unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in, const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ if ((*pskb)->nf_bridge &&
++ !((*pskb)->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)) {
++ okfn(*pskb);
++ return NF_STOLEN;
++ }
++
++ return NF_ACCEPT;
++}
++
++/* Postpone execution of PF_INET/FORWARD, PF_INET/LOCAL_OUT
++ * and PF_INET/POST_ROUTING until we have done the forwarding
++ * decision in the bridge code and have determined skb->physoutdev.
++ */
++static unsigned int ip_sabotage_out(unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in, const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ struct sk_buff *skb = *pskb;
++
++ if ((out->hard_start_xmit == br_dev_xmit &&
++ okfn != br_nf_forward_finish &&
++ okfn != br_nf_local_out_finish &&
++ okfn != br_dev_queue_push_xmit)
++#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
++ || ((out->priv_flags & IFF_802_1Q_VLAN) &&
++ VLAN_DEV_INFO(out)->real_dev->hard_start_xmit == br_dev_xmit)
++#endif
++ ) {
++ struct nf_bridge_info *nf_bridge;
++
++ if (!skb->nf_bridge) {
++#ifdef CONFIG_SYSCTL
++ /* This code is executed while in the IP(v6) stack,
++ the version should be 4 or 6. We can't use
++ skb->protocol because that isn't set on
++ PF_INET(6)/LOCAL_OUT. */
++ struct iphdr *ip = skb->nh.iph;
++
++ if (ip->version == 4 && !brnf_call_iptables)
++ return NF_ACCEPT;
++ else if (ip->version == 6 && !brnf_call_ip6tables)
++ return NF_ACCEPT;
++#endif
++ if (hook == NF_IP_POST_ROUTING)
++ return NF_ACCEPT;
++ if (!nf_bridge_alloc(skb))
++ return NF_DROP;
++ }
++
++ nf_bridge = skb->nf_bridge;
++
++ /* This frame will arrive on PF_BRIDGE/LOCAL_OUT and we
++ * will need the indev then. For a brouter, the real indev
++ * can be a bridge port, so we make sure br_nf_local_out()
++ * doesn't use the bridge parent of the indev by using
++ * the BRNF_DONT_TAKE_PARENT mask.
++ */
++ if (hook == NF_IP_FORWARD && nf_bridge->physindev == NULL) {
++			nf_bridge->mask |= BRNF_DONT_TAKE_PARENT;
++ nf_bridge->physindev = (struct net_device *)in;
++ }
++#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
++ /* the iptables outdev is br0.x, not br0 */
++ if (out->priv_flags & IFF_802_1Q_VLAN)
++ nf_bridge->netoutdev = (struct net_device *)out;
++#endif
++ okfn(skb);
++ return NF_STOLEN;
++ }
++
++ return NF_ACCEPT;
++}
++
++/* For br_nf_local_out we need (prio = NF_BR_PRI_FIRST), to ensure that innocent
++ * PF_BRIDGE/NF_BR_LOCAL_OUT functions don't get bridged traffic as input.
++ * For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
++ * ip_refrag() can return NF_STOLEN.
++ */
++static struct nf_hook_ops br_nf_ops[] = {
++ { .hook = br_nf_pre_routing,
++ .pf = PF_BRIDGE,
++ .hooknum = NF_BR_PRE_ROUTING,
++ .priority = NF_BR_PRI_BRNF, },
++ { .hook = br_nf_local_in,
++ .pf = PF_BRIDGE,
++ .hooknum = NF_BR_LOCAL_IN,
++ .priority = NF_BR_PRI_BRNF, },
++ { .hook = br_nf_forward_ip,
++ .pf = PF_BRIDGE,
++ .hooknum = NF_BR_FORWARD,
++ .priority = NF_BR_PRI_BRNF /*- 1*/, },
++/* { .hook = br_nf_forward_arp,
++ .pf = PF_BRIDGE,
++ .hooknum = NF_BR_FORWARD,
++ .priority = NF_BR_PRI_BRNF, },*/
++ { .hook = br_nf_local_out,
++ .pf = PF_BRIDGE,
++ .hooknum = NF_BR_LOCAL_OUT,
++ .priority = NF_BR_PRI_FIRST, },
++ { .hook = br_nf_post_routing,
++ .pf = PF_BRIDGE,
++ .hooknum = NF_BR_POST_ROUTING,
++ .priority = NF_BR_PRI_LAST, },
++ { .hook = ip_sabotage_in,
++ .pf = PF_INET,
++ .hooknum = NF_IP_PRE_ROUTING,
++ .priority = NF_IP_PRI_FIRST, },
++ { .hook = ip_sabotage_in,
++ .pf = PF_INET6,
++ .hooknum = NF_IP6_PRE_ROUTING,
++ .priority = NF_IP6_PRI_FIRST, },
++ { .hook = ip_sabotage_out,
++ .pf = PF_INET,
++ .hooknum = NF_IP_FORWARD,
++ .priority = NF_IP_PRI_BRIDGE_SABOTAGE_FORWARD, },
++ { .hook = ip_sabotage_out,
++ .pf = PF_INET6,
++ .hooknum = NF_IP6_FORWARD,
++ .priority = NF_IP6_PRI_BRIDGE_SABOTAGE_FORWARD, },
++ { .hook = ip_sabotage_out,
++ .pf = PF_INET,
++ .hooknum = NF_IP_LOCAL_OUT,
++ .priority = NF_IP_PRI_BRIDGE_SABOTAGE_LOCAL_OUT, },
++ { .hook = ip_sabotage_out,
++ .pf = PF_INET6,
++ .hooknum = NF_IP6_LOCAL_OUT,
++ .priority = NF_IP6_PRI_BRIDGE_SABOTAGE_LOCAL_OUT, },
++ { .hook = ip_sabotage_out,
++ .pf = PF_INET,
++ .hooknum = NF_IP_POST_ROUTING,
++ .priority = NF_IP_PRI_FIRST, },
++ { .hook = ip_sabotage_out,
++ .pf = PF_INET6,
++ .hooknum = NF_IP6_POST_ROUTING,
++ .priority = NF_IP6_PRI_FIRST, },
++};
++
++#ifdef CONFIG_SYSCTL
++static
++int brnf_sysctl_call_tables(ctl_table *ctl, int write, struct file * filp,
++ void *buffer, size_t *lenp)
++{
++ int ret;
++
++ ret = proc_dointvec(ctl, write, filp, buffer, lenp);
++
++ if (write && *(int *)(ctl->data))
++ *(int *)(ctl->data) = 1;
++ return ret;
++}
++
++static ctl_table brnf_table[] = {
++ {
++ .ctl_name = NET_BRIDGE_NF_CALL_ARPTABLES,
++ .procname = "bridge-nf-call-arptables",
++ .data = &brnf_call_arptables,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &brnf_sysctl_call_tables,
++ },
++ {
++ .ctl_name = NET_BRIDGE_NF_CALL_IPTABLES,
++ .procname = "bridge-nf-call-iptables",
++ .data = &brnf_call_iptables,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &brnf_sysctl_call_tables,
++ },
++ {
++ .ctl_name = NET_BRIDGE_NF_CALL_IP6TABLES,
++ .procname = "bridge-nf-call-ip6tables",
++ .data = &brnf_call_ip6tables,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &brnf_sysctl_call_tables,
++ },
++ {
++ .ctl_name = NET_BRIDGE_NF_FILTER_VLAN_TAGGED,
++ .procname = "bridge-nf-filter-vlan-tagged",
++ .data = &brnf_filter_vlan_tagged,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &brnf_sysctl_call_tables,
++ },
++ { .ctl_name = 0 }
++};
++
++static ctl_table brnf_bridge_table[] = {
++ {
++ .ctl_name = NET_BRIDGE,
++ .procname = "bridge",
++ .mode = 0555,
++ .child = brnf_table,
++ },
++ { .ctl_name = 0 }
++};
++
++static ctl_table brnf_net_table[] = {
++ {
++ .ctl_name = CTL_NET,
++ .procname = "net",
++ .mode = 0555,
++ .child = brnf_bridge_table,
++ },
++ { .ctl_name = 0 }
++};
++#endif
++
++int br_netfilter_init(void)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(br_nf_ops); i++) {
++ int ret;
++
++ if ((ret = nf_register_hook(&br_nf_ops[i])) >= 0)
++ continue;
++
++ while (i--)
++ nf_unregister_hook(&br_nf_ops[i]);
++
++ return ret;
++ }
++
++#ifdef CONFIG_SYSCTL
++ brnf_sysctl_header = register_sysctl_table(brnf_net_table, 0);
++ if (brnf_sysctl_header == NULL) {
++ printk(KERN_WARNING "br_netfilter: can't register to sysctl.\n");
++ for (i = 0; i < ARRAY_SIZE(br_nf_ops); i++)
++ nf_unregister_hook(&br_nf_ops[i]);
++ return -EFAULT;
++ }
++#endif
++
++ printk(KERN_NOTICE "Bridge firewalling registered\n");
++
++ return 0;
++}
++
++void br_netfilter_fini(void)
++{
++ int i;
++
++ for (i = ARRAY_SIZE(br_nf_ops) - 1; i >= 0; i--)
++ nf_unregister_hook(&br_nf_ops[i]);
++#ifdef CONFIG_SYSCTL
++ unregister_sysctl_table(brnf_sysctl_header);
++#endif
++
++}
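br_netfilter.c attaches all of its functions through the br_nf_ops[] array of struct nf_hook_ops shown above. As a hedged, minimal sketch (not taken from this patch), the same registration pattern for a single PF_BRIDGE/NF_BR_PRE_ROUTING hook would look roughly like this; the hook body and all names are placeholders.

/*
 * Minimal sketch -- not part of this patch.  Registers one no-op function
 * on the bridge PRE_ROUTING hook, mirroring how br_nf_ops[] is wired up.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/netfilter_bridge.h>

static unsigned int demo_br_hook(unsigned int hooknum, struct sk_buff **pskb,
	const struct net_device *in, const struct net_device *out,
	int (*okfn)(struct sk_buff *))
{
	/* A real hook would inspect or mangle *pskb here. */
	return NF_ACCEPT;
}

static struct nf_hook_ops demo_br_ops = {
	.hook     = demo_br_hook,
	.pf       = PF_BRIDGE,
	.hooknum  = NF_BR_PRE_ROUTING,
	.priority = NF_BR_PRI_FIRST,
};

static int __init demo_hook_init(void)
{
	return nf_register_hook(&demo_br_ops);
}

static void __exit demo_hook_exit(void)
{
	nf_unregister_hook(&demo_br_ops);
}

module_init(demo_hook_init);
module_exit(demo_hook_exit);
MODULE_LICENSE("GPL");

The priority constants come from linux/netfilter_bridge.h; on a given hook, registrations with lower priority values run earlier.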
+diff -Nur linux-mips-cvs/net/bridge/br_private.h linux-ebtables/net/bridge/br_private.h
+--- linux-mips-cvs/net/bridge/br_private.h 2004-08-14 20:39:04.000000000 +0200
++++ linux-ebtables/net/bridge/br_private.h 2005-02-07 05:52:50.000000000 +0100
+@@ -143,8 +143,10 @@
+ /* br_forward.c */
+ extern void br_deliver(struct net_bridge_port *to,
+ struct sk_buff *skb);
++extern int br_dev_queue_push_xmit(struct sk_buff *skb);
+ extern void br_forward(struct net_bridge_port *to,
+ struct sk_buff *skb);
++extern int br_forward_finish(struct sk_buff *skb);
+ extern void br_flood_deliver(struct net_bridge *br,
+ struct sk_buff *skb,
+ int clone);
+@@ -165,7 +167,8 @@
+ int *ifindices);
+
+ /* br_input.c */
+-extern void br_handle_frame(struct sk_buff *skb);
++extern int br_handle_frame_finish(struct sk_buff *skb);
++extern int br_handle_frame(struct sk_buff *skb);
+
+ /* br_ioctl.c */
+ extern int br_ioctl(struct net_bridge *br,
+@@ -175,6 +178,10 @@
+ unsigned long arg2);
+ extern int br_ioctl_deviceless_stub(unsigned long arg);
+
++/* br_netfilter.c */
++extern int br_netfilter_init(void);
++extern void br_netfilter_fini(void);
++
+ /* br_stp.c */
+ extern int br_is_root_bridge(struct net_bridge *br);
+ extern struct net_bridge_port *br_get_port(struct net_bridge *br,
+diff -Nur linux-mips-cvs/net/bridge/netfilter/Config.in linux-ebtables/net/bridge/netfilter/Config.in
+--- linux-mips-cvs/net/bridge/netfilter/Config.in 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/net/bridge/netfilter/Config.in 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,23 @@
++#
++# Bridge netfilter configuration
++#
++dep_tristate ' Bridge: ebtables' CONFIG_BRIDGE_NF_EBTABLES $CONFIG_BRIDGE
++dep_tristate ' ebt: filter table support' CONFIG_BRIDGE_EBT_T_FILTER $CONFIG_BRIDGE_NF_EBTABLES
++dep_tristate ' ebt: nat table support' CONFIG_BRIDGE_EBT_T_NAT $CONFIG_BRIDGE_NF_EBTABLES
++dep_tristate ' ebt: broute table support' CONFIG_BRIDGE_EBT_BROUTE $CONFIG_BRIDGE_NF_EBTABLES
++dep_tristate ' ebt: log support' CONFIG_BRIDGE_EBT_LOG $CONFIG_BRIDGE_NF_EBTABLES
++dep_tristate ' ebt: ulog support' CONFIG_BRIDGE_EBT_ULOG $CONFIG_BRIDGE_NF_EBTABLES
++dep_tristate ' ebt: IP filter support' CONFIG_BRIDGE_EBT_IPF $CONFIG_BRIDGE_NF_EBTABLES
++dep_tristate ' ebt: ARP filter support' CONFIG_BRIDGE_EBT_ARPF $CONFIG_BRIDGE_NF_EBTABLES
++dep_tristate ' ebt: among filter support' CONFIG_BRIDGE_EBT_AMONG $CONFIG_BRIDGE_NF_EBTABLES
++dep_tristate ' ebt: limit filter support' CONFIG_BRIDGE_EBT_LIMIT $CONFIG_BRIDGE_NF_EBTABLES
++dep_tristate ' ebt: 802.1Q VLAN filter support' CONFIG_BRIDGE_EBT_VLANF $CONFIG_BRIDGE_NF_EBTABLES
++dep_tristate ' ebt: 802.3 filter support' CONFIG_BRIDGE_EBT_802_3 $CONFIG_BRIDGE_NF_EBTABLES
++dep_tristate ' ebt: packet type filter support' CONFIG_BRIDGE_EBT_PKTTYPE $CONFIG_BRIDGE_NF_EBTABLES
++dep_tristate ' ebt: STP filter support' CONFIG_BRIDGE_EBT_STP $CONFIG_BRIDGE_NF_EBTABLES
++dep_tristate ' ebt: mark filter support' CONFIG_BRIDGE_EBT_MARKF $CONFIG_BRIDGE_NF_EBTABLES
++dep_tristate ' ebt: arp reply target support' CONFIG_BRIDGE_EBT_ARPREPLY $CONFIG_BRIDGE_NF_EBTABLES
++dep_tristate ' ebt: snat target support' CONFIG_BRIDGE_EBT_SNAT $CONFIG_BRIDGE_NF_EBTABLES
++dep_tristate ' ebt: dnat target support' CONFIG_BRIDGE_EBT_DNAT $CONFIG_BRIDGE_NF_EBTABLES
++dep_tristate ' ebt: redirect target support' CONFIG_BRIDGE_EBT_REDIRECT $CONFIG_BRIDGE_NF_EBTABLES
++dep_tristate ' ebt: mark target support' CONFIG_BRIDGE_EBT_MARK_T $CONFIG_BRIDGE_NF_EBTABLES
+diff -Nur linux-mips-cvs/net/bridge/netfilter/Makefile linux-ebtables/net/bridge/netfilter/Makefile
+--- linux-mips-cvs/net/bridge/netfilter/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/net/bridge/netfilter/Makefile 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,34 @@
++#
++# Makefile for the netfilter modules on top of bridging.
++#
++# Note! Dependencies are done automagically by 'make dep', which also
++# removes any old dependencies. DON'T put your own dependencies here
++# unless it's something special (ie not a .c file).
++#
++# Note 2! The CFLAGS definition is now in the main makefile...
++
++O_TARGET := netfilter.o
++
++export-objs := ebtables.o
++
++obj-$(CONFIG_BRIDGE_NF_EBTABLES) += ebtables.o
++obj-$(CONFIG_BRIDGE_EBT_T_FILTER) += ebtable_filter.o
++obj-$(CONFIG_BRIDGE_EBT_T_NAT) += ebtable_nat.o
++obj-$(CONFIG_BRIDGE_EBT_BROUTE) += ebtable_broute.o
++obj-$(CONFIG_BRIDGE_EBT_802_3) += ebt_802_3.o
++obj-$(CONFIG_BRIDGE_EBT_ARPF) += ebt_arp.o
++obj-$(CONFIG_BRIDGE_EBT_AMONG) += ebt_among.o
++obj-$(CONFIG_BRIDGE_EBT_IPF) += ebt_ip.o
++obj-$(CONFIG_BRIDGE_EBT_LIMIT) += ebt_limit.o
++obj-$(CONFIG_BRIDGE_EBT_MARKF) += ebt_mark_m.o
++obj-$(CONFIG_BRIDGE_EBT_PKTTYPE) += ebt_pkttype.o
++obj-$(CONFIG_BRIDGE_EBT_STP) += ebt_stp.o
++obj-$(CONFIG_BRIDGE_EBT_VLANF) += ebt_vlan.o
++obj-$(CONFIG_BRIDGE_EBT_LOG) += ebt_log.o
++obj-$(CONFIG_BRIDGE_EBT_ULOG) += ebt_ulog.o
++obj-$(CONFIG_BRIDGE_EBT_ARPREPLY) += ebt_arpreply.o
++obj-$(CONFIG_BRIDGE_EBT_DNAT) += ebt_dnat.o
++obj-$(CONFIG_BRIDGE_EBT_MARK_T) += ebt_mark.o
++obj-$(CONFIG_BRIDGE_EBT_REDIRECT) += ebt_redirect.o
++obj-$(CONFIG_BRIDGE_EBT_SNAT) += ebt_snat.o
++include $(TOPDIR)/Rules.make
+diff -Nur linux-mips-cvs/net/bridge/netfilter/ebt_802_3.c linux-ebtables/net/bridge/netfilter/ebt_802_3.c
+--- linux-mips-cvs/net/bridge/netfilter/ebt_802_3.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/net/bridge/netfilter/ebt_802_3.c 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,74 @@
++/*
++ * 802_3
++ *
++ * Author:
++ * Chris Vitale csv@bluetail.com
++ *
++ * May 2003
++ *
++ */
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/netfilter_bridge/ebt_802_3.h>
++#include <linux/module.h>
++
++static int ebt_filter_802_3(const struct sk_buff *skb, const struct net_device *in,
++ const struct net_device *out, const void *data, unsigned int datalen)
++{
++ struct ebt_802_3_info *info = (struct ebt_802_3_info *)data;
++ struct ebt_802_3_hdr *hdr = (struct ebt_802_3_hdr *)skb->mac.ethernet;
++ uint16_t type = hdr->llc.ui.ctrl & IS_UI ? hdr->llc.ui.type : hdr->llc.ni.type;
++
++ if (info->bitmask & EBT_802_3_SAP) {
++ if (FWINV(info->sap != hdr->llc.ui.ssap, EBT_802_3_SAP))
++ return EBT_NOMATCH;
++ if (FWINV(info->sap != hdr->llc.ui.dsap, EBT_802_3_SAP))
++ return EBT_NOMATCH;
++ }
++
++ if (info->bitmask & EBT_802_3_TYPE) {
++ if (!(hdr->llc.ui.dsap == CHECK_TYPE && hdr->llc.ui.ssap == CHECK_TYPE))
++ return EBT_NOMATCH;
++ if (FWINV(info->type != type, EBT_802_3_TYPE))
++ return EBT_NOMATCH;
++ }
++
++ return EBT_MATCH;
++}
++
++static struct ebt_match filter_802_3;
++static int ebt_802_3_check(const char *tablename, unsigned int hookmask,
++ const struct ebt_entry *e, void *data, unsigned int datalen)
++{
++ struct ebt_802_3_info *info = (struct ebt_802_3_info *)data;
++
++ if (datalen != EBT_ALIGN(sizeof(struct ebt_802_3_info)))
++ return -EINVAL;
++ if (info->bitmask & ~EBT_802_3_MASK || info->invflags & ~EBT_802_3_MASK)
++ return -EINVAL;
++
++ return 0;
++}
++
++static struct ebt_match filter_802_3 =
++{
++ .name = EBT_802_3_MATCH,
++ .match = ebt_filter_802_3,
++ .check = ebt_802_3_check,
++ .me = THIS_MODULE,
++};
++
++static int __init init(void)
++{
++ return ebt_register_match(&filter_802_3);
++}
++
++static void __exit fini(void)
++{
++ ebt_unregister_match(&filter_802_3);
++}
++
++module_init(init);
++module_exit(fini);
++EXPORT_NO_SYMBOLS;
++MODULE_LICENSE("GPL");
+diff -Nur linux-mips-cvs/net/bridge/netfilter/ebt_among.c linux-ebtables/net/bridge/netfilter/ebt_among.c
+--- linux-mips-cvs/net/bridge/netfilter/ebt_among.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/net/bridge/netfilter/ebt_among.c 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,223 @@
++/*
++ * ebt_among
++ *
++ * Authors:
++ * Grzegorz Borowiak <grzes@gnu.univ.gda.pl>
++ *
++ * August, 2003
++ *
++ */
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/netfilter_bridge/ebt_among.h>
++#include <linux/ip.h>
++#include <linux/if_arp.h>
++#include <linux/module.h>
++
++static int ebt_mac_wormhash_contains(const struct ebt_mac_wormhash *wh,
++ const char *mac, uint32_t ip)
++{
++ /* You may be puzzled as to how this code works.
++ * Some tricks were used, refer to
++ * include/linux/netfilter_bridge/ebt_among.h
++ * as there you can find a solution of this mystery.
++ */
++ const struct ebt_mac_wormhash_tuple *p;
++ int start, limit, i;
++ uint32_t cmp[2] = { 0, 0 };
++ int key = (const unsigned char) mac[5];
++
++ memcpy(((char *) cmp) + 2, mac, 6);
++ start = wh->table[key];
++ limit = wh->table[key + 1];
++ if (ip) {
++ for (i = start; i < limit; i++) {
++ p = &wh->pool[i];
++ if (cmp[1] == p->cmp[1] && cmp[0] == p->cmp[0]) {
++ if (p->ip == 0 || p->ip == ip) {
++ return 1;
++ }
++ }
++ }
++ } else {
++ for (i = start; i < limit; i++) {
++ p = &wh->pool[i];
++ if (cmp[1] == p->cmp[1] && cmp[0] == p->cmp[0]) {
++ if (p->ip == 0) {
++ return 1;
++ }
++ }
++ }
++ }
++ return 0;
++}
++
++static int ebt_mac_wormhash_check_integrity(const struct ebt_mac_wormhash
++ *wh)
++{
++ int i;
++
++ for (i = 0; i < 256; i++) {
++ if (wh->table[i] > wh->table[i + 1])
++ return -0x100 - i;
++ if (wh->table[i] < 0)
++ return -0x200 - i;
++ if (wh->table[i] > wh->poolsize)
++ return -0x300 - i;
++ }
++ if (wh->table[256] > wh->poolsize)
++ return -0xc00;
++ return 0;
++}
++
++static int get_ip_dst(const struct sk_buff *skb, uint32_t * addr)
++{
++ if (skb->mac.ethernet->h_proto == __constant_htons(ETH_P_IP))
++ *addr = skb->nh.iph->daddr;
++ else if (skb->mac.ethernet->h_proto == __constant_htons(ETH_P_ARP)) {
++ uint32_t arp_len = sizeof(struct arphdr) +
++ (2 * (((*skb).nh.arph)->ar_hln)) +
++ (2 * (((*skb).nh.arph)->ar_pln));
++
++ /* Make sure the packet is long enough. */
++ if ((((*skb).nh.raw) + arp_len) > (*skb).tail)
++ return -1;
++ /* IPv4 addresses are always 4 bytes. */
++ if (((*skb).nh.arph)->ar_pln != sizeof(uint32_t))
++ return -1;
++
++ memcpy(addr, ((*skb).nh.raw) + sizeof(struct arphdr) +
++ (2 * (((*skb).nh.arph)->ar_hln)) +
++ (((*skb).nh.arph)->ar_pln), sizeof(uint32_t));
++
++ }
++ return 0;
++}
++
++static int get_ip_src(const struct sk_buff *skb, uint32_t * addr)
++{
++ if (skb->mac.ethernet->h_proto == __constant_htons(ETH_P_IP))
++ *addr = skb->nh.iph->saddr;
++ else if (skb->mac.ethernet->h_proto == __constant_htons(ETH_P_ARP)) {
++ uint32_t arp_len = sizeof(struct arphdr) +
++ (2 * (((*skb).nh.arph)->ar_hln)) +
++ (2 * (((*skb).nh.arph)->ar_pln));
++
++ /* Make sure the packet is long enough. */
++ if ((((*skb).nh.raw) + arp_len) > (*skb).tail)
++ return -1;
++ /* IPv4 addresses are always 4 bytes. */
++ if (((*skb).nh.arph)->ar_pln != sizeof(uint32_t))
++ return -1;
++
++ memcpy(addr, ((*skb).nh.raw) + sizeof(struct arphdr) +
++ ((((*skb).nh.arph)->ar_hln)), sizeof(uint32_t));
++
++ }
++ return 0;
++}
++
++static int ebt_filter_among(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out, const void *data,
++ unsigned int datalen)
++{
++ struct ebt_among_info *info = (struct ebt_among_info *) data;
++ const char *dmac, *smac;
++ const struct ebt_mac_wormhash *wh_dst, *wh_src;
++ uint32_t dip = 0, sip = 0;
++
++ wh_dst = ebt_among_wh_dst(info);
++ wh_src = ebt_among_wh_src(info);
++
++ if (wh_src) {
++ smac = skb->mac.ethernet->h_source;
++ if (get_ip_src(skb, &sip))
++ return EBT_NOMATCH;
++ if (!(info->bitmask & EBT_AMONG_SRC_NEG)) {
++ /* we match only if it contains */
++ if (!ebt_mac_wormhash_contains(wh_src, smac, sip))
++ return EBT_NOMATCH;
++ } else {
++ /* we match only if it DOES NOT contain */
++ if (ebt_mac_wormhash_contains(wh_src, smac, sip))
++ return EBT_NOMATCH;
++ }
++ }
++
++ if (wh_dst) {
++ dmac = skb->mac.ethernet->h_dest;
++ if (get_ip_dst(skb, &dip))
++ return EBT_NOMATCH;
++ if (!(info->bitmask & EBT_AMONG_DST_NEG)) {
++ /* we match only if it contains */
++ if (!ebt_mac_wormhash_contains(wh_dst, dmac, dip))
++ return EBT_NOMATCH;
++ } else {
++ /* we match only if it DOES NOT contain */
++ if (ebt_mac_wormhash_contains(wh_dst, dmac, dip))
++ return EBT_NOMATCH;
++ }
++ }
++
++ return EBT_MATCH;
++}
++
++static int ebt_among_check(const char *tablename, unsigned int hookmask,
++ const struct ebt_entry *e, void *data,
++ unsigned int datalen)
++{
++ struct ebt_among_info *info = (struct ebt_among_info *) data;
++ int expected_length = sizeof(struct ebt_among_info);
++ const struct ebt_mac_wormhash *wh_dst, *wh_src;
++ int err;
++
++ wh_dst = ebt_among_wh_dst(info);
++ wh_src = ebt_among_wh_src(info);
++ expected_length += ebt_mac_wormhash_size(wh_dst);
++ expected_length += ebt_mac_wormhash_size(wh_src);
++
++ if (datalen != EBT_ALIGN(expected_length)) {
++ printk(KERN_WARNING
++		       "ebtables: among: wrong size: %d "
++		       "against expected %d, rounded to %d\n",
++ datalen, expected_length,
++ EBT_ALIGN(expected_length));
++ return -EINVAL;
++ }
++ if (wh_dst && (err = ebt_mac_wormhash_check_integrity(wh_dst))) {
++ printk(KERN_WARNING
++ "ebtables: among: dst integrity fail: %x\n", -err);
++ return -EINVAL;
++ }
++ if (wh_src && (err = ebt_mac_wormhash_check_integrity(wh_src))) {
++ printk(KERN_WARNING
++ "ebtables: among: src integrity fail: %x\n", -err);
++ return -EINVAL;
++ }
++ return 0;
++}
++
++static struct ebt_match filter_among = {
++ {NULL, NULL},
++ EBT_AMONG_MATCH,
++ ebt_filter_among,
++ ebt_among_check,
++ NULL,
++ THIS_MODULE
++};
++
++static int __init init(void)
++{
++ return ebt_register_match(&filter_among);
++}
++
++static void __exit fini(void)
++{
++ ebt_unregister_match(&filter_among);
++}
++
++module_init(init);
++module_exit(fini);
++EXPORT_NO_SYMBOLS;
++MODULE_LICENSE("GPL");
+diff -Nur linux-mips-cvs/net/bridge/netfilter/ebt_arp.c linux-ebtables/net/bridge/netfilter/ebt_arp.c
+--- linux-mips-cvs/net/bridge/netfilter/ebt_arp.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/net/bridge/netfilter/ebt_arp.c 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,149 @@
++/*
++ * ebt_arp
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ * Tim Gardner <timg@tpi.com>
++ *
++ * April, 2002
++ *
++ */
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/netfilter_bridge/ebt_arp.h>
++#include <linux/if_arp.h>
++#include <linux/if_ether.h>
++#include <linux/module.h>
++
++static int ebt_filter_arp(const struct sk_buff *skb, const struct net_device *in,
++ const struct net_device *out, const void *data, unsigned int datalen)
++{
++ struct ebt_arp_info *info = (struct ebt_arp_info *)data;
++
++ if (info->bitmask & EBT_ARP_OPCODE && FWINV(info->opcode !=
++ ((*skb).nh.arph)->ar_op, EBT_ARP_OPCODE))
++ return EBT_NOMATCH;
++ if (info->bitmask & EBT_ARP_HTYPE && FWINV(info->htype !=
++ ((*skb).nh.arph)->ar_hrd, EBT_ARP_HTYPE))
++ return EBT_NOMATCH;
++ if (info->bitmask & EBT_ARP_PTYPE && FWINV(info->ptype !=
++ ((*skb).nh.arph)->ar_pro, EBT_ARP_PTYPE))
++ return EBT_NOMATCH;
++
++ if (info->bitmask & (EBT_ARP_SRC_IP | EBT_ARP_DST_IP))
++ {
++ uint32_t arp_len = sizeof(struct arphdr) +
++ (2 * (((*skb).nh.arph)->ar_hln)) +
++ (2 * (((*skb).nh.arph)->ar_pln));
++ uint32_t dst;
++ uint32_t src;
++
++ // Make sure the packet is long enough.
++ if ((((*skb).nh.raw) + arp_len) > (*skb).tail)
++ return EBT_NOMATCH;
++ // IPv4 addresses are always 4 bytes.
++ if (((*skb).nh.arph)->ar_pln != sizeof(uint32_t))
++ return EBT_NOMATCH;
++
++ if (info->bitmask & EBT_ARP_SRC_IP) {
++ memcpy(&src, ((*skb).nh.raw) + sizeof(struct arphdr) +
++ ((*skb).nh.arph)->ar_hln, sizeof(uint32_t));
++ if (FWINV(info->saddr != (src & info->smsk),
++ EBT_ARP_SRC_IP))
++ return EBT_NOMATCH;
++ }
++
++ if (info->bitmask & EBT_ARP_DST_IP) {
++ memcpy(&dst, ((*skb).nh.raw)+sizeof(struct arphdr) +
++ (2*(((*skb).nh.arph)->ar_hln)) +
++ (((*skb).nh.arph)->ar_pln), sizeof(uint32_t));
++ if (FWINV(info->daddr != (dst & info->dmsk),
++ EBT_ARP_DST_IP))
++ return EBT_NOMATCH;
++ }
++ }
++
++ if (info->bitmask & (EBT_ARP_SRC_MAC | EBT_ARP_DST_MAC))
++ {
++ uint32_t arp_len = sizeof(struct arphdr) +
++ (2 * (((*skb).nh.arph)->ar_hln)) +
++ (2 * (((*skb).nh.arph)->ar_pln));
++ unsigned char dst[ETH_ALEN];
++ unsigned char src[ETH_ALEN];
++
++ // Make sure the packet is long enough.
++ if ((((*skb).nh.raw) + arp_len) > (*skb).tail)
++ return EBT_NOMATCH;
++ // MAC addresses are 6 bytes.
++ if (((*skb).nh.arph)->ar_hln != ETH_ALEN)
++ return EBT_NOMATCH;
++ if (info->bitmask & EBT_ARP_SRC_MAC) {
++ uint8_t verdict, i;
++
++ memcpy(&src, ((*skb).nh.raw) +
++ sizeof(struct arphdr),
++ ETH_ALEN);
++ verdict = 0;
++ for (i = 0; i < 6; i++)
++ verdict |= (src[i] ^ info->smaddr[i]) &
++ info->smmsk[i];
++ if (FWINV(verdict != 0, EBT_ARP_SRC_MAC))
++ return EBT_NOMATCH;
++ }
++
++ if (info->bitmask & EBT_ARP_DST_MAC) {
++ uint8_t verdict, i;
++
++ memcpy(&dst, ((*skb).nh.raw) +
++ sizeof(struct arphdr) +
++ (((*skb).nh.arph)->ar_hln) +
++ (((*skb).nh.arph)->ar_pln),
++ ETH_ALEN);
++ verdict = 0;
++ for (i = 0; i < 6; i++)
++ verdict |= (dst[i] ^ info->dmaddr[i]) &
++ info->dmmsk[i];
++ if (FWINV(verdict != 0, EBT_ARP_DST_MAC))
++ return EBT_NOMATCH;
++ }
++ }
++
++ return EBT_MATCH;
++}
++
++static int ebt_arp_check(const char *tablename, unsigned int hookmask,
++ const struct ebt_entry *e, void *data, unsigned int datalen)
++{
++ struct ebt_arp_info *info = (struct ebt_arp_info *)data;
++
++ if (datalen != EBT_ALIGN(sizeof(struct ebt_arp_info)))
++ return -EINVAL;
++ if ((e->ethproto != __constant_htons(ETH_P_ARP) &&
++ e->ethproto != __constant_htons(ETH_P_RARP)) ||
++ e->invflags & EBT_IPROTO)
++ return -EINVAL;
++ if (info->bitmask & ~EBT_ARP_MASK || info->invflags & ~EBT_ARP_MASK)
++ return -EINVAL;
++ return 0;
++}
++
++static struct ebt_match filter_arp =
++{
++ {NULL, NULL}, EBT_ARP_MATCH, ebt_filter_arp, ebt_arp_check, NULL,
++ THIS_MODULE
++};
++
++static int __init init(void)
++{
++ return ebt_register_match(&filter_arp);
++}
++
++static void __exit fini(void)
++{
++ ebt_unregister_match(&filter_arp);
++}
++
++module_init(init);
++module_exit(fini);
++EXPORT_NO_SYMBOLS;
++MODULE_LICENSE("GPL");
+diff -Nur linux-mips-cvs/net/bridge/netfilter/ebt_arpreply.c linux-ebtables/net/bridge/netfilter/ebt_arpreply.c
+--- linux-mips-cvs/net/bridge/netfilter/ebt_arpreply.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/net/bridge/netfilter/ebt_arpreply.c 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,86 @@
++/*
++ * ebt_arpreply
++ *
++ * Authors:
++ * Grzegorz Borowiak <grzes@gnu.univ.gda.pl>
++ * Bart De Schuymer <bdschuym@pandora.be>
++ *
++ * August, 2003
++ *
++ */
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/netfilter_bridge/ebt_arpreply.h>
++#include <linux/if_arp.h>
++#include <net/arp.h>
++#include <linux/module.h>
++
++static int ebt_target_reply(struct sk_buff **pskb, unsigned int hooknr,
++ const struct net_device *in, const struct net_device *out,
++ const void *data, unsigned int datalen)
++{
++ struct ebt_arpreply_info *info = (struct ebt_arpreply_info *)data;
++ struct arphdr *ah;
++ unsigned char *sha, *arp_ptr;
++ u32 sip, tip;
++
++ ah = (**pskb).nh.arph;
++ if (ah->ar_op != __constant_htons(ARPOP_REQUEST) ||
++ ah->ar_hln != ETH_ALEN || ah->ar_pro != htons(ETH_P_IP) ||
++ ah->ar_pln != 4)
++ return EBT_CONTINUE;
++
++ arp_ptr = (unsigned char *)(ah + 1);
++
++ /* get source and target IP */
++ sha = arp_ptr;
++ arp_ptr += ETH_ALEN;
++ memcpy(&sip, arp_ptr, 4);
++ arp_ptr += 4 + ETH_ALEN;
++ memcpy(&tip, arp_ptr, 4);
++
++ arp_send(ARPOP_REPLY, ETH_P_ARP, sip, in, tip, sha, info->mac, sha);
++
++ return info->target;
++}
++
++static int ebt_target_reply_check(const char *tablename, unsigned int hookmask,
++ const struct ebt_entry *e, void *data, unsigned int datalen)
++{
++ struct ebt_arpreply_info *info = (struct ebt_arpreply_info *)data;
++
++ if (datalen != EBT_ALIGN(sizeof(struct ebt_arpreply_info)))
++ return -EINVAL;
++ if (BASE_CHAIN && info->target == EBT_RETURN)
++ return -EINVAL;
++ if (e->ethproto != __constant_htons(ETH_P_ARP) ||
++ e->invflags & EBT_IPROTO)
++ return -EINVAL;
++ CLEAR_BASE_CHAIN_BIT;
++ if (strcmp(tablename, "nat") || hookmask & ~(1 << NF_BR_PRE_ROUTING))
++ return -EINVAL;
++ return 0;
++}
++
++static struct ebt_target reply_target =
++{
++ .name = EBT_ARPREPLY_TARGET,
++ .target = ebt_target_reply,
++ .check = ebt_target_reply_check,
++ .me = THIS_MODULE,
++};
++
++static int __init init(void)
++{
++ return ebt_register_target(&reply_target);
++}
++
++static void __exit fini(void)
++{
++ ebt_unregister_target(&reply_target);
++}
++
++module_init(init);
++module_exit(fini);
++EXPORT_NO_SYMBOLS;
++MODULE_LICENSE("GPL");
+diff -Nur linux-mips-cvs/net/bridge/netfilter/ebt_dnat.c linux-ebtables/net/bridge/netfilter/ebt_dnat.c
+--- linux-mips-cvs/net/bridge/netfilter/ebt_dnat.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/net/bridge/netfilter/ebt_dnat.c 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,65 @@
++/*
++ * ebt_dnat
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * June, 2002
++ *
++ */
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/netfilter_bridge/ebt_nat.h>
++#include <linux/module.h>
++#include <net/sock.h>
++
++static int ebt_target_dnat(struct sk_buff **pskb, unsigned int hooknr,
++ const struct net_device *in, const struct net_device *out,
++ const void *data, unsigned int datalen)
++{
++ struct ebt_nat_info *info = (struct ebt_nat_info *)data;
++
++ memcpy(((**pskb).mac.ethernet)->h_dest, info->mac,
++ ETH_ALEN * sizeof(unsigned char));
++ return info->target;
++}
++
++static int ebt_target_dnat_check(const char *tablename, unsigned int hookmask,
++ const struct ebt_entry *e, void *data, unsigned int datalen)
++{
++ struct ebt_nat_info *info = (struct ebt_nat_info *)data;
++
++ if (BASE_CHAIN && info->target == EBT_RETURN)
++ return -EINVAL;
++ CLEAR_BASE_CHAIN_BIT;
++ if ( (strcmp(tablename, "nat") ||
++ (hookmask & ~((1 << NF_BR_PRE_ROUTING) | (1 << NF_BR_LOCAL_OUT)))) &&
++ (strcmp(tablename, "broute") || hookmask & ~(1 << NF_BR_BROUTING)) )
++ return -EINVAL;
++ if (datalen != EBT_ALIGN(sizeof(struct ebt_nat_info)))
++ return -EINVAL;
++ if (INVALID_TARGET)
++ return -EINVAL;
++ return 0;
++}
++
++static struct ebt_target dnat =
++{
++ {NULL, NULL}, EBT_DNAT_TARGET, ebt_target_dnat, ebt_target_dnat_check,
++ NULL, THIS_MODULE
++};
++
++static int __init init(void)
++{
++ return ebt_register_target(&dnat);
++}
++
++static void __exit fini(void)
++{
++ ebt_unregister_target(&dnat);
++}
++
++module_init(init);
++module_exit(fini);
++EXPORT_NO_SYMBOLS;
++MODULE_LICENSE("GPL");
+diff -Nur linux-mips-cvs/net/bridge/netfilter/ebt_ip.c linux-ebtables/net/bridge/netfilter/ebt_ip.c
+--- linux-mips-cvs/net/bridge/netfilter/ebt_ip.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/net/bridge/netfilter/ebt_ip.c 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,121 @@
++/*
++ * ebt_ip
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * April, 2002
++ *
++ * Changes:
++ * added ip-sport and ip-dport
++ * Innominate Security Technologies AG <mhopf@innominate.com>
++ * September, 2002
++ */
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/netfilter_bridge/ebt_ip.h>
++#include <linux/ip.h>
++#include <linux/in.h>
++#include <linux/module.h>
++
++struct tcpudphdr {
++ uint16_t src;
++ uint16_t dst;
++};
++
++union h_u {
++ unsigned char *raw;
++ struct tcpudphdr *tuh;
++};
++
++static int ebt_filter_ip(const struct sk_buff *skb, const struct net_device *in,
++ const struct net_device *out, const void *data,
++ unsigned int datalen)
++{
++ struct ebt_ip_info *info = (struct ebt_ip_info *)data;
++
++ if (info->bitmask & EBT_IP_TOS &&
++ FWINV(info->tos != ((*skb).nh.iph)->tos, EBT_IP_TOS))
++ return EBT_NOMATCH;
++ if (info->bitmask & EBT_IP_PROTO) {
++ if (FWINV(info->protocol != ((*skb).nh.iph)->protocol,
++ EBT_IP_PROTO))
++ return EBT_NOMATCH;
++ if ( info->protocol == IPPROTO_TCP ||
++ info->protocol == IPPROTO_UDP )
++ {
++ union h_u h;
++ h.raw = skb->data + skb->nh.iph->ihl*4;
++ if (info->bitmask & EBT_IP_DPORT) {
++ uint16_t port = ntohs(h.tuh->dst);
++ if (FWINV(port < info->dport[0] ||
++ port > info->dport[1],
++ EBT_IP_DPORT))
++ return EBT_NOMATCH;
++ }
++ if (info->bitmask & EBT_IP_SPORT) {
++ uint16_t port = ntohs(h.tuh->src);
++ if (FWINV(port < info->sport[0] ||
++ port > info->sport[1],
++ EBT_IP_SPORT))
++ return EBT_NOMATCH;
++ }
++ }
++ }
++ if (info->bitmask & EBT_IP_SOURCE &&
++ FWINV((((*skb).nh.iph)->saddr & info->smsk) !=
++ info->saddr, EBT_IP_SOURCE))
++ return EBT_NOMATCH;
++ if ((info->bitmask & EBT_IP_DEST) &&
++ FWINV((((*skb).nh.iph)->daddr & info->dmsk) !=
++ info->daddr, EBT_IP_DEST))
++ return EBT_NOMATCH;
++ return EBT_MATCH;
++}
++
++static int ebt_ip_check(const char *tablename, unsigned int hookmask,
++ const struct ebt_entry *e, void *data, unsigned int datalen)
++{
++ struct ebt_ip_info *info = (struct ebt_ip_info *)data;
++
++ if (datalen != EBT_ALIGN(sizeof(struct ebt_ip_info)))
++ return -EINVAL;
++ if (e->ethproto != __constant_htons(ETH_P_IP) ||
++ e->invflags & EBT_IPROTO)
++ return -EINVAL;
++ if (info->bitmask & ~EBT_IP_MASK || info->invflags & ~EBT_IP_MASK)
++ return -EINVAL;
++ if (info->bitmask & (EBT_IP_DPORT | EBT_IP_SPORT)) {
++		if (!(info->bitmask & EBT_IP_PROTO))
++ return -EINVAL;
++ if (info->protocol != IPPROTO_TCP &&
++ info->protocol != IPPROTO_UDP)
++ return -EINVAL;
++ }
++ if (info->bitmask & EBT_IP_DPORT && info->dport[0] > info->dport[1])
++ return -EINVAL;
++ if (info->bitmask & EBT_IP_SPORT && info->sport[0] > info->sport[1])
++ return -EINVAL;
++ return 0;
++}
++
++static struct ebt_match filter_ip =
++{
++ {NULL, NULL}, EBT_IP_MATCH, ebt_filter_ip, ebt_ip_check, NULL,
++ THIS_MODULE
++};
++
++static int __init init(void)
++{
++ return ebt_register_match(&filter_ip);
++}
++
++static void __exit fini(void)
++{
++ ebt_unregister_match(&filter_ip);
++}
++
++module_init(init);
++module_exit(fini);
++EXPORT_NO_SYMBOLS;
++MODULE_LICENSE("GPL");
+diff -Nur linux-mips-cvs/net/bridge/netfilter/ebt_limit.c linux-ebtables/net/bridge/netfilter/ebt_limit.c
+--- linux-mips-cvs/net/bridge/netfilter/ebt_limit.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/net/bridge/netfilter/ebt_limit.c 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,101 @@
++/*
++ * ebt_limit
++ *
++ * Authors:
++ * Tom Marshall <tommy@home.tig-grr.com>
++ *
++ * Mostly copied from netfilter's ipt_limit.c, see that file for explanation
++ *
++ * September, 2003
++ *
++ */
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/netfilter_bridge/ebt_limit.h>
++#include <linux/module.h>
++
++#include <linux/netdevice.h>
++#include <linux/spinlock.h>
++
++static spinlock_t limit_lock = SPIN_LOCK_UNLOCKED;
++
++#define CREDITS_PER_JIFFY 128
++
++static int ebt_limit_match(const struct sk_buff *skb, const struct net_device *in,
++ const struct net_device *out, const void *data, unsigned int datalen)
++{
++ struct ebt_limit_info *info = (struct ebt_limit_info *)data;
++ unsigned long now = jiffies;
++
++ spin_lock_bh(&limit_lock);
++ info->credit += (now - xchg(&info->prev, now)) * CREDITS_PER_JIFFY;
++ if (info->credit > info->credit_cap)
++ info->credit = info->credit_cap;
++
++ if (info->credit >= info->cost) {
++ /* We're not limited. */
++ info->credit -= info->cost;
++ spin_unlock_bh(&limit_lock);
++ return EBT_MATCH;
++ }
++
++ spin_unlock_bh(&limit_lock);
++ return EBT_NOMATCH;
++}
++
++/* Precision saver. */
++static u_int32_t
++user2credits(u_int32_t user)
++{
++ /* If multiplying would overflow... */
++ if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
++ /* Divide first. */
++ return (user / EBT_LIMIT_SCALE) * HZ * CREDITS_PER_JIFFY;
++
++ return (user * HZ * CREDITS_PER_JIFFY) / EBT_LIMIT_SCALE;
++}
++
++static int ebt_limit_check(const char *tablename, unsigned int hookmask,
++ const struct ebt_entry *e, void *data, unsigned int datalen)
++{
++ struct ebt_limit_info *info = (struct ebt_limit_info *)data;
++
++ if (datalen != EBT_ALIGN(sizeof(struct ebt_limit_info)))
++ return -EINVAL;
++
++ /* Check for overflow. */
++ if (info->burst == 0
++ || user2credits(info->avg * info->burst) < user2credits(info->avg)) {
++ printk("Overflow in ebt_limit: %u/%u\n",
++ info->avg, info->burst);
++ return -EINVAL;
++ }
++
++ /* User avg in seconds * EBT_LIMIT_SCALE: convert to jiffies * 128. */
++ info->prev = jiffies;
++ info->credit = user2credits(info->avg * info->burst);
++ info->credit_cap = user2credits(info->avg * info->burst);
++ info->cost = user2credits(info->avg);
++ return 0;
++}
++
++static struct ebt_match ebt_limit_reg =
++{
++ {NULL, NULL}, EBT_LIMIT_MATCH, ebt_limit_match, ebt_limit_check, NULL,
++ THIS_MODULE
++};
++
++static int __init init(void)
++{
++ return ebt_register_match(&ebt_limit_reg);
++}
++
++static void __exit fini(void)
++{
++ ebt_unregister_match(&ebt_limit_reg);
++}
++
++module_init(init);
++module_exit(fini);
++EXPORT_NO_SYMBOLS;
++MODULE_LICENSE("GPL");
+diff -Nur linux-mips-cvs/net/bridge/netfilter/ebt_log.c linux-ebtables/net/bridge/netfilter/ebt_log.c
+--- linux-mips-cvs/net/bridge/netfilter/ebt_log.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/net/bridge/netfilter/ebt_log.c 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,153 @@
++/*
++ * ebt_log
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * April, 2002
++ *
++ */
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/netfilter_bridge/ebt_log.h>
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/in.h>
++#include <linux/if_arp.h>
++#include <linux/spinlock.h>
++
++static spinlock_t ebt_log_lock = SPIN_LOCK_UNLOCKED;
++
++static int ebt_log_check(const char *tablename, unsigned int hookmask,
++ const struct ebt_entry *e, void *data, unsigned int datalen)
++{
++ struct ebt_log_info *info = (struct ebt_log_info *)data;
++
++ if (datalen != EBT_ALIGN(sizeof(struct ebt_log_info)))
++ return -EINVAL;
++ if (info->bitmask & ~EBT_LOG_MASK)
++ return -EINVAL;
++ if (info->loglevel >= 8)
++ return -EINVAL;
++ info->prefix[EBT_LOG_PREFIX_SIZE - 1] = '\0';
++ return 0;
++}
++
++struct tcpudphdr
++{
++ uint16_t src;
++ uint16_t dst;
++};
++
++struct arppayload
++{
++ unsigned char mac_src[ETH_ALEN];
++ unsigned char ip_src[4];
++ unsigned char mac_dst[ETH_ALEN];
++ unsigned char ip_dst[4];
++};
++
++static void print_MAC(unsigned char *p)
++{
++ int i;
++
++ for (i = 0; i < ETH_ALEN; i++, p++)
++ printk("%02x%c", *p, i == ETH_ALEN - 1 ? ' ':':');
++}
++
++#define myNIPQUAD(a) a[0], a[1], a[2], a[3]
++static void ebt_log(const struct sk_buff *skb, unsigned int hooknr,
++ const struct net_device *in, const struct net_device *out,
++ const void *data, unsigned int datalen)
++{
++ struct ebt_log_info *info = (struct ebt_log_info *)data;
++ char level_string[4] = "< >";
++ level_string[1] = '0' + info->loglevel;
++
++ spin_lock_bh(&ebt_log_lock);
++ printk(level_string);
++ printk("%s IN=%s OUT=%s ", info->prefix, in ? in->name : "",
++ out ? out->name : "");
++
++ printk("MAC source = ");
++ print_MAC((skb->mac.ethernet)->h_source);
++ printk("MAC dest = ");
++ print_MAC((skb->mac.ethernet)->h_dest);
++
++ printk("proto = 0x%04x", ntohs(((*skb).mac.ethernet)->h_proto));
++
++ if ((info->bitmask & EBT_LOG_IP) && skb->mac.ethernet->h_proto ==
++ htons(ETH_P_IP)){
++ struct iphdr *iph = skb->nh.iph;
++ printk(" IP SRC=%u.%u.%u.%u IP DST=%u.%u.%u.%u,",
++ NIPQUAD(iph->saddr), NIPQUAD(iph->daddr));
++ printk(" IP tos=0x%02X, IP proto=%d", iph->tos, iph->protocol);
++ if (iph->protocol == IPPROTO_TCP ||
++ iph->protocol == IPPROTO_UDP) {
++ struct tcpudphdr *ports = (struct tcpudphdr *)(skb->data + iph->ihl*4);
++
++ if (skb->data + iph->ihl*4 > skb->tail) {
++ printk(" INCOMPLETE TCP/UDP header");
++ goto out;
++ }
++ printk(" SPT=%u DPT=%u", ntohs(ports->src),
++ ntohs(ports->dst));
++ }
++ goto out;
++ }
++
++ if ((info->bitmask & EBT_LOG_ARP) &&
++ ((skb->mac.ethernet->h_proto == __constant_htons(ETH_P_ARP)) ||
++ (skb->mac.ethernet->h_proto == __constant_htons(ETH_P_RARP)))) {
++ struct arphdr * arph = skb->nh.arph;
++ printk(" ARP HTYPE=%d, PTYPE=0x%04x, OPCODE=%d",
++ ntohs(arph->ar_hrd), ntohs(arph->ar_pro),
++ ntohs(arph->ar_op));
++ /* If it's for Ethernet and the lengths are OK,
++ * then log the ARP payload */
++ if (arph->ar_hrd == __constant_htons(1) &&
++ arph->ar_hln == ETH_ALEN &&
++ arph->ar_pln == sizeof(uint32_t)) {
++ struct arppayload *arpp = (struct arppayload *)(skb->data + sizeof(*arph));
++
++ if (skb->data + sizeof(*arph) > skb->tail) {
++ printk(" INCOMPLETE ARP header");
++ goto out;
++ }
++
++ printk(" ARP MAC SRC=");
++ print_MAC(arpp->mac_src);
++ printk(" ARP IP SRC=%u.%u.%u.%u",
++ myNIPQUAD(arpp->ip_src));
++ printk(" ARP MAC DST=");
++ print_MAC(arpp->mac_dst);
++ printk(" ARP IP DST=%u.%u.%u.%u",
++ myNIPQUAD(arpp->ip_dst));
++ }
++
++ }
++out:
++ printk("\n");
++ spin_unlock_bh(&ebt_log_lock);
++}
++
++static struct ebt_watcher log =
++{
++ {NULL, NULL}, EBT_LOG_WATCHER, ebt_log, ebt_log_check, NULL,
++ THIS_MODULE
++};
++
++static int __init init(void)
++{
++ return ebt_register_watcher(&log);
++}
++
++static void __exit fini(void)
++{
++ ebt_unregister_watcher(&log);
++}
++
++module_init(init);
++module_exit(fini);
++EXPORT_NO_SYMBOLS;
++MODULE_LICENSE("GPL");
+diff -Nur linux-mips-cvs/net/bridge/netfilter/ebt_mark.c linux-ebtables/net/bridge/netfilter/ebt_mark.c
+--- linux-mips-cvs/net/bridge/netfilter/ebt_mark.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/net/bridge/netfilter/ebt_mark.c 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,66 @@
++/*
++ * ebt_mark
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * July, 2002
++ *
++ */
++
++// The mark target can be used in any chain
++// I believe adding a mangle table just for marking is total overkill
++// Marking a frame doesn't really change anything in the frame anyway
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/netfilter_bridge/ebt_mark_t.h>
++#include <linux/module.h>
++
++static int ebt_target_mark(struct sk_buff **pskb, unsigned int hooknr,
++ const struct net_device *in, const struct net_device *out,
++ const void *data, unsigned int datalen)
++{
++ struct ebt_mark_t_info *info = (struct ebt_mark_t_info *)data;
++
++ if ((*pskb)->nfmark != info->mark) {
++ (*pskb)->nfmark = info->mark;
++ (*pskb)->nfcache |= NFC_ALTERED;
++ }
++ return info->target;
++}
++
++static int ebt_target_mark_check(const char *tablename, unsigned int hookmask,
++ const struct ebt_entry *e, void *data, unsigned int datalen)
++{
++ struct ebt_mark_t_info *info = (struct ebt_mark_t_info *)data;
++
++ if (datalen != EBT_ALIGN(sizeof(struct ebt_mark_t_info)))
++ return -EINVAL;
++ if (BASE_CHAIN && info->target == EBT_RETURN)
++ return -EINVAL;
++ CLEAR_BASE_CHAIN_BIT;
++ if (INVALID_TARGET)
++ return -EINVAL;
++ return 0;
++}
++
++static struct ebt_target mark_target =
++{
++ {NULL, NULL}, EBT_MARK_TARGET, ebt_target_mark,
++ ebt_target_mark_check, NULL, THIS_MODULE
++};
++
++static int __init init(void)
++{
++ return ebt_register_target(&mark_target);
++}
++
++static void __exit fini(void)
++{
++ ebt_unregister_target(&mark_target);
++}
++
++module_init(init);
++module_exit(fini);
++EXPORT_NO_SYMBOLS;
++MODULE_LICENSE("GPL");
+diff -Nur linux-mips-cvs/net/bridge/netfilter/ebt_mark_m.c linux-ebtables/net/bridge/netfilter/ebt_mark_m.c
+--- linux-mips-cvs/net/bridge/netfilter/ebt_mark_m.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/net/bridge/netfilter/ebt_mark_m.c 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,61 @@
++/*
++ * ebt_mark_m
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * July, 2002
++ *
++ */
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/netfilter_bridge/ebt_mark_m.h>
++#include <linux/module.h>
++
++static int ebt_filter_mark(const struct sk_buff *skb,
++ const struct net_device *in, const struct net_device *out, const void *data,
++ unsigned int datalen)
++{
++ struct ebt_mark_m_info *info = (struct ebt_mark_m_info *) data;
++
++ if (info->bitmask & EBT_MARK_OR)
++ return !(!!(skb->nfmark & info->mask) ^ info->invert);
++ return !(((skb->nfmark & info->mask) == info->mark) ^ info->invert);
++}
++
++static int ebt_mark_check(const char *tablename, unsigned int hookmask,
++ const struct ebt_entry *e, void *data, unsigned int datalen)
++{
++ struct ebt_mark_m_info *info = (struct ebt_mark_m_info *) data;
++
++ if (datalen != EBT_ALIGN(sizeof(struct ebt_mark_m_info)))
++ return -EINVAL;
++ if (info->bitmask & ~EBT_MARK_MASK)
++ return -EINVAL;
++ if ((info->bitmask & EBT_MARK_OR) && (info->bitmask & EBT_MARK_AND))
++ return -EINVAL;
++ if (!info->bitmask)
++ return -EINVAL;
++ return 0;
++}
++
++static struct ebt_match filter_mark =
++{
++ {NULL, NULL}, EBT_MARK_MATCH, ebt_filter_mark, ebt_mark_check, NULL,
++ THIS_MODULE
++};
++
++static int __init init(void)
++{
++ return ebt_register_match(&filter_mark);
++}
++
++static void __exit fini(void)
++{
++ ebt_unregister_match(&filter_mark);
++}
++
++module_init(init);
++module_exit(fini);
++EXPORT_NO_SYMBOLS;
++MODULE_LICENSE("GPL");
+diff -Nur linux-mips-cvs/net/bridge/netfilter/ebt_pkttype.c linux-ebtables/net/bridge/netfilter/ebt_pkttype.c
+--- linux-mips-cvs/net/bridge/netfilter/ebt_pkttype.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/net/bridge/netfilter/ebt_pkttype.c 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,60 @@
++/*
++ * ebt_pkttype
++ *
++ * Authors:
++ * Bart De Schuymer <bdschuym@pandora.be>
++ *
++ * April, 2003
++ *
++ */
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/netfilter_bridge/ebt_pkttype.h>
++#include <linux/module.h>
++
++static int ebt_filter_pkttype(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *data,
++ unsigned int datalen)
++{
++ struct ebt_pkttype_info *info = (struct ebt_pkttype_info *)data;
++
++ return (skb->pkt_type != info->pkt_type) ^ info->invert;
++}
++
++static int ebt_pkttype_check(const char *tablename, unsigned int hookmask,
++ const struct ebt_entry *e, void *data, unsigned int datalen)
++{
++ struct ebt_pkttype_info *info = (struct ebt_pkttype_info *)data;
++
++ if (datalen != EBT_ALIGN(sizeof(struct ebt_pkttype_info)))
++ return -EINVAL;
++ if (info->invert != 0 && info->invert != 1)
++ return -EINVAL;
++ /* Allow any pkt_type value */
++ return 0;
++}
++
++static struct ebt_match filter_pkttype =
++{
++ .name = EBT_PKTTYPE_MATCH,
++ .match = ebt_filter_pkttype,
++ .check = ebt_pkttype_check,
++ .me = THIS_MODULE,
++};
++
++static int __init init(void)
++{
++ return ebt_register_match(&filter_pkttype);
++}
++
++static void __exit fini(void)
++{
++ ebt_unregister_match(&filter_pkttype);
++}
++
++module_init(init);
++module_exit(fini);
++EXPORT_NO_SYMBOLS;
++MODULE_LICENSE("GPL");
+diff -Nur linux-mips-cvs/net/bridge/netfilter/ebt_redirect.c linux-ebtables/net/bridge/netfilter/ebt_redirect.c
+--- linux-mips-cvs/net/bridge/netfilter/ebt_redirect.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/net/bridge/netfilter/ebt_redirect.c 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,71 @@
++/*
++ * ebt_redirect
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * April, 2002
++ *
++ */
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/netfilter_bridge/ebt_redirect.h>
++#include <linux/module.h>
++#include <net/sock.h>
++#include "../br_private.h"
++
++static int ebt_target_redirect(struct sk_buff **pskb, unsigned int hooknr,
++ const struct net_device *in, const struct net_device *out,
++ const void *data, unsigned int datalen)
++{
++ struct ebt_redirect_info *info = (struct ebt_redirect_info *)data;
++
++ if (hooknr != NF_BR_BROUTING)
++ memcpy((**pskb).mac.ethernet->h_dest,
++ in->br_port->br->dev.dev_addr, ETH_ALEN);
++ else {
++ memcpy((**pskb).mac.ethernet->h_dest,
++ in->dev_addr, ETH_ALEN);
++ (*pskb)->pkt_type = PACKET_HOST;
++ }
++ return info->target;
++}
++
++static int ebt_target_redirect_check(const char *tablename, unsigned int hookmask,
++ const struct ebt_entry *e, void *data, unsigned int datalen)
++{
++ struct ebt_redirect_info *info = (struct ebt_redirect_info *)data;
++
++ if (datalen != EBT_ALIGN(sizeof(struct ebt_redirect_info)))
++ return -EINVAL;
++ if (BASE_CHAIN && info->target == EBT_RETURN)
++ return -EINVAL;
++ CLEAR_BASE_CHAIN_BIT;
++ if ( (strcmp(tablename, "nat") || hookmask & ~(1 << NF_BR_PRE_ROUTING)) &&
++ (strcmp(tablename, "broute") || hookmask & ~(1 << NF_BR_BROUTING)) )
++ return -EINVAL;
++ if (INVALID_TARGET)
++ return -EINVAL;
++ return 0;
++}
++
++static struct ebt_target redirect_target =
++{
++ {NULL, NULL}, EBT_REDIRECT_TARGET, ebt_target_redirect,
++ ebt_target_redirect_check, NULL, THIS_MODULE
++};
++
++static int __init init(void)
++{
++ return ebt_register_target(&redirect_target);
++}
++
++static void __exit fini(void)
++{
++ ebt_unregister_target(&redirect_target);
++}
++
++module_init(init);
++module_exit(fini);
++EXPORT_NO_SYMBOLS;
++MODULE_LICENSE("GPL");
+diff -Nur linux-mips-cvs/net/bridge/netfilter/ebt_snat.c linux-ebtables/net/bridge/netfilter/ebt_snat.c
+--- linux-mips-cvs/net/bridge/netfilter/ebt_snat.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/net/bridge/netfilter/ebt_snat.c 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,64 @@
++/*
++ * ebt_snat
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * June, 2002
++ *
++ */
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/netfilter_bridge/ebt_nat.h>
++#include <linux/module.h>
++
++static int ebt_target_snat(struct sk_buff **pskb, unsigned int hooknr,
++ const struct net_device *in, const struct net_device *out,
++ const void *data, unsigned int datalen)
++{
++ struct ebt_nat_info *info = (struct ebt_nat_info *) data;
++
++ memcpy(((**pskb).mac.ethernet)->h_source, info->mac,
++ ETH_ALEN * sizeof(unsigned char));
++ return info->target;
++}
++
++static int ebt_target_snat_check(const char *tablename, unsigned int hookmask,
++ const struct ebt_entry *e, void *data, unsigned int datalen)
++{
++ struct ebt_nat_info *info = (struct ebt_nat_info *) data;
++
++ if (datalen != EBT_ALIGN(sizeof(struct ebt_nat_info)))
++ return -EINVAL;
++ if (BASE_CHAIN && info->target == EBT_RETURN)
++ return -EINVAL;
++ CLEAR_BASE_CHAIN_BIT;
++ if (strcmp(tablename, "nat"))
++ return -EINVAL;
++ if (hookmask & ~(1 << NF_BR_POST_ROUTING))
++ return -EINVAL;
++ if (INVALID_TARGET)
++ return -EINVAL;
++ return 0;
++}
++
++static struct ebt_target snat =
++{
++ {NULL, NULL}, EBT_SNAT_TARGET, ebt_target_snat, ebt_target_snat_check,
++ NULL, THIS_MODULE
++};
++
++static int __init init(void)
++{
++ return ebt_register_target(&snat);
++}
++
++static void __exit fini(void)
++{
++ ebt_unregister_target(&snat);
++}
++
++module_init(init);
++module_exit(fini);
++EXPORT_NO_SYMBOLS;
++MODULE_LICENSE("GPL");
+diff -Nur linux-mips-cvs/net/bridge/netfilter/ebt_stp.c linux-ebtables/net/bridge/netfilter/ebt_stp.c
+--- linux-mips-cvs/net/bridge/netfilter/ebt_stp.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/net/bridge/netfilter/ebt_stp.c 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,191 @@
++/*
++ * ebt_stp
++ *
++ * Authors:
++ * Bart De Schuymer <bdschuym@pandora.be>
++ * Stephen Hemminger <shemminger@osdl.org>
++ *
++ * June, 2003
++ */
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/netfilter_bridge/ebt_stp.h>
++#include <linux/module.h>
++
++#define BPDU_TYPE_CONFIG 0
++#define BPDU_TYPE_TCN 0x80
++
++struct stp_header {
++ uint8_t dsap;
++ uint8_t ssap;
++ uint8_t ctrl;
++ uint8_t pid;
++ uint8_t vers;
++ uint8_t type;
++};
++
++struct stp_config_pdu {
++ uint8_t flags;
++ uint8_t root[8];
++ uint8_t root_cost[4];
++ uint8_t sender[8];
++ uint8_t port[2];
++ uint8_t msg_age[2];
++ uint8_t max_age[2];
++ uint8_t hello_time[2];
++ uint8_t forward_delay[2];
++};
++
++#define NR16(p) (p[0] << 8 | p[1])
++#define NR32(p) ((p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3])
++
++static int ebt_filter_config(struct ebt_stp_info *info,
++ struct stp_config_pdu *stpc)
++{
++ struct ebt_stp_config_info *c;
++ uint16_t v16;
++ uint32_t v32;
++ int verdict, i;
++
++ c = &info->config;
++ if ((info->bitmask & EBT_STP_FLAGS) &&
++ FWINV(c->flags != stpc->flags, EBT_STP_FLAGS))
++ return EBT_NOMATCH;
++ if (info->bitmask & EBT_STP_ROOTPRIO) {
++ v16 = NR16(stpc->root);
++ if (FWINV(v16 < c->root_priol ||
++ v16 > c->root_priou, EBT_STP_ROOTPRIO))
++ return EBT_NOMATCH;
++ }
++ if (info->bitmask & EBT_STP_ROOTADDR) {
++ verdict = 0;
++ for (i = 0; i < 6; i++)
++ verdict |= (stpc->root[2+i] ^ c->root_addr[i]) &
++ c->root_addrmsk[i];
++ if (FWINV(verdict != 0, EBT_STP_ROOTADDR))
++ return EBT_NOMATCH;
++ }
++ if (info->bitmask & EBT_STP_ROOTCOST) {
++ v32 = NR32(stpc->root_cost);
++ if (FWINV(v32 < c->root_costl ||
++ v32 > c->root_costu, EBT_STP_ROOTCOST))
++ return EBT_NOMATCH;
++ }
++ if (info->bitmask & EBT_STP_SENDERPRIO) {
++ v16 = NR16(stpc->sender);
++ if (FWINV(v16 < c->sender_priol ||
++ v16 > c->sender_priou, EBT_STP_SENDERPRIO))
++ return EBT_NOMATCH;
++ }
++ if (info->bitmask & EBT_STP_SENDERADDR) {
++ verdict = 0;
++ for (i = 0; i < 6; i++)
++ verdict |= (stpc->sender[2+i] ^ c->sender_addr[i]) &
++ c->sender_addrmsk[i];
++ if (FWINV(verdict != 0, EBT_STP_SENDERADDR))
++ return EBT_NOMATCH;
++ }
++ if (info->bitmask & EBT_STP_PORT) {
++ v16 = NR16(stpc->port);
++ if (FWINV(v16 < c->portl ||
++ v16 > c->portu, EBT_STP_PORT))
++ return EBT_NOMATCH;
++ }
++ if (info->bitmask & EBT_STP_MSGAGE) {
++ v16 = NR16(stpc->msg_age);
++ if (FWINV(v16 < c->msg_agel ||
++ v16 > c->msg_ageu, EBT_STP_MSGAGE))
++ return EBT_NOMATCH;
++ }
++ if (info->bitmask & EBT_STP_MAXAGE) {
++ v16 = NR16(stpc->max_age);
++ if (FWINV(v16 < c->max_agel ||
++ v16 > c->max_ageu, EBT_STP_MAXAGE))
++ return EBT_NOMATCH;
++ }
++ if (info->bitmask & EBT_STP_HELLOTIME) {
++ v16 = NR16(stpc->hello_time);
++ if (FWINV(v16 < c->hello_timel ||
++ v16 > c->hello_timeu, EBT_STP_HELLOTIME))
++ return EBT_NOMATCH;
++ }
++ if (info->bitmask & EBT_STP_FWDD) {
++ v16 = NR16(stpc->forward_delay);
++ if (FWINV(v16 < c->forward_delayl ||
++ v16 > c->forward_delayu, EBT_STP_FWDD))
++ return EBT_NOMATCH;
++ }
++ return EBT_MATCH;
++}
++
++static int ebt_filter_stp(const struct sk_buff *skb, const struct net_device *in,
++ const struct net_device *out, const void *data, unsigned int datalen)
++{
++ struct ebt_stp_info *info = (struct ebt_stp_info *)data;
++ struct stp_header stph;
++ uint8_t header[6] = {0x42, 0x42, 0x03, 0x00, 0x00, 0x00};
++ if (skb_copy_bits(skb, 0, &stph, sizeof(stph)))
++ return EBT_NOMATCH;
++
++ /* The stp code only considers these */
++ if (memcmp(&stph, header, sizeof(header)))
++ return EBT_NOMATCH;
++
++ if (info->bitmask & EBT_STP_TYPE
++ && FWINV(info->type != stph.type, EBT_STP_TYPE))
++ return EBT_NOMATCH;
++
++ if (stph.type == BPDU_TYPE_CONFIG &&
++ info->bitmask & EBT_STP_CONFIG_MASK) {
++ struct stp_config_pdu stpc;
++
++ if (skb_copy_bits(skb, sizeof(stph), &stpc, sizeof(stpc)))
++ return EBT_NOMATCH;
++ return ebt_filter_config(info, &stpc);
++ }
++ return EBT_MATCH;
++}
++
++static int ebt_stp_check(const char *tablename, unsigned int hookmask,
++ const struct ebt_entry *e, void *data, unsigned int datalen)
++{
++ struct ebt_stp_info *info = (struct ebt_stp_info *)data;
++ int len = EBT_ALIGN(sizeof(struct ebt_stp_info));
++ uint8_t bridge_ula[6] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
++ uint8_t msk[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
++
++ if (info->bitmask & ~EBT_STP_MASK || info->invflags & ~EBT_STP_MASK ||
++ !(info->bitmask & EBT_STP_MASK))
++ return -EINVAL;
++ if (datalen != len)
++ return -EINVAL;
++ /* Make sure the match only receives stp frames */
++ if (memcmp(e->destmac, bridge_ula, ETH_ALEN) ||
++ memcmp(e->destmsk, msk, ETH_ALEN) || !(e->bitmask & EBT_DESTMAC))
++ return -EINVAL;
++
++ return 0;
++}
++
++static struct ebt_match filter_stp =
++{
++ .name = EBT_STP_MATCH,
++ .match = ebt_filter_stp,
++ .check = ebt_stp_check,
++ .me = THIS_MODULE,
++};
++
++static int __init init(void)
++{
++ return ebt_register_match(&filter_stp);
++}
++
++static void __exit fini(void)
++{
++ ebt_unregister_match(&filter_stp);
++}
++
++module_init(init);
++module_exit(fini);
++EXPORT_NO_SYMBOLS;
++MODULE_LICENSE("GPL");
+diff -Nur linux-mips-cvs/net/bridge/netfilter/ebt_ulog.c linux-ebtables/net/bridge/netfilter/ebt_ulog.c
+--- linux-mips-cvs/net/bridge/netfilter/ebt_ulog.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/net/bridge/netfilter/ebt_ulog.c 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,281 @@
++/*
++ * netfilter module for userspace bridged Ethernet frames logging daemons
++ *
++ * Authors:
++ * Bart De Schuymer <bdschuym@pandora.be>
++ *
++ * November, 2004
++ *
++ * Based on ipt_ULOG.c, which is
++ * (C) 2000-2002 by Harald Welte <laforge@netfilter.org>
++ *
++ * This module accepts two parameters:
++ *
++ * nlbufsiz:
++ * The parameter specifies how big the buffer for each netlink multicast
++ * group is. e.g. If you say nlbufsiz=8192, up to eight kb of packets will
++ * get accumulated in the kernel until they are sent to userspace. It is
++ * NOT possible to allocate more than 128kB, and it is strongly discouraged,
++ * because atomically allocating 128kB inside the network rx softirq is not
++ * reliable. Please also keep in mind that this buffer size is allocated for
++ * each nlgroup you are using, so the total kernel memory usage increases
++ * by that factor.
++ *
++ * flushtimeout:
++ * Specifies after how many hundredths of a second the queue should be
++ * flushed even if it is not full yet.
++ *
++ */
++
++#include <linux/module.h>
++#include <linux/config.h>
++#include <linux/spinlock.h>
++#include <linux/socket.h>
++#include <linux/skbuff.h>
++#include <linux/kernel.h>
++#include <linux/timer.h>
++#include <linux/netlink.h>
++#include <linux/netdevice.h>
++#include <linux/module.h>
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/netfilter_bridge/ebt_ulog.h>
++#include <net/sock.h>
++#include "../br_private.h"
++
++#define PRINTR(format, args...) do { if (net_ratelimit()) \
++ printk(format , ## args); } while (0)
++
++static unsigned int nlbufsiz = 4096;
++MODULE_PARM(nlbufsiz, "i");
++MODULE_PARM_DESC(nlbufsiz, "netlink buffer size (number of bytes) "
++ "(defaults to 4096)");
++
++static unsigned int flushtimeout = 10;
++MODULE_PARM(flushtimeout, "i");
++MODULE_PARM_DESC(flushtimeout, "buffer flush timeout (hundredths of a second) "
++ "(defaults to 10)");
++
++typedef struct {
++	unsigned int qlen;		/* number of nlmsgs in the skb */
++ struct nlmsghdr *lastnlh; /* netlink header of last msg in skb */
++ struct sk_buff *skb; /* the pre-allocated skb */
++ struct timer_list timer; /* the timer function */
++ spinlock_t lock; /* the per-queue lock */
++} ebt_ulog_buff_t;
++
++static ebt_ulog_buff_t ulog_buffers[EBT_ULOG_MAXNLGROUPS];
++static struct sock *ebtlognl;
++
++/* send one ulog_buff_t to userspace */
++static void ulog_send(unsigned int nlgroup)
++{
++ ebt_ulog_buff_t *ub = &ulog_buffers[nlgroup];
++
++ if (timer_pending(&ub->timer))
++ del_timer(&ub->timer);
++
++ /* last nlmsg needs NLMSG_DONE */
++ if (ub->qlen > 1)
++ ub->lastnlh->nlmsg_type = NLMSG_DONE;
++
++ NETLINK_CB(ub->skb).dst_groups = 1 << nlgroup;
++ netlink_broadcast(ebtlognl, ub->skb, 0, 1 << nlgroup, GFP_ATOMIC);
++
++ ub->qlen = 0;
++ ub->skb = NULL;
++}
++
++/* timer function to flush queue in flushtimeout time */
++static void ulog_timer(unsigned long data)
++{
++ spin_lock_bh(&ulog_buffers[data].lock);
++ if (ulog_buffers[data].skb)
++ ulog_send(data);
++ spin_unlock_bh(&ulog_buffers[data].lock);
++}
++
++static struct sk_buff *ulog_alloc_skb(unsigned int size)
++{
++ struct sk_buff *skb;
++
++ skb = alloc_skb(nlbufsiz, GFP_ATOMIC);
++ if (!skb) {
++ PRINTR(KERN_ERR "ebt_ulog: can't alloc whole buffer "
++ "of size %ub!\n", nlbufsiz);
++ if (size < nlbufsiz) {
++ /* try to allocate only as much as we need for
++ * current packet */
++ skb = alloc_skb(size, GFP_ATOMIC);
++ if (!skb)
++ PRINTR(KERN_ERR "ebt_ulog: can't even allocate "
++ "buffer of size %ub\n", size);
++ }
++ }
++
++ return skb;
++}
++
++static void ebt_ulog(const struct sk_buff *skb, unsigned int hooknr,
++ const struct net_device *in, const struct net_device *out,
++ const void *data, unsigned int datalen)
++{
++ ebt_ulog_packet_msg_t *pm;
++ size_t size, copy_len;
++ struct nlmsghdr *nlh;
++ struct ebt_ulog_info *loginfo = (struct ebt_ulog_info *)data;
++ unsigned int group = loginfo->nlgroup;
++ ebt_ulog_buff_t *ub = &ulog_buffers[group];
++ spinlock_t *lock = &ub->lock;
++
++ if ((loginfo->cprange == 0) ||
++ (loginfo->cprange > skb->len + ETH_HLEN))
++ copy_len = skb->len + ETH_HLEN;
++ else
++ copy_len = loginfo->cprange;
++
++ size = NLMSG_SPACE(sizeof(*pm) + copy_len);
++
++ spin_lock_bh(lock);
++
++ if (!ub->skb) {
++ if (!(ub->skb = ulog_alloc_skb(size)))
++ goto alloc_failure;
++ } else if (size > skb_tailroom(ub->skb)) {
++ ulog_send(group);
++
++ if (!(ub->skb = ulog_alloc_skb(size)))
++ goto alloc_failure;
++ }
++
++ nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, 0,
++ size - NLMSG_ALIGN(sizeof(*nlh)));
++ ub->qlen++;
++
++ pm = NLMSG_DATA(nlh);
++
++ /* Fill in the ulog data */
++ do_gettimeofday(&pm->stamp);
++ if (ub->qlen == 1)
++ ub->skb->stamp = pm->stamp;
++ pm->data_len = copy_len;
++ pm->mark = skb->nfmark;
++ pm->hook = hooknr;
++ if (loginfo->prefix != NULL)
++ strcpy(pm->prefix, loginfo->prefix);
++ else
++ *(pm->prefix) = '\0';
++
++ if (in) {
++ strcpy(pm->physindev, in->name);
++ strcpy(pm->indev, in->br_port->br->dev.name);
++ } else
++ pm->indev[0] = pm->physindev[0] = '\0';
++
++ if (out) {
++ strcpy(pm->physoutdev, out->name);
++ strcpy(pm->outdev, out->br_port->br->dev.name);
++ } else
++ pm->outdev[0] = pm->physoutdev[0] = '\0';
++
++ if (skb_copy_bits(skb, -ETH_HLEN, pm->data, copy_len) < 0)
++ BUG();
++
++ if (ub->qlen > 1)
++ ub->lastnlh->nlmsg_flags |= NLM_F_MULTI;
++
++ ub->lastnlh = nlh;
++
++ if (ub->qlen >= loginfo->qthreshold)
++ ulog_send(group);
++ else if (!timer_pending(&ub->timer)) {
++ ub->timer.expires = jiffies + flushtimeout * HZ / 100;
++ add_timer(&ub->timer);
++ }
++
++unlock:
++ spin_unlock_bh(lock);
++
++ return;
++
++nlmsg_failure:
++ PRINTR(KERN_ERR "ebt_ULOG: error during NLMSG_PUT.\n");
++ goto unlock;
++alloc_failure:
++ goto unlock;
++}
++
++static int ebt_ulog_check(const char *tablename, unsigned int hookmask,
++ const struct ebt_entry *e, void *data, unsigned int datalen)
++{
++ struct ebt_ulog_info *loginfo = (struct ebt_ulog_info *)data;
++
++ if (datalen != EBT_ALIGN(sizeof(struct ebt_ulog_info)) ||
++ loginfo->nlgroup > 31)
++ return -EINVAL;
++
++ loginfo->prefix[EBT_ULOG_PREFIX_LEN - 1] = '\0';
++
++ if (loginfo->qthreshold > EBT_ULOG_MAX_QLEN)
++ loginfo->qthreshold = EBT_ULOG_MAX_QLEN;
++
++ return 0;
++}
++
++static struct ebt_watcher ulog = {
++ {NULL, NULL}, EBT_ULOG_WATCHER, ebt_ulog, ebt_ulog_check, NULL,
++ THIS_MODULE
++};
++
++static int __init init(void)
++{
++ int i, ret = 0;
++
++ if (nlbufsiz >= 128*1024) {
++ printk(KERN_NOTICE "ebt_ulog: Netlink buffer has to be <= 128kB,"
++ " please try a smaller nlbufsiz parameter.\n");
++ return -EINVAL;
++ }
++
++ /* initialize ulog_buffers */
++ for (i = 0; i < EBT_ULOG_MAXNLGROUPS; i++) {
++ init_timer(&ulog_buffers[i].timer);
++ ulog_buffers[i].timer.function = ulog_timer;
++ ulog_buffers[i].timer.data = i;
++ ulog_buffers[i].lock = SPIN_LOCK_UNLOCKED;
++ }
++
++ ebtlognl = netlink_kernel_create(NETLINK_NFLOG, NULL);
++ if (!ebtlognl)
++ ret = -ENOMEM;
++ else if ((ret = ebt_register_watcher(&ulog)))
++ sock_release(ebtlognl->socket);
++
++ return ret;
++}
++
++static void __exit fini(void)
++{
++ ebt_ulog_buff_t *ub;
++ int i;
++
++ ebt_unregister_watcher(&ulog);
++ for (i = 0; i < EBT_ULOG_MAXNLGROUPS; i++) {
++ ub = &ulog_buffers[i];
++ if (timer_pending(&ub->timer))
++ del_timer(&ub->timer);
++ spin_lock_bh(&ub->lock);
++ if (ub->skb) {
++ kfree_skb(ub->skb);
++ ub->skb = NULL;
++ }
++ spin_unlock_bh(&ub->lock);
++ }
++ sock_release(ebtlognl->socket);
++}
++
++module_init(init);
++module_exit(fini);
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>");
++MODULE_DESCRIPTION("ebtables userspace logging module for bridged Ethernet"
++ " frames");
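
The header comment of ebt_ulog only hints at the userspace side: logged frames are broadcast on a NETLINK_NFLOG multicast group as nlmsghdr-framed ebt_ulog_packet_msg_t records. The following is a minimal sketch of a reader, assuming the rule logs to the lowest group (bit 0x1) and that NETLINK_NFLOG is available from <linux/netlink.h>; it needs the usual netlink privileges, and message walking and error handling are left out.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>

int main(void)
{
	struct sockaddr_nl addr;
	char buf[8192];
	int fd, len;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_NFLOG);
	if (fd < 0)
		return 1;

	memset(&addr, 0, sizeof(addr));
	addr.nl_family = AF_NETLINK;
	addr.nl_groups = 1;	/* 1 << nlgroup of the logging rule */
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	for (;;) {
		len = recv(fd, buf, sizeof(buf), 0);
		if (len <= 0)
			break;
		/* buf now holds one or more nlmsghdr-framed ebt_ulog_packet_msg_t
		 * records; a real daemon walks them with NLMSG_OK()/NLMSG_NEXT(). */
		printf("got %d bytes of logged frames\n", len);
	}
	close(fd);
	return 0;
}
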
+diff -Nur linux-mips-cvs/net/bridge/netfilter/ebt_vlan.c linux-ebtables/net/bridge/netfilter/ebt_vlan.c
+--- linux-mips-cvs/net/bridge/netfilter/ebt_vlan.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/net/bridge/netfilter/ebt_vlan.c 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,259 @@
++/*
++ * Description: EBTables 802.1Q match extension kernelspace module.
++ * Authors: Nick Fedchik <nick@fedchik.org.ua>
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#include <linux/if_ether.h>
++#include <linux/if_vlan.h>
++#include <linux/module.h>
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/netfilter_bridge/ebt_vlan.h>
++
++static unsigned char debug;
++#define MODULE_VERSION "0.6"
++
++MODULE_PARM(debug, "0-1b");
++MODULE_PARM_DESC(debug, "debug=1 turns on debug messages");
++MODULE_AUTHOR("Nick Fedchik <nick@fedchik.org.ua>");
++MODULE_DESCRIPTION("802.1Q match module (ebtables extension), v"
++ MODULE_VERSION);
++MODULE_LICENSE("GPL");
++
++
++#define DEBUG_MSG(args...) if (debug) printk (KERN_DEBUG "ebt_vlan: " args)
++#define INV_FLAG(_inv_flag_) (info->invflags & _inv_flag_) ? "!" : ""
++#define GET_BITMASK(_BIT_MASK_) info->bitmask & _BIT_MASK_
++#define SET_BITMASK(_BIT_MASK_) info->bitmask |= _BIT_MASK_
++#define EXIT_ON_MISMATCH(_MATCH_,_MASK_) if (!((info->_MATCH_ == _MATCH_)^!!(info->invflags & _MASK_))) return 1;
++
++/*
++ * Function description: ebt_filter_vlan() is main engine for
++ * checking passed 802.1Q frame according to
++ * the passed extension parameters (in the *data buffer)
++ * ebt_filter_vlan() is called after a successful check of the rule params
++ * by the ebt_check_vlan() function.
++ * Parameters:
++ * const struct sk_buff *skb - pointer to passed ethernet frame buffer
++ * const void *data - pointer to passed extension parameters
++ * unsigned int datalen - length of passed *data buffer
++ * const struct net_device *in -
++ * const struct net_device *out -
++ * const struct ebt_counter *c -
++ * Returned values:
++ * 0 - ok (all rule params matched)
++ * 1 - miss (rule params do not match the parsed frame)
++ */
++static int
++ebt_filter_vlan(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *data, unsigned int datalen)
++{
++ struct ebt_vlan_info *info = (struct ebt_vlan_info *) data; /* userspace data */
++ struct vlan_ethhdr *frame = (struct vlan_ethhdr *) skb->mac.raw; /* Passed tagged frame */
++
++ unsigned short TCI; /* Whole TCI, given from parsed frame */
++ unsigned short id; /* VLAN ID, given from frame TCI */
++ unsigned char prio; /* user_priority, given from frame TCI */
++ unsigned short encap; /* VLAN encapsulated Type/Length field, given from orig frame */
++
++ /*
++ * Tag Control Information (TCI) consists of the following elements:
++ * - User_priority. The user_priority field is three bits in length,
++ * interpreted as a binary number.
++ * - Canonical Format Indicator (CFI). The Canonical Format Indicator
++ * (CFI) is a single bit flag value. Currently ignored.
++ * - VLAN Identifier (VID). The VID is encoded as
++ * an unsigned binary number.
++ */
++ TCI = ntohs(frame->h_vlan_TCI);
++ id = TCI & VLAN_VID_MASK;
++ prio = (TCI >> 13) & 0x7;
++ encap = frame->h_vlan_encapsulated_proto;
++
++ /*
++ * Checking VLAN Identifier (VID)
++ */
++ if (GET_BITMASK(EBT_VLAN_ID)) { /* Is VLAN ID parsed? */
++ EXIT_ON_MISMATCH(id, EBT_VLAN_ID);
++ }
++ /*
++ * Checking user_priority
++ */
++ if (GET_BITMASK(EBT_VLAN_PRIO)) { /* Is VLAN user_priority parsed? */
++ EXIT_ON_MISMATCH(prio, EBT_VLAN_PRIO);
++ }
++ /*
++ * Checking Encapsulated Proto (Length/Type) field
++ */
++ if (GET_BITMASK(EBT_VLAN_ENCAP)) { /* Is VLAN Encap parsed? */
++ EXIT_ON_MISMATCH(encap, EBT_VLAN_ENCAP);
++ }
++ /*
++	 * All possible extension parameters were parsed.
++	 * If the rule never returned on a mismatch, everything matched.
++ */
++ return 0;
++}
++
++/*
++ * Function description: ebt_vlan_check() is called when userspace
++ * delivers the table entry to the kernel,
++ * and to check that userspace doesn't give a bad table.
++ * Parameters:
++ * const char *tablename - table name string
++ * unsigned int hooknr - hook number
++ * const struct ebt_entry *e - ebtables entry basic set
++ * const void *data - pointer to passed extension parameters
++ * unsigned int datalen - length of passed *data buffer
++ * Returned values:
++ * 0 - ok (all delivered rule params are correct)
++ * -EINVAL - error (rule params are out of range, invalid, or incompatible)
++ */
++static int
++ebt_check_vlan(const char *tablename,
++ unsigned int hooknr,
++ const struct ebt_entry *e, void *data, unsigned int datalen)
++{
++ struct ebt_vlan_info *info = (struct ebt_vlan_info *) data;
++
++ /*
++ * Parameters buffer overflow check
++ */
++ if (datalen != EBT_ALIGN(sizeof(struct ebt_vlan_info))) {
++ DEBUG_MSG
++ ("passed size %d is not eq to ebt_vlan_info (%d)\n",
++ datalen, sizeof(struct ebt_vlan_info));
++ return -EINVAL;
++ }
++
++ /*
++ * Is it 802.1Q frame checked?
++ */
++ if (e->ethproto != __constant_htons(ETH_P_8021Q)) {
++ DEBUG_MSG
++ ("passed entry proto %2.4X is not 802.1Q (8100)\n",
++ (unsigned short) ntohs(e->ethproto));
++ return -EINVAL;
++ }
++
++ /*
++ * Check for bitmask range
++ * True if even one bit is out of mask
++ */
++ if (info->bitmask & ~EBT_VLAN_MASK) {
++ DEBUG_MSG("bitmask %2X is out of mask (%2X)\n",
++ info->bitmask, EBT_VLAN_MASK);
++ return -EINVAL;
++ }
++
++ /*
++ * Check for inversion flags range
++ */
++ if (info->invflags & ~EBT_VLAN_MASK) {
++ DEBUG_MSG("inversion flags %2X is out of mask (%2X)\n",
++ info->invflags, EBT_VLAN_MASK);
++ return -EINVAL;
++ }
++
++ /*
++ * Reserved VLAN ID (VID) values
++ * -----------------------------
++ * 0 - The null VLAN ID.
++ * 1 - The default Port VID (PVID)
++ * 0x0FFF - Reserved for implementation use.
++ * if_vlan.h: VLAN_GROUP_ARRAY_LEN 4096.
++ */
++ if (GET_BITMASK(EBT_VLAN_ID)) { /* when vlan-id param was spec-ed */
++ if (!!info->id) { /* if id!=0 => check vid range */
++ if (info->id > VLAN_GROUP_ARRAY_LEN) {
++ DEBUG_MSG
++ ("id %d is out of range (1-4096)\n",
++ info->id);
++ return -EINVAL;
++ }
++ /*
++			 * Note: at this point the frame is a valid VLAN-tagged frame.
++			 * Any value of user_priority is acceptable,
++ * but should be ignored according to 802.1Q Std.
++ * So we just drop the prio flag.
++ */
++ info->bitmask &= ~EBT_VLAN_PRIO;
++ }
++ /*
++ * Else, id=0 (null VLAN ID) => user_priority range (any?)
++ */
++ }
++
++ if (GET_BITMASK(EBT_VLAN_PRIO)) {
++ if ((unsigned char) info->prio > 7) {
++ DEBUG_MSG
++ ("prio %d is out of range (0-7)\n",
++ info->prio);
++ return -EINVAL;
++ }
++ }
++ /*
++ * Check for encapsulated proto range - it is possible to be
++ * any value for u_short range.
++ * if_ether.h: ETH_ZLEN 60 - Min. octets in frame sans FCS
++ */
++ if (GET_BITMASK(EBT_VLAN_ENCAP)) {
++ if ((unsigned short) ntohs(info->encap) < ETH_ZLEN) {
++ DEBUG_MSG
++ ("encap frame length %d is less than minimal\n",
++ ntohs(info->encap));
++ return -EINVAL;
++ }
++ }
++
++ return 0;
++}
++
++static struct ebt_match filter_vlan = {
++ {NULL, NULL},
++ EBT_VLAN_MATCH,
++ ebt_filter_vlan,
++ ebt_check_vlan,
++ NULL,
++ THIS_MODULE
++};
++
++/*
++ * Module initialization function.
++ */
++static int __init init(void)
++{
++ DEBUG_MSG("ebtables 802.1Q extension module v"
++ MODULE_VERSION "\n");
++ DEBUG_MSG("module debug=%d\n", !!debug);
++ return ebt_register_match(&filter_vlan);
++}
++
++/*
++ * Module "finalization" function
++ */
++static void __exit fini(void)
++{
++ ebt_unregister_match(&filter_vlan);
++}
++
++module_init(init);
++module_exit(fini);
++
++EXPORT_NO_SYMBOLS;
+diff -Nur linux-mips-cvs/net/bridge/netfilter/ebtable_broute.c linux-ebtables/net/bridge/netfilter/ebtable_broute.c
+--- linux-mips-cvs/net/bridge/netfilter/ebtable_broute.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/net/bridge/netfilter/ebtable_broute.c 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,79 @@
++/*
++ * ebtable_broute
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * April, 2002
++ *
++ * This table lets you choose between routing and bridging for frames
++ * entering on a bridge enslaved nic. This table is traversed before any
++ * other ebtables table. See net/bridge/br_input.c.
++ */
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/module.h>
++#include <linux/if_bridge.h>
++#include <linux/brlock.h>
++
++// EBT_ACCEPT means the frame will be bridged
++// EBT_DROP means the frame will be routed
++static struct ebt_entries initial_chain =
++ {0, "BROUTING", 0, EBT_ACCEPT, 0};
++
++static struct ebt_replace initial_table =
++{
++ "broute", 1 << NF_BR_BROUTING, 0, sizeof(struct ebt_entries),
++ { [NF_BR_BROUTING]&initial_chain}, 0, NULL, (char *)&initial_chain
++};
++
++static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
++{
++ if (valid_hooks & ~(1 << NF_BR_BROUTING))
++ return -EINVAL;
++ return 0;
++}
++
++static struct ebt_table broute_table =
++{
++ {NULL, NULL}, "broute", &initial_table, 1 << NF_BR_BROUTING,
++ RW_LOCK_UNLOCKED, check, NULL
++};
++
++static int ebt_broute(struct sk_buff **pskb)
++{
++ int ret;
++
++ ret = ebt_do_table(NF_BR_BROUTING, pskb, (*pskb)->dev, NULL,
++ &broute_table);
++ if (ret == NF_DROP)
++ return 1; // route it
++ return 0; // bridge it
++}
++
++static int __init init(void)
++{
++ int ret;
++
++ ret = ebt_register_table(&broute_table);
++ if (ret < 0)
++ return ret;
++ br_write_lock_bh(BR_NETPROTO_LOCK);
++ // see br_input.c
++ br_should_route_hook = ebt_broute;
++ br_write_unlock_bh(BR_NETPROTO_LOCK);
++ return ret;
++}
++
++static void __exit fini(void)
++{
++ br_write_lock_bh(BR_NETPROTO_LOCK);
++ br_should_route_hook = NULL;
++ br_write_unlock_bh(BR_NETPROTO_LOCK);
++ ebt_unregister_table(&broute_table);
++}
++
++module_init(init);
++module_exit(fini);
++EXPORT_NO_SYMBOLS;
++MODULE_LICENSE("GPL");
+diff -Nur linux-mips-cvs/net/bridge/netfilter/ebtable_filter.c linux-ebtables/net/bridge/netfilter/ebtable_filter.c
+--- linux-mips-cvs/net/bridge/netfilter/ebtable_filter.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/net/bridge/netfilter/ebtable_filter.c 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,90 @@
++/*
++ * ebtable_filter
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * April, 2002
++ *
++ */
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/module.h>
++
++#define FILTER_VALID_HOOKS ((1 << NF_BR_LOCAL_IN) | (1 << NF_BR_FORWARD) | \
++ (1 << NF_BR_LOCAL_OUT))
++
++static struct ebt_entries initial_chains[] =
++{
++ {0, "INPUT", 0, EBT_ACCEPT, 0},
++ {0, "FORWARD", 0, EBT_ACCEPT, 0},
++ {0, "OUTPUT", 0, EBT_ACCEPT, 0}
++};
++
++static struct ebt_replace initial_table =
++{
++ "filter", FILTER_VALID_HOOKS, 0, 3 * sizeof(struct ebt_entries),
++ { [NF_BR_LOCAL_IN]&initial_chains[0], [NF_BR_FORWARD]&initial_chains[1],
++ [NF_BR_LOCAL_OUT]&initial_chains[2] }, 0, NULL, (char *)initial_chains
++};
++
++static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
++{
++ if (valid_hooks & ~FILTER_VALID_HOOKS)
++ return -EINVAL;
++ return 0;
++}
++
++static struct ebt_table frame_filter =
++{
++ {NULL, NULL}, "filter", &initial_table, FILTER_VALID_HOOKS,
++ RW_LOCK_UNLOCKED, check, NULL
++};
++
++static unsigned int
++ebt_hook (unsigned int hook, struct sk_buff **pskb, const struct net_device *in,
++ const struct net_device *out, int (*okfn)(struct sk_buff *))
++{
++ return ebt_do_table(hook, pskb, in, out, &frame_filter);
++}
++
++static struct nf_hook_ops ebt_ops_filter[] = {
++ { { NULL, NULL }, ebt_hook, PF_BRIDGE, NF_BR_LOCAL_IN,
++ NF_BR_PRI_FILTER_BRIDGED},
++ { { NULL, NULL }, ebt_hook, PF_BRIDGE, NF_BR_FORWARD,
++ NF_BR_PRI_FILTER_BRIDGED},
++ { { NULL, NULL }, ebt_hook, PF_BRIDGE, NF_BR_LOCAL_OUT,
++ NF_BR_PRI_FILTER_OTHER}
++};
++
++static int __init init(void)
++{
++ int i, j, ret;
++
++ ret = ebt_register_table(&frame_filter);
++ if (ret < 0)
++ return ret;
++ for (i = 0; i < sizeof(ebt_ops_filter) / sizeof(ebt_ops_filter[0]); i++)
++ if ((ret = nf_register_hook(&ebt_ops_filter[i])) < 0)
++ goto cleanup;
++ return ret;
++cleanup:
++ for (j = 0; j < i; j++)
++ nf_unregister_hook(&ebt_ops_filter[j]);
++ ebt_unregister_table(&frame_filter);
++ return ret;
++}
++
++static void __exit fini(void)
++{
++ int i;
++
++ for (i = 0; i < sizeof(ebt_ops_filter) / sizeof(ebt_ops_filter[0]); i++)
++ nf_unregister_hook(&ebt_ops_filter[i]);
++ ebt_unregister_table(&frame_filter);
++}
++
++module_init(init);
++module_exit(fini);
++EXPORT_NO_SYMBOLS;
++MODULE_LICENSE("GPL");
+diff -Nur linux-mips-cvs/net/bridge/netfilter/ebtable_nat.c linux-ebtables/net/bridge/netfilter/ebtable_nat.c
+--- linux-mips-cvs/net/bridge/netfilter/ebtable_nat.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/net/bridge/netfilter/ebtable_nat.c 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,96 @@
++/*
++ * ebtable_nat
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * April, 2002
++ *
++ */
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/module.h>
++#define NAT_VALID_HOOKS ((1 << NF_BR_PRE_ROUTING) | (1 << NF_BR_LOCAL_OUT) | \
++ (1 << NF_BR_POST_ROUTING))
++
++static struct ebt_entries initial_chains[] =
++{
++ {0, "PREROUTING", 0, EBT_ACCEPT, 0},
++ {0, "OUTPUT", 0, EBT_ACCEPT, 0},
++ {0, "POSTROUTING", 0, EBT_ACCEPT, 0}
++};
++
++static struct ebt_replace initial_table =
++{
++ "nat", NAT_VALID_HOOKS, 0, 3 * sizeof(struct ebt_entries),
++ { [NF_BR_PRE_ROUTING]&initial_chains[0], [NF_BR_LOCAL_OUT]&initial_chains[1],
++ [NF_BR_POST_ROUTING]&initial_chains[2] }, 0, NULL, (char *)initial_chains
++};
++
++static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
++{
++ if (valid_hooks & ~NAT_VALID_HOOKS)
++ return -EINVAL;
++ return 0;
++}
++
++static struct ebt_table frame_nat =
++{
++ {NULL, NULL}, "nat", &initial_table, NAT_VALID_HOOKS,
++ RW_LOCK_UNLOCKED, check, NULL
++};
++
++static unsigned int
++ebt_nat_dst(unsigned int hook, struct sk_buff **pskb, const struct net_device *in
++ , const struct net_device *out, int (*okfn)(struct sk_buff *))
++{
++ return ebt_do_table(hook, pskb, in, out, &frame_nat);
++}
++
++static unsigned int
++ebt_nat_src(unsigned int hook, struct sk_buff **pskb, const struct net_device *in
++ , const struct net_device *out, int (*okfn)(struct sk_buff *))
++{
++ return ebt_do_table(hook, pskb, in, out, &frame_nat);
++}
++
++static struct nf_hook_ops ebt_ops_nat[] = {
++ { { NULL, NULL }, ebt_nat_dst, PF_BRIDGE, NF_BR_LOCAL_OUT,
++ NF_BR_PRI_NAT_DST_OTHER},
++ { { NULL, NULL }, ebt_nat_src, PF_BRIDGE, NF_BR_POST_ROUTING,
++ NF_BR_PRI_NAT_SRC},
++ { { NULL, NULL }, ebt_nat_dst, PF_BRIDGE, NF_BR_PRE_ROUTING,
++ NF_BR_PRI_NAT_DST_BRIDGED},
++};
++
++static int __init init(void)
++{
++ int i, ret, j;
++
++ ret = ebt_register_table(&frame_nat);
++ if (ret < 0)
++ return ret;
++ for (i = 0; i < sizeof(ebt_ops_nat) / sizeof(ebt_ops_nat[0]); i++)
++ if ((ret = nf_register_hook(&ebt_ops_nat[i])) < 0)
++ goto cleanup;
++ return ret;
++cleanup:
++ for (j = 0; j < i; j++)
++ nf_unregister_hook(&ebt_ops_nat[j]);
++ ebt_unregister_table(&frame_nat);
++ return ret;
++}
++
++static void __exit fini(void)
++{
++ int i;
++
++ for (i = 0; i < sizeof(ebt_ops_nat) / sizeof(ebt_ops_nat[0]); i++)
++ nf_unregister_hook(&ebt_ops_nat[i]);
++ ebt_unregister_table(&frame_nat);
++}
++
++module_init(init);
++module_exit(fini);
++EXPORT_NO_SYMBOLS;
++MODULE_LICENSE("GPL");
+diff -Nur linux-mips-cvs/net/bridge/netfilter/ebtables.c linux-ebtables/net/bridge/netfilter/ebtables.c
+--- linux-mips-cvs/net/bridge/netfilter/ebtables.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/net/bridge/netfilter/ebtables.c 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,1496 @@
++/*
++ * ebtables
++ *
++ * Author:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * ebtables.c,v 2.0, July, 2002
++ *
++ * This code is strongly inspired by the iptables code, which is
++ * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++// used for print_string
++#include <linux/sched.h>
++#include <linux/tty.h>
++
++#include <linux/kmod.h>
++#include <linux/module.h>
++#include <linux/vmalloc.h>
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/spinlock.h>
++#include <asm/uaccess.h>
++#include <linux/smp.h>
++#include <net/sock.h>
++// needed for logical [in,out]-dev filtering
++#include "../br_private.h"
++
++// list_named_find
++#define ASSERT_READ_LOCK(x)
++#define ASSERT_WRITE_LOCK(x)
++#include <linux/netfilter_ipv4/listhelp.h>
++
++#if 0 // use this for remote debugging
++// Copyright (C) 1998 by Ori Pomerantz
++// Print the string to the appropriate tty, the one
++// the current task uses
++static void print_string(char *str)
++{
++ struct tty_struct *my_tty;
++
++ /* The tty for the current task */
++ my_tty = current->tty;
++ if (my_tty != NULL) {
++ (*(my_tty->driver).write)(my_tty, 0, str, strlen(str));
++ (*(my_tty->driver).write)(my_tty, 0, "\015\012", 2);
++ }
++}
++
++#define BUGPRINT(args) print_string(args);
++#else
++#define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
++ "report to author: "format, ## args)
++// #define BUGPRINT(format, args...)
++#endif
++#define MEMPRINT(format, args...) printk("kernel msg: ebtables "\
++ ": out of memory: "format, ## args)
++// #define MEMPRINT(format, args...)
++
++
++
++// Each cpu has its own set of counters, so there is no need for write_lock in
++// the softirq
++// For reading or updating the counters, the user context needs to
++// get a write_lock
++
++// The size of each set of counters is altered to get cache alignment
++#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
++#define COUNTER_OFFSET(n) (SMP_ALIGN(n * sizeof(struct ebt_counter)))
++#define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \
++ COUNTER_OFFSET(n) * cpu))
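
For orientation, a minimal standalone sketch (not part of the patch) of what these macros compute: each CPU gets its own block of counters, and each block is padded out to a cache-line multiple so that CPUs do not share cache lines. The 32-byte cache line and the two-u64 counter layout used here are assumptions made only for this demo.

#include <stdio.h>
#include <stdint.h>

/* stand-ins for the kernel definitions; sizes are demo assumptions */
struct ebt_counter { uint64_t pcnt; uint64_t bcnt; };
#define SMP_CACHE_BYTES 32
#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES - 1) & ~(SMP_CACHE_BYTES - 1))
#define COUNTER_OFFSET(n) (SMP_ALIGN((n) * sizeof(struct ebt_counter)))
#define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)(c)) + \
        COUNTER_OFFSET(n) * (cpu)))

int main(void)
{
        enum { NENTRIES = 5, NCPUS = 4 };
        /* one cache-aligned block of NENTRIES counters per CPU */
        static char counters[COUNTER_OFFSET(NENTRIES) * NCPUS];
        int cpu;

        for (cpu = 0; cpu < NCPUS; cpu++)
                printf("cpu %d: counter block starts at byte offset %ld\n",
                       cpu, (long)((char *)COUNTER_BASE(counters, NENTRIES, cpu)
                                   - counters));
        return 0;
}

With 5 counters of 16 bytes, each per-CPU block is padded from 80 to 96 bytes, so the blocks start at offsets 0, 96, 192 and 288.
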
++
++
++
++static DECLARE_MUTEX(ebt_mutex);
++static LIST_HEAD(ebt_tables);
++static LIST_HEAD(ebt_targets);
++static LIST_HEAD(ebt_matches);
++static LIST_HEAD(ebt_watchers);
++
++static struct ebt_target ebt_standard_target =
++{ {NULL, NULL}, EBT_STANDARD_TARGET, NULL, NULL, NULL, NULL};
++
++static inline int ebt_do_watcher (struct ebt_entry_watcher *w,
++ const struct sk_buff *skb, unsigned int hooknr, const struct net_device *in,
++ const struct net_device *out)
++{
++ w->u.watcher->watcher(skb, hooknr, in, out, w->data,
++ w->watcher_size);
++ // watchers don't give a verdict
++ return 0;
++}
++
++static inline int ebt_do_match (struct ebt_entry_match *m,
++ const struct sk_buff *skb, const struct net_device *in,
++ const struct net_device *out)
++{
++ return m->u.match->match(skb, in, out, m->data,
++ m->match_size);
++}
++
++static inline int ebt_dev_check(char *entry, const struct net_device *device)
++{
++ int i = 0;
++ char *devname = device->name;
++
++ if (*entry == '\0')
++ return 0;
++ if (!device)
++ return 1;
++ /* 1 is the wildcard token */
++ while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
++ i++;
++ return (devname[i] != entry[i] && entry[i] != 1);
++}
++
++#define FWINV2(bool,invflg) ((bool) ^ !!(e->invflags & invflg))
++// process standard matches
++static inline int ebt_basic_match(struct ebt_entry *e, struct ethhdr *h,
++ const struct net_device *in, const struct net_device *out)
++{
++ int verdict, i;
++
++ if (e->bitmask & EBT_802_3) {
++ if (FWINV2(ntohs(h->h_proto) >= 1536, EBT_IPROTO))
++ return 1;
++ } else if (!(e->bitmask & EBT_NOPROTO) &&
++ FWINV2(e->ethproto != h->h_proto, EBT_IPROTO))
++ return 1;
++
++ if (FWINV2(ebt_dev_check(e->in, in), EBT_IIN))
++ return 1;
++ if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT))
++ return 1;
++ if ((!in || !in->br_port) ? 0 : FWINV2(ebt_dev_check(
++ e->logical_in, &in->br_port->br->dev), EBT_ILOGICALIN))
++ return 1;
++ if ((!out || !out->br_port) ? 0 : FWINV2(ebt_dev_check(
++ e->logical_out, &out->br_port->br->dev), EBT_ILOGICALOUT))
++ return 1;
++
++ if (e->bitmask & EBT_SOURCEMAC) {
++ verdict = 0;
++ for (i = 0; i < 6; i++)
++ verdict |= (h->h_source[i] ^ e->sourcemac[i]) &
++ e->sourcemsk[i];
++ if (FWINV2(verdict != 0, EBT_ISOURCE) )
++ return 1;
++ }
++ if (e->bitmask & EBT_DESTMAC) {
++ verdict = 0;
++ for (i = 0; i < 6; i++)
++ verdict |= (h->h_dest[i] ^ e->destmac[i]) &
++ e->destmsk[i];
++ if (FWINV2(verdict != 0, EBT_IDEST) )
++ return 1;
++ }
++ return 0;
++}
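
FWINV2(), used throughout ebt_basic_match() above, folds ebtables' '!' (invert) option into a single XOR: the raw comparison outcome is flipped exactly when the corresponding invert bit is set. A tiny hypothetical demo follows; the flag value and the plain invflags variable are made up for the demo (the real macro reads e->invflags).

#include <stdio.h>

#define DEMO_IPROTO 0x01        /* made-up invert bit for this demo */
#define FWINV2(bool, invflg) ((bool) ^ !!(invflags & (invflg)))

int main(void)
{
        unsigned int invflags;
        int proto_differs = 1;  /* pretend the frame's protocol mismatches */

        invflags = 0;           /* plain "-p value": mismatch -> rule skipped */
        printf("no '!': skip=%d\n", FWINV2(proto_differs, DEMO_IPROTO));

        invflags = DEMO_IPROTO; /* "-p ! value": same mismatch now matches */
        printf("with '!': skip=%d\n", FWINV2(proto_differs, DEMO_IPROTO));
        return 0;
}
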
++
++// Do some firewalling
++unsigned int ebt_do_table (unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in, const struct net_device *out,
++ struct ebt_table *table)
++{
++ int i, nentries;
++ struct ebt_entry *point;
++ struct ebt_counter *counter_base, *cb_base;
++ struct ebt_entry_target *t;
++ int verdict, sp = 0;
++ struct ebt_chainstack *cs;
++ struct ebt_entries *chaininfo;
++ char *base;
++ struct ebt_table_info *private = table->private;
++
++ read_lock_bh(&table->lock);
++ cb_base = COUNTER_BASE(private->counters, private->nentries,
++ cpu_number_map(smp_processor_id()));
++ if (private->chainstack)
++ cs = private->chainstack[cpu_number_map(smp_processor_id())];
++ else
++ cs = NULL;
++ chaininfo = private->hook_entry[hook];
++ nentries = private->hook_entry[hook]->nentries;
++ point = (struct ebt_entry *)(private->hook_entry[hook]->data);
++ counter_base = cb_base + private->hook_entry[hook]->counter_offset;
++ // base for chain jumps
++ base = private->entries;
++ i = 0;
++ while (i < nentries) {
++ if (ebt_basic_match(point, (**pskb).mac.ethernet, in, out))
++ goto letscontinue;
++
++ if (EBT_MATCH_ITERATE(point, ebt_do_match, *pskb, in, out) != 0)
++ goto letscontinue;
++
++ // increase counter
++ (*(counter_base + i)).pcnt++;
++ (*(counter_base + i)).bcnt+=(**pskb).len;
++
++ // these should only watch: not modify, nor tell us
++ // what to do with the packet
++ EBT_WATCHER_ITERATE(point, ebt_do_watcher, *pskb, hook, in,
++ out);
++
++ t = (struct ebt_entry_target *)
++ (((char *)point) + point->target_offset);
++ // standard target
++ if (!t->u.target->target)
++ verdict = ((struct ebt_standard_target *)t)->verdict;
++ else
++ verdict = t->u.target->target(pskb, hook,
++ in, out, t->data, t->target_size);
++ if (verdict == EBT_ACCEPT) {
++ read_unlock_bh(&table->lock);
++ return NF_ACCEPT;
++ }
++ if (verdict == EBT_DROP) {
++ read_unlock_bh(&table->lock);
++ return NF_DROP;
++ }
++ if (verdict == EBT_RETURN) {
++letsreturn:
++#ifdef CONFIG_NETFILTER_DEBUG
++ if (sp == 0) {
++ BUGPRINT("RETURN on base chain");
++ // act like this is EBT_CONTINUE
++ goto letscontinue;
++ }
++#endif
++ sp--;
++ // put all the local variables right
++ i = cs[sp].n;
++ chaininfo = cs[sp].chaininfo;
++ nentries = chaininfo->nentries;
++ point = cs[sp].e;
++ counter_base = cb_base +
++ chaininfo->counter_offset;
++ continue;
++ }
++ if (verdict == EBT_CONTINUE)
++ goto letscontinue;
++#ifdef CONFIG_NETFILTER_DEBUG
++ if (verdict < 0) {
++ BUGPRINT("bogus standard verdict\n");
++ read_unlock_bh(&table->lock);
++ return NF_DROP;
++ }
++#endif
++ // jump to a udc
++ cs[sp].n = i + 1;
++ cs[sp].chaininfo = chaininfo;
++ cs[sp].e = (struct ebt_entry *)
++ (((char *)point) + point->next_offset);
++ i = 0;
++ chaininfo = (struct ebt_entries *) (base + verdict);
++#ifdef CONFIG_NETFILTER_DEBUG
++ if (chaininfo->distinguisher) {
++ BUGPRINT("jump to non-chain\n");
++ read_unlock_bh(&table->lock);
++ return NF_DROP;
++ }
++#endif
++ nentries = chaininfo->nentries;
++ point = (struct ebt_entry *)chaininfo->data;
++ counter_base = cb_base + chaininfo->counter_offset;
++ sp++;
++ continue;
++letscontinue:
++ point = (struct ebt_entry *)
++ (((char *)point) + point->next_offset);
++ i++;
++ }
++
++ // I actually like this :)
++ if (chaininfo->policy == EBT_RETURN)
++ goto letsreturn;
++ if (chaininfo->policy == EBT_ACCEPT) {
++ read_unlock_bh(&table->lock);
++ return NF_ACCEPT;
++ }
++ read_unlock_bh(&table->lock);
++ return NF_DROP;
++}
++
++// If it succeeds, returns element and locks mutex
++static inline void *
++find_inlist_lock_noload(struct list_head *head, const char *name, int *error,
++ struct semaphore *mutex)
++{
++ void *ret;
++
++ *error = down_interruptible(mutex);
++ if (*error != 0)
++ return NULL;
++
++ ret = list_named_find(head, name);
++ if (!ret) {
++ *error = -ENOENT;
++ up(mutex);
++ }
++ return ret;
++}
++
++#ifndef CONFIG_KMOD
++#define find_inlist_lock(h,n,p,e,m) find_inlist_lock_noload((h),(n),(e),(m))
++#else
++static void *
++find_inlist_lock(struct list_head *head, const char *name, const char *prefix,
++ int *error, struct semaphore *mutex)
++{
++ void *ret;
++
++ ret = find_inlist_lock_noload(head, name, error, mutex);
++ if (!ret) {
++ char modulename[EBT_FUNCTION_MAXNAMELEN + strlen(prefix) + 1];
++ strcpy(modulename, prefix);
++ strcat(modulename, name);
++ request_module(modulename);
++ ret = find_inlist_lock_noload(head, name, error, mutex);
++ }
++ return ret;
++}
++#endif
++
++static inline struct ebt_table *
++find_table_lock(const char *name, int *error, struct semaphore *mutex)
++{
++ return find_inlist_lock(&ebt_tables, name, "ebtable_", error, mutex);
++}
++
++static inline struct ebt_match *
++find_match_lock(const char *name, int *error, struct semaphore *mutex)
++{
++ return find_inlist_lock(&ebt_matches, name, "ebt_", error, mutex);
++}
++
++static inline struct ebt_watcher *
++find_watcher_lock(const char *name, int *error, struct semaphore *mutex)
++{
++ return find_inlist_lock(&ebt_watchers, name, "ebt_", error, mutex);
++}
++
++static inline struct ebt_target *
++find_target_lock(const char *name, int *error, struct semaphore *mutex)
++{
++ return find_inlist_lock(&ebt_targets, name, "ebt_", error, mutex);
++}
++
++static inline int
++ebt_check_match(struct ebt_entry_match *m, struct ebt_entry *e,
++ const char *name, unsigned int hookmask, unsigned int *cnt)
++{
++ struct ebt_match *match;
++ int ret;
++
++ if (((char *)m) + m->match_size + sizeof(struct ebt_entry_match) >
++ ((char *)e) + e->watchers_offset)
++ return -EINVAL;
++ match = find_match_lock(m->u.name, &ret, &ebt_mutex);
++ if (!match)
++ return ret;
++ m->u.match = match;
++ if (match->me)
++ __MOD_INC_USE_COUNT(match->me);
++ up(&ebt_mutex);
++ if (match->check &&
++ match->check(name, hookmask, e, m->data, m->match_size) != 0) {
++ BUGPRINT("match->check failed\n");
++ if (match->me)
++ __MOD_DEC_USE_COUNT(match->me);
++ return -EINVAL;
++ }
++ (*cnt)++;
++ return 0;
++}
++
++static inline int
++ebt_check_watcher(struct ebt_entry_watcher *w, struct ebt_entry *e,
++ const char *name, unsigned int hookmask, unsigned int *cnt)
++{
++ struct ebt_watcher *watcher;
++ int ret;
++
++ if (((char *)w) + w->watcher_size + sizeof(struct ebt_entry_watcher) >
++ ((char *)e) + e->target_offset)
++ return -EINVAL;
++ watcher = find_watcher_lock(w->u.name, &ret, &ebt_mutex);
++ if (!watcher)
++ return ret;
++ w->u.watcher = watcher;
++ if (watcher->me)
++ __MOD_INC_USE_COUNT(watcher->me);
++ up(&ebt_mutex);
++ if (watcher->check &&
++ watcher->check(name, hookmask, e, w->data, w->watcher_size) != 0) {
++ BUGPRINT("watcher->check failed\n");
++ if (watcher->me)
++ __MOD_DEC_USE_COUNT(watcher->me);
++ return -EINVAL;
++ }
++ (*cnt)++;
++ return 0;
++}
++
++// this one is very careful, as it is the first function
++// to parse the userspace data
++static inline int
++ebt_check_entry_size_and_hooks(struct ebt_entry *e,
++ struct ebt_table_info *newinfo, char *base, char *limit,
++ struct ebt_entries **hook_entries, unsigned int *n, unsigned int *cnt,
++ unsigned int *totalcnt, unsigned int *udc_cnt, unsigned int valid_hooks)
++{
++ int i;
++
++ for (i = 0; i < NF_BR_NUMHOOKS; i++) {
++ if ((valid_hooks & (1 << i)) == 0)
++ continue;
++ if ( (char *)hook_entries[i] - base ==
++ (char *)e - newinfo->entries)
++ break;
++ }
++ // beginning of a new chain
++ // if i == NF_BR_NUMHOOKS it must be a user defined chain
++ if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) {
++ if ((e->bitmask & EBT_ENTRY_OR_ENTRIES) != 0) {
++ // we make userspace set this right,
++ // so there is no misunderstanding
++ BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
++ "in distinguisher\n");
++ return -EINVAL;
++ }
++ // this checks if the previous chain has as many entries
++ // as it said it has
++ if (*n != *cnt) {
++ BUGPRINT("nentries does not equal the nr of entries "
++ "in the chain\n");
++ return -EINVAL;
++ }
++ // before we look at the struct, be sure it is not too big
++ if ((char *)hook_entries[i] + sizeof(struct ebt_entries)
++ > limit) {
++ BUGPRINT("entries_size too small\n");
++ return -EINVAL;
++ }
++ if (((struct ebt_entries *)e)->policy != EBT_DROP &&
++ ((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
++ // only RETURN from udc
++ if (i != NF_BR_NUMHOOKS ||
++ ((struct ebt_entries *)e)->policy != EBT_RETURN) {
++ BUGPRINT("bad policy\n");
++ return -EINVAL;
++ }
++ }
++ if (i == NF_BR_NUMHOOKS) // it's a user defined chain
++ (*udc_cnt)++;
++ else
++ newinfo->hook_entry[i] = (struct ebt_entries *)e;
++ if (((struct ebt_entries *)e)->counter_offset != *totalcnt) {
++ BUGPRINT("counter_offset != totalcnt");
++ return -EINVAL;
++ }
++ *n = ((struct ebt_entries *)e)->nentries;
++ *cnt = 0;
++ return 0;
++ }
++ // a plain old entry, heh
++ if (sizeof(struct ebt_entry) > e->watchers_offset ||
++ e->watchers_offset > e->target_offset ||
++ e->target_offset >= e->next_offset) {
++ BUGPRINT("entry offsets not in right order\n");
++ return -EINVAL;
++ }
++ // this is not checked anywhere else
++ if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target)) {
++ BUGPRINT("target size too small\n");
++ return -EINVAL;
++ }
++
++ (*cnt)++;
++ (*totalcnt)++;
++ return 0;
++}
++
++struct ebt_cl_stack
++{
++ struct ebt_chainstack cs;
++ int from;
++ unsigned int hookmask;
++};
++
++// we need these positions to check that a jump to a different part of the
++// entries is a jump to the beginning of a new chain.
++static inline int
++ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
++ struct ebt_entries **hook_entries, unsigned int *n, unsigned int valid_hooks,
++ struct ebt_cl_stack *udc)
++{
++ int i;
++
++ // we're only interested in chain starts
++ if (e->bitmask & EBT_ENTRY_OR_ENTRIES)
++ return 0;
++ for (i = 0; i < NF_BR_NUMHOOKS; i++) {
++ if ((valid_hooks & (1 << i)) == 0)
++ continue;
++ if (newinfo->hook_entry[i] == (struct ebt_entries *)e)
++ break;
++ }
++ // only care about udc
++ if (i != NF_BR_NUMHOOKS)
++ return 0;
++
++ udc[*n].cs.chaininfo = (struct ebt_entries *)e;
++ // these initialisations are relied upon later in check_chainloops()
++ udc[*n].cs.n = 0;
++ udc[*n].hookmask = 0;
++
++ (*n)++;
++ return 0;
++}
++
++static inline int
++ebt_cleanup_match(struct ebt_entry_match *m, unsigned int *i)
++{
++ if (i && (*i)-- == 0)
++ return 1;
++ if (m->u.match->destroy)
++ m->u.match->destroy(m->data, m->match_size);
++ if (m->u.match->me)
++ __MOD_DEC_USE_COUNT(m->u.match->me);
++
++ return 0;
++}
++
++static inline int
++ebt_cleanup_watcher(struct ebt_entry_watcher *w, unsigned int *i)
++{
++ if (i && (*i)-- == 0)
++ return 1;
++ if (w->u.watcher->destroy)
++ w->u.watcher->destroy(w->data, w->watcher_size);
++ if (w->u.watcher->me)
++ __MOD_DEC_USE_COUNT(w->u.watcher->me);
++
++ return 0;
++}
++
++static inline int
++ebt_cleanup_entry(struct ebt_entry *e, unsigned int *cnt)
++{
++ struct ebt_entry_target *t;
++
++ if ((e->bitmask & EBT_ENTRY_OR_ENTRIES) == 0)
++ return 0;
++ // we're done
++ if (cnt && (*cnt)-- == 0)
++ return 1;
++ EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, NULL);
++ EBT_MATCH_ITERATE(e, ebt_cleanup_match, NULL);
++ t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
++ if (t->u.target->destroy)
++ t->u.target->destroy(t->data, t->target_size);
++ if (t->u.target->me)
++ __MOD_DEC_USE_COUNT(t->u.target->me);
++
++ return 0;
++}
++
++static inline int
++ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo,
++ const char *name, unsigned int *cnt, unsigned int valid_hooks,
++ struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
++{
++ struct ebt_entry_target *t;
++ struct ebt_target *target;
++ unsigned int i, j, hook = 0, hookmask = 0;
++ int ret;
++
++ // Don't mess with the struct ebt_entries
++ if ((e->bitmask & EBT_ENTRY_OR_ENTRIES) == 0)
++ return 0;
++
++ if (e->bitmask & ~EBT_F_MASK) {
++ BUGPRINT("Unknown flag for bitmask\n");
++ return -EINVAL;
++ }
++ if (e->invflags & ~EBT_INV_MASK) {
++ BUGPRINT("Unknown flag for inv bitmask\n");
++ return -EINVAL;
++ }
++ if ( (e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3) ) {
++ BUGPRINT("NOPROTO & 802_3 not allowed\n");
++ return -EINVAL;
++ }
++ // what hook do we belong to?
++ for (i = 0; i < NF_BR_NUMHOOKS; i++) {
++ if ((valid_hooks & (1 << i)) == 0)
++ continue;
++ if ((char *)newinfo->hook_entry[i] < (char *)e)
++ hook = i;
++ else
++ break;
++ }
++ // (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on
++ // a base chain
++ if (i < NF_BR_NUMHOOKS)
++ hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
++ else {
++ for (i = 0; i < udc_cnt; i++)
++ if ((char *)(cl_s[i].cs.chaininfo) > (char *)e)
++ break;
++ if (i == 0)
++ hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
++ else
++ hookmask = cl_s[i - 1].hookmask;
++ }
++ i = 0;
++ ret = EBT_MATCH_ITERATE(e, ebt_check_match, e, name, hookmask, &i);
++ if (ret != 0)
++ goto cleanup_matches;
++ j = 0;
++ ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, e, name, hookmask, &j);
++ if (ret != 0)
++ goto cleanup_watchers;
++ t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
++ target = find_target_lock(t->u.name, &ret, &ebt_mutex);
++ if (!target)
++ goto cleanup_watchers;
++ if (target->me)
++ __MOD_INC_USE_COUNT(target->me);
++ up(&ebt_mutex);
++
++ t->u.target = target;
++ if (t->u.target == &ebt_standard_target) {
++ if (e->target_offset + sizeof(struct ebt_standard_target) >
++ e->next_offset) {
++ BUGPRINT("Standard target size too big\n");
++ ret = -EFAULT;
++ goto cleanup_watchers;
++ }
++ if (((struct ebt_standard_target *)t)->verdict <
++ -NUM_STANDARD_TARGETS) {
++ BUGPRINT("Invalid standard target\n");
++ ret = -EFAULT;
++ goto cleanup_watchers;
++ }
++ } else if ((e->target_offset + t->target_size +
++ sizeof(struct ebt_entry_target) > e->next_offset) ||
++ (t->u.target->check &&
++ t->u.target->check(name, hookmask, e, t->data, t->target_size) != 0)){
++ if (t->u.target->me)
++ __MOD_DEC_USE_COUNT(t->u.target->me);
++ ret = -EFAULT;
++ goto cleanup_watchers;
++ }
++ (*cnt)++;
++ return 0;
++cleanup_watchers:
++ EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, &j);
++cleanup_matches:
++ EBT_MATCH_ITERATE(e, ebt_cleanup_match, &i);
++ return ret;
++}
++
++// checks for loops and sets the hook mask for udc
++// the hook mask for udc tells us from which base chains the udc can be
++// accessed. This mask is a parameter to the check() functions of the extensions
++static int check_chainloops(struct ebt_entries *chain,
++ struct ebt_cl_stack *cl_s, unsigned int udc_cnt,
++ unsigned int hooknr, char *base)
++{
++ int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;
++ struct ebt_entry *e = (struct ebt_entry *)chain->data;
++ struct ebt_entry_target *t;
++
++ while (pos < nentries || chain_nr != -1) {
++ // end of udc, go back one 'recursion' step
++ if (pos == nentries) {
++ // put back the values from when this chain was called
++ e = cl_s[chain_nr].cs.e;
++ if (cl_s[chain_nr].from != -1)
++ nentries =
++ cl_s[cl_s[chain_nr].from].cs.chaininfo->nentries;
++ else
++ nentries = chain->nentries;
++ pos = cl_s[chain_nr].cs.n;
++ // make sure we won't see a loop that isn't one
++ cl_s[chain_nr].cs.n = 0;
++ chain_nr = cl_s[chain_nr].from;
++ if (pos == nentries)
++ continue;
++ }
++ t = (struct ebt_entry_target *)
++ (((char *)e) + e->target_offset);
++ if (strcmp(t->u.name, EBT_STANDARD_TARGET))
++ goto letscontinue;
++ if (e->target_offset + sizeof(struct ebt_standard_target) >
++ e->next_offset) {
++ BUGPRINT("Standard target size too big\n");
++ return -1;
++ }
++ verdict = ((struct ebt_standard_target *)t)->verdict;
++ if (verdict >= 0) { // jump to another chain
++ struct ebt_entries *hlp2 =
++ (struct ebt_entries *)(base + verdict);
++ for (i = 0; i < udc_cnt; i++)
++ if (hlp2 == cl_s[i].cs.chaininfo)
++ break;
++ // bad destination or loop
++ if (i == udc_cnt) {
++ BUGPRINT("bad destination\n");
++ return -1;
++ }
++ if (cl_s[i].cs.n) {
++ BUGPRINT("loop\n");
++ return -1;
++ }
++ // this can't be 0, so the above test is correct
++ cl_s[i].cs.n = pos + 1;
++ pos = 0;
++ cl_s[i].cs.e = ((void *)e + e->next_offset);
++ e = (struct ebt_entry *)(hlp2->data);
++ nentries = hlp2->nentries;
++ cl_s[i].from = chain_nr;
++ chain_nr = i;
++ // this udc is accessible from the base chain for hooknr
++ cl_s[i].hookmask |= (1 << hooknr);
++ continue;
++ }
++letscontinue:
++ e = (void *)e + e->next_offset;
++ pos++;
++ }
++ return 0;
++}
++
++// do the parsing of the table/chains/entries/matches/watchers/targets, heh
++static int translate_table(struct ebt_replace *repl,
++ struct ebt_table_info *newinfo)
++{
++ unsigned int i, j, k, udc_cnt;
++ int ret;
++ struct ebt_cl_stack *cl_s = NULL; // used in the checking for chain loops
++
++ i = 0;
++ while (i < NF_BR_NUMHOOKS && !(repl->valid_hooks & (1 << i)))
++ i++;
++ if (i == NF_BR_NUMHOOKS) {
++ BUGPRINT("No valid hooks specified\n");
++ return -EINVAL;
++ }
++ if (repl->hook_entry[i] != (struct ebt_entries *)repl->entries) {
++ BUGPRINT("Chains don't start at beginning\n");
++ return -EINVAL;
++ }
++ // make sure chains are ordered after each other in the same order
++ // as their corresponding hooks
++ for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
++ if (!(repl->valid_hooks & (1 << j)))
++ continue;
++ if ( repl->hook_entry[j] <= repl->hook_entry[i] ) {
++ BUGPRINT("Hook order must be followed\n");
++ return -EINVAL;
++ }
++ i = j;
++ }
++
++ for (i = 0; i < NF_BR_NUMHOOKS; i++)
++ newinfo->hook_entry[i] = NULL;
++
++ newinfo->entries_size = repl->entries_size;
++ newinfo->nentries = repl->nentries;
++
++ // do some early checkings and initialize some things
++ i = 0; // holds the expected nr. of entries for the chain
++ j = 0; // holds the up to now counted entries for the chain
++ k = 0; // holds the total nr. of entries, should equal
++ // newinfo->nentries afterwards
++ udc_cnt = 0; // will hold the nr. of user defined chains (udc)
++ ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
++ ebt_check_entry_size_and_hooks, newinfo, repl->entries,
++ repl->entries + repl->entries_size, repl->hook_entry, &i, &j, &k,
++ &udc_cnt, repl->valid_hooks);
++
++ if (ret != 0)
++ return ret;
++
++ if (i != j) {
++ BUGPRINT("nentries does not equal the nr of entries in the "
++ "(last) chain\n");
++ return -EINVAL;
++ }
++ if (k != newinfo->nentries) {
++ BUGPRINT("Total nentries is wrong\n");
++ return -EINVAL;
++ }
++
++ // check if all valid hooks have a chain
++ for (i = 0; i < NF_BR_NUMHOOKS; i++) {
++ if (newinfo->hook_entry[i] == NULL &&
++ (repl->valid_hooks & (1 << i))) {
++ BUGPRINT("Valid hook without chain\n");
++ return -EINVAL;
++ }
++ }
++
++ // Get the locations of the udcs, put them in an array
++ // While we're at it, allocate the chainstack
++ if (udc_cnt) {
++ // this will get freed in do_replace()/ebt_register_table()
++ // if an error occurs
++ newinfo->chainstack = (struct ebt_chainstack **)
++ vmalloc(smp_num_cpus * sizeof(struct ebt_chainstack));
++ if (!newinfo->chainstack)
++ return -ENOMEM;
++ for (i = 0; i < smp_num_cpus; i++) {
++ newinfo->chainstack[i] =
++ vmalloc(udc_cnt * sizeof(struct ebt_chainstack));
++ if (!newinfo->chainstack[i]) {
++ while (i)
++ vfree(newinfo->chainstack[--i]);
++ vfree(newinfo->chainstack);
++ newinfo->chainstack = NULL;
++ return -ENOMEM;
++ }
++ }
++
++ cl_s = (struct ebt_cl_stack *)
++ vmalloc(udc_cnt * sizeof(struct ebt_cl_stack));
++ if (!cl_s)
++ return -ENOMEM;
++ i = 0; // the i'th udc
++ EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
++ ebt_get_udc_positions, newinfo, repl->hook_entry, &i,
++ repl->valid_hooks, cl_s);
++ // sanity check
++ if (i != udc_cnt) {
++ BUGPRINT("i != udc_cnt\n");
++ vfree(cl_s);
++ return -EFAULT;
++ }
++ }
++
++ // Check for loops
++ for (i = 0; i < NF_BR_NUMHOOKS; i++)
++ if (repl->valid_hooks & (1 << i))
++ if (check_chainloops(newinfo->hook_entry[i],
++ cl_s, udc_cnt, i, newinfo->entries)) {
++ if (cl_s)
++ vfree(cl_s);
++ return -EINVAL;
++ }
++
++ // we now know the following (along with E=mc²):
++ // - the nr of entries in each chain is right
++ // - the size of the allocated space is right
++ // - all valid hooks have a corresponding chain
++ // - there are no loops
++ // - wrong data can still be on the level of a single entry
++ // - could be there are jumps to places that are not the
++ // beginning of a chain. This can only occur in chains that
++ // are not accessible from any base chains, so we don't care.
++
++ // used to know what we need to clean up if something goes wrong
++ i = 0;
++ ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
++ ebt_check_entry, newinfo, repl->name, &i, repl->valid_hooks,
++ cl_s, udc_cnt);
++ if (ret != 0) {
++ EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
++ ebt_cleanup_entry, &i);
++ }
++ if (cl_s)
++ vfree(cl_s);
++ return ret;
++}
++
++// called under write_lock
++static void get_counters(struct ebt_counter *oldcounters,
++ struct ebt_counter *counters, unsigned int nentries)
++{
++ int i, cpu;
++ struct ebt_counter *counter_base;
++
++ // counters of cpu 0
++ memcpy(counters, oldcounters,
++ sizeof(struct ebt_counter) * nentries);
++ // add other counters to those of cpu 0
++ for (cpu = 1; cpu < smp_num_cpus; cpu++) {
++ counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
++ for (i = 0; i < nentries; i++) {
++ counters[i].pcnt += counter_base[i].pcnt;
++ counters[i].bcnt += counter_base[i].bcnt;
++ }
++ }
++}
++
++// replace the table
++static int do_replace(void *user, unsigned int len)
++{
++ int ret, i, countersize;
++ struct ebt_table_info *newinfo;
++ struct ebt_replace tmp;
++ struct ebt_table *t;
++ struct ebt_counter *counterstmp = NULL;
++ // used to be able to unlock earlier
++ struct ebt_table_info *table;
++
++ if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
++ return -EFAULT;
++
++ if (len != sizeof(tmp) + tmp.entries_size) {
++ BUGPRINT("Wrong len argument\n");
++ return -EINVAL;
++ }
++
++ if (tmp.entries_size == 0) {
++ BUGPRINT("Entries_size never zero\n");
++ return -EINVAL;
++ }
++ countersize = COUNTER_OFFSET(tmp.nentries) * smp_num_cpus;
++ newinfo = (struct ebt_table_info *)
++ vmalloc(sizeof(struct ebt_table_info) + countersize);
++ if (!newinfo)
++ return -ENOMEM;
++
++ if (countersize)
++ memset(newinfo->counters, 0, countersize);
++
++ newinfo->entries = (char *)vmalloc(tmp.entries_size);
++ if (!newinfo->entries) {
++ ret = -ENOMEM;
++ goto free_newinfo;
++ }
++ if (copy_from_user(
++ newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
++ BUGPRINT("Couldn't copy entries from userspace\n");
++ ret = -EFAULT;
++ goto free_entries;
++ }
++
++ // the user wants counters back
++ // the check on the size is done later, when we have the lock
++ if (tmp.num_counters) {
++ counterstmp = (struct ebt_counter *)
++ vmalloc(tmp.num_counters * sizeof(struct ebt_counter));
++ if (!counterstmp) {
++ ret = -ENOMEM;
++ goto free_entries;
++ }
++ }
++ else
++ counterstmp = NULL;
++
++ // this can get initialized by translate_table()
++ newinfo->chainstack = NULL;
++ ret = translate_table(&tmp, newinfo);
++
++ if (ret != 0)
++ goto free_counterstmp;
++
++ t = find_table_lock(tmp.name, &ret, &ebt_mutex);
++ if (!t)
++ goto free_iterate;
++
++ // the table doesn't like it
++ if (t->check && (ret = t->check(newinfo, tmp.valid_hooks)))
++ goto free_unlock;
++
++ if (tmp.num_counters && tmp.num_counters != t->private->nentries) {
++ BUGPRINT("Wrong nr. of counters requested\n");
++ ret = -EINVAL;
++ goto free_unlock;
++ }
++
++ // we have the mutex lock, so no danger in reading this pointer
++ table = t->private;
++ // we need an atomic snapshot of the counters
++ write_lock_bh(&t->lock);
++ if (tmp.num_counters)
++ get_counters(t->private->counters, counterstmp,
++ t->private->nentries);
++
++ t->private = newinfo;
++ write_unlock_bh(&t->lock);
++ up(&ebt_mutex);
++ // So, a user can change the chains while having messed up her counter
++ // allocation. The only reason this is done this way is that the lock
++ // is then held only once, while this doesn't bring the kernel into a
++ // dangerous state.
++ if (tmp.num_counters &&
++ copy_to_user(tmp.counters, counterstmp,
++ tmp.num_counters * sizeof(struct ebt_counter))) {
++ BUGPRINT("Couldn't copy counters to userspace\n");
++ ret = -EFAULT;
++ }
++ else
++ ret = 0;
++
++ // decrease module count and free resources
++ EBT_ENTRY_ITERATE(table->entries, table->entries_size,
++ ebt_cleanup_entry, NULL);
++
++ vfree(table->entries);
++ if (table->chainstack) {
++ for (i = 0; i < smp_num_cpus; i++)
++ vfree(table->chainstack[i]);
++ vfree(table->chainstack);
++ }
++ vfree(table);
++
++ if (counterstmp)
++ vfree(counterstmp);
++ return ret;
++
++free_unlock:
++ up(&ebt_mutex);
++free_iterate:
++ EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
++ ebt_cleanup_entry, NULL);
++free_counterstmp:
++ if (counterstmp)
++ vfree(counterstmp);
++ // can be initialized in translate_table()
++ if (newinfo->chainstack) {
++ for (i = 0; i < smp_num_cpus; i++)
++ vfree(newinfo->chainstack[i]);
++ vfree(newinfo->chainstack);
++ }
++free_entries:
++ if (newinfo->entries)
++ vfree(newinfo->entries);
++free_newinfo:
++ if (newinfo)
++ vfree(newinfo);
++ return ret;
++}
++
++int ebt_register_target(struct ebt_target *target)
++{
++ int ret;
++
++ ret = down_interruptible(&ebt_mutex);
++ if (ret != 0)
++ return ret;
++ if (!list_named_insert(&ebt_targets, target)) {
++ up(&ebt_mutex);
++ return -EEXIST;
++ }
++ up(&ebt_mutex);
++ MOD_INC_USE_COUNT;
++
++ return 0;
++}
++
++void ebt_unregister_target(struct ebt_target *target)
++{
++ down(&ebt_mutex);
++ LIST_DELETE(&ebt_targets, target);
++ up(&ebt_mutex);
++ MOD_DEC_USE_COUNT;
++}
++
++int ebt_register_match(struct ebt_match *match)
++{
++ int ret;
++
++ ret = down_interruptible(&ebt_mutex);
++ if (ret != 0)
++ return ret;
++ if (!list_named_insert(&ebt_matches, match)) {
++ up(&ebt_mutex);
++ return -EEXIST;
++ }
++ up(&ebt_mutex);
++ MOD_INC_USE_COUNT;
++
++ return 0;
++}
++
++void ebt_unregister_match(struct ebt_match *match)
++{
++ down(&ebt_mutex);
++ LIST_DELETE(&ebt_matches, match);
++ up(&ebt_mutex);
++ MOD_DEC_USE_COUNT;
++}
++
++int ebt_register_watcher(struct ebt_watcher *watcher)
++{
++ int ret;
++
++ ret = down_interruptible(&ebt_mutex);
++ if (ret != 0)
++ return ret;
++ if (!list_named_insert(&ebt_watchers, watcher)) {
++ up(&ebt_mutex);
++ return -EEXIST;
++ }
++ up(&ebt_mutex);
++ MOD_INC_USE_COUNT;
++
++ return 0;
++}
++
++void ebt_unregister_watcher(struct ebt_watcher *watcher)
++{
++ down(&ebt_mutex);
++ LIST_DELETE(&ebt_watchers, watcher);
++ up(&ebt_mutex);
++ MOD_DEC_USE_COUNT;
++}
++
++int ebt_register_table(struct ebt_table *table)
++{
++ struct ebt_table_info *newinfo;
++ int ret, i, countersize;
++
++ if (!table || !table->table ||!table->table->entries ||
++ table->table->entries_size == 0 ||
++ table->table->counters || table->private) {
++ BUGPRINT("Bad table data for ebt_register_table!!!\n");
++ return -EINVAL;
++ }
++
++ countersize = COUNTER_OFFSET(table->table->nentries) * smp_num_cpus;
++ newinfo = (struct ebt_table_info *)
++ vmalloc(sizeof(struct ebt_table_info) + countersize);
++ ret = -ENOMEM;
++ if (!newinfo)
++ return -ENOMEM;
++
++ newinfo->entries = (char *)vmalloc(table->table->entries_size);
++ if (!(newinfo->entries))
++ goto free_newinfo;
++
++ memcpy(newinfo->entries, table->table->entries,
++ table->table->entries_size);
++
++ if (countersize)
++ memset(newinfo->counters, 0, countersize);
++
++ // fill in newinfo and parse the entries
++ newinfo->chainstack = NULL;
++ ret = translate_table(table->table, newinfo);
++ if (ret != 0) {
++ BUGPRINT("Translate_table failed\n");
++ goto free_chainstack;
++ }
++
++ if (table->check && table->check(newinfo, table->valid_hooks)) {
++ BUGPRINT("The table doesn't like its own initial data, lol\n");
++ return -EINVAL;
++ }
++
++ table->private = newinfo;
++ table->lock = RW_LOCK_UNLOCKED;
++ ret = down_interruptible(&ebt_mutex);
++ if (ret != 0)
++ goto free_chainstack;
++
++ if (list_named_find(&ebt_tables, table->name)) {
++ ret = -EEXIST;
++ BUGPRINT("Table name already exists\n");
++ goto free_unlock;
++ }
++
++ list_prepend(&ebt_tables, table);
++ up(&ebt_mutex);
++ MOD_INC_USE_COUNT;
++ return 0;
++free_unlock:
++ up(&ebt_mutex);
++free_chainstack:
++ if (newinfo->chainstack) {
++ for (i = 0; i < smp_num_cpus; i++)
++ vfree(newinfo->chainstack[i]);
++ vfree(newinfo->chainstack);
++ }
++ vfree(newinfo->entries);
++free_newinfo:
++ vfree(newinfo);
++ return ret;
++}
++
++void ebt_unregister_table(struct ebt_table *table)
++{
++ int i;
++
++ if (!table) {
++ BUGPRINT("Request to unregister NULL table!!!\n");
++ return;
++ }
++ down(&ebt_mutex);
++ LIST_DELETE(&ebt_tables, table);
++ up(&ebt_mutex);
++ EBT_ENTRY_ITERATE(table->private->entries,
++ table->private->entries_size, ebt_cleanup_entry, NULL);
++ if (table->private->entries)
++ vfree(table->private->entries);
++ if (table->private->chainstack) {
++ for (i = 0; i < smp_num_cpus; i++)
++ vfree(table->private->chainstack[i]);
++ vfree(table->private->chainstack);
++ }
++ vfree(table->private);
++ MOD_DEC_USE_COUNT;
++}
++
++// userspace just supplied us with counters
++static int update_counters(void *user, unsigned int len)
++{
++ int i, ret;
++ struct ebt_counter *tmp;
++ struct ebt_replace hlp;
++ struct ebt_table *t;
++
++ if (copy_from_user(&hlp, user, sizeof(hlp)))
++ return -EFAULT;
++
++ if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
++ return -EINVAL;
++ if (hlp.num_counters == 0)
++ return -EINVAL;
++
++ if ( !(tmp = (struct ebt_counter *)
++ vmalloc(hlp.num_counters * sizeof(struct ebt_counter))) ){
++ MEMPRINT("Update_counters && nomemory\n");
++ return -ENOMEM;
++ }
++
++ t = find_table_lock(hlp.name, &ret, &ebt_mutex);
++ if (!t)
++ goto free_tmp;
++
++ if (hlp.num_counters != t->private->nentries) {
++ BUGPRINT("Wrong nr of counters\n");
++ ret = -EINVAL;
++ goto unlock_mutex;
++ }
++
++ if ( copy_from_user(tmp, hlp.counters,
++ hlp.num_counters * sizeof(struct ebt_counter)) ) {
++ BUGPRINT("Updata_counters && !cfu\n");
++ ret = -EFAULT;
++ goto unlock_mutex;
++ }
++
++ // we want an atomic add of the counters
++ write_lock_bh(&t->lock);
++
++ // we add to the counters of the first cpu
++ for (i = 0; i < hlp.num_counters; i++) {
++ t->private->counters[i].pcnt += tmp[i].pcnt;
++ t->private->counters[i].bcnt += tmp[i].bcnt;
++ }
++
++ write_unlock_bh(&t->lock);
++ ret = 0;
++unlock_mutex:
++ up(&ebt_mutex);
++free_tmp:
++ vfree(tmp);
++ return ret;
++}
++
++static inline int ebt_make_matchname(struct ebt_entry_match *m,
++ char *base, char *ubase)
++{
++ char *hlp = ubase - base + (char *)m;
++ if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN))
++ return -EFAULT;
++ return 0;
++}
++
++static inline int ebt_make_watchername(struct ebt_entry_watcher *w,
++ char *base, char *ubase)
++{
++ char *hlp = ubase - base + (char *)w;
++ if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN))
++ return -EFAULT;
++ return 0;
++}
++
++static inline int ebt_make_names(struct ebt_entry *e, char *base, char *ubase)
++{
++ int ret;
++ char *hlp;
++ struct ebt_entry_target *t;
++
++ if ((e->bitmask & EBT_ENTRY_OR_ENTRIES) == 0)
++ return 0;
++
++ hlp = ubase - base + (char *)e + e->target_offset;
++ t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
++
++ ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase);
++ if (ret != 0)
++ return ret;
++ ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
++ if (ret != 0)
++ return ret;
++ if (copy_to_user(hlp, t->u.target->name, EBT_FUNCTION_MAXNAMELEN))
++ return -EFAULT;
++ return 0;
++}
++
++// called with ebt_mutex down
++static int copy_everything_to_user(struct ebt_table *t, void *user,
++ int *len, int cmd)
++{
++ struct ebt_replace tmp;
++ struct ebt_counter *counterstmp, *oldcounters;
++ unsigned int entries_size, nentries;
++ char *entries;
++
++ if (cmd == EBT_SO_GET_ENTRIES) {
++ entries_size = t->private->entries_size;
++ nentries = t->private->nentries;
++ entries = t->private->entries;
++ oldcounters = t->private->counters;
++ } else {
++ entries_size = t->table->entries_size;
++ nentries = t->table->nentries;
++ entries = t->table->entries;
++ oldcounters = t->table->counters;
++ }
++
++ if (copy_from_user(&tmp, user, sizeof(tmp))) {
++ BUGPRINT("Cfu didn't work\n");
++ return -EFAULT;
++ }
++
++ if (*len != sizeof(struct ebt_replace) + entries_size +
++ (tmp.num_counters? nentries * sizeof(struct ebt_counter): 0)) {
++ BUGPRINT("Wrong size\n");
++ return -EINVAL;
++ }
++
++ if (tmp.nentries != nentries) {
++ BUGPRINT("Nentries wrong\n");
++ return -EINVAL;
++ }
++
++ if (tmp.entries_size != entries_size) {
++ BUGPRINT("Wrong size\n");
++ return -EINVAL;
++ }
++
++ // userspace might not need the counters
++ if (tmp.num_counters) {
++ if (tmp.num_counters != nentries) {
++ BUGPRINT("Num_counters wrong\n");
++ return -EINVAL;
++ }
++ counterstmp = (struct ebt_counter *)
++ vmalloc(nentries * sizeof(struct ebt_counter));
++ if (!counterstmp) {
++ MEMPRINT("Couldn't copy counters, out of memory\n");
++ return -ENOMEM;
++ }
++ write_lock_bh(&t->lock);
++ get_counters(oldcounters, counterstmp, nentries);
++ write_unlock_bh(&t->lock);
++
++ if (copy_to_user(tmp.counters, counterstmp,
++ nentries * sizeof(struct ebt_counter))) {
++ BUGPRINT("Couldn't copy counters to userspace\n");
++ vfree(counterstmp);
++ return -EFAULT;
++ }
++ vfree(counterstmp);
++ }
++
++ if (copy_to_user(tmp.entries, entries, entries_size)) {
++ BUGPRINT("Couldn't copy entries to userspace\n");
++ return -EFAULT;
++ }
++ // set the match/watcher/target names right
++ return EBT_ENTRY_ITERATE(entries, entries_size,
++ ebt_make_names, entries, tmp.entries);
++}
++
++static int do_ebt_set_ctl(struct sock *sk,
++ int cmd, void *user, unsigned int len)
++{
++ int ret;
++
++ switch(cmd) {
++ case EBT_SO_SET_ENTRIES:
++ ret = do_replace(user, len);
++ break;
++ case EBT_SO_SET_COUNTERS:
++ ret = update_counters(user, len);
++ break;
++ default:
++ ret = -EINVAL;
++ }
++ return ret;
++}
++
++static int do_ebt_get_ctl(struct sock *sk, int cmd, void *user, int *len)
++{
++ int ret;
++ struct ebt_replace tmp;
++ struct ebt_table *t;
++
++ if (copy_from_user(&tmp, user, sizeof(tmp)))
++ return -EFAULT;
++
++ t = find_table_lock(tmp.name, &ret, &ebt_mutex);
++ if (!t)
++ return ret;
++
++ switch(cmd) {
++ case EBT_SO_GET_INFO:
++ case EBT_SO_GET_INIT_INFO:
++ if (*len != sizeof(struct ebt_replace)){
++ ret = -EINVAL;
++ up(&ebt_mutex);
++ break;
++ }
++ if (cmd == EBT_SO_GET_INFO) {
++ tmp.nentries = t->private->nentries;
++ tmp.entries_size = t->private->entries_size;
++ tmp.valid_hooks = t->valid_hooks;
++ } else {
++ tmp.nentries = t->table->nentries;
++ tmp.entries_size = t->table->entries_size;
++ tmp.valid_hooks = t->table->valid_hooks;
++ }
++ up(&ebt_mutex);
++ if (copy_to_user(user, &tmp, *len) != 0){
++ BUGPRINT("c2u Didn't work\n");
++ ret = -EFAULT;
++ break;
++ }
++ ret = 0;
++ break;
++
++ case EBT_SO_GET_ENTRIES:
++ case EBT_SO_GET_INIT_ENTRIES:
++ ret = copy_everything_to_user(t, user, len, cmd);
++ up(&ebt_mutex);
++ break;
++
++ default:
++ up(&ebt_mutex);
++ ret = -EINVAL;
++ }
++
++ return ret;
++}
++
++static struct nf_sockopt_ops ebt_sockopts =
++{ { NULL, NULL }, PF_INET, EBT_BASE_CTL, EBT_SO_SET_MAX + 1, do_ebt_set_ctl,
++ EBT_BASE_CTL, EBT_SO_GET_MAX + 1, do_ebt_get_ctl, 0, NULL
++};
++
++static int __init init(void)
++{
++ int ret;
++
++ down(&ebt_mutex);
++ list_named_insert(&ebt_targets, &ebt_standard_target);
++ up(&ebt_mutex);
++ if ((ret = nf_register_sockopt(&ebt_sockopts)) < 0)
++ return ret;
++
++ printk(KERN_NOTICE "Ebtables v2.0 registered\n");
++ return 0;
++}
++
++static void __exit fini(void)
++{
++ nf_unregister_sockopt(&ebt_sockopts);
++ printk(KERN_NOTICE "Ebtables v2.0 unregistered\n");
++}
++
++EXPORT_SYMBOL(ebt_register_table);
++EXPORT_SYMBOL(ebt_unregister_table);
++EXPORT_SYMBOL(ebt_register_match);
++EXPORT_SYMBOL(ebt_unregister_match);
++EXPORT_SYMBOL(ebt_register_watcher);
++EXPORT_SYMBOL(ebt_unregister_watcher);
++EXPORT_SYMBOL(ebt_register_target);
++EXPORT_SYMBOL(ebt_unregister_target);
++EXPORT_SYMBOL(ebt_do_table);
++module_init(init);
++module_exit(fini);
++MODULE_LICENSE("GPL");
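
The get/set sockopt handlers registered above are what the ebtables userspace tool talks to. Below is a hedged sketch of querying a table's basic info from userspace, assuming the kernel's netfilter_bridge/ebtables.h header is usable from userspace and that an AF_INET raw socket queried at level IPPROTO_IP reaches the PF_INET nf_sockopt handlers (this mirrors how such interfaces are normally reached; treat the socket parameters as assumptions). It needs root privileges.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/netfilter_bridge/ebtables.h>

int main(void)
{
        struct ebt_replace repl;
        socklen_t len = sizeof(repl);
        int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);

        if (fd < 0) {
                perror("socket");
                return 1;
        }
        memset(&repl, 0, sizeof(repl));
        strncpy(repl.name, "filter", sizeof(repl.name) - 1);
        /* EBT_SO_GET_INFO fills in nentries/entries_size/valid_hooks */
        if (getsockopt(fd, IPPROTO_IP, EBT_SO_GET_INFO, &repl, &len) < 0) {
                perror("EBT_SO_GET_INFO");
                return 1;
        }
        printf("table %s: %u entries, %u bytes, valid hooks 0x%x\n",
               repl.name, repl.nentries, repl.entries_size, repl.valid_hooks);
        return 0;
}
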
+diff -Nur linux-mips-cvs/net/core/dev.c linux-ebtables/net/core/dev.c
+--- linux-mips-cvs/net/core/dev.c 2004-04-16 05:14:21.000000000 +0200
++++ linux-ebtables/net/core/dev.c 2005-02-07 05:52:50.000000000 +0100
+@@ -1426,7 +1426,7 @@
+
+
+ #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+-void (*br_handle_frame_hook)(struct sk_buff *skb) = NULL;
++int (*br_handle_frame_hook)(struct sk_buff *skb) = NULL;
+ #endif
+
+ static __inline__ int handle_bridge(struct sk_buff *skb,
+@@ -1443,7 +1443,6 @@
+ }
+ }
+
+- br_handle_frame_hook(skb);
+ return ret;
+ }
+
+@@ -1503,7 +1502,12 @@
+ #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+ if (skb->dev->br_port != NULL && br_handle_frame_hook != NULL &&
+ skb->pkt_type != PACKET_LOOPBACK) {
+- return handle_bridge(skb, pt_prev);
++ int ret;
++
++ ret = handle_bridge(skb, pt_prev);
++ if (br_handle_frame_hook(skb) == 0)
++ return ret;
++ pt_prev = NULL;
+ }
+ #endif
+
+diff -Nur linux-mips-cvs/net/core/netfilter.c linux-ebtables/net/core/netfilter.c
+--- linux-mips-cvs/net/core/netfilter.c 2005-01-20 03:19:25.000000000 +0100
++++ linux-ebtables/net/core/netfilter.c 2005-02-07 05:52:50.000000000 +0100
+@@ -342,10 +342,15 @@
+ const struct net_device *indev,
+ const struct net_device *outdev,
+ struct list_head **i,
+- int (*okfn)(struct sk_buff *))
++ int (*okfn)(struct sk_buff *),
++ int hook_thresh)
+ {
+ for (*i = (*i)->next; *i != head; *i = (*i)->next) {
+ struct nf_hook_ops *elem = (struct nf_hook_ops *)*i;
++
++ if (hook_thresh > elem->priority)
++ continue;
++
+ switch (elem->hook(hook, skb, indev, outdev, okfn)) {
+ case NF_QUEUE:
+ return NF_QUEUE;
+@@ -413,6 +418,10 @@
+ {
+ int status;
+ struct nf_info *info;
++#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
++ struct net_device *physindev = NULL;
++ struct net_device *physoutdev = NULL;
++#endif
+
+ if (!queue_handler[pf].outfn) {
+ kfree_skb(skb);
+@@ -435,11 +444,24 @@
+ if (indev) dev_hold(indev);
+ if (outdev) dev_hold(outdev);
+
++#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
++ if (skb->nf_bridge) {
++ physindev = skb->nf_bridge->physindev;
++ if (physindev) dev_hold(physindev);
++ physoutdev = skb->nf_bridge->physoutdev;
++ if (physoutdev) dev_hold(physoutdev);
++ }
++#endif
++
+ status = queue_handler[pf].outfn(skb, info, queue_handler[pf].data);
+ if (status < 0) {
+ /* James M doesn't say fuck enough. */
+ if (indev) dev_put(indev);
+ if (outdev) dev_put(outdev);
++#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
++ if (physindev) dev_put(physindev);
++ if (physoutdev) dev_put(physoutdev);
++#endif
+ kfree(info);
+ kfree_skb(skb);
+ return;
+@@ -449,7 +471,8 @@
+ int nf_hook_slow(int pf, unsigned int hook, struct sk_buff *skb,
+ struct net_device *indev,
+ struct net_device *outdev,
+- int (*okfn)(struct sk_buff *))
++ int (*okfn)(struct sk_buff *),
++ int hook_thresh)
+ {
+ struct list_head *elem;
+ unsigned int verdict;
+@@ -481,7 +504,7 @@
+
+ elem = &nf_hooks[pf][hook];
+ verdict = nf_iterate(&nf_hooks[pf][hook], &skb, hook, indev,
+- outdev, &elem, okfn);
++ outdev, &elem, okfn, hook_thresh);
+ if (verdict == NF_QUEUE) {
+ NFDEBUG("nf_hook: Verdict = QUEUE.\n");
+ nf_queue(skb, elem, pf, hook, indev, outdev, okfn);
+@@ -510,6 +533,14 @@
+
+ /* We don't have BR_NETPROTO_LOCK here */
+ br_read_lock_bh(BR_NETPROTO_LOCK);
++#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
++ if (skb->nf_bridge) {
++ if (skb->nf_bridge->physindev)
++ dev_put(skb->nf_bridge->physindev);
++ if (skb->nf_bridge->physoutdev)
++ dev_put(skb->nf_bridge->physoutdev);
++ }
++#endif
+ for (i = nf_hooks[info->pf][info->hook].next; i != elem; i = i->next) {
+ if (i == &nf_hooks[info->pf][info->hook]) {
+ /* The module which sent it to userspace is gone. */
+@@ -530,7 +561,7 @@
+ verdict = nf_iterate(&nf_hooks[info->pf][info->hook],
+ &skb, info->hook,
+ info->indev, info->outdev, &elem,
+- info->okfn);
++ info->okfn, INT_MIN);
+ }
+
+ switch (verdict) {
+diff -Nur linux-mips-cvs/net/core/skbuff.c linux-ebtables/net/core/skbuff.c
+--- linux-mips-cvs/net/core/skbuff.c 2003-08-13 19:19:30.000000000 +0200
++++ linux-ebtables/net/core/skbuff.c 2005-02-07 05:52:50.000000000 +0100
+@@ -246,6 +246,9 @@
+ #ifdef CONFIG_NETFILTER_DEBUG
+ skb->nf_debug = 0;
+ #endif
++#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
++ skb->nf_bridge = NULL;
++#endif
+ #endif
+ #ifdef CONFIG_NET_SCHED
+ skb->tc_index = 0;
+@@ -326,6 +329,9 @@
+ }
+ #ifdef CONFIG_NETFILTER
+ nf_conntrack_put(skb->nfct);
++#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
++ nf_bridge_put(skb->nf_bridge);
++#endif
+ #endif
+ skb_headerinit(skb, NULL, 0); /* clean state */
+ kfree_skbmem(skb);
+@@ -393,6 +399,9 @@
+ #ifdef CONFIG_NETFILTER_DEBUG
+ C(nf_debug);
+ #endif
++#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
++ C(nf_bridge);
++#endif
+ #endif /*CONFIG_NETFILTER*/
+ #if defined(CONFIG_HIPPI)
+ C(private);
+@@ -405,6 +414,9 @@
+ skb->cloned = 1;
+ #ifdef CONFIG_NETFILTER
+ nf_conntrack_get(skb->nfct);
++#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
++ nf_bridge_get(skb->nf_bridge);
++#endif
+ #endif
+ return n;
+ }
+@@ -440,6 +452,10 @@
+ #ifdef CONFIG_NETFILTER_DEBUG
+ new->nf_debug=old->nf_debug;
+ #endif
++#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
++ new->nf_bridge=old->nf_bridge;
++ nf_bridge_get(new->nf_bridge);
++#endif
+ #endif
+ #ifdef CONFIG_NET_SCHED
+ new->tc_index = old->tc_index;
+@@ -726,9 +742,9 @@
+ /* Set the tail pointer and length */
+ skb_put(n,skb->len);
+
+- /* Copy the data only. */
+- if (skb_copy_bits(skb, 0, n->data, skb->len))
+- BUG();
++ /* Copy the linear data and header. */
++ if (skb_copy_bits(skb, -newheadroom, n->head, newheadroom + skb->len))
++ BUG();
+
+ copy_skb_header(n, skb);
+ return n;
+diff -Nur linux-mips-cvs/net/ipv4/ip_output.c linux-ebtables/net/ipv4/ip_output.c
+--- linux-mips-cvs/net/ipv4/ip_output.c 2005-01-20 03:19:25.000000000 +0100
++++ linux-ebtables/net/ipv4/ip_output.c 2005-02-07 05:52:50.000000000 +0100
+@@ -890,6 +890,10 @@
+ /* Connection association is same as pre-frag packet */
+ skb2->nfct = skb->nfct;
+ nf_conntrack_get(skb2->nfct);
++#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
++ skb2->nf_bridge = skb->nf_bridge;
++ nf_bridge_get(skb2->nf_bridge);
++#endif
+ #ifdef CONFIG_NETFILTER_DEBUG
+ skb2->nf_debug = skb->nf_debug;
+ #endif
+diff -Nur linux-mips-cvs/net/ipv4/ip_output.c.orig linux-ebtables/net/ipv4/ip_output.c.orig
+--- linux-mips-cvs/net/ipv4/ip_output.c.orig 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/net/ipv4/ip_output.c.orig 2005-01-20 03:19:25.000000000 +0100
+@@ -0,0 +1,1036 @@
++/*
++ * INET An implementation of the TCP/IP protocol suite for the LINUX
++ * operating system. INET is implemented using the BSD Socket
++ * interface as the means of communication with the user level.
++ *
++ * The Internet Protocol (IP) output module.
++ *
++ * Version: $Id$
++ *
++ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
++ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
++ * Donald Becker, <becker@super.org>
++ * Alan Cox, <Alan.Cox@linux.org>
++ * Richard Underwood
++ * Stefan Becker, <stefanb@yello.ping.de>
++ * Jorge Cwik, <jorge@laser.satlink.net>
++ * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
++ *
++ * See ip_input.c for original log
++ *
++ * Fixes:
++ * Alan Cox : Missing nonblock feature in ip_build_xmit.
++ * Mike Kilburn : htons() missing in ip_build_xmit.
++ * Bradford Johnson: Fix faulty handling of some frames when
++ * no route is found.
++ * Alexander Demenshin: Missing sk/skb free in ip_queue_xmit
++ * (in case if packet not accepted by
++ * output firewall rules)
++ * Mike McLagan : Routing by source
++ * Alexey Kuznetsov: use new route cache
++ * Andi Kleen: Fix broken PMTU recovery and remove
++ * some redundant tests.
++ * Vitaly E. Lavrov : Transparent proxy revived after year coma.
++ * Andi Kleen : Replace ip_reply with ip_send_reply.
++ * Andi Kleen : Split fast and slow ip_build_xmit path
++ * for decreased register pressure on x86
++ * and more readibility.
++ * Marc Boucher : When call_out_firewall returns FW_QUEUE,
++ * silently drop skb instead of failing with -EPERM.
++ * Detlev Wengorz : Copy protocol for fragments.
++ */
++
++#include <asm/uaccess.h>
++#include <asm/system.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/config.h>
++
++#include <linux/socket.h>
++#include <linux/sockios.h>
++#include <linux/in.h>
++#include <linux/inet.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/proc_fs.h>
++#include <linux/stat.h>
++#include <linux/init.h>
++
++#include <net/snmp.h>
++#include <net/ip.h>
++#include <net/protocol.h>
++#include <net/route.h>
++#include <net/tcp.h>
++#include <net/udp.h>
++#include <linux/skbuff.h>
++#include <net/sock.h>
++#include <net/arp.h>
++#include <net/icmp.h>
++#include <net/raw.h>
++#include <net/checksum.h>
++#include <net/inetpeer.h>
++#include <linux/igmp.h>
++#include <linux/netfilter_ipv4.h>
++#include <linux/mroute.h>
++#include <linux/netlink.h>
++
++/*
++ * Shall we try to damage output packets if routing dev changes?
++ */
++
++int sysctl_ip_dynaddr = 0;
++int sysctl_ip_default_ttl = IPDEFTTL;
++
++/* Generate a checksum for an outgoing IP datagram. */
++__inline__ void ip_send_check(struct iphdr *iph)
++{
++ iph->check = 0;
++ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
++}
++
++/* dev_loopback_xmit for use with netfilter. */
++static int ip_dev_loopback_xmit(struct sk_buff *newskb)
++{
++ newskb->mac.raw = newskb->data;
++ __skb_pull(newskb, newskb->nh.raw - newskb->data);
++ newskb->pkt_type = PACKET_LOOPBACK;
++ newskb->ip_summed = CHECKSUM_UNNECESSARY;
++ BUG_TRAP(newskb->dst);
++
++#ifdef CONFIG_NETFILTER_DEBUG
++ nf_debug_ip_loopback_xmit(newskb);
++#endif
++ netif_rx(newskb);
++ return 0;
++}
++
++/* Don't just hand NF_HOOK skb->dst->output, in case netfilter hook
++ changes route */
++static inline int
++output_maybe_reroute(struct sk_buff *skb)
++{
++ return skb->dst->output(skb);
++}
++
++/*
++ * Add an ip header to a skbuff and send it out.
++ */
++int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
++ u32 saddr, u32 daddr, struct ip_options *opt)
++{
++ struct rtable *rt = (struct rtable *)skb->dst;
++ struct iphdr *iph;
++
++ /* Build the IP header. */
++ if (opt)
++ iph=(struct iphdr *)skb_push(skb,sizeof(struct iphdr) + opt->optlen);
++ else
++ iph=(struct iphdr *)skb_push(skb,sizeof(struct iphdr));
++
++ iph->version = 4;
++ iph->ihl = 5;
++ iph->tos = sk->protinfo.af_inet.tos;
++ if (ip_dont_fragment(sk, &rt->u.dst))
++ iph->frag_off = htons(IP_DF);
++ else
++ iph->frag_off = 0;
++ iph->ttl = sk->protinfo.af_inet.ttl;
++ iph->daddr = rt->rt_dst;
++ iph->saddr = rt->rt_src;
++ iph->protocol = sk->protocol;
++ iph->tot_len = htons(skb->len);
++ ip_select_ident(iph, &rt->u.dst, sk);
++ skb->nh.iph = iph;
++
++ if (opt && opt->optlen) {
++ iph->ihl += opt->optlen>>2;
++ ip_options_build(skb, opt, daddr, rt, 0);
++ }
++ ip_send_check(iph);
++
++ /* Send it out. */
++ return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
++ output_maybe_reroute);
++}
++
++static inline int ip_finish_output2(struct sk_buff *skb)
++{
++ struct dst_entry *dst = skb->dst;
++ struct hh_cache *hh = dst->hh;
++
++#ifdef CONFIG_NETFILTER_DEBUG
++ nf_debug_ip_finish_output2(skb);
++#endif /*CONFIG_NETFILTER_DEBUG*/
++
++ if (hh) {
++ int hh_alen;
++
++ read_lock_bh(&hh->hh_lock);
++ hh_alen = HH_DATA_ALIGN(hh->hh_len);
++ memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
++ read_unlock_bh(&hh->hh_lock);
++ skb_push(skb, hh->hh_len);
++ return hh->hh_output(skb);
++ } else if (dst->neighbour)
++ return dst->neighbour->output(skb);
++
++ if (net_ratelimit())
++ printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
++ kfree_skb(skb);
++ return -EINVAL;
++}
++
++static __inline__ int __ip_finish_output(struct sk_buff *skb)
++{
++ struct net_device *dev = skb->dst->dev;
++
++ skb->dev = dev;
++ skb->protocol = htons(ETH_P_IP);
++
++ return NF_HOOK(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
++ ip_finish_output2);
++}
++
++int ip_finish_output(struct sk_buff *skb)
++{
++ return __ip_finish_output(skb);
++}
++
++int ip_mc_output(struct sk_buff *skb)
++{
++ struct sock *sk = skb->sk;
++ struct rtable *rt = (struct rtable*)skb->dst;
++ struct net_device *dev = rt->u.dst.dev;
++
++ /*
++ * If the indicated interface is up and running, send the packet.
++ */
++ IP_INC_STATS(IpOutRequests);
++#ifdef CONFIG_IP_ROUTE_NAT
++ if (rt->rt_flags & RTCF_NAT)
++ ip_do_nat(skb);
++#endif
++
++ skb->dev = dev;
++ skb->protocol = htons(ETH_P_IP);
++
++ /*
++ * Multicasts are looped back for other local users
++ */
++
++ if (rt->rt_flags&RTCF_MULTICAST) {
++ if ((!sk || sk->protinfo.af_inet.mc_loop)
++#ifdef CONFIG_IP_MROUTE
++		/* Small optimization: do not loop back non-local frames
++		   that returned after forwarding; they will be dropped
++		   by ip_mr_input in any case.
++		   Note that local frames are looped back to be delivered
++ to local recipients.
++
++ This check is duplicated in ip_mr_input at the moment.
++ */
++ && ((rt->rt_flags&RTCF_LOCAL) || !(IPCB(skb)->flags&IPSKB_FORWARDED))
++#endif
++ ) {
++ struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
++ if (newskb)
++ NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
++ newskb->dev,
++ ip_dev_loopback_xmit);
++ }
++
++ /* Multicasts with ttl 0 must not go beyond the host */
++
++ if (skb->nh.iph->ttl == 0) {
++ kfree_skb(skb);
++ return 0;
++ }
++ }
++
++ if (rt->rt_flags&RTCF_BROADCAST) {
++ struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
++ if (newskb)
++ NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
++ newskb->dev, ip_dev_loopback_xmit);
++ }
++
++ return __ip_finish_output(skb);
++}
++
++int ip_output(struct sk_buff *skb)
++{
++#ifdef CONFIG_IP_ROUTE_NAT
++ struct rtable *rt = (struct rtable*)skb->dst;
++#endif
++
++ IP_INC_STATS(IpOutRequests);
++
++#ifdef CONFIG_IP_ROUTE_NAT
++ if (rt->rt_flags&RTCF_NAT)
++ ip_do_nat(skb);
++#endif
++
++ return __ip_finish_output(skb);
++}
++
++/* Queues a packet to be sent, and starts the transmitter if necessary.
++ * This routine also needs to put in the total length and compute the
++ * checksum. We used to do this in two stages, ip_build_header() then
++ * this, but that scheme created a mess when routes disappeared etc.
++ * So we do it all here, and the TCP send engine has been changed to
++ * match. (No more unroutable FIN disasters, etc. wheee...) This will
++ * most likely make other reliable transport layers above IP easier
++ * to implement under Linux.
++ */
++static inline int ip_queue_xmit2(struct sk_buff *skb)
++{
++ struct sock *sk = skb->sk;
++ struct rtable *rt = (struct rtable *)skb->dst;
++ struct net_device *dev;
++ struct iphdr *iph = skb->nh.iph;
++
++ dev = rt->u.dst.dev;
++
++ /* This can happen when the transport layer has segments queued
++ * with a cached route, and by the time we get here things are
++ * re-routed to a device with a different MTU than the original
++ * device. Sick, but we must cover it.
++ */
++ if (skb_headroom(skb) < dev->hard_header_len && dev->hard_header) {
++ struct sk_buff *skb2;
++
++ skb2 = skb_realloc_headroom(skb, (dev->hard_header_len + 15) & ~15);
++ kfree_skb(skb);
++ if (skb2 == NULL)
++ return -ENOMEM;
++ if (sk)
++ skb_set_owner_w(skb2, sk);
++ skb = skb2;
++ iph = skb->nh.iph;
++ }
++
++ if (skb->len > rt->u.dst.pmtu)
++ goto fragment;
++
++ ip_select_ident(iph, &rt->u.dst, sk);
++
++ /* Add an IP checksum. */
++ ip_send_check(iph);
++
++ skb->priority = sk->priority;
++ return skb->dst->output(skb);
++
++fragment:
++ if (ip_dont_fragment(sk, &rt->u.dst)) {
++ /* Reject packet ONLY if TCP might fragment
++		 * it itself, if we're careful enough.
++ */
++ NETDEBUG(printk(KERN_DEBUG "sending pkt_too_big (len[%u] pmtu[%u]) to self\n",
++ skb->len, rt->u.dst.pmtu));
++
++ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
++ htonl(rt->u.dst.pmtu));
++ kfree_skb(skb);
++ return -EMSGSIZE;
++ }
++ ip_select_ident(iph, &rt->u.dst, sk);
++ if (skb->ip_summed == CHECKSUM_HW &&
++ (skb = skb_checksum_help(skb)) == NULL)
++ return -ENOMEM;
++ return ip_fragment(skb, skb->dst->output);
++}
++
++int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
++{
++ struct sock *sk = skb->sk;
++ struct ip_options *opt = sk->protinfo.af_inet.opt;
++ struct rtable *rt;
++ struct iphdr *iph;
++
++ /* Skip all of this if the packet is already routed,
++ * f.e. by something like SCTP.
++ */
++ rt = (struct rtable *) skb->dst;
++ if (rt != NULL)
++ goto packet_routed;
++
++ /* Make sure we can route this packet. */
++ rt = (struct rtable *)__sk_dst_check(sk, 0);
++ if (rt == NULL) {
++ u32 daddr;
++
++ /* Use correct destination address if we have options. */
++ daddr = sk->daddr;
++ if(opt && opt->srr)
++ daddr = opt->faddr;
++
++ /* If this fails, retransmit mechanism of transport layer will
++ * keep trying until route appears or the connection times itself
++ * out.
++ */
++ if (ip_route_output(&rt, daddr, sk->saddr,
++ RT_CONN_FLAGS(sk),
++ sk->bound_dev_if))
++ goto no_route;
++ __sk_dst_set(sk, &rt->u.dst);
++ sk->route_caps = rt->u.dst.dev->features;
++ }
++ skb->dst = dst_clone(&rt->u.dst);
++
++packet_routed:
++ if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
++ goto no_route;
++
++ /* OK, we know where to send it, allocate and build IP header. */
++ iph = (struct iphdr *) skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
++ *((__u16 *)iph) = htons((4 << 12) | (5 << 8) | (sk->protinfo.af_inet.tos & 0xff));
++ iph->tot_len = htons(skb->len);
++ if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok)
++ iph->frag_off = htons(IP_DF);
++ else
++ iph->frag_off = 0;
++ iph->ttl = sk->protinfo.af_inet.ttl;
++ iph->protocol = sk->protocol;
++ iph->saddr = rt->rt_src;
++ iph->daddr = rt->rt_dst;
++ skb->nh.iph = iph;
++ /* Transport layer set skb->h.foo itself. */
++
++ if(opt && opt->optlen) {
++ iph->ihl += opt->optlen >> 2;
++ ip_options_build(skb, opt, sk->daddr, rt, 0);
++ }
++
++ return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
++ ip_queue_xmit2);
++
++no_route:
++ IP_INC_STATS(IpOutNoRoutes);
++ kfree_skb(skb);
++ return -EHOSTUNREACH;
++}
++
++/*
++ * Build and send a packet, with as little as one copy
++ *
++ * Doesn't care much about ip options... option length can be
++ * different for fragment at 0 and other fragments.
++ *
++ * Note that the fragment at the highest offset is sent first,
++ * so the getfrag routine can fill in the TCP/UDP checksum header
++ * field in the last fragment it sends... actually it also helps
++ * the reassemblers, they can put most packets in at the head of
++ * the fragment queue, and they know the total size in advance. This
++ * last feature will measurably improve the Linux fragment handler one
++ * day.
++ *
++ * The callback has five args, an arbitrary pointer (copy of frag),
++ * the source IP address (may depend on the routing table), the
++ * destination address (char *), the offset to copy from, and the
++ * length to be copied.
++ */
++
++static int ip_build_xmit_slow(struct sock *sk,
++ int getfrag (const void *,
++ char *,
++ unsigned int,
++ unsigned int,
++ struct sk_buff *),
++ const void *frag,
++ unsigned length,
++ struct ipcm_cookie *ipc,
++ struct rtable *rt,
++ int flags)
++{
++ unsigned int fraglen, maxfraglen, fragheaderlen;
++ int err;
++ int offset, mf;
++ int mtu;
++ u16 id;
++
++ int hh_len = (rt->u.dst.dev->hard_header_len + 15)&~15;
++ int nfrags=0;
++ struct ip_options *opt = ipc->opt;
++ int df = 0;
++
++ mtu = rt->u.dst.pmtu;
++ if (ip_dont_fragment(sk, &rt->u.dst))
++ df = htons(IP_DF);
++
++ length -= sizeof(struct iphdr);
++
++ if (opt) {
++ fragheaderlen = sizeof(struct iphdr) + opt->optlen;
++ maxfraglen = ((mtu-sizeof(struct iphdr)-opt->optlen) & ~7) + fragheaderlen;
++ } else {
++ fragheaderlen = sizeof(struct iphdr);
++
++ /*
++ * Fragheaderlen is the size of 'overhead' on each buffer. Now work
++ * out the size of the frames to send.
++ */
++
++ maxfraglen = ((mtu-sizeof(struct iphdr)) & ~7) + fragheaderlen;
++ }
++
++ if (length + fragheaderlen > 0xFFFF) {
++ ip_local_error(sk, EMSGSIZE, rt->rt_dst, sk->dport, mtu);
++ return -EMSGSIZE;
++ }
++
++ /*
++ * Start at the end of the frame by handling the remainder.
++ */
++
++ offset = length - (length % (maxfraglen - fragheaderlen));
++
++ /*
++ * Amount of memory to allocate for final fragment.
++ */
++
++ fraglen = length - offset + fragheaderlen;
++
++ if (length-offset==0) {
++ fraglen = maxfraglen;
++ offset -= maxfraglen-fragheaderlen;
++ }
++
++ /*
++ * The last fragment will not have MF (more fragments) set.
++ */
++
++ mf = 0;
++
++ /*
++ * Don't fragment packets for path mtu discovery.
++ */
++
++ if (offset > 0 && sk->protinfo.af_inet.pmtudisc==IP_PMTUDISC_DO) {
++ ip_local_error(sk, EMSGSIZE, rt->rt_dst, sk->dport, mtu);
++ return -EMSGSIZE;
++ }
++ if (flags&MSG_PROBE)
++ goto out;
++
++ /*
++ * Begin outputting the bytes.
++ */
++
++ id = sk->protinfo.af_inet.id++;
++
++ do {
++ char *data;
++ struct sk_buff * skb;
++
++ /*
++ * Get the memory we require with some space left for alignment.
++ */
++ if (!(flags & MSG_DONTWAIT) || nfrags == 0) {
++ skb = sock_alloc_send_skb(sk, fraglen + hh_len + 15,
++ (flags & MSG_DONTWAIT), &err);
++ } else {
++ /* On a non-blocking write, we check for send buffer
++ * usage on the first fragment only.
++ */
++ skb = sock_wmalloc(sk, fraglen + hh_len + 15, 1,
++ sk->allocation);
++ if (!skb)
++ err = -ENOBUFS;
++ }
++ if (skb == NULL)
++ goto error;
++
++ /*
++ * Fill in the control structures
++ */
++
++ skb->priority = sk->priority;
++ skb->dst = dst_clone(&rt->u.dst);
++ skb_reserve(skb, hh_len);
++
++ /*
++ * Find where to start putting bytes.
++ */
++
++ data = skb_put(skb, fraglen);
++ skb->nh.iph = (struct iphdr *)data;
++
++ /*
++ * Only write IP header onto non-raw packets
++ */
++
++ {
++ struct iphdr *iph = (struct iphdr *)data;
++
++ iph->version = 4;
++ iph->ihl = 5;
++ if (opt) {
++ iph->ihl += opt->optlen>>2;
++ ip_options_build(skb, opt,
++ ipc->addr, rt, offset);
++ }
++ iph->tos = sk->protinfo.af_inet.tos;
++ iph->tot_len = htons(fraglen - fragheaderlen + iph->ihl*4);
++ iph->frag_off = htons(offset>>3)|mf|df;
++ iph->id = id;
++ if (!mf) {
++ if (offset || !df) {
++ /* Select an unpredictable ident only
++ * for packets without DF or having
++ * been fragmented.
++ */
++ __ip_select_ident(iph, &rt->u.dst);
++ id = iph->id;
++ }
++
++ /*
++ * Any further fragments will have MF set.
++ */
++ mf = htons(IP_MF);
++ }
++ if (rt->rt_type == RTN_MULTICAST)
++ iph->ttl = sk->protinfo.af_inet.mc_ttl;
++ else
++ iph->ttl = sk->protinfo.af_inet.ttl;
++ iph->protocol = sk->protocol;
++ iph->check = 0;
++ iph->saddr = rt->rt_src;
++ iph->daddr = rt->rt_dst;
++ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
++ data += iph->ihl*4;
++ }
++
++ /*
++ * User data callback
++ */
++
++ if (getfrag(frag, data, offset, fraglen-fragheaderlen, skb)) {
++ err = -EFAULT;
++ kfree_skb(skb);
++ goto error;
++ }
++
++ offset -= (maxfraglen-fragheaderlen);
++ fraglen = maxfraglen;
++
++ nfrags++;
++
++ err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL,
++ skb->dst->dev, output_maybe_reroute);
++ if (err) {
++ if (err > 0)
++ err = sk->protinfo.af_inet.recverr ? net_xmit_errno(err) : 0;
++ if (err)
++ goto error;
++ }
++ } while (offset >= 0);
++
++ if (nfrags>1)
++ ip_statistics[smp_processor_id()*2 + !in_softirq()].IpFragCreates += nfrags;
++out:
++ return 0;
++
++error:
++ IP_INC_STATS(IpOutDiscards);
++ if (nfrags>1)
++ ip_statistics[smp_processor_id()*2 + !in_softirq()].IpFragCreates += nfrags;
++ return err;
++}
++
++/*
++ * Fast path for unfragmented packets.
++ */
++int ip_build_xmit(struct sock *sk,
++ int getfrag (const void *,
++ char *,
++ unsigned int,
++ unsigned int,
++ struct sk_buff *),
++ const void *frag,
++ unsigned length,
++ struct ipcm_cookie *ipc,
++ struct rtable *rt,
++ int flags)
++{
++ int err;
++ struct sk_buff *skb;
++ int df;
++ struct iphdr *iph;
++
++ /*
++ * Try the simple case first. This leaves fragmented frames, and by
++	 *	choice RAW frames within 20 bytes of maximum size (rare) to the long path
++ */
++
++ if (!sk->protinfo.af_inet.hdrincl) {
++ length += sizeof(struct iphdr);
++
++ /*
++ * Check for slow path.
++ */
++ if (length > rt->u.dst.pmtu || ipc->opt != NULL)
++ return ip_build_xmit_slow(sk,getfrag,frag,length,ipc,rt,flags);
++ } else {
++ if (length > rt->u.dst.dev->mtu) {
++ ip_local_error(sk, EMSGSIZE, rt->rt_dst, sk->dport, rt->u.dst.dev->mtu);
++ return -EMSGSIZE;
++ }
++ }
++ if (flags&MSG_PROBE)
++ goto out;
++
++ /*
++ * Do path mtu discovery if needed.
++ */
++ df = 0;
++ if (ip_dont_fragment(sk, &rt->u.dst))
++ df = htons(IP_DF);
++
++ /*
++ * Fast path for unfragmented frames without options.
++ */
++ {
++ int hh_len = (rt->u.dst.dev->hard_header_len + 15)&~15;
++
++ skb = sock_alloc_send_skb(sk, length+hh_len+15,
++ flags&MSG_DONTWAIT, &err);
++ if(skb==NULL)
++ goto error;
++ skb_reserve(skb, hh_len);
++ }
++
++ skb->priority = sk->priority;
++ skb->dst = dst_clone(&rt->u.dst);
++
++ skb->nh.iph = iph = (struct iphdr *)skb_put(skb, length);
++
++ if(!sk->protinfo.af_inet.hdrincl) {
++ iph->version=4;
++ iph->ihl=5;
++ iph->tos=sk->protinfo.af_inet.tos;
++ iph->tot_len = htons(length);
++ iph->frag_off = df;
++ iph->ttl=sk->protinfo.af_inet.mc_ttl;
++ ip_select_ident(iph, &rt->u.dst, sk);
++ if (rt->rt_type != RTN_MULTICAST)
++ iph->ttl=sk->protinfo.af_inet.ttl;
++ iph->protocol=sk->protocol;
++ iph->saddr=rt->rt_src;
++ iph->daddr=rt->rt_dst;
++ iph->check=0;
++ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
++ err = getfrag(frag, ((char *)iph)+iph->ihl*4,0, length-iph->ihl*4, skb);
++ }
++ else
++ err = getfrag(frag, (void *)iph, 0, length, skb);
++
++ if (err)
++ goto error_fault;
++
++ err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
++ output_maybe_reroute);
++ if (err > 0)
++ err = sk->protinfo.af_inet.recverr ? net_xmit_errno(err) : 0;
++ if (err)
++ goto error;
++out:
++ return 0;
++
++error_fault:
++ err = -EFAULT;
++ kfree_skb(skb);
++error:
++ IP_INC_STATS(IpOutDiscards);
++ return err;
++}
++
++/*
++ * This IP datagram is too large to be sent in one piece. Break it up into
++ * smaller pieces (each of size equal to IP header plus
++ * a block of the data of the original IP data part) that will yet fit in a
++ * single device frame, and queue such a frame for sending.
++ *
++ * Yes this is inefficient, feel free to submit a quicker one.
++ */
++
++int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
++{
++ struct iphdr *iph;
++ int raw = 0;
++ int ptr;
++ struct net_device *dev;
++ struct sk_buff *skb2;
++ unsigned int mtu, hlen, left, len;
++ int offset;
++ int not_last_frag;
++ struct rtable *rt = (struct rtable*)skb->dst;
++ int err = 0;
++
++ dev = rt->u.dst.dev;
++
++ /*
++ * Point into the IP datagram header.
++ */
++
++ iph = skb->nh.iph;
++
++ /*
++ * Setup starting values.
++ */
++
++ hlen = iph->ihl * 4;
++ left = skb->len - hlen; /* Space per frame */
++ mtu = rt->u.dst.pmtu - hlen; /* Size of data space */
++ ptr = raw + hlen; /* Where to start from */
++
++ /*
++ * Fragment the datagram.
++ */
++
++ offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
++ not_last_frag = iph->frag_off & htons(IP_MF);
++
++ /*
++ * Keep copying data until we run out.
++ */
++
++ while(left > 0) {
++ len = left;
++ /* IF: it doesn't fit, use 'mtu' - the data space left */
++ if (len > mtu)
++ len = mtu;
++		/* IF: we are not sending up to and including the packet end
++ then align the next start on an eight byte boundary */
++ if (len < left) {
++ len &= ~7;
++ }
++ /*
++ * Allocate buffer.
++ */
++
++ if ((skb2 = alloc_skb(len+hlen+dev->hard_header_len+15,GFP_ATOMIC)) == NULL) {
++ NETDEBUG(printk(KERN_INFO "IP: frag: no memory for new fragment!\n"));
++ err = -ENOMEM;
++ goto fail;
++ }
++
++ /*
++ * Set up data on packet
++ */
++
++ skb2->pkt_type = skb->pkt_type;
++ skb2->priority = skb->priority;
++ skb_reserve(skb2, (dev->hard_header_len+15)&~15);
++ skb_put(skb2, len + hlen);
++ skb2->nh.raw = skb2->data;
++ skb2->h.raw = skb2->data + hlen;
++ skb2->protocol = skb->protocol;
++ skb2->security = skb->security;
++
++ /*
++ * Charge the memory for the fragment to any owner
++ * it might possess
++ */
++
++ if (skb->sk)
++ skb_set_owner_w(skb2, skb->sk);
++ skb2->dst = dst_clone(skb->dst);
++ skb2->dev = skb->dev;
++
++ /*
++ * Copy the packet header into the new buffer.
++ */
++
++ memcpy(skb2->nh.raw, skb->data, hlen);
++
++ /*
++ * Copy a block of the IP datagram.
++ */
++ if (skb_copy_bits(skb, ptr, skb2->h.raw, len))
++ BUG();
++ left -= len;
++
++ /*
++ * Fill in the new header fields.
++ */
++ iph = skb2->nh.iph;
++ iph->frag_off = htons((offset >> 3));
++
++ /* ANK: dirty, but effective trick. Upgrade options only if
++ * the segment to be fragmented was THE FIRST (otherwise,
++ * options are already fixed) and make it ONCE
++ * on the initial skb, so that all the following fragments
++ * will inherit fixed options.
++ */
++ if (offset == 0)
++ ip_options_fragment(skb);
++
++ /* Copy the flags to each fragment. */
++ IPCB(skb2)->flags = IPCB(skb)->flags;
++
++ /*
++ * Added AC : If we are fragmenting a fragment that's not the
++		 * last fragment then keep MF set on each fragment
++ */
++ if (left > 0 || not_last_frag)
++ iph->frag_off |= htons(IP_MF);
++ ptr += len;
++ offset += len;
++
++#ifdef CONFIG_NET_SCHED
++ skb2->tc_index = skb->tc_index;
++#endif
++#ifdef CONFIG_NETFILTER
++ skb2->nfmark = skb->nfmark;
++ skb2->nfcache = skb->nfcache;
++ /* Connection association is same as pre-frag packet */
++ skb2->nfct = skb->nfct;
++ nf_conntrack_get(skb2->nfct);
++#ifdef CONFIG_NETFILTER_DEBUG
++ skb2->nf_debug = skb->nf_debug;
++#endif
++#endif
++
++ /*
++ * Put this fragment into the sending queue.
++ */
++
++ IP_INC_STATS(IpFragCreates);
++
++ iph->tot_len = htons(len + hlen);
++
++ ip_send_check(iph);
++
++ err = output(skb2);
++ if (err)
++ goto fail;
++ }
++ kfree_skb(skb);
++ IP_INC_STATS(IpFragOKs);
++ return err;
++
++fail:
++ kfree_skb(skb);
++ IP_INC_STATS(IpFragFails);
++ return err;
++}
++
++/*
++ * Fetch data from kernel space and fill in checksum if needed.
++ */
++static int ip_reply_glue_bits(const void *dptr, char *to, unsigned int offset,
++ unsigned int fraglen, struct sk_buff *skb)
++{
++ struct ip_reply_arg *dp = (struct ip_reply_arg*)dptr;
++ u16 *pktp = (u16 *)to;
++ struct iovec *iov;
++ int len;
++ int hdrflag = 1;
++
++ iov = &dp->iov[0];
++ if (offset >= iov->iov_len) {
++ offset -= iov->iov_len;
++ iov++;
++ hdrflag = 0;
++ }
++ len = iov->iov_len - offset;
++ if (fraglen > len) { /* overlapping. */
++ dp->csum = csum_partial_copy_nocheck(iov->iov_base+offset, to, len,
++ dp->csum);
++ offset = 0;
++ fraglen -= len;
++ to += len;
++ iov++;
++ }
++
++ dp->csum = csum_partial_copy_nocheck(iov->iov_base+offset, to, fraglen,
++ dp->csum);
++
++ if (hdrflag && dp->csumoffset)
++ *(pktp + dp->csumoffset) = csum_fold(dp->csum); /* fill in checksum */
++ return 0;
++}
++
++/*
++ * Generic function to send a packet as reply to another packet.
++ * Used to send TCP resets so far. ICMP should use this function too.
++ *
++ * Should run single threaded per socket because it uses the sock
++ * structure to pass arguments.
++ */
++void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
++ unsigned int len)
++{
++ struct {
++ struct ip_options opt;
++ char data[40];
++ } replyopts;
++ struct ipcm_cookie ipc;
++ u32 daddr;
++ struct rtable *rt = (struct rtable*)skb->dst;
++
++ if (ip_options_echo(&replyopts.opt, skb))
++ return;
++
++ daddr = ipc.addr = rt->rt_src;
++ ipc.opt = NULL;
++
++ if (replyopts.opt.optlen) {
++ ipc.opt = &replyopts.opt;
++
++ if (ipc.opt->srr)
++ daddr = replyopts.opt.faddr;
++ }
++
++ if (ip_route_output(&rt, daddr, rt->rt_spec_dst, RT_TOS(skb->nh.iph->tos), 0))
++ return;
++
++ /* And let IP do all the hard work.
++
++	   This chunk is not reentrant, hence the spinlock.
++	   Note that it relies on the fact that this function is called
++	   with locally disabled BH and that sk cannot already be spinlocked.
++ */
++ bh_lock_sock(sk);
++ sk->protinfo.af_inet.tos = skb->nh.iph->tos;
++ sk->priority = skb->priority;
++ sk->protocol = skb->nh.iph->protocol;
++ ip_build_xmit(sk, ip_reply_glue_bits, arg, len, &ipc, rt, MSG_DONTWAIT);
++ bh_unlock_sock(sk);
++
++ ip_rt_put(rt);
++}
++
++/*
++ * IP protocol layer initialiser
++ */
++
++static struct packet_type ip_packet_type =
++{
++ __constant_htons(ETH_P_IP),
++ NULL, /* All devices */
++ ip_rcv,
++ (void*)1,
++ NULL,
++};
++
++/*
++ * IP registers the packet type and then calls the subprotocol initialisers
++ */
++
++void __init ip_init(void)
++{
++ dev_add_pack(&ip_packet_type);
++
++ ip_rt_init();
++ inet_initpeers();
++
++#ifdef CONFIG_IP_MULTICAST
++ proc_net_create("igmp", 0, ip_mc_procinfo);
++#endif
++ proc_net_create("mcfilter", 0, ip_mcf_procinfo);
++}
+diff -Nur linux-mips-cvs/net/ipv4/netfilter/Config.in linux-ebtables/net/ipv4/netfilter/Config.in
+--- linux-mips-cvs/net/ipv4/netfilter/Config.in 2005-01-20 03:19:25.000000000 +0100
++++ linux-ebtables/net/ipv4/netfilter/Config.in 2005-02-07 05:52:50.000000000 +0100
+@@ -44,6 +44,9 @@
+ dep_tristate ' Unclean match support (EXPERIMENTAL)' CONFIG_IP_NF_MATCH_UNCLEAN $CONFIG_IP_NF_IPTABLES
+ dep_tristate ' Owner match support (EXPERIMENTAL)' CONFIG_IP_NF_MATCH_OWNER $CONFIG_IP_NF_IPTABLES
+ fi
++ if [ "$CONFIG_BRIDGE" != "n" ]; then
++ dep_tristate ' Physdev match support' CONFIG_IP_NF_MATCH_PHYSDEV $CONFIG_IP_NF_IPTABLES
++ fi
+ # The targets
+ dep_tristate ' Packet filtering' CONFIG_IP_NF_FILTER $CONFIG_IP_NF_IPTABLES
+ if [ "$CONFIG_IP_NF_FILTER" != "n" ]; then
+diff -Nur linux-mips-cvs/net/ipv4/netfilter/Makefile linux-ebtables/net/ipv4/netfilter/Makefile
+--- linux-mips-cvs/net/ipv4/netfilter/Makefile 2003-08-13 19:19:30.000000000 +0200
++++ linux-ebtables/net/ipv4/netfilter/Makefile 2005-02-07 05:52:50.000000000 +0100
+@@ -87,6 +87,8 @@
+ obj-$(CONFIG_IP_NF_MATCH_UNCLEAN) += ipt_unclean.o
+ obj-$(CONFIG_IP_NF_MATCH_TCPMSS) += ipt_tcpmss.o
+
++obj-$(CONFIG_IP_NF_MATCH_PHYSDEV) += ipt_physdev.o
++
+ # targets
+ obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
+ obj-$(CONFIG_IP_NF_TARGET_MIRROR) += ipt_MIRROR.o
+diff -Nur linux-mips-cvs/net/ipv4/netfilter/ip_tables.c linux-ebtables/net/ipv4/netfilter/ip_tables.c
+--- linux-mips-cvs/net/ipv4/netfilter/ip_tables.c 2005-01-20 03:19:25.000000000 +0100
++++ linux-ebtables/net/ipv4/netfilter/ip_tables.c 2005-02-07 05:52:50.000000000 +0100
+@@ -118,12 +118,19 @@
+ static inline int
+ ip_packet_match(const struct iphdr *ip,
+ const char *indev,
++#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
++ const char *physindev,
++#endif
+ const char *outdev,
++#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
++ const char *physoutdev,
++#endif
+ const struct ipt_ip *ipinfo,
+ int isfrag)
+ {
+ size_t i;
+ unsigned long ret;
++ unsigned long ret2 = 1;
+
+ #define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))
+
+@@ -153,7 +160,15 @@
+ & ((const unsigned long *)ipinfo->iniface_mask)[i];
+ }
+
+- if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
++#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
++ for (i = 0, ret2 = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
++ ret2 |= (((const unsigned long *)physindev)[i]
++ ^ ((const unsigned long *)ipinfo->iniface)[i])
++ & ((const unsigned long *)ipinfo->iniface_mask)[i];
++ }
++#endif
++
++ if (FWINV(ret != 0 && ret2 != 0, IPT_INV_VIA_IN)) {
+ dprintf("VIA in mismatch (%s vs %s).%s\n",
+ indev, ipinfo->iniface,
+ ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
+@@ -166,7 +181,15 @@
+ & ((const unsigned long *)ipinfo->outiface_mask)[i];
+ }
+
+- if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
++#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
++ for (i = 0, ret2 = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
++ ret2 |= (((const unsigned long *)physoutdev)[i]
++ ^ ((const unsigned long *)ipinfo->outiface)[i])
++ & ((const unsigned long *)ipinfo->outiface_mask)[i];
++ }
++#endif
++
++ if (FWINV(ret != 0 && ret2 != 0, IPT_INV_VIA_OUT)) {
+ dprintf("VIA out mismatch (%s vs %s).%s\n",
+ outdev, ipinfo->outiface,
+ ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
+@@ -265,6 +288,9 @@
+ /* Initializing verdict to NF_DROP keeps gcc happy. */
+ unsigned int verdict = NF_DROP;
+ const char *indev, *outdev;
++#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
++ const char *physindev, *physoutdev;
++#endif
+ void *table_base;
+ struct ipt_entry *e, *back;
+
+@@ -274,6 +300,13 @@
+ datalen = (*pskb)->len - ip->ihl * 4;
+ indev = in ? in->name : nulldevname;
+ outdev = out ? out->name : nulldevname;
++#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
++ physindev = ((*pskb)->nf_bridge && (*pskb)->nf_bridge->physindev) ?
++ (*pskb)->nf_bridge->physindev->name : nulldevname;
++ physoutdev = ((*pskb)->nf_bridge && (*pskb)->nf_bridge->physoutdev) ?
++ (*pskb)->nf_bridge->physoutdev->name : nulldevname;
++#endif
++
+ /* We handle fragments by dealing with the first fragment as
+ * if it was a normal packet. All other fragments are treated
+ * normally, except that they will NEVER match rules that ask
+@@ -309,7 +342,15 @@
+ IP_NF_ASSERT(e);
+ IP_NF_ASSERT(back);
+ (*pskb)->nfcache |= e->nfcache;
+- if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
++ if (ip_packet_match(ip, indev,
++#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
++ physindev,
++#endif
++ outdev,
++#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
++ physoutdev,
++#endif
++ &e->ip, offset)) {
+ struct ipt_entry_target *t;
+
+ if (IPT_MATCH_ITERATE(e, do_match,
+diff -Nur linux-mips-cvs/net/ipv4/netfilter/ipt_LOG.c linux-ebtables/net/ipv4/netfilter/ipt_LOG.c
+--- linux-mips-cvs/net/ipv4/netfilter/ipt_LOG.c 2003-11-17 02:07:48.000000000 +0100
++++ linux-ebtables/net/ipv4/netfilter/ipt_LOG.c 2005-02-07 05:52:50.000000000 +0100
+@@ -316,6 +316,18 @@
+ loginfo->prefix,
+ in ? in->name : "",
+ out ? out->name : "");
++#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
++ if ((*pskb)->nf_bridge) {
++ struct net_device *physindev = (*pskb)->nf_bridge->physindev;
++ struct net_device *physoutdev = (*pskb)->nf_bridge->physoutdev;
++
++ if (physindev && in != physindev)
++ printk("PHYSIN=%s ", physindev->name);
++ if (physoutdev && out != physoutdev)
++ printk("PHYSOUT=%s ", physoutdev->name);
++ }
++#endif
++
+ if (in && !out) {
+ /* MAC logging for input chain only. */
+ printk("MAC=");
+diff -Nur linux-mips-cvs/net/ipv4/netfilter/ipt_REJECT.c linux-ebtables/net/ipv4/netfilter/ipt_REJECT.c
+--- linux-mips-cvs/net/ipv4/netfilter/ipt_REJECT.c 2005-01-20 03:19:25.000000000 +0100
++++ linux-ebtables/net/ipv4/netfilter/ipt_REJECT.c 2005-02-07 05:52:50.000000000 +0100
+@@ -15,6 +15,9 @@
+ #include <net/route.h>
+ #include <linux/netfilter_ipv4/ip_tables.h>
+ #include <linux/netfilter_ipv4/ipt_REJECT.h>
++#ifdef CONFIG_BRIDGE_NETFILTER
++#include <linux/netfilter_bridge.h>
++#endif
+
+ #if 0
+ #define DEBUGP printk
+@@ -29,7 +32,13 @@
+ struct rt_key key = {};
+ struct rtable *rt;
+
+- if (hook != NF_IP_FORWARD) {
++ /* We don't require ip forwarding to be enabled to be able to
++ * send a RST reply for bridged traffic. */
++ if (hook != NF_IP_FORWARD
++#ifdef CONFIG_BRIDGE_NETFILTER
++ || (skb->nf_bridge && skb->nf_bridge->mask & BRNF_BRIDGED)
++#endif
++ ) {
+ key.dst = iph->saddr;
+ if (hook == NF_IP_LOCAL_IN)
+ key.src = iph->daddr;
+diff -Nur linux-mips-cvs/net/ipv4/netfilter/ipt_REJECT.c.orig linux-ebtables/net/ipv4/netfilter/ipt_REJECT.c.orig
+--- linux-mips-cvs/net/ipv4/netfilter/ipt_REJECT.c.orig 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/net/ipv4/netfilter/ipt_REJECT.c.orig 2005-01-20 03:19:25.000000000 +0100
+@@ -0,0 +1,426 @@
++/*
++ * This is a module which is used for rejecting packets.
++ * Added support for customized reject packets (Jozsef Kadlecsik).
++ * Added support for ICMP type-3-code-13 (Maciej Soltysiak). [RFC 1812]
++ */
++#include <linux/config.h>
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/ip.h>
++#include <linux/udp.h>
++#include <linux/icmp.h>
++#include <net/icmp.h>
++#include <net/ip.h>
++#include <net/tcp.h>
++#include <net/route.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ipt_REJECT.h>
++
++#if 0
++#define DEBUGP printk
++#else
++#define DEBUGP(format, args...)
++#endif
++
++static inline struct rtable *route_reverse(struct sk_buff *skb, int hook)
++{
++ struct iphdr *iph = skb->nh.iph;
++ struct dst_entry *odst;
++ struct rt_key key = {};
++ struct rtable *rt;
++
++ if (hook != NF_IP_FORWARD) {
++ key.dst = iph->saddr;
++ if (hook == NF_IP_LOCAL_IN)
++ key.src = iph->daddr;
++ key.tos = RT_TOS(iph->tos);
++
++ if (ip_route_output_key(&rt, &key) != 0)
++ return NULL;
++ } else {
++ /* non-local src, find valid iif to satisfy
++ * rp-filter when calling ip_route_input. */
++ key.dst = iph->daddr;
++ if (ip_route_output_key(&rt, &key) != 0)
++ return NULL;
++
++ odst = skb->dst;
++ if (ip_route_input(skb, iph->saddr, iph->daddr,
++ RT_TOS(iph->tos), rt->u.dst.dev) != 0) {
++ dst_release(&rt->u.dst);
++ return NULL;
++ }
++ dst_release(&rt->u.dst);
++ rt = (struct rtable *)skb->dst;
++ skb->dst = odst;
++ }
++
++ if (rt->u.dst.error) {
++ dst_release(&rt->u.dst);
++ rt = NULL;
++ }
++
++ return rt;
++}
++
++/* Send RST reply */
++static void send_reset(struct sk_buff *oldskb, int hook)
++{
++ struct sk_buff *nskb;
++ struct tcphdr *otcph, *tcph;
++ struct rtable *rt;
++ unsigned int otcplen;
++ u_int16_t tmp_port;
++ u_int32_t tmp_addr;
++ int needs_ack;
++ int hh_len;
++
++ /* IP header checks: fragment, too short. */
++ if (oldskb->nh.iph->frag_off & htons(IP_OFFSET)
++ || oldskb->len < (oldskb->nh.iph->ihl<<2) + sizeof(struct tcphdr))
++ return;
++
++ otcph = (struct tcphdr *)((u_int32_t*)oldskb->nh.iph + oldskb->nh.iph->ihl);
++ otcplen = oldskb->len - oldskb->nh.iph->ihl*4;
++
++ /* No RST for RST. */
++ if (otcph->rst)
++ return;
++
++ /* Check checksum. */
++ if (tcp_v4_check(otcph, otcplen, oldskb->nh.iph->saddr,
++ oldskb->nh.iph->daddr,
++ csum_partial((char *)otcph, otcplen, 0)) != 0)
++ return;
++
++ if ((rt = route_reverse(oldskb, hook)) == NULL)
++ return;
++
++ hh_len = (rt->u.dst.dev->hard_header_len + 15)&~15;
++
++
++ /* Copy skb (even if skb is about to be dropped, we can't just
++ clone it because there may be other things, such as tcpdump,
++ interested in it). We also need to expand headroom in case
++ hh_len of incoming interface < hh_len of outgoing interface */
++ nskb = skb_copy_expand(oldskb, hh_len, skb_tailroom(oldskb),
++ GFP_ATOMIC);
++ if (!nskb) {
++ dst_release(&rt->u.dst);
++ return;
++ }
++
++ dst_release(nskb->dst);
++ nskb->dst = &rt->u.dst;
++
++ /* This packet will not be the same as the other: clear nf fields */
++ nf_reset(nskb);
++ nskb->nfcache = 0;
++ nskb->nfmark = 0;
++
++ tcph = (struct tcphdr *)((u_int32_t*)nskb->nh.iph + nskb->nh.iph->ihl);
++
++ /* Swap source and dest */
++ tmp_addr = nskb->nh.iph->saddr;
++ nskb->nh.iph->saddr = nskb->nh.iph->daddr;
++ nskb->nh.iph->daddr = tmp_addr;
++ tmp_port = tcph->source;
++ tcph->source = tcph->dest;
++ tcph->dest = tmp_port;
++
++ /* Truncate to length (no data) */
++ tcph->doff = sizeof(struct tcphdr)/4;
++ skb_trim(nskb, nskb->nh.iph->ihl*4 + sizeof(struct tcphdr));
++ nskb->nh.iph->tot_len = htons(nskb->len);
++
++ if (tcph->ack) {
++ needs_ack = 0;
++ tcph->seq = otcph->ack_seq;
++ tcph->ack_seq = 0;
++ } else {
++ needs_ack = 1;
++ tcph->ack_seq = htonl(ntohl(otcph->seq) + otcph->syn + otcph->fin
++ + otcplen - (otcph->doff<<2));
++ tcph->seq = 0;
++ }
++
++ /* Reset flags */
++ ((u_int8_t *)tcph)[13] = 0;
++ tcph->rst = 1;
++ tcph->ack = needs_ack;
++
++ tcph->window = 0;
++ tcph->urg_ptr = 0;
++
++ /* Adjust TCP checksum */
++ tcph->check = 0;
++ tcph->check = tcp_v4_check(tcph, sizeof(struct tcphdr),
++ nskb->nh.iph->saddr,
++ nskb->nh.iph->daddr,
++ csum_partial((char *)tcph,
++ sizeof(struct tcphdr), 0));
++
++ /* Adjust IP TTL, DF */
++ nskb->nh.iph->ttl = MAXTTL;
++ /* Set DF, id = 0 */
++ nskb->nh.iph->frag_off = htons(IP_DF);
++ nskb->nh.iph->id = 0;
++
++ /* Adjust IP checksum */
++ nskb->nh.iph->check = 0;
++ nskb->nh.iph->check = ip_fast_csum((unsigned char *)nskb->nh.iph,
++ nskb->nh.iph->ihl);
++
++ /* "Never happens" */
++ if (nskb->len > nskb->dst->pmtu)
++ goto free_nskb;
++
++ nf_ct_attach(nskb, oldskb);
++
++ NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, nskb, NULL, nskb->dst->dev,
++ ip_finish_output);
++ return;
++
++ free_nskb:
++ kfree_skb(nskb);
++}
++
++static void send_unreach(struct sk_buff *skb_in, int code)
++{
++ struct iphdr *iph;
++ struct udphdr *udph;
++ struct icmphdr *icmph;
++ struct sk_buff *nskb;
++ u32 saddr;
++ u8 tos;
++ int hh_len, length;
++ struct rtable *rt = (struct rtable*)skb_in->dst;
++ unsigned char *data;
++
++ if (!rt)
++ return;
++
++ /* FIXME: Use sysctl number. --RR */
++ if (!xrlim_allow(&rt->u.dst, 1*HZ))
++ return;
++
++ iph = skb_in->nh.iph;
++
++ /* No replies to physical multicast/broadcast */
++ if (skb_in->pkt_type!=PACKET_HOST)
++ return;
++
++ /* Now check at the protocol level */
++ if (rt->rt_flags&(RTCF_BROADCAST|RTCF_MULTICAST))
++ return;
++
++ /* Only reply to fragment 0. */
++ if (iph->frag_off&htons(IP_OFFSET))
++ return;
++
++ /* if UDP checksum is set, verify it's correct */
++ if (iph->protocol == IPPROTO_UDP
++ && skb_in->tail-(u8*)iph >= sizeof(struct udphdr)) {
++ int datalen = skb_in->len - (iph->ihl<<2);
++ udph = (struct udphdr *)((char *)iph + (iph->ihl<<2));
++ if (udph->check
++ && csum_tcpudp_magic(iph->saddr, iph->daddr,
++ datalen, IPPROTO_UDP,
++ csum_partial((char *)udph, datalen,
++ 0)) != 0)
++ return;
++ }
++
++ /* If we send an ICMP error to an ICMP error a mess would result.. */
++ if (iph->protocol == IPPROTO_ICMP
++ && skb_in->tail-(u8*)iph >= sizeof(struct icmphdr)) {
++ icmph = (struct icmphdr *)((char *)iph + (iph->ihl<<2));
++ /* Between echo-reply (0) and timestamp (13),
++ everything except echo-request (8) is an error.
++ Also, anything greater than NR_ICMP_TYPES is
++ unknown, and hence should be treated as an error... */
++ if ((icmph->type < ICMP_TIMESTAMP
++ && icmph->type != ICMP_ECHOREPLY
++ && icmph->type != ICMP_ECHO)
++ || icmph->type > NR_ICMP_TYPES)
++ return;
++ }
++
++ saddr = iph->daddr;
++ if (!(rt->rt_flags & RTCF_LOCAL))
++ saddr = 0;
++
++ tos = (iph->tos & IPTOS_TOS_MASK) | IPTOS_PREC_INTERNETCONTROL;
++
++ if (ip_route_output(&rt, iph->saddr, saddr, RT_TOS(tos), 0))
++ return;
++
++ /* RFC says return as much as we can without exceeding 576 bytes. */
++ length = skb_in->len + sizeof(struct iphdr) + sizeof(struct icmphdr);
++
++ if (length > rt->u.dst.pmtu)
++ length = rt->u.dst.pmtu;
++ if (length > 576)
++ length = 576;
++
++ hh_len = (rt->u.dst.dev->hard_header_len + 15)&~15;
++
++ nskb = alloc_skb(hh_len+15+length, GFP_ATOMIC);
++ if (!nskb) {
++ ip_rt_put(rt);
++ return;
++ }
++
++ nskb->priority = 0;
++ nskb->dst = &rt->u.dst;
++ skb_reserve(nskb, hh_len);
++
++ /* Set up IP header */
++ iph = nskb->nh.iph
++ = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
++ iph->version=4;
++ iph->ihl=5;
++ iph->tos=tos;
++ iph->tot_len = htons(length);
++
++ /* PMTU discovery never applies to ICMP packets. */
++ iph->frag_off = 0;
++
++ iph->ttl = MAXTTL;
++ ip_select_ident(iph, &rt->u.dst, NULL);
++ iph->protocol=IPPROTO_ICMP;
++ iph->saddr=rt->rt_src;
++ iph->daddr=rt->rt_dst;
++ iph->check=0;
++ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
++
++ /* Set up ICMP header. */
++ icmph = nskb->h.icmph
++ = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
++ icmph->type = ICMP_DEST_UNREACH;
++ icmph->code = code;
++ icmph->un.gateway = 0;
++ icmph->checksum = 0;
++
++ /* Copy as much of original packet as will fit */
++ data = skb_put(nskb,
++ length - sizeof(struct iphdr) - sizeof(struct icmphdr));
++ /* FIXME: won't work with nonlinear skbs --RR */
++ memcpy(data, skb_in->nh.iph,
++ length - sizeof(struct iphdr) - sizeof(struct icmphdr));
++ icmph->checksum = ip_compute_csum((unsigned char *)icmph,
++ length - sizeof(struct iphdr));
++
++ nf_ct_attach(nskb, skb_in);
++
++ NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, nskb, NULL, nskb->dst->dev,
++ ip_finish_output);
++}
++
++static unsigned int reject(struct sk_buff **pskb,
++ unsigned int hooknum,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *targinfo,
++ void *userinfo)
++{
++ const struct ipt_reject_info *reject = targinfo;
++
++ /* Our naive response construction doesn't deal with IP
++ options, and probably shouldn't try. */
++ if ((*pskb)->nh.iph->ihl<<2 != sizeof(struct iphdr))
++ return NF_DROP;
++
++ /* WARNING: This code causes reentry within iptables.
++ This means that the iptables jump stack is now crap. We
++ must return an absolute verdict. --RR */
++ switch (reject->with) {
++ case IPT_ICMP_NET_UNREACHABLE:
++ send_unreach(*pskb, ICMP_NET_UNREACH);
++ break;
++ case IPT_ICMP_HOST_UNREACHABLE:
++ send_unreach(*pskb, ICMP_HOST_UNREACH);
++ break;
++ case IPT_ICMP_PROT_UNREACHABLE:
++ send_unreach(*pskb, ICMP_PROT_UNREACH);
++ break;
++ case IPT_ICMP_PORT_UNREACHABLE:
++ send_unreach(*pskb, ICMP_PORT_UNREACH);
++ break;
++ case IPT_ICMP_NET_PROHIBITED:
++ send_unreach(*pskb, ICMP_NET_ANO);
++ break;
++ case IPT_ICMP_HOST_PROHIBITED:
++ send_unreach(*pskb, ICMP_HOST_ANO);
++ break;
++ case IPT_ICMP_ADMIN_PROHIBITED:
++ send_unreach(*pskb, ICMP_PKT_FILTERED);
++ break;
++ case IPT_TCP_RESET:
++ send_reset(*pskb, hooknum);
++ case IPT_ICMP_ECHOREPLY:
++ /* Doesn't happen. */
++ break;
++ }
++
++ return NF_DROP;
++}
++
++static int check(const char *tablename,
++ const struct ipt_entry *e,
++ void *targinfo,
++ unsigned int targinfosize,
++ unsigned int hook_mask)
++{
++ const struct ipt_reject_info *rejinfo = targinfo;
++
++ if (targinfosize != IPT_ALIGN(sizeof(struct ipt_reject_info))) {
++ DEBUGP("REJECT: targinfosize %u != 0\n", targinfosize);
++ return 0;
++ }
++
++ /* Only allow these for packet filtering. */
++ if (strcmp(tablename, "filter") != 0) {
++ DEBUGP("REJECT: bad table `%s'.\n", tablename);
++ return 0;
++ }
++ if ((hook_mask & ~((1 << NF_IP_LOCAL_IN)
++ | (1 << NF_IP_FORWARD)
++ | (1 << NF_IP_LOCAL_OUT))) != 0) {
++ DEBUGP("REJECT: bad hook mask %X\n", hook_mask);
++ return 0;
++ }
++
++ if (rejinfo->with == IPT_ICMP_ECHOREPLY) {
++ printk("REJECT: ECHOREPLY no longer supported.\n");
++ return 0;
++ } else if (rejinfo->with == IPT_TCP_RESET) {
++ /* Must specify that it's a TCP packet */
++ if (e->ip.proto != IPPROTO_TCP
++ || (e->ip.invflags & IPT_INV_PROTO)) {
++ DEBUGP("REJECT: TCP_RESET illegal for non-tcp\n");
++ return 0;
++ }
++ }
++
++ return 1;
++}
++
++static struct ipt_target ipt_reject_reg
++= { { NULL, NULL }, "REJECT", reject, check, NULL, THIS_MODULE };
++
++static int __init init(void)
++{
++ if (ipt_register_target(&ipt_reject_reg))
++ return -EINVAL;
++ return 0;
++}
++
++static void __exit fini(void)
++{
++ ipt_unregister_target(&ipt_reject_reg);
++}
++
++module_init(init);
++module_exit(fini);
++MODULE_LICENSE("GPL");
+diff -Nur linux-mips-cvs/net/ipv4/netfilter/ipt_physdev.c linux-ebtables/net/ipv4/netfilter/ipt_physdev.c
+--- linux-mips-cvs/net/ipv4/netfilter/ipt_physdev.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-ebtables/net/ipv4/netfilter/ipt_physdev.c 2005-02-07 05:52:50.000000000 +0100
+@@ -0,0 +1,127 @@
++/* Kernel module to match the bridge port in and
++ * out device for IP packets coming into contact with a bridge. */
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter_ipv4/ipt_physdev.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_bridge.h>
++#include <linux/netdevice.h>
++#define MATCH 1
++#define NOMATCH 0
++
++static int
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *matchinfo,
++ int offset,
++ const void *hdr,
++ u_int16_t datalen,
++ int *hotdrop)
++{
++ int i;
++ static const char nulldevname[IFNAMSIZ] = { 0 };
++ const struct ipt_physdev_info *info = matchinfo;
++ unsigned long ret;
++ const char *indev, *outdev;
++ struct nf_bridge_info *nf_bridge;
++
++ /* Not a bridged IP packet or no info available yet:
++ * LOCAL_OUT/mangle and LOCAL_OUT/nat don't know if
++ * the destination device will be a bridge. */
++ if (!(nf_bridge = skb->nf_bridge)) {
++ /* Return MATCH if the invert flags of the used options are on */
++ if ((info->bitmask & IPT_PHYSDEV_OP_BRIDGED) &&
++ !(info->invert & IPT_PHYSDEV_OP_BRIDGED))
++ return NOMATCH;
++ if ((info->bitmask & IPT_PHYSDEV_OP_ISIN) &&
++ !(info->invert & IPT_PHYSDEV_OP_ISIN))
++ return NOMATCH;
++ if ((info->bitmask & IPT_PHYSDEV_OP_ISOUT) &&
++ !(info->invert & IPT_PHYSDEV_OP_ISOUT))
++ return NOMATCH;
++ if ((info->bitmask & IPT_PHYSDEV_OP_IN) &&
++ !(info->invert & IPT_PHYSDEV_OP_IN))
++ return NOMATCH;
++ if ((info->bitmask & IPT_PHYSDEV_OP_OUT) &&
++ !(info->invert & IPT_PHYSDEV_OP_OUT))
++ return NOMATCH;
++ return MATCH;
++ }
++
++ /* This only makes sense in the FORWARD and POSTROUTING chains */
++ if ((info->bitmask & IPT_PHYSDEV_OP_BRIDGED) &&
++ (!!(nf_bridge->mask & BRNF_BRIDGED) ^
++ !(info->invert & IPT_PHYSDEV_OP_BRIDGED)))
++ return NOMATCH;
++
++ if ((info->bitmask & IPT_PHYSDEV_OP_ISIN &&
++ (!nf_bridge->physindev ^ !!(info->invert & IPT_PHYSDEV_OP_ISIN))) ||
++ (info->bitmask & IPT_PHYSDEV_OP_ISOUT &&
++ (!nf_bridge->physoutdev ^ !!(info->invert & IPT_PHYSDEV_OP_ISOUT))))
++ return NOMATCH;
++
++ if (!(info->bitmask & IPT_PHYSDEV_OP_IN))
++ goto match_outdev;
++ indev = nf_bridge->physindev ? nf_bridge->physindev->name : nulldevname;
++ for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
++ ret |= (((const unsigned long *)indev)[i]
++ ^ ((const unsigned long *)info->physindev)[i])
++ & ((const unsigned long *)info->in_mask)[i];
++ }
++
++ if ((ret == 0) ^ !(info->invert & IPT_PHYSDEV_OP_IN))
++ return NOMATCH;
++
++match_outdev:
++ if (!(info->bitmask & IPT_PHYSDEV_OP_OUT))
++ return MATCH;
++ outdev = nf_bridge->physoutdev ?
++ nf_bridge->physoutdev->name : nulldevname;
++ for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
++ ret |= (((const unsigned long *)outdev)[i]
++ ^ ((const unsigned long *)info->physoutdev)[i])
++ & ((const unsigned long *)info->out_mask)[i];
++ }
++
++ return (ret != 0) ^ !(info->invert & IPT_PHYSDEV_OP_OUT);
++}
++
++static int
++checkentry(const char *tablename,
++ const struct ipt_ip *ip,
++ void *matchinfo,
++ unsigned int matchsize,
++ unsigned int hook_mask)
++{
++ const struct ipt_physdev_info *info = matchinfo;
++
++ if (matchsize != IPT_ALIGN(sizeof(struct ipt_physdev_info)))
++ return 0;
++ if (!(info->bitmask & IPT_PHYSDEV_OP_MASK) ||
++ info->bitmask & ~IPT_PHYSDEV_OP_MASK)
++ return 0;
++ return 1;
++}
++
++static struct ipt_match physdev_match = {
++ .name = "physdev",
++ .match = &match,
++ .checkentry = &checkentry,
++ .me = THIS_MODULE,
++};
++
++static int __init init(void)
++{
++ return ipt_register_match(&physdev_match);
++}
++
++static void __exit fini(void)
++{
++ ipt_unregister_match(&physdev_match);
++}
++
++module_init(init);
++module_exit(fini);
++MODULE_LICENSE("GPL");
++EXPORT_NO_SYMBOLS;