Diffstat (limited to 'target/linux')
-rw-r--r--  target/linux/generic-2.6/patches-2.6.21/130-netfilter_ipset.patch  5476
-rw-r--r--  target/linux/generic-2.6/patches-2.6.21/160-netfilter_route.patch     2
-rw-r--r--  target/linux/generic-2.6/patches-2.6.25/130-netfilter_ipset.patch  5473
-rw-r--r--  target/linux/generic-2.6/patches-2.6.27/130-netfilter_ipset.patch  5623
-rw-r--r--  target/linux/generic-2.6/patches-2.6.28/130-netfilter_ipset.patch  5623
-rw-r--r--  target/linux/generic-2.6/patches-2.6.30/130-netfilter_ipset.patch  5623
-rw-r--r--  target/linux/generic-2.6/patches-2.6.31/130-netfilter_ipset.patch  5623
7 files changed, 16908 insertions, 16535 deletions
diff --git a/target/linux/generic-2.6/patches-2.6.21/130-netfilter_ipset.patch b/target/linux/generic-2.6/patches-2.6.21/130-netfilter_ipset.patch
index befffb0ebe..02bfe23641 100644
--- a/target/linux/generic-2.6/patches-2.6.21/130-netfilter_ipset.patch
+++ b/target/linux/generic-2.6/patches-2.6.21/130-netfilter_ipset.patch
@@ -1,23 +1,29 @@
--- a/include/linux/netfilter_ipv4/Kbuild
+++ b/include/linux/netfilter_ipv4/Kbuild
-@@ -59,3 +59,14 @@ unifdef-y += ip_nat.h
+@@ -59,3 +59,20 @@ unifdef-y += ip_nat.h
unifdef-y += ip_nat_rule.h
unifdef-y += ip_queue.h
unifdef-y += ip_tables.h
+
+unifdef-y += ip_set.h
+header-y += ip_set_iphash.h
++unifdef-y += ip_set_bitmaps.h
++unifdef-y += ip_set_getport.h
++unifdef-y += ip_set_hashes.h
+header-y += ip_set_ipmap.h
+header-y += ip_set_ipporthash.h
++header-y += ip_set_ipportiphash.h
++header-y += ip_set_ipportnethash.h
+unifdef-y += ip_set_iptree.h
+unifdef-y += ip_set_iptreemap.h
+header-y += ip_set_jhash.h
+header-y += ip_set_macipmap.h
-+unifdef-y += ip_set_nethash.h
++header-y += ip_set_nethash.h
+header-y += ip_set_portmap.h
++header-y += ip_set_setlist.h
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set.h
-@@ -0,0 +1,498 @@
+@@ -0,0 +1,574 @@
+#ifndef _IP_SET_H
+#define _IP_SET_H
+
@@ -60,7 +66,7 @@
+/*
+ * Used so that the kernel module and ipset-binary can match their versions
+ */
-+#define IP_SET_PROTOCOL_VERSION 2
++#define IP_SET_PROTOCOL_VERSION 3
+
+#define IP_SET_MAXNAMELEN 32 /* set names and set typenames */
+
@@ -107,6 +113,9 @@
+#define IPSET_TYPE_PORT 0x02 /* Port type of set */
+#define IPSET_DATA_SINGLE 0x04 /* Single data storage */
+#define IPSET_DATA_DOUBLE 0x08 /* Double data storage */
++#define IPSET_DATA_TRIPLE 0x10 /* Triple data storage */
++#define IPSET_TYPE_IP1 0x20 /* IP address type of set */
++#define IPSET_TYPE_SETNAME 0x40 /* setname type of set */
+
+/* Reserved keywords */
+#define IPSET_TOKEN_DEFAULT ":default:"
@@ -245,7 +254,7 @@
+struct ip_set_req_setnames {
+ unsigned op;
+ ip_set_id_t index; /* set to list/save */
-+ size_t size; /* size to get setdata/bindings */
++ u_int32_t size; /* size to get setdata/bindings */
+ /* followed by sets number of struct ip_set_name_list */
+};
+
@@ -267,9 +276,9 @@
+ ip_set_id_t index;
+ ip_set_id_t binding;
+ u_int32_t ref;
-+ size_t header_size; /* Set header data of header_size */
-+ size_t members_size; /* Set members data of members_size */
-+ size_t bindings_size; /* Set bindings data of bindings_size */
++ u_int32_t header_size; /* Set header data of header_size */
++ u_int32_t members_size; /* Set members data of members_size */
++ u_int32_t bindings_size;/* Set bindings data of bindings_size */
+};
+
+struct ip_set_hash_list {
@@ -286,8 +295,8 @@
+struct ip_set_save {
+ ip_set_id_t index;
+ ip_set_id_t binding;
-+ size_t header_size; /* Set header data of header_size */
-+ size_t members_size; /* Set members data of members_size */
++ u_int32_t header_size; /* Set header data of header_size */
++ u_int32_t members_size; /* Set members data of members_size */
+};
+
+/* At restoring, ip == 0 means default binding for the given set: */
@@ -307,8 +316,8 @@
+ char name[IP_SET_MAXNAMELEN];
+ char typename[IP_SET_MAXNAMELEN];
+ ip_set_id_t index;
-+ size_t header_size; /* Create data of header_size */
-+ size_t members_size; /* Set members data of members_size */
++ u_int32_t header_size; /* Create data of header_size */
++ u_int32_t members_size; /* Set members data of members_size */
+};
+
+static inline int bitmap_bytes(ip_set_ip_t a, ip_set_ip_t b)
@@ -316,7 +325,12 @@
+ return 4 * ((((b - a + 8) / 8) + 3) / 4);
+}
+
++/* General limit for the elements in a set */
++#define MAX_RANGE 0x0000FFFF
++
+#ifdef __KERNEL__
++#include <linux/netfilter_ipv4/ip_set_compat.h>
++#include <linux/netfilter_ipv4/ip_set_malloc.h>
+
+#define ip_set_printk(format, args...) \
+ do { \
@@ -370,14 +384,14 @@
+ * return 0 if not in set, 1 if in set.
+ */
+ int (*testip) (struct ip_set *set,
-+ const void *data, size_t size,
++ const void *data, u_int32_t size,
+ ip_set_ip_t *ip);
+
+ /*
+ * Size of the data structure passed by when
+ * adding/deletin/testing an entry.
+ */
-+ size_t reqsize;
++ u_int32_t reqsize;
+
+ /* Add IP into set (userspace: ipset -A set IP)
+ * Return -EEXIST if the address is already in the set,
@@ -385,7 +399,7 @@
+ * If the address was not already in the set, 0 is returned.
+ */
+ int (*addip) (struct ip_set *set,
-+ const void *data, size_t size,
++ const void *data, u_int32_t size,
+ ip_set_ip_t *ip);
+
+ /* Add IP into set (kernel: iptables ... -j SET set src|dst)
@@ -405,7 +419,7 @@
+ * If the address really was in the set, 0 is returned.
+ */
+ int (*delip) (struct ip_set *set,
-+ const void *data, size_t size,
++ const void *data, u_int32_t size,
+ ip_set_ip_t *ip);
+
+ /* remove IP from set (kernel: iptables ... -j SET --entry x)
@@ -422,7 +436,7 @@
+ /* new set creation - allocated type specific items
+ */
+ int (*create) (struct ip_set *set,
-+ const void *data, size_t size);
++ const void *data, u_int32_t size);
+
+ /* retry the operation after successfully tweaking the set
+ */
@@ -441,7 +455,7 @@
+
+ /* Listing: size needed for header
+ */
-+ size_t header_size;
++ u_int32_t header_size;
+
+ /* Listing: Get the header
+ *
@@ -499,33 +513,659 @@
+
+/* register and unregister set references */
+extern ip_set_id_t ip_set_get_byname(const char name[IP_SET_MAXNAMELEN]);
-+extern ip_set_id_t ip_set_get_byindex(ip_set_id_t id);
-+extern void ip_set_put(ip_set_id_t id);
++extern ip_set_id_t ip_set_get_byindex(ip_set_id_t index);
++extern void ip_set_put_byindex(ip_set_id_t index);
++extern ip_set_id_t ip_set_id(ip_set_id_t index);
++extern ip_set_id_t __ip_set_get_byname(const char name[IP_SET_MAXNAMELEN],
++ struct ip_set **set);
++extern void __ip_set_put_byindex(ip_set_id_t index);
+
+/* API for iptables set match, and SET target */
-+extern void ip_set_addip_kernel(ip_set_id_t id,
-+ const struct sk_buff *skb,
-+ const u_int32_t *flags);
-+extern void ip_set_delip_kernel(ip_set_id_t id,
-+ const struct sk_buff *skb,
-+ const u_int32_t *flags);
++extern int ip_set_addip_kernel(ip_set_id_t id,
++ const struct sk_buff *skb,
++ const u_int32_t *flags);
++extern int ip_set_delip_kernel(ip_set_id_t id,
++ const struct sk_buff *skb,
++ const u_int32_t *flags);
+extern int ip_set_testip_kernel(ip_set_id_t id,
+ const struct sk_buff *skb,
+ const u_int32_t *flags);
+
++/* Macros to generate functions */
++
++#define STRUCT(pre, type) CONCAT2(pre, type)
++#define CONCAT2(pre, type) struct pre##type
++
++#define FNAME(pre, mid, post) CONCAT3(pre, mid, post)
++#define CONCAT3(pre, mid, post) pre##mid##post
++
++#define UADT0(type, adt, args...) \
++static int \
++FNAME(type,_u,adt)(struct ip_set *set, const void *data, u_int32_t size,\
++ ip_set_ip_t *hash_ip) \
++{ \
++ const STRUCT(ip_set_req_,type) *req = data; \
++ \
++ return FNAME(type,_,adt)(set, hash_ip , ## args); \
++}
++
++#define UADT(type, adt, args...) \
++ UADT0(type, adt, req->ip , ## args)
++
++#define KADT(type, adt, getfn, args...) \
++static int \
++FNAME(type,_k,adt)(struct ip_set *set, \
++ const struct sk_buff *skb, \
++ ip_set_ip_t *hash_ip, \
++ const u_int32_t *flags, \
++ unsigned char index) \
++{ \
++ ip_set_ip_t ip = getfn(skb, flags[index]); \
++ \
++ KADT_CONDITION \
++ return FNAME(type,_,adt)(set, hash_ip, ip , ##args); \
++}
++
++#define REGISTER_MODULE(type) \
++static int __init ip_set_##type##_init(void) \
++{ \
++ init_max_page_size(); \
++ return ip_set_register_set_type(&ip_set_##type); \
++} \
++ \
++static void __exit ip_set_##type##_fini(void) \
++{ \
++ /* FIXME: possible race with ip_set_create() */ \
++ ip_set_unregister_set_type(&ip_set_##type); \
++} \
++ \
++module_init(ip_set_##type##_init); \
++module_exit(ip_set_##type##_fini);
++
++/* Common functions */
++
++static inline ip_set_ip_t
++ipaddr(const struct sk_buff *skb, u_int32_t flag)
++{
++ return ntohl(flag & IPSET_SRC ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr);
++}
++
++#define jhash_ip(map, i, ip) jhash_1word(ip, *(map->initval + i))
++
++#define pack_ip_port(map, ip, port) \
++ (port + ((ip - ((map)->first_ip)) << 16))
++
+#endif /* __KERNEL__ */
+
+#endif /*_IP_SET_H*/
--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_bitmaps.h
+@@ -0,0 +1,121 @@
++#ifndef __IP_SET_BITMAPS_H
++#define __IP_SET_BITMAPS_H
++
++/* Macros to generate functions */
++
++#ifdef __KERNEL__
++#define BITMAP_CREATE(type) \
++static int \
++type##_create(struct ip_set *set, const void *data, u_int32_t size) \
++{ \
++ int newbytes; \
++ const struct ip_set_req_##type##_create *req = data; \
++ struct ip_set_##type *map; \
++ \
++ if (req->from > req->to) { \
++ DP("bad range"); \
++ return -ENOEXEC; \
++ } \
++ \
++ map = kmalloc(sizeof(struct ip_set_##type), GFP_KERNEL); \
++ if (!map) { \
++ DP("out of memory for %zu bytes", \
++ sizeof(struct ip_set_##type)); \
++ return -ENOMEM; \
++ } \
++ map->first_ip = req->from; \
++ map->last_ip = req->to; \
++ \
++ newbytes = __##type##_create(req, map); \
++ if (newbytes < 0) { \
++ kfree(map); \
++ return newbytes; \
++ } \
++ \
++ map->size = newbytes; \
++ map->members = ip_set_malloc(newbytes); \
++ if (!map->members) { \
++ DP("out of memory for %i bytes", newbytes); \
++ kfree(map); \
++ return -ENOMEM; \
++ } \
++ memset(map->members, 0, newbytes); \
++ \
++ set->data = map; \
++ return 0; \
++}
++
++#define BITMAP_DESTROY(type) \
++static void \
++type##_destroy(struct ip_set *set) \
++{ \
++ struct ip_set_##type *map = set->data; \
++ \
++ ip_set_free(map->members, map->size); \
++ kfree(map); \
++ \
++ set->data = NULL; \
++}
++
++#define BITMAP_FLUSH(type) \
++static void \
++type##_flush(struct ip_set *set) \
++{ \
++ struct ip_set_##type *map = set->data; \
++ memset(map->members, 0, map->size); \
++}
++
++#define BITMAP_LIST_HEADER(type) \
++static void \
++type##_list_header(const struct ip_set *set, void *data) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ struct ip_set_req_##type##_create *header = data; \
++ \
++ header->from = map->first_ip; \
++ header->to = map->last_ip; \
++ __##type##_list_header(map, header); \
++}
++
++#define BITMAP_LIST_MEMBERS_SIZE(type) \
++static int \
++type##_list_members_size(const struct ip_set *set) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ \
++ return map->size; \
++}
++
++#define BITMAP_LIST_MEMBERS(type) \
++static void \
++type##_list_members(const struct ip_set *set, void *data) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ \
++ memcpy(data, map->members, map->size); \
++}
++
++#define IP_SET_TYPE(type, __features) \
++struct ip_set_type ip_set_##type = { \
++ .typename = #type, \
++ .features = __features, \
++ .protocol_version = IP_SET_PROTOCOL_VERSION, \
++ .create = &type##_create, \
++ .destroy = &type##_destroy, \
++ .flush = &type##_flush, \
++ .reqsize = sizeof(struct ip_set_req_##type), \
++ .addip = &type##_uadd, \
++ .addip_kernel = &type##_kadd, \
++ .delip = &type##_udel, \
++ .delip_kernel = &type##_kdel, \
++ .testip = &type##_utest, \
++ .testip_kernel = &type##_ktest, \
++ .header_size = sizeof(struct ip_set_req_##type##_create),\
++ .list_header = &type##_list_header, \
++ .list_members_size = &type##_list_members_size, \
++ .list_members = &type##_list_members, \
++ .me = THIS_MODULE, \
++};
++#endif /* __KERNEL */
++
++#endif /* __IP_SET_BITMAPS_H */
+--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_compat.h
+@@ -0,0 +1,71 @@
++#ifndef _IP_SET_COMPAT_H
++#define _IP_SET_COMPAT_H
++
++#ifdef __KERNEL__
++#include <linux/version.h>
++
++/* Arrgh */
++#ifdef MODULE
++#define __MOD_INC(foo) __MOD_INC_USE_COUNT(foo)
++#define __MOD_DEC(foo) __MOD_DEC_USE_COUNT(foo)
++#else
++#define __MOD_INC(foo) 1
++#define __MOD_DEC(foo)
++#endif
++
++/* Backward compatibility */
++#ifndef __nocast
++#define __nocast
++#endif
++#ifndef __bitwise__
++#define __bitwise__
++#endif
++
++/* Compatibility glue code */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++#include <linux/interrupt.h>
++#define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED
++#define try_module_get(x) __MOD_INC(x)
++#define module_put(x) __MOD_DEC(x)
++#define __clear_bit(nr, addr) clear_bit(nr, addr)
++#define __set_bit(nr, addr) set_bit(nr, addr)
++#define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)
++#define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)
++
++typedef unsigned __bitwise__ gfp_t;
++
++static inline void *kzalloc(size_t size, gfp_t flags)
++{
++ void *data = kmalloc(size, flags);
++
++ if (data)
++ memset(data, 0, size);
++
++ return data;
++}
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++#define __KMEM_CACHE_T__ kmem_cache_t
++#else
++#define __KMEM_CACHE_T__ struct kmem_cache
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
++#define ip_hdr(skb) ((skb)->nh.iph)
++#define skb_mac_header(skb) ((skb)->mac.raw)
++#define eth_hdr(skb) ((struct ethhdr *)skb_mac_header(skb))
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
++#include <linux/netfilter.h>
++#define KMEM_CACHE_CREATE(name, size) \
++ kmem_cache_create(name, size, 0, 0, NULL, NULL)
++#else
++#define KMEM_CACHE_CREATE(name, size) \
++ kmem_cache_create(name, size, 0, 0, NULL)
++#endif
++
++
++#endif /* __KERNEL__ */
++#endif /* _IP_SET_COMPAT_H */
+--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_getport.h
+@@ -0,0 +1,48 @@
++#ifndef _IP_SET_GETPORT_H
++#define _IP_SET_GETPORT_H
++
++#ifdef __KERNEL__
++
++#define INVALID_PORT (MAX_RANGE + 1)
++
++/* We must handle non-linear skbs */
++static inline ip_set_ip_t
++get_port(const struct sk_buff *skb, u_int32_t flags)
++{
++ struct iphdr *iph = ip_hdr(skb);
++ u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
++ switch (iph->protocol) {
++ case IPPROTO_TCP: {
++ struct tcphdr tcph;
++
++ /* See comments at tcp_match in ip_tables.c */
++ if (offset)
++ return INVALID_PORT;
++
++ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
++ /* No choice either */
++ return INVALID_PORT;
++
++ return ntohs(flags & IPSET_SRC ?
++ tcph.source : tcph.dest);
++ }
++ case IPPROTO_UDP: {
++ struct udphdr udph;
++
++ if (offset)
++ return INVALID_PORT;
++
++ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
++ /* No choice either */
++ return INVALID_PORT;
++
++ return ntohs(flags & IPSET_SRC ?
++ udph.source : udph.dest);
++ }
++ default:
++ return INVALID_PORT;
++ }
++}
++#endif /* __KERNEL__ */
++
++#endif /*_IP_SET_GETPORT_H*/
+--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_hashes.h
+@@ -0,0 +1,306 @@
++#ifndef __IP_SET_HASHES_H
++#define __IP_SET_HASHES_H
++
++#define initval_t uint32_t
++
++/* Macros to generate functions */
++
++#ifdef __KERNEL__
++#define HASH_RETRY0(type, dtype, cond) \
++static int \
++type##_retry(struct ip_set *set) \
++{ \
++ struct ip_set_##type *map = set->data, *tmp; \
++ dtype *elem; \
++ void *members; \
++ u_int32_t i, hashsize = map->hashsize; \
++ int res; \
++ \
++ if (map->resize == 0) \
++ return -ERANGE; \
++ \
++ again: \
++ res = 0; \
++ \
++ /* Calculate new hash size */ \
++ hashsize += (hashsize * map->resize)/100; \
++ if (hashsize == map->hashsize) \
++ hashsize++; \
++ \
++ ip_set_printk("rehashing of set %s triggered: " \
++ "hashsize grows from %lu to %lu", \
++ set->name, \
++ (long unsigned)map->hashsize, \
++ (long unsigned)hashsize); \
++ \
++ tmp = kmalloc(sizeof(struct ip_set_##type) \
++ + map->probes * sizeof(initval_t), GFP_ATOMIC); \
++ if (!tmp) { \
++ DP("out of memory for %zu bytes", \
++ sizeof(struct ip_set_##type) \
++ + map->probes * sizeof(initval_t)); \
++ return -ENOMEM; \
++ } \
++ tmp->members = harray_malloc(hashsize, sizeof(dtype), GFP_ATOMIC);\
++ if (!tmp->members) { \
++ DP("out of memory for %zu bytes", hashsize * sizeof(dtype));\
++ kfree(tmp); \
++ return -ENOMEM; \
++ } \
++ tmp->hashsize = hashsize; \
++ tmp->elements = 0; \
++ tmp->probes = map->probes; \
++ tmp->resize = map->resize; \
++ memcpy(tmp->initval, map->initval, map->probes * sizeof(initval_t));\
++ __##type##_retry(tmp, map); \
++ \
++ write_lock_bh(&set->lock); \
++ map = set->data; /* Play safe */ \
++ for (i = 0; i < map->hashsize && res == 0; i++) { \
++ elem = HARRAY_ELEM(map->members, dtype *, i); \
++ if (cond) \
++ res = __##type##_add(tmp, elem); \
++ } \
++ if (res) { \
++ /* Failure, try again */ \
++ write_unlock_bh(&set->lock); \
++ harray_free(tmp->members); \
++ kfree(tmp); \
++ goto again; \
++ } \
++ \
++ /* Success at resizing! */ \
++ members = map->members; \
++ \
++ map->hashsize = tmp->hashsize; \
++ map->members = tmp->members; \
++ write_unlock_bh(&set->lock); \
++ \
++ harray_free(members); \
++ kfree(tmp); \
++ \
++ return 0; \
++}
++
++#define HASH_RETRY(type, dtype) \
++ HASH_RETRY0(type, dtype, *elem)
++
++#define HASH_RETRY2(type, dtype) \
++ HASH_RETRY0(type, dtype, elem->ip || elem->ip1)
++
++#define HASH_CREATE(type, dtype) \
++static int \
++type##_create(struct ip_set *set, const void *data, u_int32_t size) \
++{ \
++ const struct ip_set_req_##type##_create *req = data; \
++ struct ip_set_##type *map; \
++ uint16_t i; \
++ \
++ if (req->hashsize < 1) { \
++ ip_set_printk("hashsize too small"); \
++ return -ENOEXEC; \
++ } \
++ \
++ if (req->probes < 1) { \
++ ip_set_printk("probes too small"); \
++ return -ENOEXEC; \
++ } \
++ \
++ map = kmalloc(sizeof(struct ip_set_##type) \
++ + req->probes * sizeof(initval_t), GFP_KERNEL); \
++ if (!map) { \
++ DP("out of memory for %zu bytes", \
++ sizeof(struct ip_set_##type) \
++ + req->probes * sizeof(initval_t)); \
++ return -ENOMEM; \
++ } \
++ for (i = 0; i < req->probes; i++) \
++ get_random_bytes(((initval_t *) map->initval)+i, 4); \
++ map->elements = 0; \
++ map->hashsize = req->hashsize; \
++ map->probes = req->probes; \
++ map->resize = req->resize; \
++ if (__##type##_create(req, map)) { \
++ kfree(map); \
++ return -ENOEXEC; \
++ } \
++ map->members = harray_malloc(map->hashsize, sizeof(dtype), GFP_KERNEL);\
++ if (!map->members) { \
++ DP("out of memory for %zu bytes", map->hashsize * sizeof(dtype));\
++ kfree(map); \
++ return -ENOMEM; \
++ } \
++ \
++ set->data = map; \
++ return 0; \
++}
++
++#define HASH_DESTROY(type) \
++static void \
++type##_destroy(struct ip_set *set) \
++{ \
++ struct ip_set_##type *map = set->data; \
++ \
++ harray_free(map->members); \
++ kfree(map); \
++ \
++ set->data = NULL; \
++}
++
++#define HASH_FLUSH(type, dtype) \
++static void \
++type##_flush(struct ip_set *set) \
++{ \
++ struct ip_set_##type *map = set->data; \
++ harray_flush(map->members, map->hashsize, sizeof(dtype)); \
++ map->elements = 0; \
++}
++
++#define HASH_FLUSH_CIDR(type, dtype) \
++static void \
++type##_flush(struct ip_set *set) \
++{ \
++ struct ip_set_##type *map = set->data; \
++ harray_flush(map->members, map->hashsize, sizeof(dtype)); \
++ memset(map->cidr, 0, sizeof(map->cidr)); \
++ memset(map->nets, 0, sizeof(map->nets)); \
++ map->elements = 0; \
++}
++
++#define HASH_LIST_HEADER(type) \
++static void \
++type##_list_header(const struct ip_set *set, void *data) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ struct ip_set_req_##type##_create *header = data; \
++ \
++ header->hashsize = map->hashsize; \
++ header->probes = map->probes; \
++ header->resize = map->resize; \
++ __##type##_list_header(map, header); \
++}
++
++#define HASH_LIST_MEMBERS_SIZE(type, dtype) \
++static int \
++type##_list_members_size(const struct ip_set *set) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ \
++ return (map->hashsize * sizeof(dtype)); \
++}
++
++#define HASH_LIST_MEMBERS(type, dtype) \
++static void \
++type##_list_members(const struct ip_set *set, void *data) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ dtype *elem; \
++ uint32_t i; \
++ \
++ for (i = 0; i < map->hashsize; i++) { \
++ elem = HARRAY_ELEM(map->members, dtype *, i); \
++ ((dtype *)data)[i] = *elem; \
++ } \
++}
++
++#define HASH_LIST_MEMBERS_MEMCPY(type, dtype) \
++static void \
++type##_list_members(const struct ip_set *set, void *data) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ dtype *elem; \
++ uint32_t i; \
++ \
++ for (i = 0; i < map->hashsize; i++) { \
++ elem = HARRAY_ELEM(map->members, dtype *, i); \
++ memcpy((((dtype *)data)+i), elem, sizeof(dtype)); \
++ } \
++}
++
++#define IP_SET_RTYPE(type, __features) \
++struct ip_set_type ip_set_##type = { \
++ .typename = #type, \
++ .features = __features, \
++ .protocol_version = IP_SET_PROTOCOL_VERSION, \
++ .create = &type##_create, \
++ .retry = &type##_retry, \
++ .destroy = &type##_destroy, \
++ .flush = &type##_flush, \
++ .reqsize = sizeof(struct ip_set_req_##type), \
++ .addip = &type##_uadd, \
++ .addip_kernel = &type##_kadd, \
++ .delip = &type##_udel, \
++ .delip_kernel = &type##_kdel, \
++ .testip = &type##_utest, \
++ .testip_kernel = &type##_ktest, \
++ .header_size = sizeof(struct ip_set_req_##type##_create),\
++ .list_header = &type##_list_header, \
++ .list_members_size = &type##_list_members_size, \
++ .list_members = &type##_list_members, \
++ .me = THIS_MODULE, \
++};
++
++/* Helper functions */
++static inline void
++add_cidr_size(uint8_t *cidr, uint8_t size)
++{
++ uint8_t next;
++ int i;
++
++ for (i = 0; i < 30 && cidr[i]; i++) {
++ if (cidr[i] < size) {
++ next = cidr[i];
++ cidr[i] = size;
++ size = next;
++ }
++ }
++ if (i < 30)
++ cidr[i] = size;
++}
++
++static inline void
++del_cidr_size(uint8_t *cidr, uint8_t size)
++{
++ int i;
++
++ for (i = 0; i < 29 && cidr[i]; i++) {
++ if (cidr[i] == size)
++ cidr[i] = size = cidr[i+1];
++ }
++ cidr[29] = 0;
++}
++#else
++#include <arpa/inet.h>
++#endif /* __KERNEL */
++
++#ifndef UINT16_MAX
++#define UINT16_MAX 65535
++#endif
++
++static unsigned char shifts[] = {255, 253, 249, 241, 225, 193, 129, 1};
++
++static inline ip_set_ip_t
++pack_ip_cidr(ip_set_ip_t ip, unsigned char cidr)
++{
++ ip_set_ip_t addr, *paddr = &addr;
++ unsigned char n, t, *a;
++
++ addr = htonl(ip & (0xFFFFFFFF << (32 - (cidr))));
++#ifdef __KERNEL__
++ DP("ip:%u.%u.%u.%u/%u", NIPQUAD(addr), cidr);
++#endif
++ n = cidr / 8;
++ t = cidr % 8;
++ a = &((unsigned char *)paddr)[n];
++ *a = *a /(1 << (8 - t)) + shifts[t];
++#ifdef __KERNEL__
++ DP("n: %u, t: %u, a: %u", n, t, *a);
++ DP("ip:%u.%u.%u.%u/%u, %u.%u.%u.%u",
++ HIPQUAD(ip), cidr, NIPQUAD(addr));
++#endif
++
++ return ntohl(addr);
++}
++
++
++#endif /* __IP_SET_HASHES_H */
+--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_iphash.h
@@ -0,0 +1,30 @@
+#ifndef __IP_SET_IPHASH_H
+#define __IP_SET_IPHASH_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_hashes.h>
+
+#define SETTYPE_NAME "iphash"
-+#define MAX_RANGE 0x0000FFFF
+
+struct ip_set_iphash {
+ ip_set_ip_t *members; /* the iphash proper */
@@ -534,7 +1174,7 @@
+ uint16_t probes; /* max number of probes */
+ uint16_t resize; /* resize factor in percent */
+ ip_set_ip_t netmask; /* netmask */
-+ void *initval[0]; /* initvals for jhash_1word */
++ initval_t initval[0]; /* initvals for jhash_1word */
+};
+
+struct ip_set_req_iphash_create {
@@ -551,14 +1191,14 @@
+#endif /* __IP_SET_IPHASH_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_ipmap.h
-@@ -0,0 +1,56 @@
+@@ -0,0 +1,57 @@
+#ifndef __IP_SET_IPMAP_H
+#define __IP_SET_IPMAP_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
+
+#define SETTYPE_NAME "ipmap"
-+#define MAX_RANGE 0x0000FFFF
+
+struct ip_set_ipmap {
+ void *members; /* the ipmap proper */
@@ -567,6 +1207,7 @@
+ ip_set_ip_t netmask; /* subnet netmask */
+ ip_set_ip_t sizeid; /* size of set in IPs */
+ ip_set_ip_t hosts; /* number of hosts in a subnet */
++ u_int32_t size; /* size of the ipmap proper */
+};
+
+struct ip_set_req_ipmap_create {
@@ -579,7 +1220,7 @@
+ ip_set_ip_t ip;
+};
+
-+unsigned int
++static inline unsigned int
+mask_to_bits(ip_set_ip_t mask)
+{
+ unsigned int bits = 32;
@@ -589,19 +1230,19 @@
+ return bits;
+
+ maskaddr = 0xFFFFFFFE;
-+ while (--bits >= 0 && maskaddr != mask)
++ while (--bits > 0 && maskaddr != mask)
+ maskaddr <<= 1;
+
+ return bits;
+}
+
-+ip_set_ip_t
++static inline ip_set_ip_t
+range_to_mask(ip_set_ip_t from, ip_set_ip_t to, unsigned int *bits)
+{
+ ip_set_ip_t mask = 0xFFFFFFFE;
+
+ *bits = 32;
-+ while (--(*bits) >= 0 && mask && (to & mask) != from)
++ while (--(*bits) > 0 && mask && (to & mask) != from)
+ mask <<= 1;
+
+ return mask;
@@ -610,15 +1251,14 @@
+#endif /* __IP_SET_IPMAP_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_ipporthash.h
-@@ -0,0 +1,34 @@
+@@ -0,0 +1,33 @@
+#ifndef __IP_SET_IPPORTHASH_H
+#define __IP_SET_IPPORTHASH_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_hashes.h>
+
+#define SETTYPE_NAME "ipporthash"
-+#define MAX_RANGE 0x0000FFFF
-+#define INVALID_PORT (MAX_RANGE + 1)
+
+struct ip_set_ipporthash {
+ ip_set_ip_t *members; /* the ipporthash proper */
@@ -628,7 +1268,7 @@
+ uint16_t resize; /* resize factor in percent */
+ ip_set_ip_t first_ip; /* host byte order, included in range */
+ ip_set_ip_t last_ip; /* host byte order, included in range */
-+ void *initval[0]; /* initvals for jhash_1word */
++ initval_t initval[0]; /* initvals for jhash_1word */
+};
+
+struct ip_set_req_ipporthash_create {
@@ -646,15 +1286,101 @@
+
+#endif /* __IP_SET_IPPORTHASH_H */
--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_ipportiphash.h
+@@ -0,0 +1,39 @@
++#ifndef __IP_SET_IPPORTIPHASH_H
++#define __IP_SET_IPPORTIPHASH_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_hashes.h>
++
++#define SETTYPE_NAME "ipportiphash"
++
++struct ipportip {
++ ip_set_ip_t ip;
++ ip_set_ip_t ip1;
++};
++
++struct ip_set_ipportiphash {
++ struct ipportip *members; /* the ipportip proper */
++ uint32_t elements; /* number of elements */
++ uint32_t hashsize; /* hash size */
++ uint16_t probes; /* max number of probes */
++ uint16_t resize; /* resize factor in percent */
++ ip_set_ip_t first_ip; /* host byte order, included in range */
++ ip_set_ip_t last_ip; /* host byte order, included in range */
++ initval_t initval[0]; /* initvals for jhash_1word */
++};
++
++struct ip_set_req_ipportiphash_create {
++ uint32_t hashsize;
++ uint16_t probes;
++ uint16_t resize;
++ ip_set_ip_t from;
++ ip_set_ip_t to;
++};
++
++struct ip_set_req_ipportiphash {
++ ip_set_ip_t ip;
++ ip_set_ip_t port;
++ ip_set_ip_t ip1;
++};
++
++#endif /* __IP_SET_IPPORTIPHASH_H */
+--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_ipportnethash.h
+@@ -0,0 +1,42 @@
++#ifndef __IP_SET_IPPORTNETHASH_H
++#define __IP_SET_IPPORTNETHASH_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_hashes.h>
++
++#define SETTYPE_NAME "ipportnethash"
++
++struct ipportip {
++ ip_set_ip_t ip;
++ ip_set_ip_t ip1;
++};
++
++struct ip_set_ipportnethash {
++ struct ipportip *members; /* the ipportip proper */
++ uint32_t elements; /* number of elements */
++ uint32_t hashsize; /* hash size */
++ uint16_t probes; /* max number of probes */
++ uint16_t resize; /* resize factor in percent */
++ ip_set_ip_t first_ip; /* host byte order, included in range */
++ ip_set_ip_t last_ip; /* host byte order, included in range */
++ uint8_t cidr[30]; /* CIDR sizes */
++ uint16_t nets[30]; /* nr of nets by CIDR sizes */
++ initval_t initval[0]; /* initvals for jhash_1word */
++};
++
++struct ip_set_req_ipportnethash_create {
++ uint32_t hashsize;
++ uint16_t probes;
++ uint16_t resize;
++ ip_set_ip_t from;
++ ip_set_ip_t to;
++};
++
++struct ip_set_req_ipportnethash {
++ ip_set_ip_t ip;
++ ip_set_ip_t port;
++ ip_set_ip_t ip1;
++ uint8_t cidr;
++};
++
++#endif /* __IP_SET_IPPORTNETHASH_H */
+--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_iptree.h
-@@ -0,0 +1,40 @@
+@@ -0,0 +1,39 @@
+#ifndef __IP_SET_IPTREE_H
+#define __IP_SET_IPTREE_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
+
+#define SETTYPE_NAME "iptree"
-+#define MAX_RANGE 0x0000FFFF
+
+struct ip_set_iptreed {
+ unsigned long expires[256]; /* x.x.x.ADDR */
@@ -726,172 +1452,181 @@
+};
+
+struct ip_set_req_iptreemap {
-+ ip_set_ip_t start;
++ ip_set_ip_t ip;
+ ip_set_ip_t end;
+};
+
+#endif /* __IP_SET_IPTREEMAP_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_jhash.h
-@@ -0,0 +1,148 @@
-+#ifndef _LINUX_IPSET_JHASH_H
-+#define _LINUX_IPSET_JHASH_H
-+
-+/* This is a copy of linux/jhash.h but the types u32/u8 are changed
-+ * to __u32/__u8 so that the header file can be included into
-+ * userspace code as well. Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
-+ */
+@@ -0,0 +1,157 @@
++#ifndef _LINUX_JHASH_H
++#define _LINUX_JHASH_H
+
+/* jhash.h: Jenkins hash support.
+ *
-+ * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)
++ * Copyright (C) 2006. Bob Jenkins (bob_jenkins@burtleburtle.net)
+ *
+ * http://burtleburtle.net/bob/hash/
+ *
+ * These are the credits from Bob's sources:
+ *
-+ * lookup2.c, by Bob Jenkins, December 1996, Public Domain.
-+ * hash(), hash2(), hash3, and mix() are externally useful functions.
-+ * Routines to test the hash are included if SELF_TEST is defined.
-+ * You can use this free for any purpose. It has no warranty.
++ * lookup3.c, by Bob Jenkins, May 2006, Public Domain.
++ *
++ * These are functions for producing 32-bit hashes for hash table lookup.
++ * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
++ * are externally useful functions. Routines to test the hash are included
++ * if SELF_TEST is defined. You can use this free for any purpose. It's in
++ * the public domain. It has no warranty.
+ *
-+ * Copyright (C) 2003 David S. Miller (davem@redhat.com)
++ * Copyright (C) 2009 Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
+ *
+ * I've modified Bob's hash to be useful in the Linux kernel, and
-+ * any bugs present are surely my fault. -DaveM
++ * any bugs present are my fault. Jozsef
+ */
+
-+/* NOTE: Arguments are modified. */
-+#define __jhash_mix(a, b, c) \
++#define __rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))
++
++/* __jhash_mix - mix 3 32-bit values reversibly. */
++#define __jhash_mix(a,b,c) \
++{ \
++ a -= c; a ^= __rot(c, 4); c += b; \
++ b -= a; b ^= __rot(a, 6); a += c; \
++ c -= b; c ^= __rot(b, 8); b += a; \
++ a -= c; a ^= __rot(c,16); c += b; \
++ b -= a; b ^= __rot(a,19); a += c; \
++ c -= b; c ^= __rot(b, 4); b += a; \
++}
++
++/* __jhash_final - final mixing of 3 32-bit values (a,b,c) into c */
++#define __jhash_final(a,b,c) \
+{ \
-+ a -= b; a -= c; a ^= (c>>13); \
-+ b -= c; b -= a; b ^= (a<<8); \
-+ c -= a; c -= b; c ^= (b>>13); \
-+ a -= b; a -= c; a ^= (c>>12); \
-+ b -= c; b -= a; b ^= (a<<16); \
-+ c -= a; c -= b; c ^= (b>>5); \
-+ a -= b; a -= c; a ^= (c>>3); \
-+ b -= c; b -= a; b ^= (a<<10); \
-+ c -= a; c -= b; c ^= (b>>15); \
++ c ^= b; c -= __rot(b,14); \
++ a ^= c; a -= __rot(c,11); \
++ b ^= a; b -= __rot(a,25); \
++ c ^= b; c -= __rot(b,16); \
++ a ^= c; a -= __rot(c,4); \
++ b ^= a; b -= __rot(a,14); \
++ c ^= b; c -= __rot(b,24); \
+}
+
+/* The golden ration: an arbitrary value */
-+#define JHASH_GOLDEN_RATIO 0x9e3779b9
++#define JHASH_GOLDEN_RATIO 0xdeadbeef
+
+/* The most generic version, hashes an arbitrary sequence
+ * of bytes. No alignment or length assumptions are made about
-+ * the input key.
++ * the input key. The result depends on endianness.
+ */
-+static inline __u32 jhash(void *key, __u32 length, __u32 initval)
++static inline u32 jhash(const void *key, u32 length, u32 initval)
+{
-+ __u32 a, b, c, len;
-+ __u8 *k = key;
++ u32 a,b,c;
++ const u8 *k = key;
+
-+ len = length;
-+ a = b = JHASH_GOLDEN_RATIO;
-+ c = initval;
-+
-+ while (len >= 12) {
-+ a += (k[0] +((__u32)k[1]<<8) +((__u32)k[2]<<16) +((__u32)k[3]<<24));
-+ b += (k[4] +((__u32)k[5]<<8) +((__u32)k[6]<<16) +((__u32)k[7]<<24));
-+ c += (k[8] +((__u32)k[9]<<8) +((__u32)k[10]<<16)+((__u32)k[11]<<24));
-+
-+ __jhash_mix(a,b,c);
++ /* Set up the internal state */
++ a = b = c = JHASH_GOLDEN_RATIO + length + initval;
+
++ /* all but the last block: affect some 32 bits of (a,b,c) */
++ while (length > 12) {
++ a += (k[0] + ((u32)k[1]<<8) + ((u32)k[2]<<16) + ((u32)k[3]<<24));
++ b += (k[4] + ((u32)k[5]<<8) + ((u32)k[6]<<16) + ((u32)k[7]<<24));
++ c += (k[8] + ((u32)k[9]<<8) + ((u32)k[10]<<16) + ((u32)k[11]<<24));
++ __jhash_mix(a, b, c);
++ length -= 12;
+ k += 12;
-+ len -= 12;
+ }
+
-+ c += length;
-+ switch (len) {
-+ case 11: c += ((__u32)k[10]<<24);
-+ case 10: c += ((__u32)k[9]<<16);
-+ case 9 : c += ((__u32)k[8]<<8);
-+ case 8 : b += ((__u32)k[7]<<24);
-+ case 7 : b += ((__u32)k[6]<<16);
-+ case 6 : b += ((__u32)k[5]<<8);
++ /* last block: affect all 32 bits of (c) */
++ /* all the case statements fall through */
++ switch (length) {
++ case 12: c += (u32)k[11]<<24;
++ case 11: c += (u32)k[10]<<16;
++ case 10: c += (u32)k[9]<<8;
++ case 9 : c += k[8];
++ case 8 : b += (u32)k[7]<<24;
++ case 7 : b += (u32)k[6]<<16;
++ case 6 : b += (u32)k[5]<<8;
+ case 5 : b += k[4];
-+ case 4 : a += ((__u32)k[3]<<24);
-+ case 3 : a += ((__u32)k[2]<<16);
-+ case 2 : a += ((__u32)k[1]<<8);
++ case 4 : a += (u32)k[3]<<24;
++ case 3 : a += (u32)k[2]<<16;
++ case 2 : a += (u32)k[1]<<8;
+ case 1 : a += k[0];
-+ };
-+
-+ __jhash_mix(a,b,c);
++ __jhash_final(a, b, c);
++ case 0 :
++ break;
++ }
+
+ return c;
+}
+
-+/* A special optimized version that handles 1 or more of __u32s.
-+ * The length parameter here is the number of __u32s in the key.
++/* A special optimized version that handles 1 or more of u32s.
++ * The length parameter here is the number of u32s in the key.
+ */
-+static inline __u32 jhash2(__u32 *k, __u32 length, __u32 initval)
++static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
+{
-+ __u32 a, b, c, len;
++ u32 a, b, c;
+
-+ a = b = JHASH_GOLDEN_RATIO;
-+ c = initval;
-+ len = length;
++ /* Set up the internal state */
++ a = b = c = JHASH_GOLDEN_RATIO + (length<<2) + initval;
+
-+ while (len >= 3) {
++ /* handle most of the key */
++ while (length > 3) {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ __jhash_mix(a, b, c);
-+ k += 3; len -= 3;
++ length -= 3;
++ k += 3;
+ }
+
-+ c += length * 4;
-+
-+ switch (len) {
-+ case 2 : b += k[1];
-+ case 1 : a += k[0];
-+ };
-+
-+ __jhash_mix(a,b,c);
++ /* handle the last 3 u32's */
++ /* all the case statements fall through */
++ switch (length) {
++ case 3: c += k[2];
++ case 2: b += k[1];
++ case 1: a += k[0];
++ __jhash_final(a, b, c);
++ case 0: /* case 0: nothing left to add */
++ break;
++ }
+
+ return c;
+}
+
-+
+/* A special ultra-optimized versions that knows they are hashing exactly
+ * 3, 2 or 1 word(s).
-+ *
-+ * NOTE: In partilar the "c += length; __jhash_mix(a,b,c);" normally
-+ * done at the end is not done here.
+ */
-+static inline __u32 jhash_3words(__u32 a, __u32 b, __u32 c, __u32 initval)
++static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
+{
-+ a += JHASH_GOLDEN_RATIO;
-+ b += JHASH_GOLDEN_RATIO;
-+ c += initval;
++ a += JHASH_GOLDEN_RATIO + initval;
++ b += JHASH_GOLDEN_RATIO + initval;
++ c += JHASH_GOLDEN_RATIO + initval;
+
-+ __jhash_mix(a, b, c);
++ __jhash_final(a, b, c);
+
+ return c;
+}
+
-+static inline __u32 jhash_2words(__u32 a, __u32 b, __u32 initval)
++static inline u32 jhash_2words(u32 a, u32 b, u32 initval)
+{
-+ return jhash_3words(a, b, 0, initval);
++ return jhash_3words(0, a, b, initval);
+}
+
-+static inline __u32 jhash_1word(__u32 a, __u32 initval)
++static inline u32 jhash_1word(u32 a, u32 initval)
+{
-+ return jhash_3words(a, 0, 0, initval);
++ return jhash_3words(0, 0, a, initval);
+}
+
-+#endif /* _LINUX_IPSET_JHASH_H */
++#endif /* _LINUX_JHASH_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_macipmap.h
-@@ -0,0 +1,38 @@
+@@ -0,0 +1,39 @@
+#ifndef __IP_SET_MACIPMAP_H
+#define __IP_SET_MACIPMAP_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
+
+#define SETTYPE_NAME "macipmap"
-+#define MAX_RANGE 0x0000FFFF
+
+/* general flags */
+#define IPSET_MACIP_MATCHUNSET 1
@@ -904,6 +1639,7 @@
+ ip_set_ip_t first_ip; /* host byte order, included in range */
+ ip_set_ip_t last_ip; /* host byte order, included in range */
+ u_int32_t flags;
++ u_int32_t size; /* size of the ipmap proper */
+};
+
+struct ip_set_req_macipmap_create {
@@ -918,43 +1654,48 @@
+};
+
+struct ip_set_macip {
-+ unsigned short flags;
++ unsigned short match;
+ unsigned char ethernet[ETH_ALEN];
+};
+
+#endif /* __IP_SET_MACIPMAP_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_malloc.h
-@@ -0,0 +1,116 @@
+@@ -0,0 +1,153 @@
+#ifndef _IP_SET_MALLOC_H
+#define _IP_SET_MALLOC_H
+
+#ifdef __KERNEL__
++#include <linux/vmalloc.h>
+
-+/* Memory allocation and deallocation */
-+static size_t max_malloc_size = 0;
++static size_t max_malloc_size = 0, max_page_size = 0;
++static size_t default_max_malloc_size = 131072; /* Guaranteed: slab.c */
+
-+static inline void init_max_malloc_size(void)
++static inline int init_max_page_size(void)
+{
-+#define CACHE(x) max_malloc_size = x;
++/* Compatibility glues to support 2.4.36 */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++#define __GFP_NOWARN 0
++
++ /* Guaranteed: slab.c */
++ max_malloc_size = max_page_size = default_max_malloc_size;
++#else
++ size_t page_size = 0;
++
++#define CACHE(x) if (max_page_size == 0 || x < max_page_size) \
++ page_size = x;
+#include <linux/kmalloc_sizes.h>
+#undef CACHE
-+}
++ if (page_size) {
++ if (max_malloc_size == 0)
++ max_malloc_size = page_size;
+
-+static inline void * ip_set_malloc(size_t bytes)
-+{
-+ if (bytes > max_malloc_size)
-+ return vmalloc(bytes);
-+ else
-+ return kmalloc(bytes, GFP_KERNEL);
-+}
++ max_page_size = page_size;
+
-+static inline void ip_set_free(void * data, size_t bytes)
-+{
-+ if (bytes > max_malloc_size)
-+ vfree(data);
-+ else
-+ kfree(data);
++ return 1;
++ }
++#endif
++ return 0;
+}
+
+struct harray {
@@ -963,18 +1704,17 @@
+};
+
+static inline void *
-+harray_malloc(size_t hashsize, size_t typesize, int flags)
++__harray_malloc(size_t hashsize, size_t typesize, gfp_t flags)
+{
+ struct harray *harray;
+ size_t max_elements, size, i, j;
+
-+ if (!max_malloc_size)
-+ init_max_malloc_size();
++ BUG_ON(max_page_size == 0);
+
-+ if (typesize > max_malloc_size)
++ if (typesize > max_page_size)
+ return NULL;
+
-+ max_elements = max_malloc_size/typesize;
++ max_elements = max_page_size/typesize;
+ size = hashsize/max_elements;
+ if (hashsize % max_elements)
+ size++;
@@ -1011,6 +1751,18 @@
+ return NULL;
+}
+
++static inline void *
++harray_malloc(size_t hashsize, size_t typesize, gfp_t flags)
++{
++ void *harray;
++
++ do {
++ harray = __harray_malloc(hashsize, typesize, flags|__GFP_NOWARN);
++ } while (harray == NULL && init_max_page_size());
++
++ return harray;
++}
++
+static inline void harray_free(void *h)
+{
+ struct harray *harray = (struct harray *) h;
@@ -1039,19 +1791,40 @@
+ + (which)%(__h)->max_elements); \
+})
+
++/* General memory allocation and deallocation */
++static inline void * ip_set_malloc(size_t bytes)
++{
++ BUG_ON(max_malloc_size == 0);
++
++ if (bytes > default_max_malloc_size)
++ return vmalloc(bytes);
++ else
++ return kmalloc(bytes, GFP_KERNEL | __GFP_NOWARN);
++}
++
++static inline void ip_set_free(void * data, size_t bytes)
++{
++ BUG_ON(max_malloc_size == 0);
++
++ if (bytes > default_max_malloc_size)
++ vfree(data);
++ else
++ kfree(data);
++}
++
+#endif /* __KERNEL__ */
+
+#endif /*_IP_SET_MALLOC_H*/
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_nethash.h
-@@ -0,0 +1,55 @@
+@@ -0,0 +1,31 @@
+#ifndef __IP_SET_NETHASH_H
+#define __IP_SET_NETHASH_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_hashes.h>
+
+#define SETTYPE_NAME "nethash"
-+#define MAX_RANGE 0x0000FFFF
+
+struct ip_set_nethash {
+ ip_set_ip_t *members; /* the nethash proper */
@@ -1059,8 +1832,9 @@
+ uint32_t hashsize; /* hash size */
+ uint16_t probes; /* max number of probes */
+ uint16_t resize; /* resize factor in percent */
-+ unsigned char cidr[30]; /* CIDR sizes */
-+ void *initval[0]; /* initvals for jhash_1word */
++ uint8_t cidr[30]; /* CIDR sizes */
++ uint16_t nets[30]; /* nr of nets by CIDR sizes */
++ initval_t initval[0]; /* initvals for jhash_1word */
+};
+
+struct ip_set_req_nethash_create {
@@ -1071,34 +1845,9 @@
+
+struct ip_set_req_nethash {
+ ip_set_ip_t ip;
-+ unsigned char cidr;
++ uint8_t cidr;
+};
+
-+static unsigned char shifts[] = {255, 253, 249, 241, 225, 193, 129, 1};
-+
-+static inline ip_set_ip_t
-+pack(ip_set_ip_t ip, unsigned char cidr)
-+{
-+ ip_set_ip_t addr, *paddr = &addr;
-+ unsigned char n, t, *a;
-+
-+ addr = htonl(ip & (0xFFFFFFFF << (32 - (cidr))));
-+#ifdef __KERNEL__
-+ DP("ip:%u.%u.%u.%u/%u", NIPQUAD(addr), cidr);
-+#endif
-+ n = cidr / 8;
-+ t = cidr % 8;
-+ a = &((unsigned char *)paddr)[n];
-+ *a = *a /(1 << (8 - t)) + shifts[t];
-+#ifdef __KERNEL__
-+ DP("n: %u, t: %u, a: %u", n, t, *a);
-+ DP("ip:%u.%u.%u.%u/%u, %u.%u.%u.%u",
-+ HIPQUAD(ip), cidr, NIPQUAD(addr));
-+#endif
-+
-+ return ntohl(addr);
-+}
-+
+#endif /* __IP_SET_NETHASH_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_portmap.h
@@ -1107,15 +1856,15 @@
+#define __IP_SET_PORTMAP_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
+
+#define SETTYPE_NAME "portmap"
-+#define MAX_RANGE 0x0000FFFF
-+#define INVALID_PORT (MAX_RANGE + 1)
+
+struct ip_set_portmap {
+ void *members; /* the portmap proper */
-+ ip_set_ip_t first_port; /* host byte order, included in range */
-+ ip_set_ip_t last_port; /* host byte order, included in range */
++ ip_set_ip_t first_ip; /* host byte order, included in range */
++ ip_set_ip_t last_ip; /* host byte order, included in range */
++ u_int32_t size; /* size of the ipmap proper */
+};
+
+struct ip_set_req_portmap_create {
@@ -1124,11 +1873,40 @@
+};
+
+struct ip_set_req_portmap {
-+ ip_set_ip_t port;
++ ip_set_ip_t ip;
+};
+
+#endif /* __IP_SET_PORTMAP_H */
--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_setlist.h
+@@ -0,0 +1,26 @@
++#ifndef __IP_SET_SETLIST_H
++#define __IP_SET_SETLIST_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "setlist"
++
++#define IP_SET_SETLIST_ADD_AFTER 0
++#define IP_SET_SETLIST_ADD_BEFORE 1
++
++struct ip_set_setlist {
++ uint8_t size;
++ ip_set_id_t index[0];
++};
++
++struct ip_set_req_setlist_create {
++ uint8_t size;
++};
++
++struct ip_set_req_setlist {
++ char name[IP_SET_MAXNAMELEN];
++ char ref[IP_SET_MAXNAMELEN];
++ uint8_t before;
++};
++
++#endif /* __IP_SET_SETLIST_H */
+--- /dev/null
+++ b/include/linux/netfilter_ipv4/ipt_set.h
@@ -0,0 +1,21 @@
+#ifndef _IPT_SET_H
@@ -1154,14 +1932,14 @@
+#endif /*_IPT_SET_H*/
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set.c
-@@ -0,0 +1,2003 @@
+@@ -0,0 +1,2076 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
++ * published by the Free Software Foundation.
+ */
+
+/* Kernel module for IP set management */
@@ -1176,17 +1954,21 @@
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/random.h>
-+#include <linux/jhash.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
+#include <linux/errno.h>
++#include <linux/capability.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
+#include <asm/semaphore.h>
++#else
++#include <linux/semaphore.h>
++#endif
+#include <linux/spinlock.h>
-+#include <linux/vmalloc.h>
+
+#define ASSERT_READ_LOCK(x)
+#define ASSERT_WRITE_LOCK(x)
++#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4/ip_set.h>
+
+static struct list_head set_type_list; /* all registered sets */
@@ -1198,14 +1980,16 @@
+static struct list_head *ip_set_hash; /* hash of bindings */
+static unsigned int ip_set_hash_random; /* random seed */
+
++#define SETNAME_EQ(a,b) (strncmp(a,b,IP_SET_MAXNAMELEN) == 0)
++
+/*
+ * Sets are identified either by the index in ip_set_list or by id.
-+ * The id never changes and is used to find a key in the hash.
-+ * The index may change by swapping and used at all other places
++ * The id never changes and is used to find a key in the hash.
++ * The index may change by swapping and used at all other places
+ * (set/SET netfilter modules, binding value, etc.)
+ *
+ * Userspace requests are serialized by ip_set_mutex and sets can
-+ * be deleted only from userspace. Therefore ip_set_list locking
++ * be deleted only from userspace. Therefore ip_set_list locking
+ * must obey the following rules:
+ *
+ * - kernel requests: read and write locking mandatory
@@ -1243,7 +2027,7 @@
+static ip_set_id_t
+ip_set_find_in_hash(ip_set_id_t id, ip_set_ip_t ip)
+{
-+ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
++ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
+ % ip_set_bindings_hash_size;
+ struct ip_set_hash *set_hash;
+
@@ -1253,14 +2037,14 @@
+
+ set_hash = __ip_set_find(key, id, ip);
+
-+ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
++ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
+ HIPQUAD(ip),
+ set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
+
+ return (set_hash != NULL ? set_hash->binding : IP_SET_INVALID_ID);
+}
+
-+static inline void
++static inline void
+__set_hash_del(struct ip_set_hash *set_hash)
+{
+ ASSERT_WRITE_LOCK(&ip_set_lock);
@@ -1288,11 +2072,11 @@
+
+ if (set_hash != NULL)
+ __set_hash_del(set_hash);
-+ write_unlock_bh(&ip_set_lock);
++ write_unlock_bh(&ip_set_lock);
+ return 0;
+}
+
-+static int
++static int
+ip_set_hash_add(ip_set_id_t id, ip_set_ip_t ip, ip_set_id_t binding)
+{
+ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
@@ -1302,7 +2086,7 @@
+
+ IP_SET_ASSERT(ip_set_list[id]);
+ IP_SET_ASSERT(ip_set_list[binding]);
-+ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
++ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
+ HIPQUAD(ip), ip_set_list[binding]->name);
+ write_lock_bh(&ip_set_lock);
+ set_hash = __ip_set_find(key, id, ip);
@@ -1381,15 +2165,15 @@
+ res = set->type->testip_kernel(set, skb, &ip, flags, i++);
+ read_unlock_bh(&set->lock);
+ i += !!(set->type->features & IPSET_DATA_DOUBLE);
-+ } while (res > 0
-+ && flags[i]
++ } while (res > 0
++ && flags[i]
+ && follow_bindings(index, set, ip));
+ read_unlock_bh(&ip_set_lock);
+
-+ return res;
++ return (res < 0 ? 0 : res);
+}
+
-+void
++int
+ip_set_addip_kernel(ip_set_id_t index,
+ const struct sk_buff *skb,
+ const u_int32_t *flags)
@@ -1411,7 +2195,7 @@
+ write_unlock_bh(&set->lock);
+ i += !!(set->type->features & IPSET_DATA_DOUBLE);
+ } while ((res == 0 || res == -EEXIST)
-+ && flags[i]
++ && flags[i]
+ && follow_bindings(index, set, ip));
+ read_unlock_bh(&ip_set_lock);
+
@@ -1419,9 +2203,11 @@
+ && set->type->retry
+ && (res = set->type->retry(set)) == 0)
+ goto retry;
++
++ return res;
+}
+
-+void
++int
+ip_set_delip_kernel(ip_set_id_t index,
+ const struct sk_buff *skb,
+ const u_int32_t *flags)
@@ -1442,9 +2228,11 @@
+ write_unlock_bh(&set->lock);
+ i += !!(set->type->features & IPSET_DATA_DOUBLE);
+ } while ((res == 0 || res == -EEXIST)
-+ && flags[i]
++ && flags[i]
+ && follow_bindings(index, set, ip));
+ read_unlock_bh(&ip_set_lock);
++
++ return res;
+}
+
+/* Register and deregister settype */
@@ -1460,7 +2248,7 @@
+ return NULL;
+}
+
-+int
++int
+ip_set_register_set_type(struct ip_set_type *set_type)
+{
+ int ret = 0;
@@ -1476,7 +2264,7 @@
+ write_lock_bh(&ip_set_lock);
+ if (find_set_type(set_type->typename)) {
+ /* Duplicate! */
-+ ip_set_printk("'%s' already registered!",
++ ip_set_printk("'%s' already registered!",
+ set_type->typename);
+ ret = -EINVAL;
+ goto unlock;
@@ -1509,6 +2297,29 @@
+
+}
+
++ip_set_id_t
++__ip_set_get_byname(const char *name, struct ip_set **set)
++{
++ ip_set_id_t i, index = IP_SET_INVALID_ID;
++
++ for (i = 0; i < ip_set_max; i++) {
++ if (ip_set_list[i] != NULL
++ && SETNAME_EQ(ip_set_list[i]->name, name)) {
++ __ip_set_get(i);
++ index = i;
++ *set = ip_set_list[i];
++ break;
++ }
++ }
++ return index;
++}
++
++void __ip_set_put_byindex(ip_set_id_t index)
++{
++ if (ip_set_list[index])
++ __ip_set_put(index);
++}
++
+/*
+ * Userspace routines
+ */
@@ -1526,7 +2337,7 @@
+ down(&ip_set_app_mutex);
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL
-+ && strcmp(ip_set_list[i]->name, name) == 0) {
++ && SETNAME_EQ(ip_set_list[i]->name, name)) {
+ __ip_set_get(i);
+ index = i;
+ break;
@@ -1559,11 +2370,25 @@
+}
+
+/*
++ * Find the set id belonging to the index.
++ * We are protected by the mutex, so we do not need to use
++ * ip_set_lock. There is no need to reference the sets either.
++ */
++ip_set_id_t
++ip_set_id(ip_set_id_t index)
++{
++ if (index >= ip_set_max || !ip_set_list[index])
++ return IP_SET_INVALID_ID;
++
++ return ip_set_list[index]->id;
++}
++
++/*
+ * If the given set pointer points to a valid set, decrement
+ * reference count by 1. The caller shall not assume the index
+ * to be valid, after calling this function.
+ */
-+void ip_set_put(ip_set_id_t index)
++void ip_set_put_byindex(ip_set_id_t index)
+{
+ down(&ip_set_app_mutex);
+ if (ip_set_list[index])
@@ -1579,7 +2404,7 @@
+
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL
-+ && strcmp(ip_set_list[i]->name, name) == 0) {
++ && SETNAME_EQ(ip_set_list[i]->name, name)) {
+ index = i;
+ break;
+ }
@@ -1603,7 +2428,7 @@
+static inline int
+__ip_set_testip(struct ip_set *set,
+ const void *data,
-+ size_t size,
++ u_int32_t size,
+ ip_set_ip_t *ip)
+{
+ int res;
@@ -1618,7 +2443,7 @@
+static int
+__ip_set_addip(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set = ip_set_list[index];
+ ip_set_ip_t ip;
@@ -1639,9 +2464,18 @@
+static int
+ip_set_addip(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
++ struct ip_set *set = ip_set_list[index];
++
++ IP_SET_ASSERT(set);
+
++ if (size - sizeof(struct ip_set_req_adt) != set->type->reqsize) {
++ ip_set_printk("data length wrong (want %lu, have %zu)",
++ (long unsigned)set->type->reqsize,
++ size - sizeof(struct ip_set_req_adt));
++ return -EINVAL;
++ }
+ return __ip_set_addip(index,
+ data + sizeof(struct ip_set_req_adt),
+ size - sizeof(struct ip_set_req_adt));
@@ -1650,13 +2484,20 @@
+static int
+ip_set_delip(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set = ip_set_list[index];
+ ip_set_ip_t ip;
+ int res;
+
+ IP_SET_ASSERT(set);
++
++ if (size - sizeof(struct ip_set_req_adt) != set->type->reqsize) {
++ ip_set_printk("data length wrong (want %lu, have %zu)",
++ (long unsigned)set->type->reqsize,
++ size - sizeof(struct ip_set_req_adt));
++ return -EINVAL;
++ }
+ write_lock_bh(&set->lock);
+ res = set->type->delip(set,
+ data + sizeof(struct ip_set_req_adt),
@@ -1670,13 +2511,20 @@
+static int
+ip_set_testip(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set = ip_set_list[index];
+ ip_set_ip_t ip;
+ int res;
+
+ IP_SET_ASSERT(set);
++
++ if (size - sizeof(struct ip_set_req_adt) != set->type->reqsize) {
++ ip_set_printk("data length wrong (want %lu, have %zu)",
++ (long unsigned)set->type->reqsize,
++ size - sizeof(struct ip_set_req_adt));
++ return -EINVAL;
++ }
+ res = __ip_set_testip(set,
+ data + sizeof(struct ip_set_req_adt),
+ size - sizeof(struct ip_set_req_adt),
@@ -1688,10 +2536,10 @@
+static int
+ip_set_bindip(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set = ip_set_list[index];
-+ struct ip_set_req_bind *req_bind;
++ const struct ip_set_req_bind *req_bind;
+ ip_set_id_t binding;
+ ip_set_ip_t ip;
+ int res;
@@ -1700,18 +2548,16 @@
+ if (size < sizeof(struct ip_set_req_bind))
+ return -EINVAL;
+
-+ req_bind = (struct ip_set_req_bind *) data;
-+ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
++ req_bind = data;
+
-+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++ if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
+ /* Default binding of a set */
-+ char *binding_name;
++ const char *binding_name;
+
+ if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
+ return -EINVAL;
+
-+ binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
-+ binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
++ binding_name = data + sizeof(struct ip_set_req_bind);
+
+ binding = ip_set_find_byname(binding_name);
+ if (binding == IP_SET_INVALID_ID)
@@ -1776,10 +2622,10 @@
+static int
+ip_set_unbindip(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set;
-+ struct ip_set_req_bind *req_bind;
++ const struct ip_set_req_bind *req_bind;
+ ip_set_ip_t ip;
+ int res;
+
@@ -1787,19 +2633,18 @@
+ if (size < sizeof(struct ip_set_req_bind))
+ return -EINVAL;
+
-+ req_bind = (struct ip_set_req_bind *) data;
-+ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
++ req_bind = data;
+
+ DP("%u %s", index, req_bind->binding);
+ if (index == IP_SET_INVALID_ID) {
+ /* unbind :all: */
-+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++ if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
+ /* Default binding of sets */
+ write_lock_bh(&ip_set_lock);
+ FOREACH_SET_DO(__unbind_default);
+ write_unlock_bh(&ip_set_lock);
+ return 0;
-+ } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
++ } else if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_ALL)) {
+ /* Flush all bindings of all sets*/
+ write_lock_bh(&ip_set_lock);
+ FOREACH_HASH_RW_DO(__set_hash_del);
@@ -1812,7 +2657,7 @@
+
+ set = ip_set_list[index];
+ IP_SET_ASSERT(set);
-+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++ if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
+ /* Default binding of set */
+ ip_set_id_t binding = ip_set_find_byindex(set->binding);
+
@@ -1826,7 +2671,7 @@
+ write_unlock_bh(&ip_set_lock);
+
+ return 0;
-+ } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
++ } else if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_ALL)) {
+ /* Flush all bindings */
+
+ write_lock_bh(&ip_set_lock);
@@ -1850,10 +2695,10 @@
+static int
+ip_set_testbind(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set = ip_set_list[index];
-+ struct ip_set_req_bind *req_bind;
++ const struct ip_set_req_bind *req_bind;
+ ip_set_id_t binding;
+ ip_set_ip_t ip;
+ int res;
@@ -1862,18 +2707,16 @@
+ if (size < sizeof(struct ip_set_req_bind))
+ return -EINVAL;
+
-+ req_bind = (struct ip_set_req_bind *) data;
-+ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
++ req_bind = data;
+
-+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++ if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
+ /* Default binding of set */
-+ char *binding_name;
++ const char *binding_name;
+
+ if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
+ return -EINVAL;
+
-+ binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
-+ binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
++ binding_name = data + sizeof(struct ip_set_req_bind);
+
+ binding = ip_set_find_byname(binding_name);
+ if (binding == IP_SET_INVALID_ID)
@@ -1894,7 +2737,7 @@
+ &ip);
+ DP("set %s, ip: %u.%u.%u.%u, binding %s",
+ set->name, HIPQUAD(ip), ip_set_list[binding]->name);
-+
++
+ if (res >= 0)
+ res = (ip_set_find_in_hash(set->id, ip) == binding)
+ ? -EEXIST : 0;
@@ -1927,7 +2770,7 @@
+ if (ip_set_list[i] == NULL) {
+ if (*id == IP_SET_INVALID_ID)
+ *id = *index = i;
-+ } else if (strcmp(name, ip_set_list[i]->name) == 0)
++ } else if (SETNAME_EQ(name, ip_set_list[i]->name))
+ /* Name clash */
+ return -EEXIST;
+ }
@@ -1954,13 +2797,14 @@
+ const char *typename,
+ ip_set_id_t restore,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set;
+ ip_set_id_t index = 0, id;
+ int res = 0;
+
+ DP("setname: %s, typename: %s, id: %u", name, typename, restore);
++
+ /*
+ * First, and without any locks, allocate and initialize
+ * a normal base set structure.
@@ -1968,7 +2812,7 @@
+ set = kmalloc(sizeof(struct ip_set), GFP_KERNEL);
+ if (!set)
+ return -ENOMEM;
-+ set->lock = RW_LOCK_UNLOCKED;
++ rwlock_init(&set->lock);
+ strncpy(set->name, name, IP_SET_MAXNAMELEN);
+ set->binding = IP_SET_INVALID_ID;
+ atomic_set(&set->ref, 0);
@@ -2004,6 +2848,14 @@
+ }
+ read_unlock_bh(&ip_set_lock);
+
++ /* Check request size */
++ if (size != set->type->header_size) {
++ ip_set_printk("data length wrong (want %lu, have %lu)",
++ (long unsigned)set->type->header_size,
++ (long unsigned)size);
++ goto put_out;
++ }
++
+ /*
+ * Without holding any locks, create private part.
+ */
@@ -2015,7 +2867,7 @@
+
+ /*
+ * Here, we have a valid, constructed set. &ip_set_lock again,
-+ * find free id/index and check that it is not already in
++ * find free id/index and check that it is not already in
+ * ip_set_list.
+ */
+ write_lock_bh(&ip_set_lock);
@@ -2030,7 +2882,7 @@
+ res = -ERANGE;
+ goto cleanup;
+ }
-+
++
+ /*
+ * Finally! Add our shiny new set to the list, and be done.
+ */
@@ -2089,7 +2941,7 @@
+ ip_set_destroy_set(index);
+ } else {
+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] != NULL
++ if (ip_set_list[i] != NULL
+ && (atomic_read(&ip_set_list[i]->ref)))
+ return -EBUSY;
+ }
@@ -2112,7 +2964,7 @@
+ write_unlock_bh(&set->lock);
+}
+
-+/*
++/*
+ * Flush data in a set - or in all sets
+ */
+static int
@@ -2139,9 +2991,7 @@
+ write_lock_bh(&ip_set_lock);
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL
-+ && strncmp(ip_set_list[i]->name,
-+ name,
-+ IP_SET_MAXNAMELEN - 1) == 0) {
++ && SETNAME_EQ(ip_set_list[i]->name, name)) {
+ res = -EEXIST;
+ goto unlock;
+ }
@@ -2165,7 +3015,9 @@
+ u_int32_t from_ref;
+
+ DP("set: %s to %s", from->name, to->name);
-+ /* Features must not change. Artifical restriction. */
++ /* Features must not change.
++ * Not an artificial restriction anymore, as we must prevent
++ * possible loops created by swapping in setlist type of sets. */
+ if (from->type->features != to->type->features)
+ return -ENOEXEC;
+
@@ -2192,7 +3044,7 @@
+
+static inline void
+__set_hash_bindings_size_list(struct ip_set_hash *set_hash,
-+ ip_set_id_t id, size_t *size)
++ ip_set_id_t id, u_int32_t *size)
+{
+ if (set_hash->id == id)
+ *size += sizeof(struct ip_set_hash_list);
@@ -2200,7 +3052,7 @@
+
+static inline void
+__set_hash_bindings_size_save(struct ip_set_hash *set_hash,
-+ ip_set_id_t id, size_t *size)
++ ip_set_id_t id, u_int32_t *size)
+{
+ if (set_hash->id == id)
+ *size += sizeof(struct ip_set_hash_save);
@@ -2211,8 +3063,7 @@
+ ip_set_id_t id, void *data, int *used)
+{
+ if (set_hash->id == id) {
-+ struct ip_set_hash_list *hash_list =
-+ (struct ip_set_hash_list *)(data + *used);
++ struct ip_set_hash_list *hash_list = data + *used;
+
+ hash_list->ip = set_hash->ip;
+ hash_list->binding = set_hash->binding;
@@ -2229,7 +3080,7 @@
+ struct ip_set_list *set_list;
+
+ /* Pointer to our header */
-+ set_list = (struct ip_set_list *) (data + *used);
++ set_list = data + *used;
+
+ DP("set: %s, used: %d %p %p", set->name, *used, data, data + *used);
+
@@ -2296,7 +3147,7 @@
+ struct ip_set_save *set_save;
+
+ /* Pointer to our header */
-+ set_save = (struct ip_set_save *) (data + *used);
++ set_save = data + *used;
+
+ /* Get and ensure header size */
+ if (*used + sizeof(struct ip_set_save) > len)
@@ -2304,7 +3155,7 @@
+ *used += sizeof(struct ip_set_save);
+
+ set = ip_set_list[index];
-+ DP("set: %s, used: %u(%u) %p %p", set->name, *used, len,
++ DP("set: %s, used: %d(%d) %p %p", set->name, *used, len,
+ data, data + *used);
+
+ read_lock_bh(&set->lock);
@@ -2321,8 +3172,8 @@
+ set->type->list_header(set, data + *used);
+ *used += set_save->header_size;
+
-+ DP("set header filled: %s, used: %u(%u) %p %p", set->name, *used,
-+ set_save->header_size, data, data + *used);
++ DP("set header filled: %s, used: %d(%lu) %p %p", set->name, *used,
++ (unsigned long)set_save->header_size, data, data + *used);
+ /* Get and ensure set specific members size */
+ set_save->members_size = set->type->list_members_size(set);
+ if (*used + set_save->members_size > len)
@@ -2332,8 +3183,8 @@
+ set->type->list_members(set, data + *used);
+ *used += set_save->members_size;
+ read_unlock_bh(&set->lock);
-+ DP("set members filled: %s, used: %u(%u) %p %p", set->name, *used,
-+ set_save->members_size, data, data + *used);
++ DP("set members filled: %s, used: %d(%lu) %p %p", set->name, *used,
++ (unsigned long)set_save->members_size, data, data + *used);
+ return 0;
+
+ unlock_set:
@@ -2353,8 +3204,7 @@
+{
+ if (*res == 0
+ && (id == IP_SET_INVALID_ID || set_hash->id == id)) {
-+ struct ip_set_hash_save *hash_save =
-+ (struct ip_set_hash_save *)(data + *used);
++ struct ip_set_hash_save *hash_save = data + *used;
+ /* Ensure bindings size */
+ if (*used + sizeof(struct ip_set_hash_save) > len) {
+ *res = -ENOMEM;
@@ -2381,7 +3231,7 @@
+ return -ENOMEM;
+
+ /* Marker */
-+ set_save = (struct ip_set_save *) (data + *used);
++ set_save = data + *used;
+ set_save->index = IP_SET_INVALID_ID;
+ set_save->header_size = 0;
+ set_save->members_size = 0;
@@ -2414,16 +3264,16 @@
+ while (1) {
+ line++;
+
-+ DP("%u %u %u", used, sizeof(struct ip_set_restore), len);
++ DP("%d %zu %d", used, sizeof(struct ip_set_restore), len);
+ /* Get and ensure header size */
+ if (used + sizeof(struct ip_set_restore) > len)
+ return line;
-+ set_restore = (struct ip_set_restore *) (data + used);
++ set_restore = data + used;
+ used += sizeof(struct ip_set_restore);
+
+ /* Ensure data size */
-+ if (used
-+ + set_restore->header_size
++ if (used
++ + set_restore->header_size
+ + set_restore->members_size > len)
+ return line;
+
@@ -2452,22 +3302,23 @@
+ /* Try to restore members data */
+ set = ip_set_list[index];
+ members_size = 0;
-+ DP("members_size %u reqsize %u",
-+ set_restore->members_size, set->type->reqsize);
++ DP("members_size %lu reqsize %lu",
++ (unsigned long)set_restore->members_size,
++ (unsigned long)set->type->reqsize);
+ while (members_size + set->type->reqsize <=
+ set_restore->members_size) {
+ line++;
-+ DP("members: %u, line %u", members_size, line);
++ DP("members: %d, line %d", members_size, line);
+ res = __ip_set_addip(index,
+ data + used + members_size,
+ set->type->reqsize);
-+ if (!(res == 0 || res == -EEXIST))
++ if (!(res == 0 || res == -EEXIST))
+ return line;
+ members_size += set->type->reqsize;
+ }
+
-+ DP("members_size %u %u",
-+ set_restore->members_size, members_size);
++ DP("members_size %lu %d",
++ (unsigned long)set_restore->members_size, members_size);
+ if (members_size != set_restore->members_size)
+ return line++;
+ used += set_restore->members_size;
@@ -2482,7 +3333,7 @@
+ /* Get and ensure size */
+ if (used + sizeof(struct ip_set_hash_save) > len)
+ return line;
-+ hash_save = (struct ip_set_hash_save *) (data + used);
++ hash_save = data + used;
+ used += sizeof(struct ip_set_hash_save);
+
+ /* hash_save->id is used to store the index */
@@ -2498,7 +3349,7 @@
+ set = ip_set_list[hash_save->id];
+ /* Null valued IP means default binding */
+ if (hash_save->ip)
-+ res = ip_set_hash_add(set->id,
++ res = ip_set_hash_add(set->id,
+ hash_save->ip,
+ hash_save->binding);
+ else {
@@ -2527,10 +3378,10 @@
+ struct ip_set_req_adt *req_adt;
+ ip_set_id_t index = IP_SET_INVALID_ID;
+ int (*adtfn)(ip_set_id_t index,
-+ const void *data, size_t size);
++ const void *data, u_int32_t size);
+ struct fn_table {
+ int (*fn)(ip_set_id_t index,
-+ const void *data, size_t size);
++ const void *data, u_int32_t size);
+ } adtfn_table[] =
+ { { ip_set_addip }, { ip_set_delip }, { ip_set_testip},
+ { ip_set_bindip}, { ip_set_unbindip }, { ip_set_testbind },
@@ -2565,8 +3416,7 @@
+
+ if (*op < IP_SET_OP_VERSION) {
+ /* Check the version at the beginning of operations */
-+ struct ip_set_req_version *req_version =
-+ (struct ip_set_req_version *) data;
++ struct ip_set_req_version *req_version = data;
+ if (req_version->version != IP_SET_PROTOCOL_VERSION) {
+ res = -EPROTO;
+ goto done;
@@ -2575,8 +3425,7 @@
+
+ switch (*op) {
+ case IP_SET_OP_CREATE:{
-+ struct ip_set_req_create *req_create
-+ = (struct ip_set_req_create *) data;
++ struct ip_set_req_create *req_create = data;
+
+ if (len < sizeof(struct ip_set_req_create)) {
+ ip_set_printk("short CREATE data (want >=%zu, got %u)",
@@ -2594,8 +3443,7 @@
+ goto done;
+ }
+ case IP_SET_OP_DESTROY:{
-+ struct ip_set_req_std *req_destroy
-+ = (struct ip_set_req_std *) data;
++ struct ip_set_req_std *req_destroy = data;
+
+ if (len != sizeof(struct ip_set_req_std)) {
+ ip_set_printk("invalid DESTROY data (want %zu, got %u)",
@@ -2603,7 +3451,7 @@
+ res = -EINVAL;
+ goto done;
+ }
-+ if (strcmp(req_destroy->name, IPSET_TOKEN_ALL) == 0) {
++ if (SETNAME_EQ(req_destroy->name, IPSET_TOKEN_ALL)) {
+ /* Destroy all sets */
+ index = IP_SET_INVALID_ID;
+ } else {
@@ -2620,8 +3468,7 @@
+ goto done;
+ }
+ case IP_SET_OP_FLUSH:{
-+ struct ip_set_req_std *req_flush =
-+ (struct ip_set_req_std *) data;
++ struct ip_set_req_std *req_flush = data;
+
+ if (len != sizeof(struct ip_set_req_std)) {
+ ip_set_printk("invalid FLUSH data (want %zu, got %u)",
@@ -2629,7 +3476,7 @@
+ res = -EINVAL;
+ goto done;
+ }
-+ if (strcmp(req_flush->name, IPSET_TOKEN_ALL) == 0) {
++ if (SETNAME_EQ(req_flush->name, IPSET_TOKEN_ALL)) {
+ /* Flush all sets */
+ index = IP_SET_INVALID_ID;
+ } else {
@@ -2645,8 +3492,7 @@
+ goto done;
+ }
+ case IP_SET_OP_RENAME:{
-+ struct ip_set_req_create *req_rename
-+ = (struct ip_set_req_create *) data;
++ struct ip_set_req_create *req_rename = data;
+
+ if (len != sizeof(struct ip_set_req_create)) {
+ ip_set_printk("invalid RENAME data (want %zu, got %u)",
@@ -2667,8 +3513,7 @@
+ goto done;
+ }
+ case IP_SET_OP_SWAP:{
-+ struct ip_set_req_create *req_swap
-+ = (struct ip_set_req_create *) data;
++ struct ip_set_req_create *req_swap = data;
+ ip_set_id_t to_index;
+
+ if (len != sizeof(struct ip_set_req_create)) {
@@ -2694,7 +3539,7 @@
+ res = ip_set_swap(index, to_index);
+ goto done;
+ }
-+ default:
++ default:
+ break; /* Set identified by id */
+ }
+
@@ -2711,10 +3556,10 @@
+ res = -EINVAL;
+ goto done;
+ }
-+ req_adt = (struct ip_set_req_adt *) data;
++ req_adt = data;
+
+ /* -U :all: :all:|:default: uses IP_SET_INVALID_ID */
-+ if (!(*op == IP_SET_OP_UNBIND_SET
++ if (!(*op == IP_SET_OP_UNBIND_SET
+ && req_adt->index == IP_SET_INVALID_ID)) {
+ index = ip_set_find_byindex(req_adt->index);
+ if (index == IP_SET_INVALID_ID) {
@@ -2733,7 +3578,7 @@
+ return res;
+}
+
-+static int
++static int
+ip_set_sockfn_get(struct sock *sk, int optval, void *user, int *len)
+{
+ int res = 0;
@@ -2771,8 +3616,7 @@
+
+ if (*op < IP_SET_OP_VERSION) {
+ /* Check the version at the beginning of operations */
-+ struct ip_set_req_version *req_version =
-+ (struct ip_set_req_version *) data;
++ struct ip_set_req_version *req_version = data;
+ if (req_version->version != IP_SET_PROTOCOL_VERSION) {
+ res = -EPROTO;
+ goto done;
@@ -2781,8 +3625,7 @@
+
+ switch (*op) {
+ case IP_SET_OP_VERSION: {
-+ struct ip_set_req_version *req_version =
-+ (struct ip_set_req_version *) data;
++ struct ip_set_req_version *req_version = data;
+
+ if (*len != sizeof(struct ip_set_req_version)) {
+ ip_set_printk("invalid VERSION (want %zu, got %d)",
@@ -2798,8 +3641,7 @@
+ goto done;
+ }
+ case IP_SET_OP_GET_BYNAME: {
-+ struct ip_set_req_get_set *req_get
-+ = (struct ip_set_req_get_set *) data;
++ struct ip_set_req_get_set *req_get = data;
+
+ if (*len != sizeof(struct ip_set_req_get_set)) {
+ ip_set_printk("invalid GET_BYNAME (want %zu, got %d)",
@@ -2813,8 +3655,7 @@
+ goto copy;
+ }
+ case IP_SET_OP_GET_BYINDEX: {
-+ struct ip_set_req_get_set *req_get
-+ = (struct ip_set_req_get_set *) data;
++ struct ip_set_req_get_set *req_get = data;
+
+ if (*len != sizeof(struct ip_set_req_get_set)) {
+ ip_set_printk("invalid GET_BYINDEX (want %zu, got %d)",
@@ -2830,8 +3671,7 @@
+ goto copy;
+ }
+ case IP_SET_OP_ADT_GET: {
-+ struct ip_set_req_adt_get *req_get
-+ = (struct ip_set_req_adt_get *) data;
++ struct ip_set_req_adt_get *req_get = data;
+
+ if (*len != sizeof(struct ip_set_req_adt_get)) {
+ ip_set_printk("invalid ADT_GET (want %zu, got %d)",
@@ -2853,8 +3693,7 @@
+ goto copy;
+ }
+ case IP_SET_OP_MAX_SETS: {
-+ struct ip_set_req_max_sets *req_max_sets
-+ = (struct ip_set_req_max_sets *) data;
++ struct ip_set_req_max_sets *req_max_sets = data;
+ ip_set_id_t i;
+
+ if (*len != sizeof(struct ip_set_req_max_sets)) {
@@ -2864,11 +3703,11 @@
+ goto done;
+ }
+
-+ if (strcmp(req_max_sets->set.name, IPSET_TOKEN_ALL) == 0) {
++ if (SETNAME_EQ(req_max_sets->set.name, IPSET_TOKEN_ALL)) {
+ req_max_sets->set.index = IP_SET_INVALID_ID;
+ } else {
+ req_max_sets->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ req_max_sets->set.index =
++ req_max_sets->set.index =
+ ip_set_find_byname(req_max_sets->set.name);
+ if (req_max_sets->set.index == IP_SET_INVALID_ID) {
+ res = -ENOENT;
@@ -2883,10 +3722,9 @@
+ }
+ goto copy;
+ }
-+ case IP_SET_OP_LIST_SIZE:
++ case IP_SET_OP_LIST_SIZE:
+ case IP_SET_OP_SAVE_SIZE: {
-+ struct ip_set_req_setnames *req_setnames
-+ = (struct ip_set_req_setnames *) data;
++ struct ip_set_req_setnames *req_setnames = data;
+ struct ip_set_name_list *name_list;
+ struct ip_set *set;
+ ip_set_id_t i;
@@ -2904,8 +3742,7 @@
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] == NULL)
+ continue;
-+ name_list = (struct ip_set_name_list *)
-+ (data + used);
++ name_list = data + used;
+ used += sizeof(struct ip_set_name_list);
+ if (used > copylen) {
+ res = -EAGAIN;
@@ -2934,7 +3771,7 @@
+ + set->type->header_size
+ + set->type->list_members_size(set);
+ /* Sets are identified by id in the hash */
-+ FOREACH_HASH_DO(__set_hash_bindings_size_list,
++ FOREACH_HASH_DO(__set_hash_bindings_size_list,
+ set->id, &req_setnames->size);
+ break;
+ }
@@ -2957,8 +3794,7 @@
+ goto copy;
+ }
+ case IP_SET_OP_LIST: {
-+ struct ip_set_req_list *req_list
-+ = (struct ip_set_req_list *) data;
++ struct ip_set_req_list *req_list = data;
+ ip_set_id_t i;
+ int used;
+
@@ -2994,8 +3830,7 @@
+ goto copy;
+ }
+ case IP_SET_OP_SAVE: {
-+ struct ip_set_req_list *req_save
-+ = (struct ip_set_req_list *) data;
++ struct ip_set_req_list *req_save = data;
+ ip_set_id_t i;
+ int used;
+
@@ -3011,13 +3846,23 @@
+ res = -ENOENT;
+ goto done;
+ }
++
++#define SETLIST(set) (strcmp(set->type->typename, "setlist") == 0)
++
+ used = 0;
+ if (index == IP_SET_INVALID_ID) {
-+ /* Save all sets */
++ /* Save all sets: ugly setlist type dependency */
++ int setlist = 0;
++ setlists:
+ for (i = 0; i < ip_set_max && res == 0; i++) {
-+ if (ip_set_list[i] != NULL)
++ if (ip_set_list[i] != NULL
++ && !(setlist ^ SETLIST(ip_set_list[i])))
+ res = ip_set_save_set(i, data, &used, *len);
+ }
++ if (!setlist) {
++ setlist = 1;
++ goto setlists;
++ }
+ } else {
+ /* Save an individual set */
+ res = ip_set_save_set(index, data, &used, *len);
@@ -3034,20 +3879,19 @@
+ goto copy;
+ }
+ case IP_SET_OP_RESTORE: {
-+ struct ip_set_req_setnames *req_restore
-+ = (struct ip_set_req_setnames *) data;
++ struct ip_set_req_setnames *req_restore = data;
+ int line;
+
+ if (*len < sizeof(struct ip_set_req_setnames)
+ || *len != req_restore->size) {
-+ ip_set_printk("invalid RESTORE (want =%zu, got %d)",
-+ req_restore->size, *len);
++ ip_set_printk("invalid RESTORE (want =%lu, got %d)",
++ (long unsigned)req_restore->size, *len);
+ res = -EINVAL;
+ goto done;
+ }
+ line = ip_set_restore(data + sizeof(struct ip_set_req_setnames),
+ req_restore->size - sizeof(struct ip_set_req_setnames));
-+ DP("ip_set_restore: %u", line);
++ DP("ip_set_restore: %d", line);
+ if (line != 0) {
+ res = -EAGAIN;
+ req_restore->size = line;
@@ -3062,7 +3906,7 @@
+ } /* end of switch(op) */
+
+ copy:
-+ DP("set %s, copylen %u", index != IP_SET_INVALID_ID
++ DP("set %s, copylen %d", index != IP_SET_INVALID_ID
+ && ip_set_list[index]
+ ? ip_set_list[index]->name
+ : ":all:", copylen);
@@ -3085,12 +3929,15 @@
+ .get_optmin = SO_IP_SET,
+ .get_optmax = SO_IP_SET + 1,
+ .get = &ip_set_sockfn_get,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
++ .use = 0,
++#else
+ .owner = THIS_MODULE,
+#endif
+};
+
+static int max_sets, hash_size;
++
+module_param(max_sets, int, 0600);
+MODULE_PARM_DESC(max_sets, "maximal number of sets");
+module_param(hash_size, int, 0600);
@@ -3133,6 +3980,7 @@
+ vfree(ip_set_hash);
+ return res;
+ }
++
+ return 0;
+}
+
@@ -3150,7 +3998,10 @@
+
+EXPORT_SYMBOL(ip_set_get_byname);
+EXPORT_SYMBOL(ip_set_get_byindex);
-+EXPORT_SYMBOL(ip_set_put);
++EXPORT_SYMBOL(ip_set_put_byindex);
++EXPORT_SYMBOL(ip_set_id);
++EXPORT_SYMBOL(__ip_set_get_byname);
++EXPORT_SYMBOL(__ip_set_put_byindex);
+
+EXPORT_SYMBOL(ip_set_addip_kernel);
+EXPORT_SYMBOL(ip_set_delip_kernel);
@@ -3160,47 +4011,37 @@
+module_exit(ip_set_fini);
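The IP_SET_OP_SAVE branch above dumps sets in two passes: the `!(setlist ^ SETLIST(ip_set_list[i]))` filter skips setlist-type sets on the first pass and takes only them on the second, so a later restore sees every member set before any setlist that references it. A minimal user-space sketch of that ordering follows; the names set_t, is_setlist and save_one are illustrative and not taken from the patch.

#include <string.h>
#include <stddef.h>

typedef struct { const char *typename; } set_t;

static int is_setlist(const set_t *s)
{
	return strcmp(s->typename, "setlist") == 0;
}

static int save_all(set_t **sets, size_t max, int (*save_one)(const set_t *))
{
	int pass, res = 0;
	size_t i;

	/* pass 0: every ordinary set, pass 1: setlist sets only */
	for (pass = 0; pass < 2 && res == 0; pass++)
		for (i = 0; i < max && res == 0; i++)
			if (sets[i] && is_setlist(sets[i]) == pass)
				res = save_one(sets[i]);
	return res;
}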
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_iphash.c
-@@ -0,0 +1,429 @@
-+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+@@ -0,0 +1,166 @@
++/* Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
++ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an ip hash set */
+
+#include <linux/module.h>
++#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+#include <linux/jhash.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
-+#include <linux/vmalloc.h>
+#include <linux/random.h>
+
+#include <net/ip.h>
+
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
+#include <linux/netfilter_ipv4/ip_set_iphash.h>
+
+static int limit = MAX_RANGE;
+
+static inline __u32
-+jhash_ip(const struct ip_set_iphash *map, uint16_t i, ip_set_ip_t ip)
-+{
-+ return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
-+}
-+
-+static inline __u32
-+hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++iphash_id(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++ struct ip_set_iphash *map = set->data;
+ __u32 id;
+ u_int16_t i;
+ ip_set_ip_t *elem;
@@ -3215,198 +4056,81 @@
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
+ if (*elem == *hash_ip)
+ return id;
-+ /* No shortcut at testing - there can be deleted
-+ * entries. */
++ /* No shortcut - there can be deleted entries. */
+ }
+ return UINT_MAX;
+}
+
+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++iphash_test(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
++ return (ip && iphash_id(set, hash_ip, ip) != UINT_MAX);
+}
+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iphash *req =
-+ (struct ip_set_req_iphash *) data;
++#define KADT_CONDITION
+
-+ if (size != sizeof(struct ip_set_req_iphash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iphash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT(iphash, test)
++KADT(iphash, test, ipaddr)
+
+static inline int
-+__addip(struct ip_set_iphash *map, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++__iphash_add(struct ip_set_iphash *map, ip_set_ip_t *ip)
+{
+ __u32 probe;
+ u_int16_t i;
-+ ip_set_ip_t *elem;
-+
-+ if (!ip || map->elements >= limit)
-+ return -ERANGE;
-+
-+ *hash_ip = ip & map->netmask;
++ ip_set_ip_t *elem, *slot = NULL;
+
+ for (i = 0; i < map->probes; i++) {
-+ probe = jhash_ip(map, i, *hash_ip) % map->hashsize;
++ probe = jhash_ip(map, i, *ip) % map->hashsize;
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
-+ if (*elem == *hash_ip)
++ if (*elem == *ip)
+ return -EEXIST;
-+ if (!*elem) {
-+ *elem = *hash_ip;
-+ map->elements++;
-+ return 0;
-+ }
++ if (!(slot || *elem))
++ slot = elem;
++ /* There can be deleted entries, must check all slots */
++ }
++ if (slot) {
++ *slot = *ip;
++ map->elements++;
++ return 0;
+ }
+ /* Trigger rehashing */
+ return -EAGAIN;
+}
+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iphash *req =
-+ (struct ip_set_req_iphash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iphash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iphash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __addip((struct ip_set_iphash *) set->data, req->ip, hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __addip((struct ip_set_iphash *) set->data,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
-+
-+static int retry(struct ip_set *set)
++static inline int
++iphash_add(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ ip_set_ip_t hash_ip, *elem;
-+ void *members;
-+ u_int32_t i, hashsize = map->hashsize;
-+ int res;
-+ struct ip_set_iphash *tmp;
++ struct ip_set_iphash *map = set->data;
+
-+ if (map->resize == 0)
++ if (!ip || map->elements >= limit)
+ return -ERANGE;
+
-+ again:
-+ res = 0;
-+
-+ /* Calculate new hash size */
-+ hashsize += (hashsize * map->resize)/100;
-+ if (hashsize == map->hashsize)
-+ hashsize++;
-+
-+ ip_set_printk("rehashing of set %s triggered: "
-+ "hashsize grows from %u to %u",
-+ set->name, map->hashsize, hashsize);
-+
-+ tmp = kmalloc(sizeof(struct ip_set_iphash)
-+ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
-+ if (!tmp) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_iphash)
-+ + map->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
-+ if (!tmp->members) {
-+ DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
-+ kfree(tmp);
-+ return -ENOMEM;
-+ }
-+ tmp->hashsize = hashsize;
-+ tmp->elements = 0;
-+ tmp->probes = map->probes;
-+ tmp->resize = map->resize;
-+ tmp->netmask = map->netmask;
-+ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
-+
-+ write_lock_bh(&set->lock);
-+ map = (struct ip_set_iphash *) set->data; /* Play safe */
-+ for (i = 0; i < map->hashsize && res == 0; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ if (*elem)
-+ res = __addip(tmp, *elem, &hash_ip);
-+ }
-+ if (res) {
-+ /* Failure, try again */
-+ write_unlock_bh(&set->lock);
-+ harray_free(tmp->members);
-+ kfree(tmp);
-+ goto again;
-+ }
++ *hash_ip = ip & map->netmask;
+
-+ /* Success at resizing! */
-+ members = map->members;
-+
-+ map->hashsize = tmp->hashsize;
-+ map->members = tmp->members;
-+ write_unlock_bh(&set->lock);
++ return __iphash_add(map, hash_ip);
++}
+
-+ harray_free(members);
-+ kfree(tmp);
++UADT(iphash, add)
++KADT(iphash, add, ipaddr)
+
-+ return 0;
++static inline void
++__iphash_retry(struct ip_set_iphash *tmp, struct ip_set_iphash *map)
++{
++ tmp->netmask = map->netmask;
+}
+
++HASH_RETRY(iphash, ip_set_ip_t)
++
+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++iphash_del(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++ struct ip_set_iphash *map = set->data;
+ ip_set_ip_t id, *elem;
+
+ if (!ip)
+ return -ERANGE;
+
-+ id = hash_id(set, ip, hash_ip);
++ id = iphash_id(set, hash_ip, ip);
+ if (id == UINT_MAX)
+ return -EEXIST;
+
@@ -3417,159 +4141,35 @@
+ return 0;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iphash *req =
-+ (struct ip_set_req_iphash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iphash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iphash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT(iphash, del)
++KADT(iphash, del, ipaddr)
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static inline int
++__iphash_create(const struct ip_set_req_iphash_create *req,
++ struct ip_set_iphash *map)
+{
-+ struct ip_set_req_iphash_create *req =
-+ (struct ip_set_req_iphash_create *) data;
-+ struct ip_set_iphash *map;
-+ uint16_t i;
-+
-+ if (size != sizeof(struct ip_set_req_iphash_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iphash_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ if (req->hashsize < 1) {
-+ ip_set_printk("hashsize too small");
-+ return -ENOEXEC;
-+ }
-+
-+ if (req->probes < 1) {
-+ ip_set_printk("probes too small");
-+ return -ENOEXEC;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_iphash)
-+ + req->probes * sizeof(uint32_t), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_iphash)
-+ + req->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ for (i = 0; i < req->probes; i++)
-+ get_random_bytes(((uint32_t *) map->initval)+i, 4);
-+ map->elements = 0;
-+ map->hashsize = req->hashsize;
-+ map->probes = req->probes;
-+ map->resize = req->resize;
+ map->netmask = req->netmask;
-+ map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+
-+ set->data = map;
++
+ return 0;
+}
+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+
-+ harray_free(map->members);
-+ kfree(map);
-+
-+ set->data = NULL;
-+}
-+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
-+ map->elements = 0;
-+}
++HASH_CREATE(iphash, ip_set_ip_t)
++HASH_DESTROY(iphash)
+
-+static void list_header(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ struct ip_set_req_iphash_create *header =
-+ (struct ip_set_req_iphash_create *) data;
++HASH_FLUSH(iphash, ip_set_ip_t)
+
-+ header->hashsize = map->hashsize;
-+ header->probes = map->probes;
-+ header->resize = map->resize;
++static inline void
++__iphash_list_header(const struct ip_set_iphash *map,
++ struct ip_set_req_iphash_create *header)
++{
+ header->netmask = map->netmask;
+}
+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++HASH_LIST_HEADER(iphash)
++HASH_LIST_MEMBERS_SIZE(iphash, ip_set_ip_t)
++HASH_LIST_MEMBERS(iphash, ip_set_ip_t)
+
-+ return (map->hashsize * sizeof(ip_set_ip_t));
-+}
-+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ ip_set_ip_t i, *elem;
-+
-+ for (i = 0; i < map->hashsize; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ ((ip_set_ip_t *)data)[i] = *elem;
-+ }
-+}
-+
-+static struct ip_set_type ip_set_iphash = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_iphash),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .retry = &retry,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_iphash_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_RTYPE(iphash, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -3577,29 +4177,17 @@
+module_param(limit, int, 0600);
+MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
+
-+static int __init ip_set_iphash_init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_iphash);
-+}
-+
-+static void __exit ip_set_iphash_fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_iphash);
-+}
-+
-+module_init(ip_set_iphash_init);
-+module_exit(ip_set_iphash_fini);
++REGISTER_MODULE(iphash)
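The __iphash_add() routine above is a small open-addressing scheme: each element is tried at a fixed number of independently seeded hash positions, a duplicate at any of them rejects the add, the first empty position is remembered but probing continues (deletions can leave holes in front of an existing copy), and -EAGAIN asks the core to grow and rehash the table. A stand-alone sketch under those assumptions; probe_hash() and the fixed seeds merely stand in for jhash_1word() with random initvals.

#include <stdint.h>
#include <errno.h>

#define PROBES  4
#define ENTRIES 1024

static const uint32_t initval[PROBES] = {
	0x9e3779b9, 0x85ebca6b, 0xc2b2ae35, 0x27d4eb2f
};

/* stand-in for jhash_1word(ip, initval[i]) used by the module */
static uint32_t probe_hash(uint32_t ip, unsigned int i)
{
	uint32_t h = (ip ^ initval[i]) * 0x9e3779b1u;
	return h ^ (h >> 16);
}

static int hash_add(uint32_t *members, uint32_t ip)
{
	uint32_t *slot = NULL;
	unsigned int i;

	for (i = 0; i < PROBES; i++) {
		uint32_t *elem = &members[probe_hash(ip, i) % ENTRIES];

		if (*elem == ip)
			return -EEXIST;	/* already present */
		if (!slot && !*elem)
			slot = elem;	/* remember the first hole... */
		/* ...but keep probing: a duplicate may sit at a later
		 * position, left there before an earlier entry was deleted */
	}
	if (slot) {
		*slot = ip;
		return 0;
	}
	return -EAGAIN;			/* all probes occupied: rehash */
}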
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_ipmap.c
-@@ -0,0 +1,336 @@
+@@ -0,0 +1,142 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
-+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ * Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
++ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the single bitmap type */
@@ -3607,9 +4195,6 @@
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
@@ -3624,9 +4209,9 @@
+}
+
+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++ipmap_test(const struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++ const struct ip_set_ipmap *map = set->data;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
@@ -3637,46 +4222,15 @@
+ return !!test_bit(ip_to_id(map, *hash_ip), map->members);
+}
+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipmap *req =
-+ (struct ip_set_req_ipmap *) data;
++#define KADT_CONDITION
+
-+ if (size != sizeof(struct ip_set_req_ipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ int res = __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+ return (res < 0 ? 0 : res);
-+}
++UADT(ipmap, test)
++KADT(ipmap, test, ipaddr)
+
+static inline int
-+__addip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++ipmap_add(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++ struct ip_set_ipmap *map = set->data;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
@@ -3689,46 +4243,13 @@
+ return 0;
+}
+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipmap *req =
-+ (struct ip_set_req_ipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_ipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ DP("%u.%u.%u.%u", HIPQUAD(req->ip));
-+ return __addip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __addip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT(ipmap, add)
++KADT(ipmap, add, ipaddr)
+
-+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++static inline int
++ipmap_del(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++ struct ip_set_ipmap *map = set->data;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
@@ -3741,71 +4262,13 @@
+ return 0;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipmap *req =
-+ (struct ip_set_req_ipmap *) data;
++UADT(ipmap, del)
++KADT(ipmap, del, ipaddr)
+
-+ if (size != sizeof(struct ip_set_req_ipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
-+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static inline int
++__ipmap_create(const struct ip_set_req_ipmap_create *req,
++ struct ip_set_ipmap *map)
+{
-+ int newbytes;
-+ struct ip_set_req_ipmap_create *req =
-+ (struct ip_set_req_ipmap_create *) data;
-+ struct ip_set_ipmap *map;
-+
-+ if (size != sizeof(struct ip_set_req_ipmap_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipmap_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ DP("from %u.%u.%u.%u to %u.%u.%u.%u",
-+ HIPQUAD(req->from), HIPQUAD(req->to));
-+
-+ if (req->from > req->to) {
-+ DP("bad ip range");
-+ return -ENOEXEC;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_ipmap), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_ipmap));
-+ return -ENOMEM;
-+ }
-+ map->first_ip = req->from;
-+ map->last_ip = req->to;
+ map->netmask = req->netmask;
+
+ if (req->netmask == 0xFFFFFFFF) {
@@ -3814,12 +4277,12 @@
+ } else {
+ unsigned int mask_bits, netmask_bits;
+ ip_set_ip_t mask;
-+
++
+ map->first_ip &= map->netmask; /* Should we better bark? */
-+
++
+ mask = range_to_mask(map->first_ip, map->last_ip, &mask_bits);
+ netmask_bits = mask_to_bits(map->netmask);
-+
++
+ if ((!mask && (map->first_ip || map->last_ip != 0xFFFFFFFF))
+ || netmask_bits <= mask_bits)
+ return -ENOEXEC;
@@ -3830,213 +4293,83 @@
+ map->sizeid = 2 << (netmask_bits - mask_bits - 1);
+ }
+ if (map->sizeid > MAX_RANGE + 1) {
-+ ip_set_printk("range too big (max %d addresses)",
-+ MAX_RANGE+1);
-+ kfree(map);
++ ip_set_printk("range too big, %d elements (max %d)",
++ map->sizeid, MAX_RANGE+1);
+ return -ENOEXEC;
+ }
+ DP("hosts %u, sizeid %u", map->hosts, map->sizeid);
-+ newbytes = bitmap_bytes(0, map->sizeid - 1);
-+ map->members = kmalloc(newbytes, GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", newbytes);
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+ memset(map->members, 0, newbytes);
-+
-+ set->data = map;
-+ return 0;
-+}
-+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+
-+ kfree(map->members);
-+ kfree(map);
-+
-+ set->data = NULL;
++ return bitmap_bytes(0, map->sizeid - 1);
+}
+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+ memset(map->members, 0, bitmap_bytes(0, map->sizeid - 1));
-+}
++BITMAP_CREATE(ipmap)
++BITMAP_DESTROY(ipmap)
++BITMAP_FLUSH(ipmap)
+
-+static void list_header(const struct ip_set *set, void *data)
++static inline void
++__ipmap_list_header(const struct ip_set_ipmap *map,
++ struct ip_set_req_ipmap_create *header)
+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+ struct ip_set_req_ipmap_create *header =
-+ (struct ip_set_req_ipmap_create *) data;
-+
-+ header->from = map->first_ip;
-+ header->to = map->last_ip;
+ header->netmask = map->netmask;
+}
+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++BITMAP_LIST_HEADER(ipmap)
++BITMAP_LIST_MEMBERS_SIZE(ipmap)
++BITMAP_LIST_MEMBERS(ipmap)
+
-+ return bitmap_bytes(0, map->sizeid - 1);
-+}
-+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+ int bytes = bitmap_bytes(0, map->sizeid - 1);
-+
-+ memcpy(data, map->members, bytes);
-+}
-+
-+static struct ip_set_type ip_set_ipmap = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_ipmap),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_ipmap_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_TYPE(ipmap, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("ipmap type of IP sets");
+
-+static int __init ip_set_ipmap_init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_ipmap);
-+}
-+
-+static void __exit ip_set_ipmap_fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_ipmap);
-+}
-+
-+module_init(ip_set_ipmap_init);
-+module_exit(ip_set_ipmap_fini);
++REGISTER_MODULE(ipmap)
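ipmap above is a plain bitmap over a bounded address range: create() fixes first_ip/last_ip (optionally grouping addresses with a netmask into hosts-sized blocks), and add/del/test become single bit operations on the index returned by ip_to_id(). A rough user-space model of that lookup; the struct layout and the exact id formula are illustrative, see ip_to_id() in the module for the real computation.

#include <stdint.h>

struct ipmap {
	uint32_t first_ip, last_ip;	/* inclusive range */
	uint32_t hosts;			/* addresses represented per bit */
	unsigned char *bits;		/* bitmap_bytes(0, sizeid - 1) bytes */
};

/* the module first masks ip with map->netmask before indexing */
static uint32_t ip_to_id(const struct ipmap *m, uint32_t ip)
{
	return (ip - m->first_ip) / m->hosts;
}

static int ipmap_test(const struct ipmap *m, uint32_t ip)
{
	uint32_t id;

	if (ip < m->first_ip || ip > m->last_ip)
		return 0;		/* -ERANGE in the kernel code */
	id = ip_to_id(m, ip);
	return (m->bits[id / 8] >> (id % 8)) & 1;
}

static void ipmap_add(struct ipmap *m, uint32_t ip)
{
	uint32_t id;

	if (ip < m->first_ip || ip > m->last_ip)
		return;
	id = ip_to_id(m, ip);
	m->bits[id / 8] |= (unsigned char)(1u << (id % 8));
}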
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_ipporthash.c
-@@ -0,0 +1,581 @@
-+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+@@ -0,0 +1,203 @@
++/* Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
++ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an ip+port hash set */
+
+#include <linux/module.h>
++#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+#include <linux/jhash.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
-+#include <linux/vmalloc.h>
+#include <linux/random.h>
+
+#include <net/ip.h>
+
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
+#include <linux/netfilter_ipv4/ip_set_ipporthash.h>
++#include <linux/netfilter_ipv4/ip_set_getport.h>
+
+static int limit = MAX_RANGE;
+
-+/* We must handle non-linear skbs */
-+static inline ip_set_ip_t
-+get_port(const struct sk_buff *skb, u_int32_t flags)
-+{
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ struct iphdr *iph = ip_hdr(skb);
-+#else
-+ struct iphdr *iph = skb->nh.iph;
-+#endif
-+ u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
-+
-+ switch (iph->protocol) {
-+ case IPPROTO_TCP: {
-+ struct tcphdr tcph;
-+
-+ /* See comments at tcp_match in ip_tables.c */
-+ if (offset)
-+ return INVALID_PORT;
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
-+#else
-+ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &tcph, sizeof(tcph)) < 0)
-+#endif
-+ /* No choice either */
-+ return INVALID_PORT;
-+
-+ return ntohs(flags & IPSET_SRC ?
-+ tcph.source : tcph.dest);
-+ }
-+ case IPPROTO_UDP: {
-+ struct udphdr udph;
-+
-+ if (offset)
-+ return INVALID_PORT;
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
-+#else
-+ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &udph, sizeof(udph)) < 0)
-+#endif
-+ /* No choice either */
-+ return INVALID_PORT;
-+
-+ return ntohs(flags & IPSET_SRC ?
-+ udph.source : udph.dest);
-+ }
-+ default:
-+ return INVALID_PORT;
-+ }
-+}
-+
-+static inline __u32
-+jhash_ip(const struct ip_set_ipporthash *map, uint16_t i, ip_set_ip_t ip)
-+{
-+ return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
-+}
-+
-+#define HASH_IP(map, ip, port) (port + ((ip - ((map)->first_ip)) << 16))
-+
+static inline __u32
-+hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t *hash_ip)
++ipporthash_id(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port)
+{
-+ struct ip_set_ipporthash *map =
-+ (struct ip_set_ipporthash *) set->data;
++ struct ip_set_ipporthash *map = set->data;
+ __u32 id;
+ u_int16_t i;
+ ip_set_ip_t *elem;
+
-+ *hash_ip = HASH_IP(map, ip, port);
++ *hash_ip = pack_ip_port(map, ip, port);
++
+ DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
+ set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
++ if (!*hash_ip)
++ return UINT_MAX;
+
+ for (i = 0; i < map->probes; i++) {
+ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
@@ -4044,522 +4377,723 @@
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
+ if (*elem == *hash_ip)
+ return id;
-+ /* No shortcut at testing - there can be deleted
-+ * entries. */
++ /* No shortcut - there can be deleted entries. */
+ }
+ return UINT_MAX;
+}
+
+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t *hash_ip)
++ipporthash_test(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++ struct ip_set_ipporthash *map = set->data;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
+
-+ return (hash_id(set, ip, port, hash_ip) != UINT_MAX);
-+}
-+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipporthash *req =
-+ (struct ip_set_req_ipporthash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_ipporthash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipporthash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testip(set, req->ip, req->port, hash_ip);
++ return (ipporthash_id(set, hash_ip, ip, port) != UINT_MAX);
+}
+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ ip_set_ip_t port;
-+ int res;
-+
-+ if (flags[index+1] == 0)
++#define KADT_CONDITION \
++ ip_set_ip_t port; \
++ \
++ if (flags[index+1] == 0) \
++ return 0; \
++ \
++ port = get_port(skb, flags[index+1]); \
++ \
++ if (port == INVALID_PORT) \
+ return 0;
-+
-+ port = get_port(skb, flags[index+1]);
+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ NIPQUAD(ip_hdr(skb)->saddr),
-+ NIPQUAD(ip_hdr(skb)->daddr));
-+#else
-+ NIPQUAD(skb->nh.iph->saddr),
-+ NIPQUAD(skb->nh.iph->daddr));
-+#endif
-+ DP("flag %s port %u",
-+ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
-+ port);
-+ if (port == INVALID_PORT)
-+ return 0;
-+
-+ res = __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ port,
-+ hash_ip);
-+ return (res < 0 ? 0 : res);
-+
-+}
++UADT(ipporthash, test, req->port)
++KADT(ipporthash, test, ipaddr, port)
+
+static inline int
-+__add_haship(struct ip_set_ipporthash *map, ip_set_ip_t hash_ip)
++__ipporthash_add(struct ip_set_ipporthash *map, ip_set_ip_t *ip)
+{
+ __u32 probe;
+ u_int16_t i;
-+ ip_set_ip_t *elem;
++ ip_set_ip_t *elem, *slot = NULL;
+
+ for (i = 0; i < map->probes; i++) {
-+ probe = jhash_ip(map, i, hash_ip) % map->hashsize;
++ probe = jhash_ip(map, i, *ip) % map->hashsize;
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
-+ if (*elem == hash_ip)
++ if (*elem == *ip)
+ return -EEXIST;
-+ if (!*elem) {
-+ *elem = hash_ip;
-+ map->elements++;
-+ return 0;
-+ }
++ if (!(slot || *elem))
++ slot = elem;
++ /* There can be deleted entries, must check all slots */
++ }
++ if (slot) {
++ *slot = *ip;
++ map->elements++;
++ return 0;
+ }
+ /* Trigger rehashing */
+ return -EAGAIN;
+}
+
+static inline int
-+__addip(struct ip_set_ipporthash *map, ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t *hash_ip)
++ipporthash_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port)
+{
++ struct ip_set_ipporthash *map = set->data;
+ if (map->elements > limit)
+ return -ERANGE;
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
+
-+ *hash_ip = HASH_IP(map, ip, port);
++ *hash_ip = pack_ip_port(map, ip, port);
++
++ if (!*hash_ip)
++ return -ERANGE;
+
-+ return __add_haship(map, *hash_ip);
++ return __ipporthash_add(map, hash_ip);
+}
+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
++UADT(ipporthash, add, req->port)
++KADT(ipporthash, add, ipaddr, port)
++
++static inline void
++__ipporthash_retry(struct ip_set_ipporthash *tmp,
++ struct ip_set_ipporthash *map)
+{
-+ struct ip_set_req_ipporthash *req =
-+ (struct ip_set_req_ipporthash *) data;
++ tmp->first_ip = map->first_ip;
++ tmp->last_ip = map->last_ip;
++}
+
-+ if (size != sizeof(struct ip_set_req_ipporthash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipporthash),
-+ size);
-+ return -EINVAL;
++HASH_RETRY(ipporthash, ip_set_ip_t)
++
++static inline int
++ipporthash_del(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port)
++{
++ struct ip_set_ipporthash *map = set->data;
++ ip_set_ip_t id;
++ ip_set_ip_t *elem;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++
++ id = ipporthash_id(set, hash_ip, ip, port);
++
++ if (id == UINT_MAX)
++ return -EEXIST;
++
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
++ *elem = 0;
++ map->elements--;
++
++ return 0;
++}
++
++UADT(ipporthash, del, req->port)
++KADT(ipporthash, del, ipaddr, port)
++
++static inline int
++__ipporthash_create(const struct ip_set_req_ipporthash_create *req,
++ struct ip_set_ipporthash *map)
++{
++ if (req->to - req->from > MAX_RANGE) {
++ ip_set_printk("range too big, %d elements (max %d)",
++ req->to - req->from + 1, MAX_RANGE+1);
++ return -ENOEXEC;
+ }
-+ return __addip((struct ip_set_ipporthash *) set->data,
-+ req->ip, req->port, hash_ip);
++ map->first_ip = req->from;
++ map->last_ip = req->to;
++ return 0;
+}
+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
++HASH_CREATE(ipporthash, ip_set_ip_t)
++HASH_DESTROY(ipporthash)
++HASH_FLUSH(ipporthash, ip_set_ip_t)
++
++static inline void
++__ipporthash_list_header(const struct ip_set_ipporthash *map,
++ struct ip_set_req_ipporthash_create *header)
+{
-+ ip_set_ip_t port;
++ header->from = map->first_ip;
++ header->to = map->last_ip;
++}
+
-+ if (flags[index+1] == 0)
-+ return -EINVAL;
-+
-+ port = get_port(skb, flags[index+1]);
++HASH_LIST_HEADER(ipporthash)
++HASH_LIST_MEMBERS_SIZE(ipporthash, ip_set_ip_t)
++HASH_LIST_MEMBERS(ipporthash, ip_set_ip_t)
+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ NIPQUAD(ip_hdr(skb)->saddr),
-+ NIPQUAD(ip_hdr(skb)->daddr));
-+#else
-+ NIPQUAD(skb->nh.iph->saddr),
-+ NIPQUAD(skb->nh.iph->daddr));
-+#endif
-+ DP("flag %s port %u",
-+ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
-+ port);
-+ if (port == INVALID_PORT)
-+ return -EINVAL;
++IP_SET_RTYPE(ipporthash, IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_DATA_DOUBLE)
+
-+ return __addip((struct ip_set_ipporthash *) set->data,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ port,
-+ hash_ip);
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("ipporthash type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
++
++REGISTER_MODULE(ipporthash)
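ipporthash keys on a single 32-bit word: the address offset from first_ip in the upper 16 bits and the port in the lower 16, which is why the range check in __ipporthash_create() keeps the set within MAX_RANGE addresses and why a packed value of zero is treated as "no entry". A sketch of that packing, mirroring the old HASH_IP macro and the pack_ip_port() helper; the parameter layout here is an assumption for illustration.

#include <stdint.h>

/* returns 0 when the address falls outside the supported 64k-wide range;
 * 0 doubles as the "empty slot" value, exactly as in the hash above */
static uint32_t pack_ip_port(uint32_t first_ip, uint32_t last_ip,
			     uint32_t ip, uint16_t port)
{
	if (ip < first_ip || ip > last_ip || ip - first_ip > 0xFFFF)
		return 0;
	return ((ip - first_ip) << 16) | port;	/* offset:16 | port:16 */
}

The probe loop then jhashes this single word per probe slot, in the same way iphash handles a bare address.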
+--- /dev/null
++++ b/net/ipv4/netfilter/ip_set_ipportiphash.c
+@@ -0,0 +1,216 @@
++/* Copyright (C) 2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module implementing an ip+port+ip hash set */
++
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/ip.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++#include <linux/random.h>
++
++#include <net/ip.h>
++
++#include <linux/netfilter_ipv4/ip_set_ipportiphash.h>
++#include <linux/netfilter_ipv4/ip_set_getport.h>
++
++static int limit = MAX_RANGE;
++
++#define jhash_ip2(map, i, ipport, ip1) \
++ jhash_2words(ipport, ip1, *(map->initval + i))
++
++static inline __u32
++ipportiphash_id(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
++{
++ struct ip_set_ipportiphash *map = set->data;
++ __u32 id;
++ u_int16_t i;
++ struct ipportip *elem;
++
++ *hash_ip = pack_ip_port(map, ip, port);
++ DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
++ set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
++ if (!(*hash_ip || ip1))
++ return UINT_MAX;
++
++ for (i = 0; i < map->probes; i++) {
++ id = jhash_ip2(map, i, *hash_ip, ip1) % map->hashsize;
++ DP("hash key: %u", id);
++ elem = HARRAY_ELEM(map->members, struct ipportip *, id);
++ if (elem->ip == *hash_ip && elem->ip1 == ip1)
++ return id;
++ /* No shortcut - there can be deleted entries. */
++ }
++ return UINT_MAX;
+}
+
-+static int retry(struct ip_set *set)
++static inline int
++ipportiphash_test(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ ip_set_ip_t *elem;
-+ void *members;
-+ u_int32_t i, hashsize = map->hashsize;
-+ int res;
-+ struct ip_set_ipporthash *tmp;
++ struct ip_set_ipportiphash *map = set->data;
+
-+ if (map->resize == 0)
++ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
+
-+ again:
-+ res = 0;
-+
-+ /* Calculate new hash size */
-+ hashsize += (hashsize * map->resize)/100;
-+ if (hashsize == map->hashsize)
-+ hashsize++;
-+
-+ ip_set_printk("rehashing of set %s triggered: "
-+ "hashsize grows from %u to %u",
-+ set->name, map->hashsize, hashsize);
-+
-+ tmp = kmalloc(sizeof(struct ip_set_ipporthash)
-+ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
-+ if (!tmp) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_ipporthash)
-+ + map->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
-+ if (!tmp->members) {
-+ DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
-+ kfree(tmp);
-+ return -ENOMEM;
++ return (ipportiphash_id(set, hash_ip, ip, port, ip1) != UINT_MAX);
++}
++
++#define KADT_CONDITION \
++ ip_set_ip_t port, ip1; \
++ \
++ if (flags[index+2] == 0) \
++ return 0; \
++ \
++ port = get_port(skb, flags[index+1]); \
++ ip1 = ipaddr(skb, flags[index+2]); \
++ \
++ if (port == INVALID_PORT) \
++ return 0;
++
++UADT(ipportiphash, test, req->port, req->ip1)
++KADT(ipportiphash, test, ipaddr, port, ip1)
++
++static inline int
++__ipportip_add(struct ip_set_ipportiphash *map,
++ ip_set_ip_t hash_ip, ip_set_ip_t ip1)
++{
++ __u32 probe;
++ u_int16_t i;
++ struct ipportip *elem, *slot = NULL;
++
++ for (i = 0; i < map->probes; i++) {
++ probe = jhash_ip2(map, i, hash_ip, ip1) % map->hashsize;
++ elem = HARRAY_ELEM(map->members, struct ipportip *, probe);
++ if (elem->ip == hash_ip && elem->ip1 == ip1)
++ return -EEXIST;
++ if (!(slot || elem->ip || elem->ip1))
++ slot = elem;
++ /* There can be deleted entries, must check all slots */
+ }
-+ tmp->hashsize = hashsize;
-+ tmp->elements = 0;
-+ tmp->probes = map->probes;
-+ tmp->resize = map->resize;
-+ tmp->first_ip = map->first_ip;
-+ tmp->last_ip = map->last_ip;
-+ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
-+
-+ write_lock_bh(&set->lock);
-+ map = (struct ip_set_ipporthash *) set->data; /* Play safe */
-+ for (i = 0; i < map->hashsize && res == 0; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ if (*elem)
-+ res = __add_haship(tmp, *elem);
-+ }
-+ if (res) {
-+ /* Failure, try again */
-+ write_unlock_bh(&set->lock);
-+ harray_free(tmp->members);
-+ kfree(tmp);
-+ goto again;
++ if (slot) {
++ slot->ip = hash_ip;
++ slot->ip1 = ip1;
++ map->elements++;
++ return 0;
+ }
++ /* Trigger rehashing */
++ return -EAGAIN;
++}
++
++static inline int
++__ipportiphash_add(struct ip_set_ipportiphash *map,
++ struct ipportip *elem)
++{
++ return __ipportip_add(map, elem->ip, elem->ip1);
++}
++
++static inline int
++ipportiphash_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
++{
++ struct ip_set_ipportiphash *map = set->data;
+
-+ /* Success at resizing! */
-+ members = map->members;
++ if (map->elements > limit)
++ return -ERANGE;
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
+
-+ map->hashsize = tmp->hashsize;
-+ map->members = tmp->members;
-+ write_unlock_bh(&set->lock);
++ *hash_ip = pack_ip_port(map, ip, port);
++ if (!(*hash_ip || ip1))
++ return -ERANGE;
++
++ return __ipportip_add(map, *hash_ip, ip1);
++}
+
-+ harray_free(members);
-+ kfree(tmp);
++UADT(ipportiphash, add, req->port, req->ip1)
++KADT(ipportiphash, add, ipaddr, port, ip1)
+
-+ return 0;
++static inline void
++__ipportiphash_retry(struct ip_set_ipportiphash *tmp,
++ struct ip_set_ipportiphash *map)
++{
++ tmp->first_ip = map->first_ip;
++ tmp->last_ip = map->last_ip;
+}
+
++HASH_RETRY2(ipportiphash, struct ipportip)
++
+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t *hash_ip)
++ipportiphash_del(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++ struct ip_set_ipportiphash *map = set->data;
+ ip_set_ip_t id;
-+ ip_set_ip_t *elem;
++ struct ipportip *elem;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
+
-+ id = hash_id(set, ip, port, hash_ip);
++ id = ipportiphash_id(set, hash_ip, ip, port, ip1);
+
+ if (id == UINT_MAX)
+ return -EEXIST;
+
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
-+ *elem = 0;
++ elem = HARRAY_ELEM(map->members, struct ipportip *, id);
++ elem->ip = elem->ip1 = 0;
+ map->elements--;
+
+ return 0;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipporthash *req =
-+ (struct ip_set_req_ipporthash *) data;
++UADT(ipportiphash, del, req->port, req->ip1)
++KADT(ipportiphash, del, ipaddr, port, ip1)
+
-+ if (size != sizeof(struct ip_set_req_ipporthash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipporthash),
-+ size);
-+ return -EINVAL;
++static inline int
++__ipportiphash_create(const struct ip_set_req_ipportiphash_create *req,
++ struct ip_set_ipportiphash *map)
++{
++ if (req->to - req->from > MAX_RANGE) {
++ ip_set_printk("range too big, %d elements (max %d)",
++ req->to - req->from + 1, MAX_RANGE+1);
++ return -ENOEXEC;
+ }
-+ return __delip(set, req->ip, req->port, hash_ip);
++ map->first_ip = req->from;
++ map->last_ip = req->to;
++ return 0;
+}
+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
++HASH_CREATE(ipportiphash, struct ipportip)
++HASH_DESTROY(ipportiphash)
++HASH_FLUSH(ipportiphash, struct ipportip)
++
++static inline void
++__ipportiphash_list_header(const struct ip_set_ipportiphash *map,
++ struct ip_set_req_ipportiphash_create *header)
+{
-+ ip_set_ip_t port;
++ header->from = map->first_ip;
++ header->to = map->last_ip;
++}
+
-+ if (flags[index+1] == 0)
-+ return -EINVAL;
-+
-+ port = get_port(skb, flags[index+1]);
++HASH_LIST_HEADER(ipportiphash)
++HASH_LIST_MEMBERS_SIZE(ipportiphash, struct ipportip)
++HASH_LIST_MEMBERS_MEMCPY(ipportiphash, struct ipportip)
+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ NIPQUAD(ip_hdr(skb)->saddr),
-+ NIPQUAD(ip_hdr(skb)->daddr));
-+#else
-+ NIPQUAD(skb->nh.iph->saddr),
-+ NIPQUAD(skb->nh.iph->daddr));
-+#endif
-+ DP("flag %s port %u",
-+ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
-+ port);
-+ if (port == INVALID_PORT)
-+ return -EINVAL;
++IP_SET_RTYPE(ipportiphash, IPSET_TYPE_IP | IPSET_TYPE_PORT
++ | IPSET_TYPE_IP1 | IPSET_DATA_TRIPLE)
+
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ port,
-+ hash_ip);
-+}
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("ipportiphash type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
++
++REGISTER_MODULE(ipportiphash)
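/*
 * Editorial sketch, not part of the patch: the multi-probe open-addressing
 * insert that __ipportip_add() above implements, restated as plain userspace
 * C so the control flow is easier to follow.  mix32() merely stands in for
 * jhash_2words(); every name below is hypothetical.
 */
#include <stdint.h>
#include <stddef.h>

struct pair { uint32_t ip, ip1; };

static uint32_t mix32(uint32_t a, uint32_t b, uint32_t initval)
{
	/* cheap stand-in for jhash_2words(a, b, initval) */
	uint32_t h = (a ^ initval) * 2654435761u;
	h ^= b + (h >> 16);
	return h * 2246822519u;
}

/*
 * Probe each candidate slot in turn: report a duplicate, remember the first
 * empty slot, and only give up (forcing the caller to rehash) when every
 * probe position is occupied by some other entry.
 */
static int probe_insert(struct pair *table, size_t hashsize,
			const uint32_t *initval, unsigned int probes,
			uint32_t ip, uint32_t ip1)
{
	struct pair *slot = NULL;
	unsigned int i;

	for (i = 0; i < probes; i++) {
		struct pair *elem = &table[mix32(ip, ip1, initval[i]) % hashsize];

		if (elem->ip == ip && elem->ip1 == ip1)
			return -1;	/* already stored (-EEXIST above) */
		if (!slot && !elem->ip && !elem->ip1)
			slot = elem;	/* keep scanning: deleted entries may hide a later match */
	}
	if (slot) {
		slot->ip = ip;
		slot->ip1 = ip1;
		return 0;
	}
	return 1;			/* all probes taken: rehash (-EAGAIN above) */
}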
+--- /dev/null
++++ b/net/ipv4/netfilter/ip_set_ipportnethash.c
+@@ -0,0 +1,304 @@
++/* Copyright (C) 2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module implementing an ip+port+net hash set */
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/ip.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++#include <linux/random.h>
++
++#include <net/ip.h>
++
++#include <linux/netfilter_ipv4/ip_set_ipportnethash.h>
++#include <linux/netfilter_ipv4/ip_set_getport.h>
++
++static int limit = MAX_RANGE;
++
++#define jhash_ip2(map, i, ipport, ip1) \
++ jhash_2words(ipport, ip1, *(map->initval + i))
++
++static inline __u32
++ipportnethash_id_cidr(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port,
++ ip_set_ip_t ip1, uint8_t cidr)
+{
-+ struct ip_set_req_ipporthash_create *req =
-+ (struct ip_set_req_ipporthash_create *) data;
-+ struct ip_set_ipporthash *map;
-+ uint16_t i;
++ struct ip_set_ipportnethash *map = set->data;
++ __u32 id;
++ u_int16_t i;
++ struct ipportip *elem;
+
-+ if (size != sizeof(struct ip_set_req_ipporthash_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipporthash_create),
-+ size);
-+ return -EINVAL;
++ *hash_ip = pack_ip_port(map, ip, port);
++ DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
++ set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
++ ip1 = pack_ip_cidr(ip1, cidr);
++ if (!(*hash_ip || ip1))
++ return UINT_MAX;
++
++ for (i = 0; i < map->probes; i++) {
++ id = jhash_ip2(map, i, *hash_ip, ip1) % map->hashsize;
++ DP("hash key: %u", id);
++ elem = HARRAY_ELEM(map->members, struct ipportip *, id);
++ if (elem->ip == *hash_ip && elem->ip1 == ip1)
++ return id;
++ /* No shortcut - there can be deleted entries. */
+ }
++ return UINT_MAX;
++}
+
-+ if (req->hashsize < 1) {
-+ ip_set_printk("hashsize too small");
-+ return -ENOEXEC;
-+ }
++static inline __u32
++ipportnethash_id(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
++{
++ struct ip_set_ipportnethash *map = set->data;
++ __u32 id = UINT_MAX;
++ int i;
+
-+ if (req->probes < 1) {
-+ ip_set_printk("probes too small");
-+ return -ENOEXEC;
++ for (i = 0; i < 30 && map->cidr[i]; i++) {
++ id = ipportnethash_id_cidr(set, hash_ip, ip, port, ip1,
++ map->cidr[i]);
++ if (id != UINT_MAX)
++ break;
+ }
++ return id;
++}
+
-+ map = kmalloc(sizeof(struct ip_set_ipporthash)
-+ + req->probes * sizeof(uint32_t), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_ipporthash)
-+ + req->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ for (i = 0; i < req->probes; i++)
-+ get_random_bytes(((uint32_t *) map->initval)+i, 4);
-+ map->elements = 0;
-+ map->hashsize = req->hashsize;
-+ map->probes = req->probes;
-+ map->resize = req->resize;
-+ map->first_ip = req->from;
-+ map->last_ip = req->to;
-+ map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
-+ kfree(map);
-+ return -ENOMEM;
-+ }
++static inline int
++ipportnethash_test_cidr(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port,
++ ip_set_ip_t ip1, uint8_t cidr)
++{
++ struct ip_set_ipportnethash *map = set->data;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
+
-+ set->data = map;
-+ return 0;
++ return (ipportnethash_id_cidr(set, hash_ip, ip, port, ip1,
++ cidr) != UINT_MAX);
+}
+
-+static void destroy(struct ip_set *set)
++static inline int
++ipportnethash_test(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+
-+ harray_free(map->members);
-+ kfree(map);
++ struct ip_set_ipportnethash *map = set->data;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
+
-+ set->data = NULL;
++ return (ipportnethash_id(set, hash_ip, ip, port, ip1) != UINT_MAX);
+}
+
-+static void flush(struct ip_set *set)
++static int
++ipportnethash_utest(struct ip_set *set, const void *data, u_int32_t size,
++ ip_set_ip_t *hash_ip)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
-+ map->elements = 0;
++ const struct ip_set_req_ipportnethash *req = data;
++
++ if (req->cidr <= 0 || req->cidr > 32)
++ return -EINVAL;
++ return (req->cidr == 32
++ ? ipportnethash_test(set, hash_ip, req->ip, req->port,
++ req->ip1)
++ : ipportnethash_test_cidr(set, hash_ip, req->ip, req->port,
++ req->ip1, req->cidr));
+}
+
-+static void list_header(const struct ip_set *set, void *data)
++#define KADT_CONDITION \
++ ip_set_ip_t port, ip1; \
++ \
++ if (flags[index+2] == 0) \
++ return 0; \
++ \
++ port = get_port(skb, flags[index+1]); \
++ ip1 = ipaddr(skb, flags[index+2]); \
++ \
++ if (port == INVALID_PORT) \
++ return 0;
++
++KADT(ipportnethash, test, ipaddr, port, ip1)
++
++static inline int
++__ipportnet_add(struct ip_set_ipportnethash *map,
++ ip_set_ip_t hash_ip, ip_set_ip_t ip1)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ struct ip_set_req_ipporthash_create *header =
-+ (struct ip_set_req_ipporthash_create *) data;
++ __u32 probe;
++ u_int16_t i;
++ struct ipportip *elem, *slot = NULL;
+
-+ header->hashsize = map->hashsize;
-+ header->probes = map->probes;
-+ header->resize = map->resize;
-+ header->from = map->first_ip;
-+ header->to = map->last_ip;
++ for (i = 0; i < map->probes; i++) {
++ probe = jhash_ip2(map, i, hash_ip, ip1) % map->hashsize;
++ elem = HARRAY_ELEM(map->members, struct ipportip *, probe);
++ if (elem->ip == hash_ip && elem->ip1 == ip1)
++ return -EEXIST;
++ if (!(slot || elem->ip || elem->ip1))
++ slot = elem;
++ /* There can be deleted entries, must check all slots */
++ }
++ if (slot) {
++ slot->ip = hash_ip;
++ slot->ip1 = ip1;
++ map->elements++;
++ return 0;
++ }
++ /* Trigger rehashing */
++ return -EAGAIN;
+}
+
-+static int list_members_size(const struct ip_set *set)
++static inline int
++__ipportnethash_add(struct ip_set_ipportnethash *map,
++ struct ipportip *elem)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+
-+ return (map->hashsize * sizeof(ip_set_ip_t));
++ return __ipportnet_add(map, elem->ip, elem->ip1);
+}
+
-+static void list_members(const struct ip_set *set, void *data)
++static inline int
++ipportnethash_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port,
++ ip_set_ip_t ip1, uint8_t cidr)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ ip_set_ip_t i, *elem;
++ struct ip_set_ipportnethash *map = set->data;
++ struct ipportip;
++ int ret;
++
++ if (map->elements > limit)
++ return -ERANGE;
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++ if (cidr <= 0 || cidr >= 32)
++ return -EINVAL;
++ if (map->nets[cidr-1] == UINT16_MAX)
++ return -ERANGE;
+
-+ for (i = 0; i < map->hashsize; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ ((ip_set_ip_t *)data)[i] = *elem;
++ *hash_ip = pack_ip_port(map, ip, port);
++ ip1 = pack_ip_cidr(ip1, cidr);
++ if (!(*hash_ip || ip1))
++ return -ERANGE;
++
++ ret =__ipportnet_add(map, *hash_ip, ip1);
++ if (ret == 0) {
++ if (!map->nets[cidr-1]++)
++ add_cidr_size(map->cidr, cidr);
++ map->elements++;
+ }
++ return ret;
+}
+
-+static struct ip_set_type ip_set_ipporthash = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_DATA_DOUBLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_ipporthash),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .retry = &retry,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_ipporthash_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++#undef KADT_CONDITION
++#define KADT_CONDITION \
++ struct ip_set_ipportnethash *map = set->data; \
++ uint8_t cidr = map->cidr[0] ? map->cidr[0] : 31; \
++ ip_set_ip_t port, ip1; \
++ \
++ if (flags[index+2] == 0) \
++ return 0; \
++ \
++ port = get_port(skb, flags[index+1]); \
++ ip1 = ipaddr(skb, flags[index+2]); \
++ \
++ if (port == INVALID_PORT) \
++ return 0;
+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("ipporthash type of IP sets");
-+module_param(limit, int, 0600);
-+MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
++UADT(ipportnethash, add, req->port, req->ip1, req->cidr)
++KADT(ipportnethash, add, ipaddr, port, ip1, cidr)
+
-+static int __init ip_set_ipporthash_init(void)
++static inline void
++__ipportnethash_retry(struct ip_set_ipportnethash *tmp,
++ struct ip_set_ipportnethash *map)
+{
-+ return ip_set_register_set_type(&ip_set_ipporthash);
++ tmp->first_ip = map->first_ip;
++ tmp->last_ip = map->last_ip;
++ memcpy(tmp->cidr, map->cidr, sizeof(tmp->cidr));
++ memcpy(tmp->nets, map->nets, sizeof(tmp->nets));
+}
+
-+static void __exit ip_set_ipporthash_fini(void)
++HASH_RETRY2(ipportnethash, struct ipportip)
++
++static inline int
++ipportnethash_del(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port,
++ ip_set_ip_t ip1, uint8_t cidr)
+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_ipporthash);
++ struct ip_set_ipportnethash *map = set->data;
++ ip_set_ip_t id;
++ struct ipportip *elem;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++ if (!ip)
++ return -ERANGE;
++ if (cidr <= 0 || cidr >= 32)
++ return -EINVAL;
++
++ id = ipportnethash_id_cidr(set, hash_ip, ip, port, ip1, cidr);
++
++ if (id == UINT_MAX)
++ return -EEXIST;
++
++ elem = HARRAY_ELEM(map->members, struct ipportip *, id);
++ elem->ip = elem->ip1 = 0;
++ map->elements--;
++ if (!map->nets[cidr-1]--)
++ del_cidr_size(map->cidr, cidr);
++
++ return 0;
+}
+
-+module_init(ip_set_ipporthash_init);
-+module_exit(ip_set_ipporthash_fini);
++UADT(ipportnethash, del, req->port, req->ip1, req->cidr)
++KADT(ipportnethash, del, ipaddr, port, ip1, cidr)
++
++static inline int
++__ipportnethash_create(const struct ip_set_req_ipportnethash_create *req,
++ struct ip_set_ipportnethash *map)
++{
++ if (req->to - req->from > MAX_RANGE) {
++ ip_set_printk("range too big, %d elements (max %d)",
++ req->to - req->from + 1, MAX_RANGE+1);
++ return -ENOEXEC;
++ }
++ map->first_ip = req->from;
++ map->last_ip = req->to;
++ memset(map->cidr, 0, sizeof(map->cidr));
++ memset(map->nets, 0, sizeof(map->nets));
++ return 0;
++}
++
++HASH_CREATE(ipportnethash, struct ipportip)
++HASH_DESTROY(ipportnethash)
++HASH_FLUSH_CIDR(ipportnethash, struct ipportip);
++
++static inline void
++__ipportnethash_list_header(const struct ip_set_ipportnethash *map,
++ struct ip_set_req_ipportnethash_create *header)
++{
++ header->from = map->first_ip;
++ header->to = map->last_ip;
++}
++
++HASH_LIST_HEADER(ipportnethash)
++
++HASH_LIST_MEMBERS_SIZE(ipportnethash, struct ipportip)
++HASH_LIST_MEMBERS_MEMCPY(ipportnethash, struct ipportip)
++
++IP_SET_RTYPE(ipportnethash, IPSET_TYPE_IP | IPSET_TYPE_PORT
++ | IPSET_TYPE_IP1 | IPSET_DATA_TRIPLE)
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("ipportnethash type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
++
++REGISTER_MODULE(ipportnethash)
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_iptree.c
-@@ -0,0 +1,612 @@
-+/* Copyright (C) 2005 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+@@ -0,0 +1,466 @@
++/* Copyright (C) 2005-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
++ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the iptree type */
+
-+#include <linux/version.h>
+#include <linux/module.h>
++#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
++#include <linux/timer.h>
+
-+/* Backward compatibility */
-+#ifndef __nocast
-+#define __nocast
-+#endif
-+
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
+#include <linux/netfilter_ipv4/ip_set_iptree.h>
+
+static int limit = MAX_RANGE;
+
+/* Garbage collection interval in seconds: */
+#define IPTREE_GC_TIME 5*60
-+/* Sleep so many milliseconds before trying again
-+ * to delete the gc timer at destroying/flushing a set */
++/* Sleep so many milliseconds before trying again
++ * to delete the gc timer at destroying/flushing a set */
+#define IPTREE_DESTROY_SLEEP 100
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+static struct kmem_cache *branch_cachep;
-+static struct kmem_cache *leaf_cachep;
-+#else
-+static kmem_cache_t *branch_cachep;
-+static kmem_cache_t *leaf_cachep;
-+#endif
++static __KMEM_CACHE_T__ *branch_cachep;
++static __KMEM_CACHE_T__ *leaf_cachep;
++
+
+#if defined(__LITTLE_ENDIAN)
+#define ABCD(a,b,c,d,addrp) do { \
@@ -4587,9 +5121,9 @@
+} while (0)
+
+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++iptree_test(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptree *map = set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
@@ -4610,53 +5144,10 @@
+ || time_after(dtree->expires[d], jiffies));
+}
+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iptree *req =
-+ (struct ip_set_req_iptree *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptree)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iptree),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ int res;
-+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ NIPQUAD(ip_hdr(skb)->saddr),
-+ NIPQUAD(ip_hdr(skb)->daddr));
-+#else
-+ NIPQUAD(skb->nh.iph->saddr),
-+ NIPQUAD(skb->nh.iph->daddr));
-+#endif
++#define KADT_CONDITION
+
-+ res = __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+ return (res < 0 ? 0 : res);
-+}
++UADT(iptree, test)
++KADT(iptree, test, ipaddr)
+
+#define ADDIP_WALK(map, elem, branch, type, cachep) do { \
+ if ((map)->tree[elem]) { \
@@ -4674,10 +5165,10 @@
+} while (0)
+
+static inline int
-+__addip(struct ip_set *set, ip_set_ip_t ip, unsigned int timeout,
-+ ip_set_ip_t *hash_ip)
++iptree_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, unsigned int timeout)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptree *map = set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
@@ -4698,6 +5189,8 @@
+ if (dtree->expires[d]
+ && (!map->timeout || time_after(dtree->expires[d], jiffies)))
+ ret = -EEXIST;
++ if (map->timeout && timeout == 0)
++ timeout = map->timeout;
+ dtree->expires[d] = map->timeout ? (timeout * HZ + jiffies) : 1;
+ /* Lottery: I won! */
+ if (dtree->expires[d] == 0)
@@ -4708,47 +5201,8 @@
+ return ret;
+}
+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_req_iptree *req =
-+ (struct ip_set_req_iptree *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptree)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iptree),
-+ size);
-+ return -EINVAL;
-+ }
-+ DP("%u.%u.%u.%u %u", HIPQUAD(req->ip), req->timeout);
-+ return __addip(set, req->ip,
-+ req->timeout ? req->timeout : map->timeout,
-+ hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+
-+ return __addip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ map->timeout,
-+ hash_ip);
-+}
++UADT(iptree, add, req->timeout)
++KADT(iptree, add, ipaddr, 0)
+
+#define DELIP_WALK(map, elem, branch) do { \
+ if ((map)->tree[elem]) { \
@@ -4757,10 +5211,10 @@
+ return -EEXIST; \
+} while (0)
+
-+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++static inline int
++iptree_del(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptree *map = set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
@@ -4783,40 +5237,8 @@
+ return -EEXIST;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iptree *req =
-+ (struct ip_set_req_iptree *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptree)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iptree),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT(iptree, del)
++KADT(iptree, del, ipaddr)
+
+#define LOOP_WALK_BEGIN(map, i, branch) \
+ for (i = 0; i < 256; i++) { \
@@ -4826,10 +5248,11 @@
+
+#define LOOP_WALK_END }
+
-+static void ip_tree_gc(unsigned long ul_set)
++static void
++ip_tree_gc(unsigned long ul_set)
+{
-+ struct ip_set *set = (void *) ul_set;
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set *set = (struct ip_set *) ul_set;
++ struct ip_set_iptree *map = set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
@@ -4896,9 +5319,10 @@
+ add_timer(&map->gc);
+}
+
-+static inline void init_gc_timer(struct ip_set *set)
++static inline void
++init_gc_timer(struct ip_set *set)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptree *map = set->data;
+
+ /* Even if there is no timeout for the entries,
+ * we still have to call gc because delete
@@ -4911,22 +5335,22 @@
+ add_timer(&map->gc);
+}
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static int
++iptree_create(struct ip_set *set, const void *data, u_int32_t size)
+{
-+ struct ip_set_req_iptree_create *req =
-+ (struct ip_set_req_iptree_create *) data;
++ const struct ip_set_req_iptree_create *req = data;
+ struct ip_set_iptree *map;
+
+ if (size != sizeof(struct ip_set_req_iptree_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
++ ip_set_printk("data length wrong (want %zu, have %lu)",
+ sizeof(struct ip_set_req_iptree_create),
-+ size);
++ (unsigned long)size);
+ return -EINVAL;
+ }
+
+ map = kmalloc(sizeof(struct ip_set_iptree), GFP_KERNEL);
+ if (!map) {
-+ DP("out of memory for %d bytes",
++ DP("out of memory for %zu bytes",
+ sizeof(struct ip_set_iptree));
+ return -ENOMEM;
+ }
@@ -4940,7 +5364,8 @@
+ return 0;
+}
+
-+static void __flush(struct ip_set_iptree *map)
++static inline void
++__flush(struct ip_set_iptree *map)
+{
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
@@ -4959,9 +5384,10 @@
+ map->elements = 0;
+}
+
-+static void destroy(struct ip_set *set)
++static void
++iptree_destroy(struct ip_set *set)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptree *map = set->data;
+
+ /* gc might be running */
+ while (!del_timer(&map->gc))
@@ -4971,9 +5397,10 @@
+ set->data = NULL;
+}
+
-+static void flush(struct ip_set *set)
++static void
++iptree_flush(struct ip_set *set)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptree *map = set->data;
+ unsigned int timeout = map->timeout;
+
+ /* gc might be running */
@@ -4986,18 +5413,19 @@
+ init_gc_timer(set);
+}
+
-+static void list_header(const struct ip_set *set, void *data)
++static void
++iptree_list_header(const struct ip_set *set, void *data)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_req_iptree_create *header =
-+ (struct ip_set_req_iptree_create *) data;
++ const struct ip_set_iptree *map = set->data;
++ struct ip_set_req_iptree_create *header = data;
+
+ header->timeout = map->timeout;
+}
+
-+static int list_members_size(const struct ip_set *set)
++static int
++iptree_list_members_size(const struct ip_set *set)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ const struct ip_set_iptree *map = set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
@@ -5020,9 +5448,10 @@
+ return (count * sizeof(struct ip_set_req_iptree));
+}
+
-+static void list_members(const struct ip_set *set, void *data)
++static void
++iptree_list_members(const struct ip_set *set, void *data)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ const struct ip_set_iptree *map = set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
@@ -5036,9 +5465,9 @@
+ for (d = 0; d < 256; d++) {
+ if (dtree->expires[d]
+ && (!map->timeout || time_after(dtree->expires[d], jiffies))) {
-+ entry = (struct ip_set_req_iptree *)(data + offset);
++ entry = data + offset;
+ entry->ip = ((a << 24) | (b << 16) | (c << 8) | d);
-+ entry->timeout = !map->timeout ? 0
++ entry->timeout = !map->timeout ? 0
+ : (dtree->expires[d] - jiffies)/HZ;
+ offset += sizeof(struct ip_set_req_iptree);
+ }
@@ -5048,26 +5477,7 @@
+ LOOP_WALK_END;
+}
+
-+static struct ip_set_type ip_set_iptree = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_iptree),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_iptree_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_TYPE(iptree, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -5079,29 +5489,15 @@
+{
+ int ret;
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ branch_cachep = kmem_cache_create("ip_set_iptreeb",
-+ sizeof(struct ip_set_iptreeb),
-+ 0, 0, NULL);
-+#else
-+ branch_cachep = kmem_cache_create("ip_set_iptreeb",
-+ sizeof(struct ip_set_iptreeb),
-+ 0, 0, NULL, NULL);
-+#endif
++ branch_cachep = KMEM_CACHE_CREATE("ip_set_iptreeb",
++ sizeof(struct ip_set_iptreeb));
+ if (!branch_cachep) {
+ printk(KERN_ERR "Unable to create ip_set_iptreeb slab cache\n");
+ ret = -ENOMEM;
+ goto out;
+ }
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ leaf_cachep = kmem_cache_create("ip_set_iptreed",
-+ sizeof(struct ip_set_iptreed),
-+ 0, 0, NULL);
-+#else
-+ leaf_cachep = kmem_cache_create("ip_set_iptreed",
-+ sizeof(struct ip_set_iptreed),
-+ 0, 0, NULL, NULL);
-+#endif
++ leaf_cachep = KMEM_CACHE_CREATE("ip_set_iptreed",
++ sizeof(struct ip_set_iptreed));
+ if (!leaf_cachep) {
+ printk(KERN_ERR "Unable to create ip_set_iptreed slab cache\n");
+ ret = -ENOMEM;
@@ -5130,7 +5526,7 @@
+module_exit(ip_set_iptree_fini);
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_iptreemap.c
-@@ -0,0 +1,829 @@
+@@ -0,0 +1,708 @@
+/* Copyright (C) 2007 Sven Wegener <sven.wegener@stealer.net>
+ *
+ * This program is free software; you can redistribute it and/or modify it
@@ -5139,38 +5535,33 @@
+ */
+
 +/* This module implements the iptreemap ipset type. It uses bitmaps to
-+ * represent every single IPv4 address as a single bit. The bitmaps are managed
-+ * in a tree structure, where the first three octets of an addresses are used
-+ * as an index to find the bitmap and the last octet is used as the bit number.
++ * represent every single IPv4 address as a bit. The bitmaps are managed in a
++ * tree structure, where the first three octets of an address are used as an
++ * index to find the bitmap and the last octet is used as the bit number.
+ */
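/*
 * Editorial sketch, not part of the patch: the address split described in the
 * comment above.  The first three octets walk the tree and the last octet
 * picks a bit inside the leaf bitmap.  split_ip() and its parameters are
 * hypothetical names used only for illustration.
 */
static inline void split_ip(unsigned int addr,
			    unsigned char *a, unsigned char *b,
			    unsigned char *c, unsigned char *d)
{
	*a = (addr >> 24) & 0xff;	/* index into the top-level node */
	*b = (addr >> 16) & 0xff;	/* index into the second-level node */
	*c = (addr >> 8) & 0xff;	/* selects the leaf bitmap */
	*d = addr & 0xff;		/* bit number inside that bitmap */
}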
+
-+#include <linux/version.h>
++#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
++#include <linux/timer.h>
+
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
+#include <linux/netfilter_ipv4/ip_set_iptreemap.h>
+
+#define IPTREEMAP_DEFAULT_GC_TIME (5 * 60)
+#define IPTREEMAP_DESTROY_SLEEP (100)
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+static struct kmem_cache *cachep_b;
-+static struct kmem_cache *cachep_c;
-+static struct kmem_cache *cachep_d;
-+#else
-+static kmem_cache_t *cachep_b;
-+static kmem_cache_t *cachep_c;
-+static kmem_cache_t *cachep_d;
-+#endif
++static __KMEM_CACHE_T__ *cachep_b;
++static __KMEM_CACHE_T__ *cachep_c;
++static __KMEM_CACHE_T__ *cachep_d;
+
+static struct ip_set_iptreemap_d *fullbitmap_d;
+static struct ip_set_iptreemap_c *fullbitmap_c;
@@ -5319,9 +5710,6 @@
+#define LOOP_WALK_END_COUNT() \
+ }
+
-+#define MIN(a, b) (a < b ? a : b)
-+#define MAX(a, b) (a > b ? a : b)
-+
+#define GETVALUE1(a, a1, b1, r) \
+ (a == a1 ? b1 : r)
+
@@ -5391,9 +5779,9 @@
+}
+
+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++iptreemap_test(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5410,40 +5798,13 @@
+ return !!test_bit(d, (void *) dtree->bitmap);
+}
+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptreemap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
-+ return -EINVAL;
-+ }
++#define KADT_CONDITION
+
-+ return __testip(set, req->start, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
-+{
-+ int res;
-+
-+ res = __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+
-+ return (res < 0 ? 0 : res);
-+}
++UADT(iptreemap, test)
++KADT(iptreemap, test, ipaddr)
+
+static inline int
-+__addip_single(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++__addip_single(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
+ struct ip_set_iptreemap_b *btree;
@@ -5459,18 +5820,19 @@
+ ADDIP_WALK(btree, b, ctree, struct ip_set_iptreemap_c, cachep_c, fullbitmap_c);
+ ADDIP_WALK(ctree, c, dtree, struct ip_set_iptreemap_d, cachep_d, fullbitmap_d);
+
-+ if (test_and_set_bit(d, (void *) dtree->bitmap))
++ if (__test_and_set_bit(d, (void *) dtree->bitmap))
+ return -EEXIST;
+
-+ set_bit(b, (void *) btree->dirty);
++ __set_bit(b, (void *) btree->dirty);
+
+ return 0;
+}
+
+static inline int
-+__addip_range(struct ip_set *set, ip_set_ip_t start, ip_set_ip_t end, ip_set_ip_t *hash_ip)
++iptreemap_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t start, ip_set_ip_t end)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5479,7 +5841,7 @@
+ unsigned char a2, b2, c2, d2;
+
+ if (start == end)
-+ return __addip_single(set, start, hash_ip);
++ return __addip_single(set, hash_ip, start);
+
+ *hash_ip = start;
+
@@ -5491,8 +5853,8 @@
+ ADDIP_RANGE_LOOP(btree, b, GETVALUE1(a, a1, b1, 0), GETVALUE1(a, a2, b2, 255), CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2), ctree, fullbitmap_c, cachep_c, free_c) {
+ ADDIP_RANGE_LOOP(ctree, c, GETVALUE2(a, b, a1, b1, c1, 0), GETVALUE2(a, b, a2, b2, c2, 255), CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2), dtree, fullbitmap_d, cachep_d, free_d) {
+ for (d = GETVALUE3(a, b, c, a1, b1, c1, d1, 0); d <= GETVALUE3(a, b, c, a2, b2, c2, d2, 255); d++)
-+ set_bit(d, (void *) dtree->bitmap);
-+ set_bit(b, (void *) btree->dirty);
++ __set_bit(d, (void *) dtree->bitmap);
++ __set_bit(b, (void *) btree->dirty);
+ } ADDIP_RANGE_LOOP_END();
+ } ADDIP_RANGE_LOOP_END();
+ } ADDIP_RANGE_LOOP_END();
@@ -5500,39 +5862,14 @@
+ return 0;
+}
+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptreemap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
-+ return -EINVAL;
-+ }
-+
-+ return __addip_range(set, MIN(req->start, req->end), MAX(req->start, req->end), hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
-+{
-+
-+ return __addip_single(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT0(iptreemap, add, min(req->ip, req->end), max(req->ip, req->end))
++KADT(iptreemap, add, ipaddr, ip)
+
+static inline int
-+__delip_single(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip, unsigned int __nocast flags)
++__delip_single(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, gfp_t flags)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5546,18 +5883,19 @@
+ DELIP_WALK(btree, b, ctree, cachep_c, fullbitmap_c, flags);
+ DELIP_WALK(ctree, c, dtree, cachep_d, fullbitmap_d, flags);
+
-+ if (!test_and_clear_bit(d, (void *) dtree->bitmap))
++ if (!__test_and_clear_bit(d, (void *) dtree->bitmap))
+ return -EEXIST;
+
-+ set_bit(b, (void *) btree->dirty);
++ __set_bit(b, (void *) btree->dirty);
+
+ return 0;
+}
+
+static inline int
-+__delip_range(struct ip_set *set, ip_set_ip_t start, ip_set_ip_t end, ip_set_ip_t *hash_ip, unsigned int __nocast flags)
++iptreemap_del(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t start, ip_set_ip_t end, gfp_t flags)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5566,7 +5904,7 @@
+ unsigned char a2, b2, c2, d2;
+
+ if (start == end)
-+ return __delip_single(set, start, hash_ip, flags);
++ return __delip_single(set, hash_ip, start, flags);
+
+ *hash_ip = start;
+
@@ -5578,8 +5916,8 @@
+ DELIP_RANGE_LOOP(btree, b, GETVALUE1(a, a1, b1, 0), GETVALUE1(a, a2, b2, 255), CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2), ctree, fullbitmap_c, cachep_c, free_c, flags) {
+ DELIP_RANGE_LOOP(ctree, c, GETVALUE2(a, b, a1, b1, c1, 0), GETVALUE2(a, b, a2, b2, c2, 255), CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2), dtree, fullbitmap_d, cachep_d, free_d, flags) {
+ for (d = GETVALUE3(a, b, c, a1, b1, c1, d1, 0); d <= GETVALUE3(a, b, c, a2, b2, c2, d2, 255); d++)
-+ clear_bit(d, (void *) dtree->bitmap);
-+ set_bit(b, (void *) btree->dirty);
++ __clear_bit(d, (void *) dtree->bitmap);
++ __set_bit(b, (void *) btree->dirty);
+ } DELIP_RANGE_LOOP_END();
+ } DELIP_RANGE_LOOP_END();
+ } DELIP_RANGE_LOOP_END();
@@ -5587,34 +5925,8 @@
+ return 0;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptreemap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
-+ return -EINVAL;
-+ }
-+
-+ return __delip_range(set, MIN(req->start, req->end), MAX(req->start, req->end), hash_ip, GFP_KERNEL);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
-+{
-+ return __delip_single(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip,
-+ GFP_ATOMIC);
-+}
++UADT0(iptreemap, del, min(req->ip, req->end), max(req->ip, req->end), GFP_KERNEL)
++KADT(iptreemap, del, ipaddr, ip, GFP_ATOMIC)
+
+/* Check the status of the bitmap
+ * -1 == all bits cleared
@@ -5638,7 +5950,7 @@
+gc(unsigned long addr)
+{
+ struct ip_set *set = (struct ip_set *) addr;
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5649,7 +5961,7 @@
+
+ LOOP_WALK_BEGIN_GC(map, a, btree, fullbitmap_b, cachep_b, i) {
+ LOOP_WALK_BEGIN_GC(btree, b, ctree, fullbitmap_c, cachep_c, j) {
-+ if (!test_and_clear_bit(b, (void *) btree->dirty))
++ if (!__test_and_clear_bit(b, (void *) btree->dirty))
+ continue;
+ LOOP_WALK_BEGIN_GC(ctree, c, dtree, fullbitmap_d, cachep_d, k) {
+ switch (bitmap_status(dtree)) {
@@ -5677,7 +5989,7 @@
+static inline void
+init_gc_timer(struct ip_set *set)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+
+ init_timer(&map->gc);
+ map->gc.data = (unsigned long) set;
@@ -5686,16 +5998,12 @@
+ add_timer(&map->gc);
+}
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static int
++iptreemap_create(struct ip_set *set, const void *data, u_int32_t size)
+{
-+ struct ip_set_req_iptreemap_create *req = (struct ip_set_req_iptreemap_create *) data;
++ const struct ip_set_req_iptreemap_create *req = data;
+ struct ip_set_iptreemap *map;
+
-+ if (size != sizeof(struct ip_set_req_iptreemap_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap_create), size);
-+ return -EINVAL;
-+ }
-+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
@@ -5708,7 +6016,8 @@
+ return 0;
+}
+
-+static inline void __flush(struct ip_set_iptreemap *map)
++static inline void
++__flush(struct ip_set_iptreemap *map)
+{
+ struct ip_set_iptreemap_b *btree;
+ unsigned int a;
@@ -5719,9 +6028,10 @@
+ LOOP_WALK_END();
+}
+
-+static void destroy(struct ip_set *set)
++static void
++iptreemap_destroy(struct ip_set *set)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+
+ while (!del_timer(&map->gc))
+ msleep(IPTREEMAP_DESTROY_SLEEP);
@@ -5732,9 +6042,10 @@
+ set->data = NULL;
+}
+
-+static void flush(struct ip_set *set)
++static void
++iptreemap_flush(struct ip_set *set)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+
+ while (!del_timer(&map->gc))
+ msleep(IPTREEMAP_DESTROY_SLEEP);
@@ -5746,17 +6057,19 @@
+ init_gc_timer(set);
+}
+
-+static void list_header(const struct ip_set *set, void *data)
++static void
++iptreemap_list_header(const struct ip_set *set, void *data)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
-+ struct ip_set_req_iptreemap_create *header = (struct ip_set_req_iptreemap_create *) data;
++ struct ip_set_iptreemap *map = set->data;
++ struct ip_set_req_iptreemap_create *header = data;
+
+ header->gc_interval = map->gc_interval;
+}
+
-+static int list_members_size(const struct ip_set *set)
++static int
++iptreemap_list_members_size(const struct ip_set *set)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5783,19 +6096,21 @@
+ return (count * sizeof(struct ip_set_req_iptreemap));
+}
+
-+static inline size_t add_member(void *data, size_t offset, ip_set_ip_t start, ip_set_ip_t end)
++static inline u_int32_t
++add_member(void *data, size_t offset, ip_set_ip_t start, ip_set_ip_t end)
+{
-+ struct ip_set_req_iptreemap *entry = (struct ip_set_req_iptreemap *) (data + offset);
++ struct ip_set_req_iptreemap *entry = data + offset;
+
-+ entry->start = start;
++ entry->ip = start;
+ entry->end = end;
+
+ return sizeof(*entry);
+}
+
-+static void list_members(const struct ip_set *set, void *data)
++static void
++iptreemap_list_members(const struct ip_set *set, void *data)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5830,26 +6145,7 @@
+ add_member(data, offset, start, end);
+}
+
-+static struct ip_set_type ip_set_iptreemap = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = create,
-+ .destroy = destroy,
-+ .flush = flush,
-+ .reqsize = sizeof(struct ip_set_req_iptreemap),
-+ .addip = addip,
-+ .addip_kernel = addip_kernel,
-+ .delip = delip,
-+ .delip_kernel = delip_kernel,
-+ .testip = testip,
-+ .testip_kernel = testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_iptreemap_create),
-+ .list_header = list_header,
-+ .list_members_size = list_members_size,
-+ .list_members = list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_TYPE(iptreemap, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sven Wegener <sven.wegener@stealer.net>");
@@ -5860,43 +6156,22 @@
+ int ret = -ENOMEM;
+ int a;
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ cachep_b = kmem_cache_create("ip_set_iptreemap_b",
-+ sizeof(struct ip_set_iptreemap_b),
-+ 0, 0, NULL);
-+#else
-+ cachep_b = kmem_cache_create("ip_set_iptreemap_b",
-+ sizeof(struct ip_set_iptreemap_b),
-+ 0, 0, NULL, NULL);
-+#endif
++ cachep_b = KMEM_CACHE_CREATE("ip_set_iptreemap_b",
++ sizeof(struct ip_set_iptreemap_b));
+ if (!cachep_b) {
+ ip_set_printk("Unable to create ip_set_iptreemap_b slab cache");
+ goto out;
+ }
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ cachep_c = kmem_cache_create("ip_set_iptreemap_c",
-+ sizeof(struct ip_set_iptreemap_c),
-+ 0, 0, NULL);
-+#else
-+ cachep_c = kmem_cache_create("ip_set_iptreemap_c",
-+ sizeof(struct ip_set_iptreemap_c),
-+ 0, 0, NULL, NULL);
-+#endif
++ cachep_c = KMEM_CACHE_CREATE("ip_set_iptreemap_c",
++ sizeof(struct ip_set_iptreemap_c));
+ if (!cachep_c) {
+ ip_set_printk("Unable to create ip_set_iptreemap_c slab cache");
+ goto outb;
+ }
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ cachep_d = kmem_cache_create("ip_set_iptreemap_d",
-+ sizeof(struct ip_set_iptreemap_d),
-+ 0, 0, NULL);
-+#else
-+ cachep_d = kmem_cache_create("ip_set_iptreemap_d",
-+ sizeof(struct ip_set_iptreemap_d),
-+ 0, 0, NULL, NULL);
-+#endif
++ cachep_d = KMEM_CACHE_CREATE("ip_set_iptreemap_d",
++ sizeof(struct ip_set_iptreemap_d));
+ if (!cachep_d) {
+ ip_set_printk("Unable to create ip_set_iptreemap_d slab cache");
+ goto outc;
@@ -5962,15 +6237,15 @@
+module_exit(ip_set_iptreemap_fini);
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_macipmap.c
-@@ -0,0 +1,375 @@
+@@ -0,0 +1,164 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Martin Josefsson <gandalf@wlug.westbo.se>
-+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ * Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
++ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the macipmap type */
@@ -5978,32 +6253,21 @@
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
+#include <linux/if_ether.h>
-+#include <linux/vmalloc.h>
+
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
+#include <linux/netfilter_ipv4/ip_set_macipmap.h>
+
+static int
-+testip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
++macipmap_utest(struct ip_set *set, const void *data, u_int32_t size,
++ ip_set_ip_t *hash_ip)
+{
-+ struct ip_set_macipmap *map = (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table = (struct ip_set_macip *) map->members;
-+ struct ip_set_req_macipmap *req = (struct ip_set_req_macipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_macipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_macipmap),
-+ size);
-+ return -EINVAL;
-+ }
++ const struct ip_set_macipmap *map = set->data;
++ const struct ip_set_macip *table = map->members;
++ const struct ip_set_req_macipmap *req = data;
+
+ if (req->ip < map->first_ip || req->ip > map->last_ip)
+ return -ERANGE;
@@ -6011,8 +6275,7 @@
+ *hash_ip = req->ip;
+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
+ set->name, HIPQUAD(req->ip), HIPQUAD(*hash_ip));
-+ if (test_bit(IPSET_MACIP_ISSET,
-+ (void *) &table[req->ip - map->first_ip].flags)) {
++ if (table[req->ip - map->first_ip].match) {
+ return (memcmp(req->ethernet,
+ &table[req->ip - map->first_ip].ethernet,
+ ETH_ALEN) == 0);
@@ -6022,26 +6285,17 @@
+}
+
+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table =
-+ (struct ip_set_macip *) map->members;
++macipmap_ktest(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ const struct ip_set_macipmap *map = set->data;
++ const struct ip_set_macip *table = map->members;
+ ip_set_ip_t ip;
+
-+ ip = ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr);
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr);
-+#endif
++ ip = ipaddr(skb, flags[index]);
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return 0;
@@ -6049,17 +6303,11 @@
+ *hash_ip = ip;
+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
+ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
-+ if (test_bit(IPSET_MACIP_ISSET,
-+ (void *) &table[ip - map->first_ip].flags)) {
++ if (table[ip - map->first_ip].match) {
+ /* Is mac pointer valid?
+ * If so, compare... */
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
+ return (skb_mac_header(skb) >= skb->head
+ && (skb_mac_header(skb) + ETH_HLEN) <= skb->data
-+#else
-+ return (skb->mac.raw >= skb->head
-+ && (skb->mac.raw + ETH_HLEN) <= skb->data
-+#endif
+ && (memcmp(eth_hdr(skb)->h_source,
+ &table[ip - map->first_ip].ethernet,
+ ETH_ALEN) == 0));
@@ -6070,324 +6318,132 @@
+
+/* returns 0 on success */
+static inline int
-+__addip(struct ip_set *set,
-+ ip_set_ip_t ip, unsigned char *ethernet, ip_set_ip_t *hash_ip)
++macipmap_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, const unsigned char *ethernet)
+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table =
-+ (struct ip_set_macip *) map->members;
++ struct ip_set_macipmap *map = set->data;
++ struct ip_set_macip *table = map->members;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
-+ if (test_and_set_bit(IPSET_MACIP_ISSET,
-+ (void *) &table[ip - map->first_ip].flags))
++ if (table[ip - map->first_ip].match)
+ return -EEXIST;
+
+ *hash_ip = ip;
+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
+ memcpy(&table[ip - map->first_ip].ethernet, ethernet, ETH_ALEN);
++ table[ip - map->first_ip].match = IPSET_MACIP_ISSET;
+ return 0;
+}
+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_macipmap *req =
-+ (struct ip_set_req_macipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_macipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_macipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __addip(set, req->ip, req->ethernet, hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ ip_set_ip_t ip;
-+
-+ ip = ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr);
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr);
-+#endif
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ if (!(skb_mac_header(skb) >= skb->head
-+ && (skb_mac_header(skb) + ETH_HLEN) <= skb->data))
-+#else
-+ if (!(skb->mac.raw >= skb->head
-+ && (skb->mac.raw + ETH_HLEN) <= skb->data))
-+#endif
++#define KADT_CONDITION \
++ if (!(skb_mac_header(skb) >= skb->head \
++ && (skb_mac_header(skb) + ETH_HLEN) <= skb->data))\
+ return -EINVAL;
+
-+ return __addip(set, ip, eth_hdr(skb)->h_source, hash_ip);
-+}
++UADT(macipmap, add, req->ethernet)
++KADT(macipmap, add, ipaddr, eth_hdr(skb)->h_source)
+
+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++macipmap_del(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table =
-+ (struct ip_set_macip *) map->members;
++ struct ip_set_macipmap *map = set->data;
++ struct ip_set_macip *table = map->members;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
-+ if (!test_and_clear_bit(IPSET_MACIP_ISSET,
-+ (void *)&table[ip - map->first_ip].flags))
++ if (!table[ip - map->first_ip].match)
+ return -EEXIST;
+
+ *hash_ip = ip;
++ table[ip - map->first_ip].match = 0;
+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
+ return 0;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_macipmap *req =
-+ (struct ip_set_req_macipmap *) data;
++#undef KADT_CONDITION
++#define KADT_CONDITION
+
-+ if (size != sizeof(struct ip_set_req_macipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_macipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, hash_ip);
-+}
++UADT(macipmap, del)
++KADT(macipmap, del, ipaddr)
+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
-+
-+static inline size_t members_size(ip_set_id_t from, ip_set_id_t to)
-+{
-+ return (size_t)((to - from + 1) * sizeof(struct ip_set_macip));
-+}
-+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static inline int
++__macipmap_create(const struct ip_set_req_macipmap_create *req,
++ struct ip_set_macipmap *map)
+{
-+ int newbytes;
-+ struct ip_set_req_macipmap_create *req =
-+ (struct ip_set_req_macipmap_create *) data;
-+ struct ip_set_macipmap *map;
-+
-+ if (size != sizeof(struct ip_set_req_macipmap_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_macipmap_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ DP("from %u.%u.%u.%u to %u.%u.%u.%u",
-+ HIPQUAD(req->from), HIPQUAD(req->to));
-+
-+ if (req->from > req->to) {
-+ DP("bad ip range");
-+ return -ENOEXEC;
-+ }
-+
+ if (req->to - req->from > MAX_RANGE) {
-+ ip_set_printk("range too big (max %d addresses)",
-+ MAX_RANGE+1);
++ ip_set_printk("range too big, %d elements (max %d)",
++ req->to - req->from + 1, MAX_RANGE+1);
+ return -ENOEXEC;
+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_macipmap), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_macipmap));
-+ return -ENOMEM;
-+ }
+ map->flags = req->flags;
-+ map->first_ip = req->from;
-+ map->last_ip = req->to;
-+ newbytes = members_size(map->first_ip, map->last_ip);
-+ map->members = ip_set_malloc(newbytes);
-+ DP("members: %u %p", newbytes, map->members);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", newbytes);
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+ memset(map->members, 0, newbytes);
-+
-+ set->data = map;
-+ return 0;
-+}
-+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+
-+ ip_set_free(map->members, members_size(map->first_ip, map->last_ip));
-+ kfree(map);
-+
-+ set->data = NULL;
++ return (req->to - req->from + 1) * sizeof(struct ip_set_macip);
+}
+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ memset(map->members, 0, members_size(map->first_ip, map->last_ip));
-+}
++BITMAP_CREATE(macipmap)
++BITMAP_DESTROY(macipmap)
++BITMAP_FLUSH(macipmap)
+
-+static void list_header(const struct ip_set *set, void *data)
++static inline void
++__macipmap_list_header(const struct ip_set_macipmap *map,
++ struct ip_set_req_macipmap_create *header)
+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ struct ip_set_req_macipmap_create *header =
-+ (struct ip_set_req_macipmap_create *) data;
-+
-+ DP("list_header %x %x %u", map->first_ip, map->last_ip,
-+ map->flags);
-+
-+ header->from = map->first_ip;
-+ header->to = map->last_ip;
+ header->flags = map->flags;
+}
+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+
-+ DP("%u", members_size(map->first_ip, map->last_ip));
-+ return members_size(map->first_ip, map->last_ip);
-+}
-+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+
-+ int bytes = members_size(map->first_ip, map->last_ip);
-+
-+ DP("members: %u %p", bytes, map->members);
-+ memcpy(data, map->members, bytes);
-+}
++BITMAP_LIST_HEADER(macipmap)
++BITMAP_LIST_MEMBERS_SIZE(macipmap)
++BITMAP_LIST_MEMBERS(macipmap)
+
-+static struct ip_set_type ip_set_macipmap = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_macipmap),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_macipmap_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_TYPE(macipmap, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("macipmap type of IP sets");
+
-+static int __init ip_set_macipmap_init(void)
-+{
-+ init_max_malloc_size();
-+ return ip_set_register_set_type(&ip_set_macipmap);
-+}
-+
-+static void __exit ip_set_macipmap_fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_macipmap);
-+}
-+
-+module_init(ip_set_macipmap_init);
-+module_exit(ip_set_macipmap_fini);
++REGISTER_MODULE(macipmap)
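
The rewritten macipmap type above drops its open-coded create/destroy/flush/list handlers and module init/exit functions in favour of generator macros (BITMAP_*, IP_SET_TYPE, REGISTER_MODULE) that live in the new shared headers, which are not part of this hunk. Purely as a sketch, with a hypothetical macro name and an expansion guessed from the init/fini pair deleted just above, the generated boilerplate can be pictured like this:

/* Hypothetical expansion, for illustration only: mirrors the open-coded
 * ip_set_macipmap_init()/_fini() pair that this hunk removes.  The real
 * REGISTER_MODULE() is defined in a header outside this diff. */
#define REGISTER_MODULE_SKETCH(type)					\
static int __init ip_set_##type##_init(void)				\
{									\
	return ip_set_register_set_type(&ip_set_##type);		\
}									\
									\
static void __exit ip_set_##type##_fini(void)				\
{									\
	/* original code notes a possible race with ip_set_create() */	\
	ip_set_unregister_set_type(&ip_set_##type);			\
}									\
									\
module_init(ip_set_##type##_init);					\
module_exit(ip_set_##type##_fini);
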
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_nethash.c
-@@ -0,0 +1,497 @@
-+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+@@ -0,0 +1,225 @@
++/* Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
++ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing a cidr nethash set */
+
+#include <linux/module.h>
++#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+#include <linux/jhash.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
-+#include <linux/vmalloc.h>
+#include <linux/random.h>
+
+#include <net/ip.h>
+
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
+#include <linux/netfilter_ipv4/ip_set_nethash.h>
+
+static int limit = MAX_RANGE;
+
+static inline __u32
-+jhash_ip(const struct ip_set_nethash *map, uint16_t i, ip_set_ip_t ip)
-+{
-+ return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
-+}
-+
-+static inline __u32
-+hash_id_cidr(struct ip_set_nethash *map,
-+ ip_set_ip_t ip,
-+ unsigned char cidr,
-+ ip_set_ip_t *hash_ip)
++nethash_id_cidr(const struct ip_set_nethash *map,
++ ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip,
++ uint8_t cidr)
+{
+ __u32 id;
+ u_int16_t i;
+ ip_set_ip_t *elem;
+
-+ *hash_ip = pack(ip, cidr);
++ *hash_ip = pack_ip_cidr(ip, cidr);
++ if (!*hash_ip)
++ return MAX_RANGE;
+
+ for (i = 0; i < map->probes; i++) {
+ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
@@ -6395,19 +6451,20 @@
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
+ if (*elem == *hash_ip)
+ return id;
++ /* No shortcut - there can be deleted entries. */
+ }
+ return UINT_MAX;
+}
+
+static inline __u32
-+hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++nethash_id(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++ const struct ip_set_nethash *map = set->data;
+ __u32 id = UINT_MAX;
+ int i;
+
+ for (i = 0; i < 30 && map->cidr[i]; i++) {
-+ id = hash_id_cidr(map, ip, map->cidr[i], hash_ip);
++ id = nethash_id_cidr(map, hash_ip, ip, map->cidr[i]);
+ if (id != UINT_MAX)
+ break;
+ }
@@ -6415,409 +6472,156 @@
+}
+
+static inline int
-+__testip_cidr(struct ip_set *set, ip_set_ip_t ip, unsigned char cidr,
-+ ip_set_ip_t *hash_ip)
++nethash_test_cidr(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, uint8_t cidr)
+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++ const struct ip_set_nethash *map = set->data;
+
-+ return (ip && hash_id_cidr(map, ip, cidr, hash_ip) != UINT_MAX);
++ return (nethash_id_cidr(map, hash_ip, ip, cidr) != UINT_MAX);
+}
+
+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++nethash_test(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
++ return (nethash_id(set, hash_ip, ip) != UINT_MAX);
+}
+
+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
++nethash_utest(struct ip_set *set, const void *data, u_int32_t size,
++ ip_set_ip_t *hash_ip)
+{
-+ struct ip_set_req_nethash *req =
-+ (struct ip_set_req_nethash *) data;
++ const struct ip_set_req_nethash *req = data;
+
-+ if (size != sizeof(struct ip_set_req_nethash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_nethash),
-+ size);
++ if (req->cidr <= 0 || req->cidr > 32)
+ return -EINVAL;
-+ }
-+ return (req->cidr == 32 ? __testip(set, req->ip, hash_ip)
-+ : __testip_cidr(set, req->ip, req->cidr, hash_ip));
++ return (req->cidr == 32 ? nethash_test(set, hash_ip, req->ip)
++ : nethash_test_cidr(set, hash_ip, req->ip, req->cidr));
+}
+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++#define KADT_CONDITION
++
++KADT(nethash, test, ipaddr)
+
+static inline int
-+__addip_base(struct ip_set_nethash *map, ip_set_ip_t ip)
++__nethash_add(struct ip_set_nethash *map, ip_set_ip_t *ip)
+{
+ __u32 probe;
+ u_int16_t i;
-+ ip_set_ip_t *elem;
++ ip_set_ip_t *elem, *slot = NULL;
+
+ for (i = 0; i < map->probes; i++) {
-+ probe = jhash_ip(map, i, ip) % map->hashsize;
++ probe = jhash_ip(map, i, *ip) % map->hashsize;
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
-+ if (*elem == ip)
++ if (*elem == *ip)
+ return -EEXIST;
-+ if (!*elem) {
-+ *elem = ip;
-+ map->elements++;
-+ return 0;
-+ }
++ if (!(slot || *elem))
++ slot = elem;
++ /* There can be deleted entries, must check all slots */
++ }
++ if (slot) {
++ *slot = *ip;
++ map->elements++;
++ return 0;
+ }
+ /* Trigger rehashing */
+ return -EAGAIN;
+}
+
+static inline int
-+__addip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
-+ ip_set_ip_t *hash_ip)
++nethash_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, uint8_t cidr)
+{
-+ if (!ip || map->elements >= limit)
-+ return -ERANGE;
-+
-+ *hash_ip = pack(ip, cidr);
-+ DP("%u.%u.%u.%u/%u, %u.%u.%u.%u", HIPQUAD(ip), cidr, HIPQUAD(*hash_ip));
++ struct ip_set_nethash *map = set->data;
++ int ret;
+
-+ return __addip_base(map, *hash_ip);
-+}
++ if (map->elements >= limit || map->nets[cidr-1] == UINT16_MAX)
++ return -ERANGE;
++ if (cidr <= 0 || cidr >= 32)
++ return -EINVAL;
+
-+static void
-+update_cidr_sizes(struct ip_set_nethash *map, unsigned char cidr)
-+{
-+ unsigned char next;
-+ int i;
++ *hash_ip = pack_ip_cidr(ip, cidr);
++ DP("%u.%u.%u.%u/%u, %u.%u.%u.%u", HIPQUAD(ip), cidr, HIPQUAD(*hash_ip));
++ if (!*hash_ip)
++ return -ERANGE;
+
-+ for (i = 0; i < 30 && map->cidr[i]; i++) {
-+ if (map->cidr[i] == cidr) {
-+ return;
-+ } else if (map->cidr[i] < cidr) {
-+ next = map->cidr[i];
-+ map->cidr[i] = cidr;
-+ cidr = next;
-+ }
-+ }
-+ if (i < 30)
-+ map->cidr[i] = cidr;
-+}
-+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_nethash *req =
-+ (struct ip_set_req_nethash *) data;
-+ int ret;
-+
-+ if (size != sizeof(struct ip_set_req_nethash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_nethash),
-+ size);
-+ return -EINVAL;
++ ret = __nethash_add(map, hash_ip);
++ if (ret == 0) {
++ if (!map->nets[cidr-1]++)
++ add_cidr_size(map->cidr, cidr);
++ map->elements++;
+ }
-+ ret = __addip((struct ip_set_nethash *) set->data,
-+ req->ip, req->cidr, hash_ip);
-+
-+ if (ret == 0)
-+ update_cidr_sizes((struct ip_set_nethash *) set->data,
-+ req->cidr);
+
+ return ret;
+}
+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ int ret = -ERANGE;
-+ ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr);
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr);
-+#endif
-+
-+ if (map->cidr[0])
-+ ret = __addip(map, ip, map->cidr[0], hash_ip);
-+
-+ return ret;
-+}
++#undef KADT_CONDITION
++#define KADT_CONDITION \
++ struct ip_set_nethash *map = set->data; \
++ uint8_t cidr = map->cidr[0] ? map->cidr[0] : 31;
+
-+static int retry(struct ip_set *set)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ ip_set_ip_t *elem;
-+ void *members;
-+ u_int32_t i, hashsize = map->hashsize;
-+ int res;
-+ struct ip_set_nethash *tmp;
-+
-+ if (map->resize == 0)
-+ return -ERANGE;
++UADT(nethash, add, req->cidr)
++KADT(nethash, add, ipaddr, cidr)
+
-+ again:
-+ res = 0;
-+
-+ /* Calculate new parameters */
-+ hashsize += (hashsize * map->resize)/100;
-+ if (hashsize == map->hashsize)
-+ hashsize++;
-+
-+ ip_set_printk("rehashing of set %s triggered: "
-+ "hashsize grows from %u to %u",
-+ set->name, map->hashsize, hashsize);
-+
-+ tmp = kmalloc(sizeof(struct ip_set_nethash)
-+ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
-+ if (!tmp) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_nethash)
-+ + map->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
-+ if (!tmp->members) {
-+ DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
-+ kfree(tmp);
-+ return -ENOMEM;
-+ }
-+ tmp->hashsize = hashsize;
-+ tmp->elements = 0;
-+ tmp->probes = map->probes;
-+ tmp->resize = map->resize;
-+ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
-+ memcpy(tmp->cidr, map->cidr, 30 * sizeof(unsigned char));
-+
-+ write_lock_bh(&set->lock);
-+ map = (struct ip_set_nethash *) set->data; /* Play safe */
-+ for (i = 0; i < map->hashsize && res == 0; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ if (*elem)
-+ res = __addip_base(tmp, *elem);
-+ }
-+ if (res) {
-+ /* Failure, try again */
-+ write_unlock_bh(&set->lock);
-+ harray_free(tmp->members);
-+ kfree(tmp);
-+ goto again;
-+ }
-+
-+ /* Success at resizing! */
-+ members = map->members;
-+
-+ map->hashsize = tmp->hashsize;
-+ map->members = tmp->members;
-+ write_unlock_bh(&set->lock);
-+
-+ harray_free(members);
-+ kfree(tmp);
-+
-+ return 0;
++static inline void
++__nethash_retry(struct ip_set_nethash *tmp, struct ip_set_nethash *map)
++{
++ memcpy(tmp->cidr, map->cidr, sizeof(tmp->cidr));
++ memcpy(tmp->nets, map->nets, sizeof(tmp->nets));
+}
+
++HASH_RETRY(nethash, ip_set_ip_t)
++
+static inline int
-+__delip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
-+ ip_set_ip_t *hash_ip)
++nethash_del(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, uint8_t cidr)
+{
++ struct ip_set_nethash *map = set->data;
+ ip_set_ip_t id, *elem;
+
-+ if (!ip)
-+ return -ERANGE;
++ if (cidr <= 0 || cidr >= 32)
++ return -EINVAL;
+
-+ id = hash_id_cidr(map, ip, cidr, hash_ip);
++ id = nethash_id_cidr(map, hash_ip, ip, cidr);
+ if (id == UINT_MAX)
+ return -EEXIST;
+
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
+ *elem = 0;
+ map->elements--;
++ if (!map->nets[cidr-1]--)
++ del_cidr_size(map->cidr, cidr);
+ return 0;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_nethash *req =
-+ (struct ip_set_req_nethash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_nethash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_nethash),
-+ size);
-+ return -EINVAL;
-+ }
-+ /* TODO: no garbage collection in map->cidr */
-+ return __delip((struct ip_set_nethash *) set->data,
-+ req->ip, req->cidr, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ int ret = -ERANGE;
-+ ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr);
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr);
-+#endif
-+
-+ if (map->cidr[0])
-+ ret = __delip(map, ip, map->cidr[0], hash_ip);
-+
-+ return ret;
-+}
++UADT(nethash, del, req->cidr)
++KADT(nethash, del, ipaddr, cidr)
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static inline int
++__nethash_create(const struct ip_set_req_nethash_create *req,
++ struct ip_set_nethash *map)
+{
-+ struct ip_set_req_nethash_create *req =
-+ (struct ip_set_req_nethash_create *) data;
-+ struct ip_set_nethash *map;
-+ uint16_t i;
-+
-+ if (size != sizeof(struct ip_set_req_nethash_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_nethash_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ if (req->hashsize < 1) {
-+ ip_set_printk("hashsize too small");
-+ return -ENOEXEC;
-+ }
-+ if (req->probes < 1) {
-+ ip_set_printk("probes too small");
-+ return -ENOEXEC;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_nethash)
-+ + req->probes * sizeof(uint32_t), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_nethash)
-+ + req->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ for (i = 0; i < req->probes; i++)
-+ get_random_bytes(((uint32_t *) map->initval)+i, 4);
-+ map->elements = 0;
-+ map->hashsize = req->hashsize;
-+ map->probes = req->probes;
-+ map->resize = req->resize;
-+ memset(map->cidr, 0, 30 * sizeof(unsigned char));
-+ map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
-+ kfree(map);
-+ return -ENOMEM;
-+ }
++ memset(map->cidr, 0, sizeof(map->cidr));
++ memset(map->nets, 0, sizeof(map->nets));
+
-+ set->data = map;
+ return 0;
+}
+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+
-+ harray_free(map->members);
-+ kfree(map);
-+
-+ set->data = NULL;
-+}
-+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
-+ memset(map->cidr, 0, 30 * sizeof(unsigned char));
-+ map->elements = 0;
-+}
-+
-+static void list_header(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ struct ip_set_req_nethash_create *header =
-+ (struct ip_set_req_nethash_create *) data;
-+
-+ header->hashsize = map->hashsize;
-+ header->probes = map->probes;
-+ header->resize = map->resize;
-+}
++HASH_CREATE(nethash, ip_set_ip_t)
++HASH_DESTROY(nethash)
+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++HASH_FLUSH_CIDR(nethash, ip_set_ip_t)
+
-+ return (map->hashsize * sizeof(ip_set_ip_t));
++static inline void
++__nethash_list_header(const struct ip_set_nethash *map,
++ struct ip_set_req_nethash_create *header)
++{
+}
+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ ip_set_ip_t i, *elem;
++HASH_LIST_HEADER(nethash)
++HASH_LIST_MEMBERS_SIZE(nethash, ip_set_ip_t)
++HASH_LIST_MEMBERS(nethash, ip_set_ip_t)
+
-+ for (i = 0; i < map->hashsize; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ ((ip_set_ip_t *)data)[i] = *elem;
-+ }
-+}
-+
-+static struct ip_set_type ip_set_nethash = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_nethash),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .retry = &retry,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_nethash_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_RTYPE(nethash, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -6825,27 +6629,15 @@
+module_param(limit, int, 0600);
+MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
+
-+static int __init ip_set_nethash_init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_nethash);
-+}
-+
-+static void __exit ip_set_nethash_fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_nethash);
-+}
-+
-+module_init(ip_set_nethash_init);
-+module_exit(ip_set_nethash_fini);
++REGISTER_MODULE(nethash)
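
The nethash lookup above walks the prefix lengths recorded in map->cidr[], packs the address down to each prefix with pack_ip_cidr() (defined in the shared hashes header, not shown in this hunk) and probes the hash once per probe seed. The standalone userspace sketch below only illustrates that "walk the stored prefix lengths, compare the packed network" idea; the mask-based packing helper and the toy hash standing in for jhash are assumptions, not the kernel implementation.

#include <stdio.h>
#include <stdint.h>

/* Assumed packing: keep the top 'cidr' bits; cidr 0 packs to 0, which the
 * real code treats as invalid. */
static uint32_t pack_ip_cidr_sketch(uint32_t ip, uint8_t cidr)
{
	return cidr ? (ip & (~0u << (32 - cidr))) : 0;
}

/* Toy stand-in for the kernel's per-probe jhash. */
static uint32_t toy_hash(uint32_t v, uint32_t seed)
{
	return (v ^ seed) * 2654435761u;
}

static int nethash_test_sketch(const uint32_t *slot, uint32_t hashsize,
			       const uint8_t *cidr, int ncidr, uint32_t ip)
{
	for (int c = 0; c < ncidr && cidr[c]; c++) {
		uint32_t net = pack_ip_cidr_sketch(ip, cidr[c]);
		for (uint32_t probe = 0; probe < 4; probe++)
			if (slot[toy_hash(net, probe) % hashsize] == net)
				return 1;
	}
	return 0;
}

int main(void)
{
	enum { HSZ = 64 };
	uint32_t slot[HSZ] = { 0 };
	uint8_t cidr[30] = { 24 };	/* one stored prefix length: /24 */
	uint32_t net = pack_ip_cidr_sketch(0xc0a80101u /* 192.168.1.1 */, 24);

	/* "add": store the packed /24 network at every probe position */
	for (uint32_t probe = 0; probe < 4; probe++)
		slot[toy_hash(net, probe) % HSZ] = net;

	printf("192.168.1.77 in set: %d\n",
	       nethash_test_sketch(slot, HSZ, cidr, 30, 0xc0a8014du));
	printf("192.168.2.1  in set: %d\n",
	       nethash_test_sketch(slot, HSZ, cidr, 30, 0xc0a80201u));
	return 0;
}
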
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_portmap.c
-@@ -0,0 +1,346 @@
-+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+@@ -0,0 +1,114 @@
++/* Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
++ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing a port set type as a bitmap */
@@ -6855,9 +6647,6 @@
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
@@ -6866,330 +6655,434 @@
+#include <net/ip.h>
+
+#include <linux/netfilter_ipv4/ip_set_portmap.h>
++#include <linux/netfilter_ipv4/ip_set_getport.h>
+
-+/* We must handle non-linear skbs */
-+static inline ip_set_ip_t
-+get_port(const struct sk_buff *skb, u_int32_t flags)
++static inline int
++portmap_test(const struct ip_set *set, ip_set_ip_t *hash_port,
++ ip_set_ip_t port)
+{
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ struct iphdr *iph = ip_hdr(skb);
-+#else
-+ struct iphdr *iph = skb->nh.iph;
-+#endif
-+ u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
-+ switch (iph->protocol) {
-+ case IPPROTO_TCP: {
-+ struct tcphdr tcph;
++ const struct ip_set_portmap *map = set->data;
++
++ if (port < map->first_ip || port > map->last_ip)
++ return -ERANGE;
+
-+ /* See comments at tcp_match in ip_tables.c */
-+ if (offset)
-+ return INVALID_PORT;
++ *hash_port = port;
++ DP("set: %s, port:%u, %u", set->name, port, *hash_port);
++ return !!test_bit(port - map->first_ip, map->members);
++}
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
-+#else
-+ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &tcph, sizeof(tcph)) < 0)
-+#endif
-+ /* No choice either */
-+ return INVALID_PORT;
-+
-+ return ntohs(flags & IPSET_SRC ?
-+ tcph.source : tcph.dest);
-+ }
-+ case IPPROTO_UDP: {
-+ struct udphdr udph;
++#define KADT_CONDITION \
++ if (ip == INVALID_PORT) \
++ return 0;
+
-+ if (offset)
-+ return INVALID_PORT;
++UADT(portmap, test)
++KADT(portmap, test, get_port)
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
-+#else
-+ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &udph, sizeof(udph)) < 0)
-+#endif
-+ /* No choice either */
-+ return INVALID_PORT;
-+
-+ return ntohs(flags & IPSET_SRC ?
-+ udph.source : udph.dest);
-+ }
-+ default:
-+ return INVALID_PORT;
-+ }
++static inline int
++portmap_add(struct ip_set *set, ip_set_ip_t *hash_port, ip_set_ip_t port)
++{
++ struct ip_set_portmap *map = set->data;
++
++ if (port < map->first_ip || port > map->last_ip)
++ return -ERANGE;
++ if (test_and_set_bit(port - map->first_ip, map->members))
++ return -EEXIST;
++
++ *hash_port = port;
++ DP("port %u", port);
++ return 0;
+}
+
++UADT(portmap, add)
++KADT(portmap, add, get_port)
++
+static inline int
-+__testport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
++portmap_del(struct ip_set *set, ip_set_ip_t *hash_port, ip_set_ip_t port)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++ struct ip_set_portmap *map = set->data;
+
-+ if (port < map->first_port || port > map->last_port)
++ if (port < map->first_ip || port > map->last_ip)
+ return -ERANGE;
++ if (!test_and_clear_bit(port - map->first_ip, map->members))
++ return -EEXIST;
+
+ *hash_port = port;
-+ DP("set: %s, port:%u, %u", set->name, port, *hash_port);
-+ return !!test_bit(port - map->first_port, map->members);
++ DP("port %u", port);
++ return 0;
+}
+
-+static int
-+testport(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_port)
-+{
-+ struct ip_set_req_portmap *req =
-+ (struct ip_set_req_portmap *) data;
++UADT(portmap, del)
++KADT(portmap, del, get_port)
+
-+ if (size != sizeof(struct ip_set_req_portmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_portmap),
-+ size);
-+ return -EINVAL;
++static inline int
++__portmap_create(const struct ip_set_req_portmap_create *req,
++ struct ip_set_portmap *map)
++{
++ if (req->to - req->from > MAX_RANGE) {
++ ip_set_printk("range too big, %d elements (max %d)",
++ req->to - req->from + 1, MAX_RANGE+1);
++ return -ENOEXEC;
+ }
-+ return __testport(set, req->port, hash_port);
++ return bitmap_bytes(req->from, req->to);
+}
+
-+static int
-+testport_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_port,
-+ const u_int32_t *flags,
-+ unsigned char index)
++BITMAP_CREATE(portmap)
++BITMAP_DESTROY(portmap)
++BITMAP_FLUSH(portmap)
++
++static inline void
++__portmap_list_header(const struct ip_set_portmap *map,
++ struct ip_set_req_portmap_create *header)
+{
-+ int res;
-+ ip_set_ip_t port = get_port(skb, flags[index]);
++}
+
-+ DP("flag %s port %u", flags[index] & IPSET_SRC ? "SRC" : "DST", port);
-+ if (port == INVALID_PORT)
-+ return 0;
++BITMAP_LIST_HEADER(portmap)
++BITMAP_LIST_MEMBERS_SIZE(portmap)
++BITMAP_LIST_MEMBERS(portmap)
+
-+ res = __testport(set, port, hash_port);
-+
-+ return (res < 0 ? 0 : res);
-+}
++IP_SET_TYPE(portmap, IPSET_TYPE_PORT | IPSET_DATA_SINGLE)
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("portmap type of IP sets");
++
++REGISTER_MODULE(portmap)
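
The portmap type stores one bit per port between first_ip and last_ip (the generic bitmap fields are reused for ports here), and __portmap_create() sizes that bitmap with the shared bitmap_bytes() helper from ip_set.h, whose word-rounding formula appears later in this diff. A small userspace illustration of the same storage model, with hand-rolled bit helpers standing in for the kernel's test_and_set_bit()/test_bit():

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Same rounding as bitmap_bytes() in ip_set.h: bits -> whole 32-bit words. */
static int bitmap_bytes(unsigned a, unsigned b)
{
	return 4 * ((((b - a + 8) / 8) + 3) / 4);
}

static int test_and_set(uint32_t *bits, unsigned n)
{
	uint32_t old = bits[n / 32] & (1u << (n % 32));

	bits[n / 32] |= 1u << (n % 32);
	return old != 0;
}

static int test(const uint32_t *bits, unsigned n)
{
	return (bits[n / 32] >> (n % 32)) & 1u;
}

int main(void)
{
	unsigned first = 1024, last = 2047;	/* ports 1024-2047 */
	int bytes = bitmap_bytes(first, last);
	uint32_t *members = calloc(1, bytes);

	if (!members)
		return 1;
	printf("bitmap for %u-%u: %d bytes\n", first, last, bytes);
	/* ports outside 1024-2047 would be rejected, like portmap_add()'s -ERANGE */
	if (!test_and_set(members, 1234 - first))
		printf("port 1234 added\n");
	printf("port 1234 in set: %d\n", test(members, 1234 - first));
	printf("port 1500 in set: %d\n", test(members, 1500 - first));
	free(members);
	return 0;
}
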
+--- /dev/null
++++ b/net/ipv4/netfilter/ip_set_setlist.c
+@@ -0,0 +1,330 @@
++/* Copyright (C) 2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module implementing an IP set type: the setlist type */
++
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/errno.h>
++
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
++#include <linux/netfilter_ipv4/ip_set_setlist.h>
++
++/*
++ * before ==> index, ref
++ * after ==> ref, index
++ */
+
+static inline int
-+__addport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
++next_index_eq(const struct ip_set_setlist *map, int i, ip_set_id_t index)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+
-+ if (port < map->first_port || port > map->last_port)
-+ return -ERANGE;
-+ if (test_and_set_bit(port - map->first_port, map->members))
-+ return -EEXIST;
-+
-+ *hash_port = port;
-+ DP("port %u", port);
-+ return 0;
++ return i < map->size && map->index[i] == index;
+}
+
+static int
-+addport(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_port)
-+{
-+ struct ip_set_req_portmap *req =
-+ (struct ip_set_req_portmap *) data;
++setlist_utest(struct ip_set *set, const void *data, u_int32_t size,
++ ip_set_ip_t *hash_ip)
++{
++ const struct ip_set_setlist *map = set->data;
++ const struct ip_set_req_setlist *req = data;
++ ip_set_id_t index, ref = IP_SET_INVALID_ID;
++ int i, res = 0;
++ struct ip_set *s;
++
++ if (req->before && req->ref[0] == '\0')
++ return 0;
+
-+ if (size != sizeof(struct ip_set_req_portmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_portmap),
-+ size);
-+ return -EINVAL;
++ index = __ip_set_get_byname(req->name, &s);
++ if (index == IP_SET_INVALID_ID)
++ return 0;
++ if (req->ref[0] != '\0') {
++ ref = __ip_set_get_byname(req->ref, &s);
++ if (ref == IP_SET_INVALID_ID)
++ goto finish;
++ }
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID; i++) {
++ if (req->before && map->index[i] == index) {
++ res = next_index_eq(map, i + 1, ref);
++ break;
++ } else if (!req->before) {
++ if ((ref == IP_SET_INVALID_ID
++ && map->index[i] == index)
++ || (map->index[i] == ref
++ && next_index_eq(map, i + 1, index))) {
++ res = 1;
++ break;
++ }
++ }
+ }
-+ return __addport(set, req->port, hash_port);
++ if (ref != IP_SET_INVALID_ID)
++ __ip_set_put_byindex(ref);
++finish:
++ __ip_set_put_byindex(index);
++ return res;
+}
+
+static int
-+addport_kernel(struct ip_set *set,
++setlist_ktest(struct ip_set *set,
+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_port,
++ ip_set_ip_t *hash_ip,
+ const u_int32_t *flags,
+ unsigned char index)
+{
-+ ip_set_ip_t port = get_port(skb, flags[index]);
++ struct ip_set_setlist *map = set->data;
++ int i, res = 0;
+
-+ if (port == INVALID_PORT)
-+ return -EINVAL;
-+
-+ return __addport(set, port, hash_port);
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID
++ && res == 0; i++)
++ res = ip_set_testip_kernel(map->index[i], skb, flags);
++ return res;
+}
+
+static inline int
-+__delport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
++insert_setlist(struct ip_set_setlist *map, int i, ip_set_id_t index)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++ ip_set_id_t tmp;
++ int j;
+
-+ if (port < map->first_port || port > map->last_port)
++ DP("i: %u, last %u\n", i, map->index[map->size - 1]);
++ if (i >= map->size || map->index[map->size - 1] != IP_SET_INVALID_ID)
+ return -ERANGE;
-+ if (!test_and_clear_bit(port - map->first_port, map->members))
-+ return -EEXIST;
-+
-+ *hash_port = port;
-+ DP("port %u", port);
++
++ for (j = i; j < map->size
++ && index != IP_SET_INVALID_ID; j++) {
++ tmp = map->index[j];
++ map->index[j] = index;
++ index = tmp;
++ }
+ return 0;
+}
+
+static int
-+delport(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_port)
++setlist_uadd(struct ip_set *set, const void *data, u_int32_t size,
++ ip_set_ip_t *hash_ip)
+{
-+ struct ip_set_req_portmap *req =
-+ (struct ip_set_req_portmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_portmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_portmap),
-+ size);
++ struct ip_set_setlist *map = set->data;
++ const struct ip_set_req_setlist *req = data;
++ ip_set_id_t index, ref = IP_SET_INVALID_ID;
++ int i, res = -ERANGE;
++ struct ip_set *s;
++
++ if (req->before && req->ref[0] == '\0')
+ return -EINVAL;
++
++ index = __ip_set_get_byname(req->name, &s);
++ if (index == IP_SET_INVALID_ID)
++ return -EEXIST;
++ /* "Loop detection" */
++ if (strcmp(s->type->typename, "setlist") == 0)
++ goto finish;
++
++ if (req->ref[0] != '\0') {
++ ref = __ip_set_get_byname(req->ref, &s);
++ if (ref == IP_SET_INVALID_ID) {
++ res = -EEXIST;
++ goto finish;
++ }
+ }
-+ return __delport(set, req->port, hash_port);
++ for (i = 0; i < map->size; i++) {
++ if (map->index[i] != ref)
++ continue;
++ if (req->before)
++ res = insert_setlist(map, i, index);
++ else
++ res = insert_setlist(map,
++ ref == IP_SET_INVALID_ID ? i : i + 1,
++ index);
++ break;
++ }
++ if (ref != IP_SET_INVALID_ID)
++ __ip_set_put_byindex(ref);
++ /* In case of success, we keep the reference to the set */
++finish:
++ if (res != 0)
++ __ip_set_put_byindex(index);
++ return res;
+}
+
+static int
-+delport_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_port,
-+ const u_int32_t *flags,
-+ unsigned char index)
++setlist_kadd(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
+{
-+ ip_set_ip_t port = get_port(skb, flags[index]);
++ struct ip_set_setlist *map = set->data;
++ int i, res = -EINVAL;
+
-+ if (port == INVALID_PORT)
-+ return -EINVAL;
-+
-+ return __delport(set, port, hash_port);
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID
++ && res != 0; i++)
++ res = ip_set_addip_kernel(map->index[i], skb, flags);
++ return res;
+}
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static inline int
++unshift_setlist(struct ip_set_setlist *map, int i)
+{
-+ int newbytes;
-+ struct ip_set_req_portmap_create *req =
-+ (struct ip_set_req_portmap_create *) data;
-+ struct ip_set_portmap *map;
++ int j;
++
++ for (j = i; j < map->size - 1; j++)
++ map->index[j] = map->index[j+1];
++ map->index[map->size-1] = IP_SET_INVALID_ID;
++ return 0;
++}
+
-+ if (size != sizeof(struct ip_set_req_portmap_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_portmap_create),
-+ size);
++static int
++setlist_udel(struct ip_set *set, const void *data, u_int32_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_setlist *map = set->data;
++ const struct ip_set_req_setlist *req = data;
++ ip_set_id_t index, ref = IP_SET_INVALID_ID;
++ int i, res = -EEXIST;
++ struct ip_set *s;
++
++ if (req->before && req->ref[0] == '\0')
+ return -EINVAL;
-+ }
+
-+ DP("from %u to %u", req->from, req->to);
-+
-+ if (req->from > req->to) {
-+ DP("bad port range");
-+ return -ENOEXEC;
++ index = __ip_set_get_byname(req->name, &s);
++ if (index == IP_SET_INVALID_ID)
++ return -EEXIST;
++ if (req->ref[0] != '\0') {
++ ref = __ip_set_get_byname(req->ref, &s);
++ if (ref == IP_SET_INVALID_ID)
++ goto finish;
++ }
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID; i++) {
++ if (req->before) {
++ if (map->index[i] == index
++ && next_index_eq(map, i + 1, ref)) {
++ res = unshift_setlist(map, i);
++ break;
++ }
++ } else if (ref == IP_SET_INVALID_ID) {
++ if (map->index[i] == index) {
++ res = unshift_setlist(map, i);
++ break;
++ }
++ } else if (map->index[i] == ref
++ && next_index_eq(map, i + 1, index)) {
++ res = unshift_setlist(map, i + 1);
++ break;
++ }
+ }
++ if (ref != IP_SET_INVALID_ID)
++ __ip_set_put_byindex(ref);
++finish:
++ __ip_set_put_byindex(index);
++ /* In case of success, release the reference to the set */
++ if (res == 0)
++ __ip_set_put_byindex(index);
++ return res;
++}
+
-+ if (req->to - req->from > MAX_RANGE) {
-+ ip_set_printk("range too big (max %d ports)",
-+ MAX_RANGE+1);
-+ return -ENOEXEC;
-+ }
++static int
++setlist_kdel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ struct ip_set_setlist *map = set->data;
++ int i, res = -EINVAL;
++
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID
++ && res != 0; i++)
++ res = ip_set_delip_kernel(map->index[i], skb, flags);
++ return res;
++}
+
-+ map = kmalloc(sizeof(struct ip_set_portmap), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_portmap));
-+ return -ENOMEM;
-+ }
-+ map->first_port = req->from;
-+ map->last_port = req->to;
-+ newbytes = bitmap_bytes(req->from, req->to);
-+ map->members = kmalloc(newbytes, GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", newbytes);
-+ kfree(map);
++static int
++setlist_create(struct ip_set *set, const void *data, u_int32_t size)
++{
++ struct ip_set_setlist *map;
++ const struct ip_set_req_setlist_create *req = data;
++ int i;
++
++ map = kmalloc(sizeof(struct ip_set_setlist) +
++ req->size * sizeof(ip_set_id_t), GFP_KERNEL);
++ if (!map)
+ return -ENOMEM;
-+ }
-+ memset(map->members, 0, newbytes);
-+
++ map->size = req->size;
++ for (i = 0; i < map->size; i++)
++ map->index[i] = IP_SET_INVALID_ID;
++
+ set->data = map;
+ return 0;
-+}
++}
+
-+static void destroy(struct ip_set *set)
++static void
++setlist_destroy(struct ip_set *set)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++ struct ip_set_setlist *map = set->data;
++ int i;
++
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID; i++)
++ __ip_set_put_byindex(map->index[i]);
+
-+ kfree(map->members);
+ kfree(map);
-+
+ set->data = NULL;
+}
+
-+static void flush(struct ip_set *set)
++static void
++setlist_flush(struct ip_set *set)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+ memset(map->members, 0, bitmap_bytes(map->first_port, map->last_port));
++ struct ip_set_setlist *map = set->data;
++ int i;
++
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID; i++) {
++ __ip_set_put_byindex(map->index[i]);
++ map->index[i] = IP_SET_INVALID_ID;
++ }
+}
+
-+static void list_header(const struct ip_set *set, void *data)
++static void
++setlist_list_header(const struct ip_set *set, void *data)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+ struct ip_set_req_portmap_create *header =
-+ (struct ip_set_req_portmap_create *) data;
-+
-+ DP("list_header %u %u", map->first_port, map->last_port);
-+
-+ header->from = map->first_port;
-+ header->to = map->last_port;
++ const struct ip_set_setlist *map = set->data;
++ struct ip_set_req_setlist_create *header = data;
++
++ header->size = map->size;
+}
+
-+static int list_members_size(const struct ip_set *set)
++static int
++setlist_list_members_size(const struct ip_set *set)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+
-+ return bitmap_bytes(map->first_port, map->last_port);
++ const struct ip_set_setlist *map = set->data;
++
++ return map->size * sizeof(ip_set_id_t);
+}
+
-+static void list_members(const struct ip_set *set, void *data)
++static void
++setlist_list_members(const struct ip_set *set, void *data)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+ int bytes = bitmap_bytes(map->first_port, map->last_port);
-+
-+ memcpy(data, map->members, bytes);
++ struct ip_set_setlist *map = set->data;
++ int i;
++
++ for (i = 0; i < map->size; i++)
++ *((ip_set_id_t *)data + i) = ip_set_id(map->index[i]);
+}
+
-+static struct ip_set_type ip_set_portmap = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_PORT | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_portmap),
-+ .addip = &addport,
-+ .addip_kernel = &addport_kernel,
-+ .delip = &delport,
-+ .delip_kernel = &delport_kernel,
-+ .testip = &testport,
-+ .testip_kernel = &testport_kernel,
-+ .header_size = sizeof(struct ip_set_req_portmap_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_TYPE(setlist, IPSET_TYPE_SETNAME | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("portmap type of IP sets");
-+
-+static int __init ip_set_portmap_init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_portmap);
-+}
++MODULE_DESCRIPTION("setlist type of IP sets");
+
-+static void __exit ip_set_portmap_fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_portmap);
-+}
-+
-+module_init(ip_set_portmap_init);
-+module_exit(ip_set_portmap_fini);
++REGISTER_MODULE(setlist)
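
The setlist type keeps an ordered, fixed-size array of set indices with IP_SET_INVALID_ID marking empty slots; the "before ==> index, ref" comment at the top of the file is the whole contract: adding "A before B" must leave A immediately in front of B, and insert_setlist()/unshift_setlist() simply shift the tail of the array. A minimal userspace sketch of that array discipline (plain integers in place of set indices, 0 standing in for IP_SET_INVALID_ID, all names invented for illustration):

#include <stdio.h>

#define SL_SIZE   8
#define SL_EMPTY  0	/* stands in for IP_SET_INVALID_ID */

/* Insert 'id' at position i, shifting the tail right; fails when full. */
static int sl_insert(int *slot, int i, int id)
{
	if (i >= SL_SIZE || slot[SL_SIZE - 1] != SL_EMPTY)
		return -1;
	for (int j = i; j < SL_SIZE && id != SL_EMPTY; j++) {
		int tmp = slot[j];

		slot[j] = id;
		id = tmp;
	}
	return 0;
}

/* Remove the entry at position i, shifting the tail left. */
static void sl_unshift(int *slot, int i)
{
	for (int j = i; j < SL_SIZE - 1; j++)
		slot[j] = slot[j + 1];
	slot[SL_SIZE - 1] = SL_EMPTY;
}

static void sl_print(const int *slot)
{
	for (int i = 0; i < SL_SIZE && slot[i] != SL_EMPTY; i++)
		printf("%d ", slot[i]);
	printf("\n");
}

int main(void)
{
	int slot[SL_SIZE] = { 10, 20, 30 };	/* three sets, by id */

	sl_insert(slot, 1, 15);	/* "add 15 before 20" -> 10 15 20 30 */
	sl_print(slot);
	sl_unshift(slot, 2);	/* "del 20"           -> 10 15 30    */
	sl_print(slot);
	return 0;
}
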
--- /dev/null
+++ b/net/ipv4/netfilter/ipt_set.c
-@@ -0,0 +1,160 @@
+@@ -0,0 +1,238 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Martin Josefsson <gandalf@wlug.westbo.se>
@@ -7197,7 +7090,7 @@
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
++ * published by the Free Software Foundation.
+ */
+
+/* Kernel module to match an IP set. */
@@ -7207,7 +7100,14 @@
+#include <linux/skbuff.h>
+#include <linux/version.h>
+
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
+#include <linux/netfilter_ipv4/ip_tables.h>
++#define xt_register_match ipt_register_match
++#define xt_unregister_match ipt_unregister_match
++#define xt_match ipt_match
++#else
++#include <linux/netfilter/x_tables.h>
++#endif
+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/netfilter_ipv4/ipt_set.h>
+
@@ -7221,58 +7121,119 @@
+ return inv;
+}
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+static bool
-+#else
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++static int
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *matchinfo,
++ int offset,
++ const void *hdr,
++ u_int16_t datalen,
++ int *hotdrop)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
++static int
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *matchinfo,
++ int offset,
++ int *hotdrop)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static int
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *matchinfo,
++ int offset,
++ unsigned int protoff,
++ int *hotdrop)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
+static int
-+#endif
+match(const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+ const struct xt_match *match,
-+#endif
+ const void *matchinfo,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ int offset, unsigned int protoff, bool *hotdrop)
-+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-+ int offset, unsigned int protoff, int *hotdrop)
-+#else
-+ int offset, int *hotdrop)
++ int offset,
++ unsigned int protoff,
++ int *hotdrop)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++static bool
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const struct xt_match *match,
++ const void *matchinfo,
++ int offset,
++ unsigned int protoff,
++ bool *hotdrop)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
++static bool
++match(const struct sk_buff *skb,
++ const struct xt_match_param *par)
+#endif
+{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+ const struct ipt_set_info_match *info = matchinfo;
++#else
++ const struct ipt_set_info_match *info = par->matchinfo;
++#endif
+
+ return match_set(&info->match_set,
+ skb,
+ info->match_set.flags[0] & IPSET_MATCH_INV);
+}
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+bool
-+#else
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
+static int
-+#endif
+checkentry(const char *tablename,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-+ const void *inf,
-+#else
+ const struct ipt_ip *ip,
-+#endif
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++ void *matchinfo,
++ unsigned int matchsize,
++ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static int
++checkentry(const char *tablename,
++ const void *inf,
++ void *matchinfo,
++ unsigned int matchsize,
++ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++static int
++checkentry(const char *tablename,
++ const void *inf,
+ const struct xt_match *match,
-+#endif
+ void *matchinfo,
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+ unsigned int matchsize,
-+#endif
+ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
++static int
++checkentry(const char *tablename,
++ const void *inf,
++ const struct xt_match *match,
++ void *matchinfo,
++ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++static bool
++checkentry(const char *tablename,
++ const void *inf,
++ const struct xt_match *match,
++ void *matchinfo,
++ unsigned int hook_mask)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
++static bool
++checkentry(const struct xt_mtchk_param *par)
++#endif
+{
-+ struct ipt_set_info_match *info =
-+ (struct ipt_set_info_match *) matchinfo;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++ struct ipt_set_info_match *info = matchinfo;
++#else
++ struct ipt_set_info_match *info = par->matchinfo;
++#endif
+ ip_set_id_t index;
+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
+ if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
+ ip_set_printk("invalid matchsize %d", matchsize);
+ return 0;
@@ -7294,65 +7255,75 @@
+ return 1;
+}
+
-+static void destroy(
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-+ const struct xt_match *match,
-+#endif
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ void *matchinfo, unsigned int matchsize)
-+#else
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static void destroy(void *matchinfo,
++ unsigned int matchsize)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++static void destroy(const struct xt_match *match,
++ void *matchinfo,
++ unsigned int matchsize)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++static void destroy(const struct xt_match *match,
+ void *matchinfo)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
++static void destroy(const struct xt_mtdtor_param *par)
+#endif
+{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+ struct ipt_set_info_match *info = matchinfo;
++#else
++ struct ipt_set_info_match *info = par->matchinfo;
++#endif
+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
+ if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
+ ip_set_printk("invalid matchsize %d", matchsize);
+ return;
+ }
+#endif
-+ ip_set_put(info->match_set.index);
++ ip_set_put_byindex(info->match_set.index);
+}
+
-+static struct ipt_match set_match = {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static struct xt_match set_match = {
++ .name = "set",
++ .match = &match,
++ .checkentry = &checkentry,
++ .destroy = &destroy,
++ .me = THIS_MODULE
++};
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17) */
++static struct xt_match set_match = {
+ .name = "set",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
+ .family = AF_INET,
-+#endif
+ .match = &match,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+ .matchsize = sizeof(struct ipt_set_info_match),
-+#endif
+ .checkentry = &checkentry,
+ .destroy = &destroy,
+ .me = THIS_MODULE
+};
++#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("iptables IP set match module");
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+#define ipt_register_match xt_register_match
-+#define ipt_unregister_match xt_unregister_match
-+#endif
-+
+static int __init ipt_ipset_init(void)
+{
-+ return ipt_register_match(&set_match);
++ return xt_register_match(&set_match);
+}
+
+static void __exit ipt_ipset_fini(void)
+{
-+ ipt_unregister_match(&set_match);
++ xt_unregister_match(&set_match);
+}
+
+module_init(ipt_ipset_init);
+module_exit(ipt_ipset_fini);
--- /dev/null
+++ b/net/ipv4/netfilter/ipt_SET.c
-@@ -0,0 +1,172 @@
+@@ -0,0 +1,242 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Martin Josefsson <gandalf@wlug.westbo.se>
@@ -7360,80 +7331,141 @@
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
++ * published by the Free Software Foundation.
+ */
+
+/* ipt_SET.c - netfilter target to manipulate IP sets */
+
-+#include <linux/types.h>
-+#include <linux/ip.h>
-+#include <linux/timer.h>
+#include <linux/module.h>
-+#include <linux/netfilter.h>
-+#include <linux/netdevice.h>
-+#include <linux/if.h>
-+#include <linux/inetdevice.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
+#include <linux/version.h>
-+#include <net/protocol.h>
-+#include <net/checksum.h>
++
+#include <linux/netfilter_ipv4.h>
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
+#include <linux/netfilter_ipv4/ip_tables.h>
++#define xt_register_target ipt_register_target
++#define xt_unregister_target ipt_unregister_target
++#define xt_target ipt_target
++#define XT_CONTINUE IPT_CONTINUE
++#else
++#include <linux/netfilter/x_tables.h>
++#endif
+#include <linux/netfilter_ipv4/ipt_set.h>
+
+static unsigned int
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++target(struct sk_buff **pskb,
++ unsigned int hooknum,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *targinfo,
++ void *userinfo)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++target(struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ unsigned int hooknum,
++ const void *targinfo,
++ void *userinfo)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+target(struct sk_buff **pskb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int hooknum,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+ const struct xt_target *target,
-+#endif
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+ const void *targinfo,
+ void *userinfo)
-+#else
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
++target(struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ unsigned int hooknum,
++ const struct xt_target *target,
++ const void *targinfo)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++target(struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ unsigned int hooknum,
++ const struct xt_target *target,
+ const void *targinfo)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
++target(struct sk_buff *skb,
++ const struct xt_target_param *par)
+#endif
+{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+ const struct ipt_set_info_target *info = targinfo;
++#else
++ const struct ipt_set_info_target *info = par->targinfo;
++#endif
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
++ struct sk_buff *skb = *pskb;
++#endif
++
+
+ if (info->add_set.index != IP_SET_INVALID_ID)
+ ip_set_addip_kernel(info->add_set.index,
-+ *pskb,
++ skb,
+ info->add_set.flags);
+ if (info->del_set.index != IP_SET_INVALID_ID)
+ ip_set_delip_kernel(info->del_set.index,
-+ *pskb,
++ skb,
+ info->del_set.flags);
+
-+ return IPT_CONTINUE;
++ return XT_CONTINUE;
+}
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+static bool
-+#else
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
+static int
-+#endif
+checkentry(const char *tablename,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-+ const void *e,
-+#else
+ const struct ipt_entry *e,
-+#endif
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++ void *targinfo,
++ unsigned int targinfosize,
++ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static int
++checkentry(const char *tablename,
++ const void *e,
++ void *targinfo,
++ unsigned int targinfosize,
++ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++static int
++checkentry(const char *tablename,
++ const void *e,
++ const struct xt_target *target,
++ void *targinfo,
++ unsigned int targinfosize,
++ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
++static int
++checkentry(const char *tablename,
++ const void *e,
+ const struct xt_target *target,
-+#endif
+ void *targinfo,
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ unsigned int targinfosize,
-+#endif
+ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++static bool
++checkentry(const char *tablename,
++ const void *e,
++ const struct xt_target *target,
++ void *targinfo,
++ unsigned int hook_mask)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
++static bool
++checkentry(const struct xt_tgchk_param *par)
++#endif
+{
-+ struct ipt_set_info_target *info =
-+ (struct ipt_set_info_target *) targinfo;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++ const struct ipt_set_info_target *info = targinfo;
++#else
++ const struct ipt_set_info_target *info = par->targinfo;
++#endif
+ ip_set_id_t index;
+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
+ if (targinfosize != IPT_ALIGN(sizeof(*info))) {
+ DP("bad target info size %u", targinfosize);
+ return 0;
@@ -7466,68 +7498,77 @@
+ return 1;
+}
+
-+static void destroy(
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-+ const struct xt_target *target,
-+#endif
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ void *targetinfo, unsigned int targetsize)
-+#else
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static void destroy(void *targetinfo,
++ unsigned int targetsize)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++static void destroy(const struct xt_target *target,
++ void *targetinfo,
++ unsigned int targetsize)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++static void destroy(const struct xt_target *target,
+ void *targetinfo)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
++static void destroy(const struct xt_tgdtor_param *par)
+#endif
+{
-+ struct ipt_set_info_target *info = targetinfo;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++ const struct ipt_set_info_target *info = targetinfo;
++#else
++ const struct ipt_set_info_target *info = par->targinfo;
++#endif
+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
+ if (targetsize != IPT_ALIGN(sizeof(struct ipt_set_info_target))) {
+ ip_set_printk("invalid targetsize %d", targetsize);
+ return;
+ }
+#endif
+ if (info->add_set.index != IP_SET_INVALID_ID)
-+ ip_set_put(info->add_set.index);
++ ip_set_put_byindex(info->add_set.index);
+ if (info->del_set.index != IP_SET_INVALID_ID)
-+ ip_set_put(info->del_set.index);
++ ip_set_put_byindex(info->del_set.index);
+}
+
-+static struct ipt_target SET_target = {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static struct xt_target SET_target = {
++ .name = "SET",
++ .target = target,
++ .checkentry = checkentry,
++ .destroy = destroy,
++ .me = THIS_MODULE
++};
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17) */
++static struct xt_target SET_target = {
+ .name = "SET",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
+ .family = AF_INET,
-+#endif
+ .target = target,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+ .targetsize = sizeof(struct ipt_set_info_target),
-+#endif
+ .checkentry = checkentry,
+ .destroy = destroy,
+ .me = THIS_MODULE
+};
++#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("iptables IP set target module");
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+#define ipt_register_target xt_register_target
-+#define ipt_unregister_target xt_unregister_target
-+#endif
-+
+static int __init ipt_SET_init(void)
+{
-+ return ipt_register_target(&SET_target);
++ return xt_register_target(&SET_target);
+}
+
+static void __exit ipt_SET_fini(void)
+{
-+ ipt_unregister_target(&SET_target);
++ xt_unregister_target(&SET_target);
+}
+
+module_init(ipt_SET_init);
+module_exit(ipt_SET_fini);
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
-@@ -657,5 +657,122 @@ config IP_NF_ARP_MANGLE
+@@ -657,5 +657,146 @@ config IP_NF_ARP_MANGLE
Allows altering the ARP packet payload: source and destination
hardware and network addresses.
@@ -7612,6 +7653,22 @@
+
+ To compile it as a module, choose M here. If unsure, say N.
+
++config IP_NF_SET_IPPORTIPHASH
++ tristate "ipportiphash set support"
++ depends on IP_NF_SET
++ help
++ This option adds the ipportiphash set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config IP_NF_SET_IPPORTNETHASH
++ tristate "ipportnethash set support"
++ depends on IP_NF_SET
++ help
++ This option adds the ipportnethash set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
+config IP_NF_SET_IPTREE
+ tristate "iptree set support"
+ depends on IP_NF_SET
@@ -7628,6 +7685,14 @@
+
+ To compile it as a module, choose M here. If unsure, say N.
+
++config IP_NF_SET_SETLIST
++ tristate "setlist set support"
++ depends on IP_NF_SET
++ help
++ This option adds the setlist set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
+config IP_NF_MATCH_SET
+ tristate "set match support"
+ depends on IP_NF_SET
@@ -7660,7 +7725,7 @@
obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
# targets
-@@ -105,6 +106,18 @@ obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LO
+@@ -105,6 +106,21 @@ obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LO
obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o
@@ -7674,8 +7739,11 @@
+obj-$(CONFIG_IP_NF_SET_IPHASH) += ip_set_iphash.o
+obj-$(CONFIG_IP_NF_SET_NETHASH) += ip_set_nethash.o
+obj-$(CONFIG_IP_NF_SET_IPPORTHASH) += ip_set_ipporthash.o
++obj-$(CONFIG_IP_NF_SET_IPPORTIPHASH) += ip_set_ipportiphash.o
++obj-$(CONFIG_IP_NF_SET_IPPORTNETHASH) += ip_set_ipportnethash.o
+obj-$(CONFIG_IP_NF_SET_IPTREE) += ip_set_iptree.o
+obj-$(CONFIG_IP_NF_SET_IPTREEMAP) += ip_set_iptreemap.o
++obj-$(CONFIG_IP_NF_SET_SETLIST) += ip_set_setlist.o
# generic ARP tables
obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o
diff --git a/target/linux/generic-2.6/patches-2.6.21/160-netfilter_route.patch b/target/linux/generic-2.6/patches-2.6.21/160-netfilter_route.patch
index 8042c4be26..0d305e3ddb 100644
--- a/target/linux/generic-2.6/patches-2.6.21/160-netfilter_route.patch
+++ b/target/linux/generic-2.6/patches-2.6.21/160-netfilter_route.patch
@@ -538,7 +538,7 @@
+module_exit(fini);
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
-@@ -801,5 +801,22 @@ config IP_NF_TARGET_SET
+@@ -825,5 +825,22 @@ config IP_NF_TARGET_SET
To compile it as a module, choose M here. If unsure, say N.
diff --git a/target/linux/generic-2.6/patches-2.6.25/130-netfilter_ipset.patch b/target/linux/generic-2.6/patches-2.6.25/130-netfilter_ipset.patch
index dee15049c8..b4405d20bb 100644
--- a/target/linux/generic-2.6/patches-2.6.25/130-netfilter_ipset.patch
+++ b/target/linux/generic-2.6/patches-2.6.25/130-netfilter_ipset.patch
@@ -1,23 +1,29 @@
--- a/include/linux/netfilter_ipv4/Kbuild
+++ b/include/linux/netfilter_ipv4/Kbuild
-@@ -45,3 +45,14 @@ header-y += ipt_ttl.h
+@@ -45,3 +45,20 @@ header-y += ipt_ttl.h
unifdef-y += ip_queue.h
unifdef-y += ip_tables.h
+
+unifdef-y += ip_set.h
+header-y += ip_set_iphash.h
++unifdef-y += ip_set_bitmaps.h
++unifdef-y += ip_set_getport.h
++unifdef-y += ip_set_hashes.h
+header-y += ip_set_ipmap.h
+header-y += ip_set_ipporthash.h
++header-y += ip_set_ipportiphash.h
++header-y += ip_set_ipportnethash.h
+unifdef-y += ip_set_iptree.h
+unifdef-y += ip_set_iptreemap.h
+header-y += ip_set_jhash.h
+header-y += ip_set_macipmap.h
-+unifdef-y += ip_set_nethash.h
++header-y += ip_set_nethash.h
+header-y += ip_set_portmap.h
++header-y += ip_set_setlist.h
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set.h
-@@ -0,0 +1,498 @@
+@@ -0,0 +1,574 @@
+#ifndef _IP_SET_H
+#define _IP_SET_H
+
@@ -60,7 +66,7 @@
+/*
+ * Used so that the kernel module and ipset-binary can match their versions
+ */
-+#define IP_SET_PROTOCOL_VERSION 2
++#define IP_SET_PROTOCOL_VERSION 3
+
+#define IP_SET_MAXNAMELEN 32 /* set names and set typenames */
+
@@ -107,6 +113,9 @@
+#define IPSET_TYPE_PORT 0x02 /* Port type of set */
+#define IPSET_DATA_SINGLE 0x04 /* Single data storage */
+#define IPSET_DATA_DOUBLE 0x08 /* Double data storage */
++#define IPSET_DATA_TRIPLE 0x10 /* Triple data storage */
++#define IPSET_TYPE_IP1 0x20 /* IP address type of set */
++#define IPSET_TYPE_SETNAME 0x40 /* setname type of set */
+
+/* Reserved keywords */
+#define IPSET_TOKEN_DEFAULT ":default:"
@@ -245,7 +254,7 @@
+struct ip_set_req_setnames {
+ unsigned op;
+ ip_set_id_t index; /* set to list/save */
-+ size_t size; /* size to get setdata/bindings */
++ u_int32_t size; /* size to get setdata/bindings */
+ /* followed by sets number of struct ip_set_name_list */
+};
+
@@ -267,9 +276,9 @@
+ ip_set_id_t index;
+ ip_set_id_t binding;
+ u_int32_t ref;
-+ size_t header_size; /* Set header data of header_size */
-+ size_t members_size; /* Set members data of members_size */
-+ size_t bindings_size; /* Set bindings data of bindings_size */
++ u_int32_t header_size; /* Set header data of header_size */
++ u_int32_t members_size; /* Set members data of members_size */
++ u_int32_t bindings_size;/* Set bindings data of bindings_size */
+};
+
+struct ip_set_hash_list {
@@ -286,8 +295,8 @@
+struct ip_set_save {
+ ip_set_id_t index;
+ ip_set_id_t binding;
-+ size_t header_size; /* Set header data of header_size */
-+ size_t members_size; /* Set members data of members_size */
++ u_int32_t header_size; /* Set header data of header_size */
++ u_int32_t members_size; /* Set members data of members_size */
+};
+
+/* At restoring, ip == 0 means default binding for the given set: */
@@ -307,8 +316,8 @@
+ char name[IP_SET_MAXNAMELEN];
+ char typename[IP_SET_MAXNAMELEN];
+ ip_set_id_t index;
-+ size_t header_size; /* Create data of header_size */
-+ size_t members_size; /* Set members data of members_size */
++ u_int32_t header_size; /* Create data of header_size */
++ u_int32_t members_size; /* Set members data of members_size */
+};
+
+static inline int bitmap_bytes(ip_set_ip_t a, ip_set_ip_t b)
@@ -316,7 +325,12 @@
+ return 4 * ((((b - a + 8) / 8) + 3) / 4);
+}
+
++/* General limit for the elements in a set */
++#define MAX_RANGE 0x0000FFFF
++
+#ifdef __KERNEL__
++#include <linux/netfilter_ipv4/ip_set_compat.h>
++#include <linux/netfilter_ipv4/ip_set_malloc.h>
+
+#define ip_set_printk(format, args...) \
+ do { \
@@ -370,14 +384,14 @@
+ * return 0 if not in set, 1 if in set.
+ */
+ int (*testip) (struct ip_set *set,
-+ const void *data, size_t size,
++ const void *data, u_int32_t size,
+ ip_set_ip_t *ip);
+
+ /*
+ * Size of the data structure passed by when
+ * adding/deletin/testing an entry.
+ */
-+ size_t reqsize;
++ u_int32_t reqsize;
+
+ /* Add IP into set (userspace: ipset -A set IP)
+ * Return -EEXIST if the address is already in the set,
@@ -385,7 +399,7 @@
+ * If the address was not already in the set, 0 is returned.
+ */
+ int (*addip) (struct ip_set *set,
-+ const void *data, size_t size,
++ const void *data, u_int32_t size,
+ ip_set_ip_t *ip);
+
+ /* Add IP into set (kernel: iptables ... -j SET set src|dst)
@@ -405,7 +419,7 @@
+ * If the address really was in the set, 0 is returned.
+ */
+ int (*delip) (struct ip_set *set,
-+ const void *data, size_t size,
++ const void *data, u_int32_t size,
+ ip_set_ip_t *ip);
+
+ /* remove IP from set (kernel: iptables ... -j SET --entry x)
@@ -422,7 +436,7 @@
+ /* new set creation - allocated type specific items
+ */
+ int (*create) (struct ip_set *set,
-+ const void *data, size_t size);
++ const void *data, u_int32_t size);
+
+ /* retry the operation after successfully tweaking the set
+ */
@@ -441,7 +455,7 @@
+
+ /* Listing: size needed for header
+ */
-+ size_t header_size;
++ u_int32_t header_size;
+
+ /* Listing: Get the header
+ *
@@ -499,33 +513,659 @@
+
+/* register and unregister set references */
+extern ip_set_id_t ip_set_get_byname(const char name[IP_SET_MAXNAMELEN]);
-+extern ip_set_id_t ip_set_get_byindex(ip_set_id_t id);
-+extern void ip_set_put(ip_set_id_t id);
++extern ip_set_id_t ip_set_get_byindex(ip_set_id_t index);
++extern void ip_set_put_byindex(ip_set_id_t index);
++extern ip_set_id_t ip_set_id(ip_set_id_t index);
++extern ip_set_id_t __ip_set_get_byname(const char name[IP_SET_MAXNAMELEN],
++ struct ip_set **set);
++extern void __ip_set_put_byindex(ip_set_id_t index);
+
+/* API for iptables set match, and SET target */
-+extern void ip_set_addip_kernel(ip_set_id_t id,
-+ const struct sk_buff *skb,
-+ const u_int32_t *flags);
-+extern void ip_set_delip_kernel(ip_set_id_t id,
-+ const struct sk_buff *skb,
-+ const u_int32_t *flags);
++extern int ip_set_addip_kernel(ip_set_id_t id,
++ const struct sk_buff *skb,
++ const u_int32_t *flags);
++extern int ip_set_delip_kernel(ip_set_id_t id,
++ const struct sk_buff *skb,
++ const u_int32_t *flags);
+extern int ip_set_testip_kernel(ip_set_id_t id,
+ const struct sk_buff *skb,
+ const u_int32_t *flags);
+
++/* Macros to generate functions */
++
++#define STRUCT(pre, type) CONCAT2(pre, type)
++#define CONCAT2(pre, type) struct pre##type
++
++#define FNAME(pre, mid, post) CONCAT3(pre, mid, post)
++#define CONCAT3(pre, mid, post) pre##mid##post
++
++#define UADT0(type, adt, args...) \
++static int \
++FNAME(type,_u,adt)(struct ip_set *set, const void *data, u_int32_t size,\
++ ip_set_ip_t *hash_ip) \
++{ \
++ const STRUCT(ip_set_req_,type) *req = data; \
++ \
++ return FNAME(type,_,adt)(set, hash_ip , ## args); \
++}
++
++#define UADT(type, adt, args...) \
++ UADT0(type, adt, req->ip , ## args)
++
++#define KADT(type, adt, getfn, args...) \
++static int \
++FNAME(type,_k,adt)(struct ip_set *set, \
++ const struct sk_buff *skb, \
++ ip_set_ip_t *hash_ip, \
++ const u_int32_t *flags, \
++ unsigned char index) \
++{ \
++ ip_set_ip_t ip = getfn(skb, flags[index]); \
++ \
++ KADT_CONDITION \
++ return FNAME(type,_,adt)(set, hash_ip, ip , ##args); \
++}
++
++#define REGISTER_MODULE(type) \
++static int __init ip_set_##type##_init(void) \
++{ \
++ init_max_page_size(); \
++ return ip_set_register_set_type(&ip_set_##type); \
++} \
++ \
++static void __exit ip_set_##type##_fini(void) \
++{ \
++ /* FIXME: possible race with ip_set_create() */ \
++ ip_set_unregister_set_type(&ip_set_##type); \
++} \
++ \
++module_init(ip_set_##type##_init); \
++module_exit(ip_set_##type##_fini);
++
++/* Common functions */
++
++static inline ip_set_ip_t
++ipaddr(const struct sk_buff *skb, u_int32_t flag)
++{
++ return ntohl(flag & IPSET_SRC ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr);
++}
++
++#define jhash_ip(map, i, ip) jhash_1word(ip, *(map->initval + i))
++
++#define pack_ip_port(map, ip, port) \
++ (port + ((ip - ((map)->first_ip)) << 16))
++
+#endif /* __KERNEL__ */
+
+#endif /*_IP_SET_H*/
--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_bitmaps.h
+@@ -0,0 +1,121 @@
++#ifndef __IP_SET_BITMAPS_H
++#define __IP_SET_BITMAPS_H
++
++/* Macros to generate functions */
++
++#ifdef __KERNEL__
++#define BITMAP_CREATE(type) \
++static int \
++type##_create(struct ip_set *set, const void *data, u_int32_t size) \
++{ \
++ int newbytes; \
++ const struct ip_set_req_##type##_create *req = data; \
++ struct ip_set_##type *map; \
++ \
++ if (req->from > req->to) { \
++ DP("bad range"); \
++ return -ENOEXEC; \
++ } \
++ \
++ map = kmalloc(sizeof(struct ip_set_##type), GFP_KERNEL); \
++ if (!map) { \
++ DP("out of memory for %zu bytes", \
++ sizeof(struct ip_set_##type)); \
++ return -ENOMEM; \
++ } \
++ map->first_ip = req->from; \
++ map->last_ip = req->to; \
++ \
++ newbytes = __##type##_create(req, map); \
++ if (newbytes < 0) { \
++ kfree(map); \
++ return newbytes; \
++ } \
++ \
++ map->size = newbytes; \
++ map->members = ip_set_malloc(newbytes); \
++ if (!map->members) { \
++ DP("out of memory for %i bytes", newbytes); \
++ kfree(map); \
++ return -ENOMEM; \
++ } \
++ memset(map->members, 0, newbytes); \
++ \
++ set->data = map; \
++ return 0; \
++}
++
++#define BITMAP_DESTROY(type) \
++static void \
++type##_destroy(struct ip_set *set) \
++{ \
++ struct ip_set_##type *map = set->data; \
++ \
++ ip_set_free(map->members, map->size); \
++ kfree(map); \
++ \
++ set->data = NULL; \
++}
++
++#define BITMAP_FLUSH(type) \
++static void \
++type##_flush(struct ip_set *set) \
++{ \
++ struct ip_set_##type *map = set->data; \
++ memset(map->members, 0, map->size); \
++}
++
++#define BITMAP_LIST_HEADER(type) \
++static void \
++type##_list_header(const struct ip_set *set, void *data) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ struct ip_set_req_##type##_create *header = data; \
++ \
++ header->from = map->first_ip; \
++ header->to = map->last_ip; \
++ __##type##_list_header(map, header); \
++}
++
++#define BITMAP_LIST_MEMBERS_SIZE(type) \
++static int \
++type##_list_members_size(const struct ip_set *set) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ \
++ return map->size; \
++}
++
++#define BITMAP_LIST_MEMBERS(type) \
++static void \
++type##_list_members(const struct ip_set *set, void *data) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ \
++ memcpy(data, map->members, map->size); \
++}
++
++#define IP_SET_TYPE(type, __features) \
++struct ip_set_type ip_set_##type = { \
++ .typename = #type, \
++ .features = __features, \
++ .protocol_version = IP_SET_PROTOCOL_VERSION, \
++ .create = &type##_create, \
++ .destroy = &type##_destroy, \
++ .flush = &type##_flush, \
++ .reqsize = sizeof(struct ip_set_req_##type), \
++ .addip = &type##_uadd, \
++ .addip_kernel = &type##_kadd, \
++ .delip = &type##_udel, \
++ .delip_kernel = &type##_kdel, \
++ .testip = &type##_utest, \
++ .testip_kernel = &type##_ktest, \
++ .header_size = sizeof(struct ip_set_req_##type##_create),\
++ .list_header = &type##_list_header, \
++ .list_members_size = &type##_list_members_size, \
++ .list_members = &type##_list_members, \
++ .me = THIS_MODULE, \
++};
++#endif /* __KERNEL */
++
++#endif /* __IP_SET_BITMAPS_H */
+--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_compat.h
+@@ -0,0 +1,71 @@
++#ifndef _IP_SET_COMPAT_H
++#define _IP_SET_COMPAT_H
++
++#ifdef __KERNEL__
++#include <linux/version.h>
++
++/* Arrgh */
++#ifdef MODULE
++#define __MOD_INC(foo) __MOD_INC_USE_COUNT(foo)
++#define __MOD_DEC(foo) __MOD_DEC_USE_COUNT(foo)
++#else
++#define __MOD_INC(foo) 1
++#define __MOD_DEC(foo)
++#endif
++
++/* Backward compatibility */
++#ifndef __nocast
++#define __nocast
++#endif
++#ifndef __bitwise__
++#define __bitwise__
++#endif
++
++/* Compatibility glue code */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++#include <linux/interrupt.h>
++#define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED
++#define try_module_get(x) __MOD_INC(x)
++#define module_put(x) __MOD_DEC(x)
++#define __clear_bit(nr, addr) clear_bit(nr, addr)
++#define __set_bit(nr, addr) set_bit(nr, addr)
++#define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)
++#define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)
++
++typedef unsigned __bitwise__ gfp_t;
++
++static inline void *kzalloc(size_t size, gfp_t flags)
++{
++ void *data = kmalloc(size, flags);
++
++ if (data)
++ memset(data, 0, size);
++
++ return data;
++}
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++#define __KMEM_CACHE_T__ kmem_cache_t
++#else
++#define __KMEM_CACHE_T__ struct kmem_cache
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
++#define ip_hdr(skb) ((skb)->nh.iph)
++#define skb_mac_header(skb) ((skb)->mac.raw)
++#define eth_hdr(skb) ((struct ethhdr *)skb_mac_header(skb))
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
++#include <linux/netfilter.h>
++#define KMEM_CACHE_CREATE(name, size) \
++ kmem_cache_create(name, size, 0, 0, NULL, NULL)
++#else
++#define KMEM_CACHE_CREATE(name, size) \
++ kmem_cache_create(name, size, 0, 0, NULL)
++#endif
++
++
++#endif /* __KERNEL__ */
++#endif /* _IP_SET_COMPAT_H */
+--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_getport.h
+@@ -0,0 +1,48 @@
++#ifndef _IP_SET_GETPORT_H
++#define _IP_SET_GETPORT_H
++
++#ifdef __KERNEL__
++
++#define INVALID_PORT (MAX_RANGE + 1)
++
++/* We must handle non-linear skbs */
++static inline ip_set_ip_t
++get_port(const struct sk_buff *skb, u_int32_t flags)
++{
++ struct iphdr *iph = ip_hdr(skb);
++ u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
++ switch (iph->protocol) {
++ case IPPROTO_TCP: {
++ struct tcphdr tcph;
++
++ /* See comments at tcp_match in ip_tables.c */
++ if (offset)
++ return INVALID_PORT;
++
++ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
++ /* No choice either */
++ return INVALID_PORT;
++
++ return ntohs(flags & IPSET_SRC ?
++ tcph.source : tcph.dest);
++ }
++ case IPPROTO_UDP: {
++ struct udphdr udph;
++
++ if (offset)
++ return INVALID_PORT;
++
++ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
++ /* No choice either */
++ return INVALID_PORT;
++
++ return ntohs(flags & IPSET_SRC ?
++ udph.source : udph.dest);
++ }
++ default:
++ return INVALID_PORT;
++ }
++}
++#endif /* __KERNEL__ */
++
++#endif /*_IP_SET_GETPORT_H*/
+--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_hashes.h
+@@ -0,0 +1,306 @@
++#ifndef __IP_SET_HASHES_H
++#define __IP_SET_HASHES_H
++
++#define initval_t uint32_t
++
++/* Macros to generate functions */
++
++#ifdef __KERNEL__
++#define HASH_RETRY0(type, dtype, cond) \
++static int \
++type##_retry(struct ip_set *set) \
++{ \
++ struct ip_set_##type *map = set->data, *tmp; \
++ dtype *elem; \
++ void *members; \
++ u_int32_t i, hashsize = map->hashsize; \
++ int res; \
++ \
++ if (map->resize == 0) \
++ return -ERANGE; \
++ \
++ again: \
++ res = 0; \
++ \
++ /* Calculate new hash size */ \
++ hashsize += (hashsize * map->resize)/100; \
++ if (hashsize == map->hashsize) \
++ hashsize++; \
++ \
++ ip_set_printk("rehashing of set %s triggered: " \
++ "hashsize grows from %lu to %lu", \
++ set->name, \
++ (long unsigned)map->hashsize, \
++ (long unsigned)hashsize); \
++ \
++ tmp = kmalloc(sizeof(struct ip_set_##type) \
++ + map->probes * sizeof(initval_t), GFP_ATOMIC); \
++ if (!tmp) { \
++ DP("out of memory for %zu bytes", \
++ sizeof(struct ip_set_##type) \
++ + map->probes * sizeof(initval_t)); \
++ return -ENOMEM; \
++ } \
++ tmp->members = harray_malloc(hashsize, sizeof(dtype), GFP_ATOMIC);\
++ if (!tmp->members) { \
++ DP("out of memory for %zu bytes", hashsize * sizeof(dtype));\
++ kfree(tmp); \
++ return -ENOMEM; \
++ } \
++ tmp->hashsize = hashsize; \
++ tmp->elements = 0; \
++ tmp->probes = map->probes; \
++ tmp->resize = map->resize; \
++ memcpy(tmp->initval, map->initval, map->probes * sizeof(initval_t));\
++ __##type##_retry(tmp, map); \
++ \
++ write_lock_bh(&set->lock); \
++ map = set->data; /* Play safe */ \
++ for (i = 0; i < map->hashsize && res == 0; i++) { \
++ elem = HARRAY_ELEM(map->members, dtype *, i); \
++ if (cond) \
++ res = __##type##_add(tmp, elem); \
++ } \
++ if (res) { \
++ /* Failure, try again */ \
++ write_unlock_bh(&set->lock); \
++ harray_free(tmp->members); \
++ kfree(tmp); \
++ goto again; \
++ } \
++ \
++ /* Success at resizing! */ \
++ members = map->members; \
++ \
++ map->hashsize = tmp->hashsize; \
++ map->members = tmp->members; \
++ write_unlock_bh(&set->lock); \
++ \
++ harray_free(members); \
++ kfree(tmp); \
++ \
++ return 0; \
++}
++
++#define HASH_RETRY(type, dtype) \
++ HASH_RETRY0(type, dtype, *elem)
++
++#define HASH_RETRY2(type, dtype) \
++ HASH_RETRY0(type, dtype, elem->ip || elem->ip1)
++
++#define HASH_CREATE(type, dtype) \
++static int \
++type##_create(struct ip_set *set, const void *data, u_int32_t size) \
++{ \
++ const struct ip_set_req_##type##_create *req = data; \
++ struct ip_set_##type *map; \
++ uint16_t i; \
++ \
++ if (req->hashsize < 1) { \
++ ip_set_printk("hashsize too small"); \
++ return -ENOEXEC; \
++ } \
++ \
++ if (req->probes < 1) { \
++ ip_set_printk("probes too small"); \
++ return -ENOEXEC; \
++ } \
++ \
++ map = kmalloc(sizeof(struct ip_set_##type) \
++ + req->probes * sizeof(initval_t), GFP_KERNEL); \
++ if (!map) { \
++ DP("out of memory for %zu bytes", \
++ sizeof(struct ip_set_##type) \
++ + req->probes * sizeof(initval_t)); \
++ return -ENOMEM; \
++ } \
++ for (i = 0; i < req->probes; i++) \
++ get_random_bytes(((initval_t *) map->initval)+i, 4); \
++ map->elements = 0; \
++ map->hashsize = req->hashsize; \
++ map->probes = req->probes; \
++ map->resize = req->resize; \
++ if (__##type##_create(req, map)) { \
++ kfree(map); \
++ return -ENOEXEC; \
++ } \
++ map->members = harray_malloc(map->hashsize, sizeof(dtype), GFP_KERNEL);\
++ if (!map->members) { \
++ DP("out of memory for %zu bytes", map->hashsize * sizeof(dtype));\
++ kfree(map); \
++ return -ENOMEM; \
++ } \
++ \
++ set->data = map; \
++ return 0; \
++}
++
++#define HASH_DESTROY(type) \
++static void \
++type##_destroy(struct ip_set *set) \
++{ \
++ struct ip_set_##type *map = set->data; \
++ \
++ harray_free(map->members); \
++ kfree(map); \
++ \
++ set->data = NULL; \
++}
++
++#define HASH_FLUSH(type, dtype) \
++static void \
++type##_flush(struct ip_set *set) \
++{ \
++ struct ip_set_##type *map = set->data; \
++ harray_flush(map->members, map->hashsize, sizeof(dtype)); \
++ map->elements = 0; \
++}
++
++#define HASH_FLUSH_CIDR(type, dtype) \
++static void \
++type##_flush(struct ip_set *set) \
++{ \
++ struct ip_set_##type *map = set->data; \
++ harray_flush(map->members, map->hashsize, sizeof(dtype)); \
++ memset(map->cidr, 0, sizeof(map->cidr)); \
++ memset(map->nets, 0, sizeof(map->nets)); \
++ map->elements = 0; \
++}
++
++#define HASH_LIST_HEADER(type) \
++static void \
++type##_list_header(const struct ip_set *set, void *data) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ struct ip_set_req_##type##_create *header = data; \
++ \
++ header->hashsize = map->hashsize; \
++ header->probes = map->probes; \
++ header->resize = map->resize; \
++ __##type##_list_header(map, header); \
++}
++
++#define HASH_LIST_MEMBERS_SIZE(type, dtype) \
++static int \
++type##_list_members_size(const struct ip_set *set) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ \
++ return (map->hashsize * sizeof(dtype)); \
++}
++
++#define HASH_LIST_MEMBERS(type, dtype) \
++static void \
++type##_list_members(const struct ip_set *set, void *data) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ dtype *elem; \
++ uint32_t i; \
++ \
++ for (i = 0; i < map->hashsize; i++) { \
++ elem = HARRAY_ELEM(map->members, dtype *, i); \
++ ((dtype *)data)[i] = *elem; \
++ } \
++}
++
++#define HASH_LIST_MEMBERS_MEMCPY(type, dtype) \
++static void \
++type##_list_members(const struct ip_set *set, void *data) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ dtype *elem; \
++ uint32_t i; \
++ \
++ for (i = 0; i < map->hashsize; i++) { \
++ elem = HARRAY_ELEM(map->members, dtype *, i); \
++ memcpy((((dtype *)data)+i), elem, sizeof(dtype)); \
++ } \
++}
++
++#define IP_SET_RTYPE(type, __features) \
++struct ip_set_type ip_set_##type = { \
++ .typename = #type, \
++ .features = __features, \
++ .protocol_version = IP_SET_PROTOCOL_VERSION, \
++ .create = &type##_create, \
++ .retry = &type##_retry, \
++ .destroy = &type##_destroy, \
++ .flush = &type##_flush, \
++ .reqsize = sizeof(struct ip_set_req_##type), \
++ .addip = &type##_uadd, \
++ .addip_kernel = &type##_kadd, \
++ .delip = &type##_udel, \
++ .delip_kernel = &type##_kdel, \
++ .testip = &type##_utest, \
++ .testip_kernel = &type##_ktest, \
++ .header_size = sizeof(struct ip_set_req_##type##_create),\
++ .list_header = &type##_list_header, \
++ .list_members_size = &type##_list_members_size, \
++ .list_members = &type##_list_members, \
++ .me = THIS_MODULE, \
++};
++
++/* Helper functions */
++static inline void
++add_cidr_size(uint8_t *cidr, uint8_t size)
++{
++ uint8_t next;
++ int i;
++
++ for (i = 0; i < 30 && cidr[i]; i++) {
++ if (cidr[i] < size) {
++ next = cidr[i];
++ cidr[i] = size;
++ size = next;
++ }
++ }
++ if (i < 30)
++ cidr[i] = size;
++}
++
++static inline void
++del_cidr_size(uint8_t *cidr, uint8_t size)
++{
++ int i;
++
++ for (i = 0; i < 29 && cidr[i]; i++) {
++ if (cidr[i] == size)
++ cidr[i] = size = cidr[i+1];
++ }
++ cidr[29] = 0;
++}
++#else
++#include <arpa/inet.h>
++#endif /* __KERNEL */
++
++#ifndef UINT16_MAX
++#define UINT16_MAX 65535
++#endif
++
++static unsigned char shifts[] = {255, 253, 249, 241, 225, 193, 129, 1};
++
++static inline ip_set_ip_t
++pack_ip_cidr(ip_set_ip_t ip, unsigned char cidr)
++{
++ ip_set_ip_t addr, *paddr = &addr;
++ unsigned char n, t, *a;
++
++ addr = htonl(ip & (0xFFFFFFFF << (32 - (cidr))));
++#ifdef __KERNEL__
++ DP("ip:%u.%u.%u.%u/%u", NIPQUAD(addr), cidr);
++#endif
++ n = cidr / 8;
++ t = cidr % 8;
++ a = &((unsigned char *)paddr)[n];
++ *a = *a /(1 << (8 - t)) + shifts[t];
++#ifdef __KERNEL__
++ DP("n: %u, t: %u, a: %u", n, t, *a);
++ DP("ip:%u.%u.%u.%u/%u, %u.%u.%u.%u",
++ HIPQUAD(ip), cidr, NIPQUAD(addr));
++#endif
++
++ return ntohl(addr);
++}
++
++
++#endif /* __IP_SET_HASHES_H */
+--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_iphash.h
@@ -0,0 +1,30 @@
+#ifndef __IP_SET_IPHASH_H
+#define __IP_SET_IPHASH_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_hashes.h>
+
+#define SETTYPE_NAME "iphash"
-+#define MAX_RANGE 0x0000FFFF
+
+struct ip_set_iphash {
+ ip_set_ip_t *members; /* the iphash proper */
@@ -534,7 +1174,7 @@
+ uint16_t probes; /* max number of probes */
+ uint16_t resize; /* resize factor in percent */
+ ip_set_ip_t netmask; /* netmask */
-+ void *initval[0]; /* initvals for jhash_1word */
++ initval_t initval[0]; /* initvals for jhash_1word */
+};
+
+struct ip_set_req_iphash_create {
@@ -551,14 +1191,14 @@
+#endif /* __IP_SET_IPHASH_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_ipmap.h
-@@ -0,0 +1,56 @@
+@@ -0,0 +1,57 @@
+#ifndef __IP_SET_IPMAP_H
+#define __IP_SET_IPMAP_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
+
+#define SETTYPE_NAME "ipmap"
-+#define MAX_RANGE 0x0000FFFF
+
+struct ip_set_ipmap {
+ void *members; /* the ipmap proper */
@@ -567,6 +1207,7 @@
+ ip_set_ip_t netmask; /* subnet netmask */
+ ip_set_ip_t sizeid; /* size of set in IPs */
+ ip_set_ip_t hosts; /* number of hosts in a subnet */
++ u_int32_t size; /* size of the ipmap proper */
+};
+
+struct ip_set_req_ipmap_create {
@@ -579,7 +1220,7 @@
+ ip_set_ip_t ip;
+};
+
-+unsigned int
++static inline unsigned int
+mask_to_bits(ip_set_ip_t mask)
+{
+ unsigned int bits = 32;
@@ -589,19 +1230,19 @@
+ return bits;
+
+ maskaddr = 0xFFFFFFFE;
-+ while (--bits >= 0 && maskaddr != mask)
++ while (--bits > 0 && maskaddr != mask)
+ maskaddr <<= 1;
+
+ return bits;
+}
+
-+ip_set_ip_t
++static inline ip_set_ip_t
+range_to_mask(ip_set_ip_t from, ip_set_ip_t to, unsigned int *bits)
+{
+ ip_set_ip_t mask = 0xFFFFFFFE;
+
+ *bits = 32;
-+ while (--(*bits) >= 0 && mask && (to & mask) != from)
++ while (--(*bits) > 0 && mask && (to & mask) != from)
+ mask <<= 1;
+
+ return mask;
@@ -610,15 +1251,14 @@
+#endif /* __IP_SET_IPMAP_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_ipporthash.h
-@@ -0,0 +1,34 @@
+@@ -0,0 +1,33 @@
+#ifndef __IP_SET_IPPORTHASH_H
+#define __IP_SET_IPPORTHASH_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_hashes.h>
+
+#define SETTYPE_NAME "ipporthash"
-+#define MAX_RANGE 0x0000FFFF
-+#define INVALID_PORT (MAX_RANGE + 1)
+
+struct ip_set_ipporthash {
+ ip_set_ip_t *members; /* the ipporthash proper */
@@ -628,7 +1268,7 @@
+ uint16_t resize; /* resize factor in percent */
+ ip_set_ip_t first_ip; /* host byte order, included in range */
+ ip_set_ip_t last_ip; /* host byte order, included in range */
-+ void *initval[0]; /* initvals for jhash_1word */
++ initval_t initval[0]; /* initvals for jhash_1word */
+};
+
+struct ip_set_req_ipporthash_create {
@@ -646,15 +1286,101 @@
+
+#endif /* __IP_SET_IPPORTHASH_H */
--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_ipportiphash.h
+@@ -0,0 +1,39 @@
++#ifndef __IP_SET_IPPORTIPHASH_H
++#define __IP_SET_IPPORTIPHASH_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_hashes.h>
++
++#define SETTYPE_NAME "ipportiphash"
++
++struct ipportip {
++ ip_set_ip_t ip;
++ ip_set_ip_t ip1;
++};
++
++struct ip_set_ipportiphash {
++ struct ipportip *members; /* the ipportip proper */
++ uint32_t elements; /* number of elements */
++ uint32_t hashsize; /* hash size */
++ uint16_t probes; /* max number of probes */
++ uint16_t resize; /* resize factor in percent */
++ ip_set_ip_t first_ip; /* host byte order, included in range */
++ ip_set_ip_t last_ip; /* host byte order, included in range */
++ initval_t initval[0]; /* initvals for jhash_1word */
++};
++
++struct ip_set_req_ipportiphash_create {
++ uint32_t hashsize;
++ uint16_t probes;
++ uint16_t resize;
++ ip_set_ip_t from;
++ ip_set_ip_t to;
++};
++
++struct ip_set_req_ipportiphash {
++ ip_set_ip_t ip;
++ ip_set_ip_t port;
++ ip_set_ip_t ip1;
++};
++
++#endif /* __IP_SET_IPPORTIPHASH_H */
+--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_ipportnethash.h
+@@ -0,0 +1,42 @@
++#ifndef __IP_SET_IPPORTNETHASH_H
++#define __IP_SET_IPPORTNETHASH_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_hashes.h>
++
++#define SETTYPE_NAME "ipportnethash"
++
++struct ipportip {
++ ip_set_ip_t ip;
++ ip_set_ip_t ip1;
++};
++
++struct ip_set_ipportnethash {
++ struct ipportip *members; /* the ipportip proper */
++ uint32_t elements; /* number of elements */
++ uint32_t hashsize; /* hash size */
++ uint16_t probes; /* max number of probes */
++ uint16_t resize; /* resize factor in percent */
++ ip_set_ip_t first_ip; /* host byte order, included in range */
++ ip_set_ip_t last_ip; /* host byte order, included in range */
++ uint8_t cidr[30]; /* CIDR sizes */
++ uint16_t nets[30]; /* nr of nets by CIDR sizes */
++ initval_t initval[0]; /* initvals for jhash_1word */
++};
++
++struct ip_set_req_ipportnethash_create {
++ uint32_t hashsize;
++ uint16_t probes;
++ uint16_t resize;
++ ip_set_ip_t from;
++ ip_set_ip_t to;
++};
++
++struct ip_set_req_ipportnethash {
++ ip_set_ip_t ip;
++ ip_set_ip_t port;
++ ip_set_ip_t ip1;
++ uint8_t cidr;
++};
++
++#endif /* __IP_SET_IPPORTNETHASH_H */
+--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_iptree.h
-@@ -0,0 +1,40 @@
+@@ -0,0 +1,39 @@
+#ifndef __IP_SET_IPTREE_H
+#define __IP_SET_IPTREE_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
+
+#define SETTYPE_NAME "iptree"
-+#define MAX_RANGE 0x0000FFFF
+
+struct ip_set_iptreed {
+ unsigned long expires[256]; /* x.x.x.ADDR */
@@ -726,172 +1452,181 @@
+};
+
+struct ip_set_req_iptreemap {
-+ ip_set_ip_t start;
++ ip_set_ip_t ip;
+ ip_set_ip_t end;
+};
+
+#endif /* __IP_SET_IPTREEMAP_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_jhash.h
-@@ -0,0 +1,148 @@
-+#ifndef _LINUX_IPSET_JHASH_H
-+#define _LINUX_IPSET_JHASH_H
-+
-+/* This is a copy of linux/jhash.h but the types u32/u8 are changed
-+ * to __u32/__u8 so that the header file can be included into
-+ * userspace code as well. Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
-+ */
+@@ -0,0 +1,157 @@
++#ifndef _LINUX_JHASH_H
++#define _LINUX_JHASH_H
+
+/* jhash.h: Jenkins hash support.
+ *
-+ * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)
++ * Copyright (C) 2006. Bob Jenkins (bob_jenkins@burtleburtle.net)
+ *
+ * http://burtleburtle.net/bob/hash/
+ *
+ * These are the credits from Bob's sources:
+ *
-+ * lookup2.c, by Bob Jenkins, December 1996, Public Domain.
-+ * hash(), hash2(), hash3, and mix() are externally useful functions.
-+ * Routines to test the hash are included if SELF_TEST is defined.
-+ * You can use this free for any purpose. It has no warranty.
++ * lookup3.c, by Bob Jenkins, May 2006, Public Domain.
+ *
-+ * Copyright (C) 2003 David S. Miller (davem@redhat.com)
++ * These are functions for producing 32-bit hashes for hash table lookup.
++ * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
++ * are externally useful functions. Routines to test the hash are included
++ * if SELF_TEST is defined. You can use this free for any purpose. It's in
++ * the public domain. It has no warranty.
++ *
++ * Copyright (C) 2009 Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
+ *
+ * I've modified Bob's hash to be useful in the Linux kernel, and
-+ * any bugs present are surely my fault. -DaveM
++ * any bugs present are my fault. Jozsef
+ */
+
-+/* NOTE: Arguments are modified. */
-+#define __jhash_mix(a, b, c) \
++#define __rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))
++
++/* __jhash_mix - mix 3 32-bit values reversibly. */
++#define __jhash_mix(a,b,c) \
+{ \
-+ a -= b; a -= c; a ^= (c>>13); \
-+ b -= c; b -= a; b ^= (a<<8); \
-+ c -= a; c -= b; c ^= (b>>13); \
-+ a -= b; a -= c; a ^= (c>>12); \
-+ b -= c; b -= a; b ^= (a<<16); \
-+ c -= a; c -= b; c ^= (b>>5); \
-+ a -= b; a -= c; a ^= (c>>3); \
-+ b -= c; b -= a; b ^= (a<<10); \
-+ c -= a; c -= b; c ^= (b>>15); \
++ a -= c; a ^= __rot(c, 4); c += b; \
++ b -= a; b ^= __rot(a, 6); a += c; \
++ c -= b; c ^= __rot(b, 8); b += a; \
++ a -= c; a ^= __rot(c,16); c += b; \
++ b -= a; b ^= __rot(a,19); a += c; \
++ c -= b; c ^= __rot(b, 4); b += a; \
++}
++
++/* __jhash_final - final mixing of 3 32-bit values (a,b,c) into c */
++#define __jhash_final(a,b,c) \
++{ \
++ c ^= b; c -= __rot(b,14); \
++ a ^= c; a -= __rot(c,11); \
++ b ^= a; b -= __rot(a,25); \
++ c ^= b; c -= __rot(b,16); \
++ a ^= c; a -= __rot(c,4); \
++ b ^= a; b -= __rot(a,14); \
++ c ^= b; c -= __rot(b,24); \
+}
+
+/* The golden ration: an arbitrary value */
-+#define JHASH_GOLDEN_RATIO 0x9e3779b9
++#define JHASH_GOLDEN_RATIO 0xdeadbeef
+
+/* The most generic version, hashes an arbitrary sequence
+ * of bytes. No alignment or length assumptions are made about
-+ * the input key.
++ * the input key. The result depends on endianness.
+ */
-+static inline __u32 jhash(void *key, __u32 length, __u32 initval)
++static inline u32 jhash(const void *key, u32 length, u32 initval)
+{
-+ __u32 a, b, c, len;
-+ __u8 *k = key;
-+
-+ len = length;
-+ a = b = JHASH_GOLDEN_RATIO;
-+ c = initval;
++ u32 a,b,c;
++ const u8 *k = key;
+
-+ while (len >= 12) {
-+ a += (k[0] +((__u32)k[1]<<8) +((__u32)k[2]<<16) +((__u32)k[3]<<24));
-+ b += (k[4] +((__u32)k[5]<<8) +((__u32)k[6]<<16) +((__u32)k[7]<<24));
-+ c += (k[8] +((__u32)k[9]<<8) +((__u32)k[10]<<16)+((__u32)k[11]<<24));
-+
-+ __jhash_mix(a,b,c);
++ /* Set up the internal state */
++ a = b = c = JHASH_GOLDEN_RATIO + length + initval;
+
++ /* all but the last block: affect some 32 bits of (a,b,c) */
++ while (length > 12) {
++ a += (k[0] + ((u32)k[1]<<8) + ((u32)k[2]<<16) + ((u32)k[3]<<24));
++ b += (k[4] + ((u32)k[5]<<8) + ((u32)k[6]<<16) + ((u32)k[7]<<24));
++ c += (k[8] + ((u32)k[9]<<8) + ((u32)k[10]<<16) + ((u32)k[11]<<24));
++ __jhash_mix(a, b, c);
++ length -= 12;
+ k += 12;
-+ len -= 12;
+ }
+
-+ c += length;
-+ switch (len) {
-+ case 11: c += ((__u32)k[10]<<24);
-+ case 10: c += ((__u32)k[9]<<16);
-+ case 9 : c += ((__u32)k[8]<<8);
-+ case 8 : b += ((__u32)k[7]<<24);
-+ case 7 : b += ((__u32)k[6]<<16);
-+ case 6 : b += ((__u32)k[5]<<8);
++ /* last block: affect all 32 bits of (c) */
++ /* all the case statements fall through */
++ switch (length) {
++ case 12: c += (u32)k[11]<<24;
++ case 11: c += (u32)k[10]<<16;
++ case 10: c += (u32)k[9]<<8;
++ case 9 : c += k[8];
++ case 8 : b += (u32)k[7]<<24;
++ case 7 : b += (u32)k[6]<<16;
++ case 6 : b += (u32)k[5]<<8;
+ case 5 : b += k[4];
-+ case 4 : a += ((__u32)k[3]<<24);
-+ case 3 : a += ((__u32)k[2]<<16);
-+ case 2 : a += ((__u32)k[1]<<8);
++ case 4 : a += (u32)k[3]<<24;
++ case 3 : a += (u32)k[2]<<16;
++ case 2 : a += (u32)k[1]<<8;
+ case 1 : a += k[0];
-+ };
-+
-+ __jhash_mix(a,b,c);
++ __jhash_final(a, b, c);
++ case 0 :
++ break;
++ }
+
+ return c;
+}
+
-+/* A special optimized version that handles 1 or more of __u32s.
-+ * The length parameter here is the number of __u32s in the key.
++/* A special optimized version that handles 1 or more of u32s.
++ * The length parameter here is the number of u32s in the key.
+ */
-+static inline __u32 jhash2(__u32 *k, __u32 length, __u32 initval)
++static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
+{
-+ __u32 a, b, c, len;
++ u32 a, b, c;
+
-+ a = b = JHASH_GOLDEN_RATIO;
-+ c = initval;
-+ len = length;
++ /* Set up the internal state */
++ a = b = c = JHASH_GOLDEN_RATIO + (length<<2) + initval;
+
-+ while (len >= 3) {
++ /* handle most of the key */
++ while (length > 3) {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ __jhash_mix(a, b, c);
-+ k += 3; len -= 3;
++ length -= 3;
++ k += 3;
+ }
+
-+ c += length * 4;
-+
-+ switch (len) {
-+ case 2 : b += k[1];
-+ case 1 : a += k[0];
-+ };
-+
-+ __jhash_mix(a,b,c);
++ /* handle the last 3 u32's */
++ /* all the case statements fall through */
++ switch (length) {
++ case 3: c += k[2];
++ case 2: b += k[1];
++ case 1: a += k[0];
++ __jhash_final(a, b, c);
++ case 0: /* case 0: nothing left to add */
++ break;
++ }
+
+ return c;
+}
+
-+
+/* A special ultra-optimized versions that knows they are hashing exactly
+ * 3, 2 or 1 word(s).
-+ *
-+ * NOTE: In partilar the "c += length; __jhash_mix(a,b,c);" normally
-+ * done at the end is not done here.
+ */
-+static inline __u32 jhash_3words(__u32 a, __u32 b, __u32 c, __u32 initval)
++static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
+{
-+ a += JHASH_GOLDEN_RATIO;
-+ b += JHASH_GOLDEN_RATIO;
-+ c += initval;
++ a += JHASH_GOLDEN_RATIO + initval;
++ b += JHASH_GOLDEN_RATIO + initval;
++ c += JHASH_GOLDEN_RATIO + initval;
+
-+ __jhash_mix(a, b, c);
++ __jhash_final(a, b, c);
+
+ return c;
+}
+
-+static inline __u32 jhash_2words(__u32 a, __u32 b, __u32 initval)
++static inline u32 jhash_2words(u32 a, u32 b, u32 initval)
+{
-+ return jhash_3words(a, b, 0, initval);
++ return jhash_3words(0, a, b, initval);
+}
+
-+static inline __u32 jhash_1word(__u32 a, __u32 initval)
++static inline u32 jhash_1word(u32 a, u32 initval)
+{
-+ return jhash_3words(a, 0, 0, initval);
++ return jhash_3words(0, 0, a, initval);
+}
+
-+#endif /* _LINUX_IPSET_JHASH_H */
++#endif /* _LINUX_JHASH_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_macipmap.h
-@@ -0,0 +1,38 @@
+@@ -0,0 +1,39 @@
+#ifndef __IP_SET_MACIPMAP_H
+#define __IP_SET_MACIPMAP_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
+
+#define SETTYPE_NAME "macipmap"
-+#define MAX_RANGE 0x0000FFFF
+
+/* general flags */
+#define IPSET_MACIP_MATCHUNSET 1
@@ -904,6 +1639,7 @@
+ ip_set_ip_t first_ip; /* host byte order, included in range */
+ ip_set_ip_t last_ip; /* host byte order, included in range */
+ u_int32_t flags;
++	u_int32_t size; /* size of the macipmap proper */
+};
+
+struct ip_set_req_macipmap_create {
@@ -918,43 +1654,48 @@
+};
+
+struct ip_set_macip {
-+ unsigned short flags;
++ unsigned short match;
+ unsigned char ethernet[ETH_ALEN];
+};
+
+#endif /* __IP_SET_MACIPMAP_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_malloc.h
-@@ -0,0 +1,116 @@
+@@ -0,0 +1,153 @@
+#ifndef _IP_SET_MALLOC_H
+#define _IP_SET_MALLOC_H
+
+#ifdef __KERNEL__
++#include <linux/vmalloc.h>
+
-+/* Memory allocation and deallocation */
-+static size_t max_malloc_size = 0;
++static size_t max_malloc_size = 0, max_page_size = 0;
++static size_t default_max_malloc_size = 131072; /* Guaranteed: slab.c */
+
-+static inline void init_max_malloc_size(void)
++static inline int init_max_page_size(void)
+{
-+#define CACHE(x) max_malloc_size = x;
++/* Compatibility glues to support 2.4.36 */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++#define __GFP_NOWARN 0
++
++ /* Guaranteed: slab.c */
++ max_malloc_size = max_page_size = default_max_malloc_size;
++#else
++ size_t page_size = 0;
++
++#define CACHE(x) if (max_page_size == 0 || x < max_page_size) \
++ page_size = x;
+#include <linux/kmalloc_sizes.h>
+#undef CACHE
-+}
++ if (page_size) {
++ if (max_malloc_size == 0)
++ max_malloc_size = page_size;
+
-+static inline void * ip_set_malloc(size_t bytes)
-+{
-+ if (bytes > max_malloc_size)
-+ return vmalloc(bytes);
-+ else
-+ return kmalloc(bytes, GFP_KERNEL);
-+}
++ max_page_size = page_size;
+
-+static inline void ip_set_free(void * data, size_t bytes)
-+{
-+ if (bytes > max_malloc_size)
-+ vfree(data);
-+ else
-+ kfree(data);
++ return 1;
++ }
++#endif
++ return 0;
+}
+
+struct harray {
@@ -963,18 +1704,17 @@
+};
+
+static inline void *
-+harray_malloc(size_t hashsize, size_t typesize, int flags)
++__harray_malloc(size_t hashsize, size_t typesize, gfp_t flags)
+{
+ struct harray *harray;
+ size_t max_elements, size, i, j;
+
-+ if (!max_malloc_size)
-+ init_max_malloc_size();
++ BUG_ON(max_page_size == 0);
+
-+ if (typesize > max_malloc_size)
++ if (typesize > max_page_size)
+ return NULL;
+
-+ max_elements = max_malloc_size/typesize;
++ max_elements = max_page_size/typesize;
+ size = hashsize/max_elements;
+ if (hashsize % max_elements)
+ size++;
@@ -1011,6 +1751,18 @@
+ return NULL;
+}
+
++static inline void *
++harray_malloc(size_t hashsize, size_t typesize, gfp_t flags)
++{
++ void *harray;
++
++ do {
++ harray = __harray_malloc(hashsize, typesize, flags|__GFP_NOWARN);
++ } while (harray == NULL && init_max_page_size());
++
++ return harray;
++}
++
+static inline void harray_free(void *h)
+{
+ struct harray *harray = (struct harray *) h;
@@ -1039,19 +1791,40 @@
+ + (which)%(__h)->max_elements); \
+})
+
++/* General memory allocation and deallocation */
++static inline void * ip_set_malloc(size_t bytes)
++{
++ BUG_ON(max_malloc_size == 0);
++
++ if (bytes > default_max_malloc_size)
++ return vmalloc(bytes);
++ else
++ return kmalloc(bytes, GFP_KERNEL | __GFP_NOWARN);
++}
++
++static inline void ip_set_free(void * data, size_t bytes)
++{
++ BUG_ON(max_malloc_size == 0);
++
++ if (bytes > default_max_malloc_size)
++ vfree(data);
++ else
++ kfree(data);
++}
++
+#endif /* __KERNEL__ */
+
+#endif /*_IP_SET_MALLOC_H*/
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_nethash.h
-@@ -0,0 +1,55 @@
+@@ -0,0 +1,31 @@
+#ifndef __IP_SET_NETHASH_H
+#define __IP_SET_NETHASH_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_hashes.h>
+
+#define SETTYPE_NAME "nethash"
-+#define MAX_RANGE 0x0000FFFF
+
+struct ip_set_nethash {
+ ip_set_ip_t *members; /* the nethash proper */
@@ -1059,8 +1832,9 @@
+ uint32_t hashsize; /* hash size */
+ uint16_t probes; /* max number of probes */
+ uint16_t resize; /* resize factor in percent */
-+ unsigned char cidr[30]; /* CIDR sizes */
-+ void *initval[0]; /* initvals for jhash_1word */
++ uint8_t cidr[30]; /* CIDR sizes */
++ uint16_t nets[30]; /* nr of nets by CIDR sizes */
++ initval_t initval[0]; /* initvals for jhash_1word */
+};
+
+struct ip_set_req_nethash_create {
@@ -1071,34 +1845,9 @@
+
+struct ip_set_req_nethash {
+ ip_set_ip_t ip;
-+ unsigned char cidr;
++ uint8_t cidr;
+};
+
-+static unsigned char shifts[] = {255, 253, 249, 241, 225, 193, 129, 1};
-+
-+static inline ip_set_ip_t
-+pack(ip_set_ip_t ip, unsigned char cidr)
-+{
-+ ip_set_ip_t addr, *paddr = &addr;
-+ unsigned char n, t, *a;
-+
-+ addr = htonl(ip & (0xFFFFFFFF << (32 - (cidr))));
-+#ifdef __KERNEL__
-+ DP("ip:%u.%u.%u.%u/%u", NIPQUAD(addr), cidr);
-+#endif
-+ n = cidr / 8;
-+ t = cidr % 8;
-+ a = &((unsigned char *)paddr)[n];
-+ *a = *a /(1 << (8 - t)) + shifts[t];
-+#ifdef __KERNEL__
-+ DP("n: %u, t: %u, a: %u", n, t, *a);
-+ DP("ip:%u.%u.%u.%u/%u, %u.%u.%u.%u",
-+ HIPQUAD(ip), cidr, NIPQUAD(addr));
-+#endif
-+
-+ return ntohl(addr);
-+}
-+
+#endif /* __IP_SET_NETHASH_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_portmap.h
@@ -1107,15 +1856,15 @@
+#define __IP_SET_PORTMAP_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
+
+#define SETTYPE_NAME "portmap"
-+#define MAX_RANGE 0x0000FFFF
-+#define INVALID_PORT (MAX_RANGE + 1)
+
+struct ip_set_portmap {
+ void *members; /* the portmap proper */
-+ ip_set_ip_t first_port; /* host byte order, included in range */
-+ ip_set_ip_t last_port; /* host byte order, included in range */
++ ip_set_ip_t first_ip; /* host byte order, included in range */
++ ip_set_ip_t last_ip; /* host byte order, included in range */
++	u_int32_t size; /* size of the portmap proper */
+};
+
+struct ip_set_req_portmap_create {
@@ -1124,11 +1873,40 @@
+};
+
+struct ip_set_req_portmap {
-+ ip_set_ip_t port;
++ ip_set_ip_t ip;
+};
+
+#endif /* __IP_SET_PORTMAP_H */
--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_setlist.h
+@@ -0,0 +1,26 @@
++#ifndef __IP_SET_SETLIST_H
++#define __IP_SET_SETLIST_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "setlist"
++
++#define IP_SET_SETLIST_ADD_AFTER 0
++#define IP_SET_SETLIST_ADD_BEFORE 1
++
++struct ip_set_setlist {
++ uint8_t size;
++ ip_set_id_t index[0];
++};
++
++struct ip_set_req_setlist_create {
++ uint8_t size;
++};
++
++struct ip_set_req_setlist {
++ char name[IP_SET_MAXNAMELEN];
++ char ref[IP_SET_MAXNAMELEN];
++ uint8_t before;
++};
++
++#endif /* __IP_SET_SETLIST_H */
+--- /dev/null
+++ b/include/linux/netfilter_ipv4/ipt_set.h
@@ -0,0 +1,21 @@
+#ifndef _IPT_SET_H
@@ -1154,14 +1932,14 @@
+#endif /*_IPT_SET_H*/
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set.c
-@@ -0,0 +1,2003 @@
+@@ -0,0 +1,2076 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
++ * published by the Free Software Foundation.
+ */
+
+/* Kernel module for IP set management */
@@ -1176,17 +1954,21 @@
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/random.h>
-+#include <linux/jhash.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
+#include <linux/errno.h>
++#include <linux/capability.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
+#include <asm/semaphore.h>
++#else
++#include <linux/semaphore.h>
++#endif
+#include <linux/spinlock.h>
-+#include <linux/vmalloc.h>
+
+#define ASSERT_READ_LOCK(x)
+#define ASSERT_WRITE_LOCK(x)
++#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4/ip_set.h>
+
+static struct list_head set_type_list; /* all registered sets */
@@ -1198,14 +1980,16 @@
+static struct list_head *ip_set_hash; /* hash of bindings */
+static unsigned int ip_set_hash_random; /* random seed */
+
++#define SETNAME_EQ(a,b) (strncmp(a,b,IP_SET_MAXNAMELEN) == 0)
++
+/*
+ * Sets are identified either by the index in ip_set_list or by id.
-+ * The id never changes and is used to find a key in the hash.
-+ * The index may change by swapping and used at all other places
++ * The id never changes and is used to find a key in the hash.
++ * The index may change by swapping and used at all other places
+ * (set/SET netfilter modules, binding value, etc.)
+ *
+ * Userspace requests are serialized by ip_set_mutex and sets can
-+ * be deleted only from userspace. Therefore ip_set_list locking
++ * be deleted only from userspace. Therefore ip_set_list locking
+ * must obey the following rules:
+ *
+ * - kernel requests: read and write locking mandatory
@@ -1243,7 +2027,7 @@
+static ip_set_id_t
+ip_set_find_in_hash(ip_set_id_t id, ip_set_ip_t ip)
+{
-+ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
++ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
+ % ip_set_bindings_hash_size;
+ struct ip_set_hash *set_hash;
+
@@ -1253,14 +2037,14 @@
+
+ set_hash = __ip_set_find(key, id, ip);
+
-+ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
++ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
+ HIPQUAD(ip),
+ set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
+
+ return (set_hash != NULL ? set_hash->binding : IP_SET_INVALID_ID);
+}
+
-+static inline void
++static inline void
+__set_hash_del(struct ip_set_hash *set_hash)
+{
+ ASSERT_WRITE_LOCK(&ip_set_lock);
@@ -1288,11 +2072,11 @@
+
+ if (set_hash != NULL)
+ __set_hash_del(set_hash);
-+ write_unlock_bh(&ip_set_lock);
++ write_unlock_bh(&ip_set_lock);
+ return 0;
+}
+
-+static int
++static int
+ip_set_hash_add(ip_set_id_t id, ip_set_ip_t ip, ip_set_id_t binding)
+{
+ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
@@ -1302,7 +2086,7 @@
+
+ IP_SET_ASSERT(ip_set_list[id]);
+ IP_SET_ASSERT(ip_set_list[binding]);
-+ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
++ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
+ HIPQUAD(ip), ip_set_list[binding]->name);
+ write_lock_bh(&ip_set_lock);
+ set_hash = __ip_set_find(key, id, ip);
@@ -1381,15 +2165,15 @@
+ res = set->type->testip_kernel(set, skb, &ip, flags, i++);
+ read_unlock_bh(&set->lock);
+ i += !!(set->type->features & IPSET_DATA_DOUBLE);
-+ } while (res > 0
-+ && flags[i]
++ } while (res > 0
++ && flags[i]
+ && follow_bindings(index, set, ip));
+ read_unlock_bh(&ip_set_lock);
+
-+ return res;
++ return (res < 0 ? 0 : res);
+}
+
-+void
++int
+ip_set_addip_kernel(ip_set_id_t index,
+ const struct sk_buff *skb,
+ const u_int32_t *flags)
@@ -1411,7 +2195,7 @@
+ write_unlock_bh(&set->lock);
+ i += !!(set->type->features & IPSET_DATA_DOUBLE);
+ } while ((res == 0 || res == -EEXIST)
-+ && flags[i]
++ && flags[i]
+ && follow_bindings(index, set, ip));
+ read_unlock_bh(&ip_set_lock);
+
@@ -1419,9 +2203,11 @@
+ && set->type->retry
+ && (res = set->type->retry(set)) == 0)
+ goto retry;
++
++ return res;
+}
+
-+void
++int
+ip_set_delip_kernel(ip_set_id_t index,
+ const struct sk_buff *skb,
+ const u_int32_t *flags)
@@ -1442,9 +2228,11 @@
+ write_unlock_bh(&set->lock);
+ i += !!(set->type->features & IPSET_DATA_DOUBLE);
+ } while ((res == 0 || res == -EEXIST)
-+ && flags[i]
++ && flags[i]
+ && follow_bindings(index, set, ip));
+ read_unlock_bh(&ip_set_lock);
++
++ return res;
+}
+
+/* Register and deregister settype */
@@ -1460,7 +2248,7 @@
+ return NULL;
+}
+
-+int
++int
+ip_set_register_set_type(struct ip_set_type *set_type)
+{
+ int ret = 0;
@@ -1476,7 +2264,7 @@
+ write_lock_bh(&ip_set_lock);
+ if (find_set_type(set_type->typename)) {
+ /* Duplicate! */
-+ ip_set_printk("'%s' already registered!",
++ ip_set_printk("'%s' already registered!",
+ set_type->typename);
+ ret = -EINVAL;
+ goto unlock;
@@ -1509,6 +2297,29 @@
+
+}
+
++ip_set_id_t
++__ip_set_get_byname(const char *name, struct ip_set **set)
++{
++ ip_set_id_t i, index = IP_SET_INVALID_ID;
++
++ for (i = 0; i < ip_set_max; i++) {
++ if (ip_set_list[i] != NULL
++ && SETNAME_EQ(ip_set_list[i]->name, name)) {
++ __ip_set_get(i);
++ index = i;
++ *set = ip_set_list[i];
++ break;
++ }
++ }
++ return index;
++}
++
++void __ip_set_put_byindex(ip_set_id_t index)
++{
++ if (ip_set_list[index])
++ __ip_set_put(index);
++}
++
+/*
+ * Userspace routines
+ */
@@ -1526,7 +2337,7 @@
+ down(&ip_set_app_mutex);
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL
-+ && strcmp(ip_set_list[i]->name, name) == 0) {
++ && SETNAME_EQ(ip_set_list[i]->name, name)) {
+ __ip_set_get(i);
+ index = i;
+ break;
@@ -1559,11 +2370,25 @@
+}
+
+/*
++ * Find the set id belonging to the index.
++ * We are protected by the mutex, so we do not need to use
++ * ip_set_lock. There is no need to reference the sets either.
++ */
++ip_set_id_t
++ip_set_id(ip_set_id_t index)
++{
++ if (index >= ip_set_max || !ip_set_list[index])
++ return IP_SET_INVALID_ID;
++
++ return ip_set_list[index]->id;
++}
++
++/*
+ * If the given set pointer points to a valid set, decrement
+ * reference count by 1. The caller shall not assume the index
+ * to be valid, after calling this function.
+ */
-+void ip_set_put(ip_set_id_t index)
++void ip_set_put_byindex(ip_set_id_t index)
+{
+ down(&ip_set_app_mutex);
+ if (ip_set_list[index])
@@ -1579,7 +2404,7 @@
+
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL
-+ && strcmp(ip_set_list[i]->name, name) == 0) {
++ && SETNAME_EQ(ip_set_list[i]->name, name)) {
+ index = i;
+ break;
+ }
@@ -1603,7 +2428,7 @@
+static inline int
+__ip_set_testip(struct ip_set *set,
+ const void *data,
-+ size_t size,
++ u_int32_t size,
+ ip_set_ip_t *ip)
+{
+ int res;
@@ -1618,7 +2443,7 @@
+static int
+__ip_set_addip(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set = ip_set_list[index];
+ ip_set_ip_t ip;
@@ -1639,9 +2464,18 @@
+static int
+ip_set_addip(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
++ struct ip_set *set = ip_set_list[index];
+
++ IP_SET_ASSERT(set);
++
++ if (size - sizeof(struct ip_set_req_adt) != set->type->reqsize) {
++ ip_set_printk("data length wrong (want %lu, have %zu)",
++ (long unsigned)set->type->reqsize,
++ size - sizeof(struct ip_set_req_adt));
++ return -EINVAL;
++ }
+ return __ip_set_addip(index,
+ data + sizeof(struct ip_set_req_adt),
+ size - sizeof(struct ip_set_req_adt));
@@ -1650,13 +2484,20 @@
+static int
+ip_set_delip(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set = ip_set_list[index];
+ ip_set_ip_t ip;
+ int res;
+
+ IP_SET_ASSERT(set);
++
++ if (size - sizeof(struct ip_set_req_adt) != set->type->reqsize) {
++ ip_set_printk("data length wrong (want %lu, have %zu)",
++ (long unsigned)set->type->reqsize,
++ size - sizeof(struct ip_set_req_adt));
++ return -EINVAL;
++ }
+ write_lock_bh(&set->lock);
+ res = set->type->delip(set,
+ data + sizeof(struct ip_set_req_adt),
@@ -1670,13 +2511,20 @@
+static int
+ip_set_testip(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set = ip_set_list[index];
+ ip_set_ip_t ip;
+ int res;
+
+ IP_SET_ASSERT(set);
++
++ if (size - sizeof(struct ip_set_req_adt) != set->type->reqsize) {
++ ip_set_printk("data length wrong (want %lu, have %zu)",
++ (long unsigned)set->type->reqsize,
++ size - sizeof(struct ip_set_req_adt));
++ return -EINVAL;
++ }
+ res = __ip_set_testip(set,
+ data + sizeof(struct ip_set_req_adt),
+ size - sizeof(struct ip_set_req_adt),
@@ -1688,10 +2536,10 @@
+static int
+ip_set_bindip(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set = ip_set_list[index];
-+ struct ip_set_req_bind *req_bind;
++ const struct ip_set_req_bind *req_bind;
+ ip_set_id_t binding;
+ ip_set_ip_t ip;
+ int res;
@@ -1700,18 +2548,16 @@
+ if (size < sizeof(struct ip_set_req_bind))
+ return -EINVAL;
+
-+ req_bind = (struct ip_set_req_bind *) data;
-+ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
++ req_bind = data;
+
-+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++ if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
+ /* Default binding of a set */
-+ char *binding_name;
++ const char *binding_name;
+
+ if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
+ return -EINVAL;
+
-+ binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
-+ binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
++ binding_name = data + sizeof(struct ip_set_req_bind);
+
+ binding = ip_set_find_byname(binding_name);
+ if (binding == IP_SET_INVALID_ID)
@@ -1776,10 +2622,10 @@
+static int
+ip_set_unbindip(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set;
-+ struct ip_set_req_bind *req_bind;
++ const struct ip_set_req_bind *req_bind;
+ ip_set_ip_t ip;
+ int res;
+
@@ -1787,19 +2633,18 @@
+ if (size < sizeof(struct ip_set_req_bind))
+ return -EINVAL;
+
-+ req_bind = (struct ip_set_req_bind *) data;
-+ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
++ req_bind = data;
+
+ DP("%u %s", index, req_bind->binding);
+ if (index == IP_SET_INVALID_ID) {
+ /* unbind :all: */
-+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++ if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
+ /* Default binding of sets */
+ write_lock_bh(&ip_set_lock);
+ FOREACH_SET_DO(__unbind_default);
+ write_unlock_bh(&ip_set_lock);
+ return 0;
-+ } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
++ } else if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_ALL)) {
+ /* Flush all bindings of all sets*/
+ write_lock_bh(&ip_set_lock);
+ FOREACH_HASH_RW_DO(__set_hash_del);
@@ -1812,7 +2657,7 @@
+
+ set = ip_set_list[index];
+ IP_SET_ASSERT(set);
-+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++ if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
+ /* Default binding of set */
+ ip_set_id_t binding = ip_set_find_byindex(set->binding);
+
@@ -1826,7 +2671,7 @@
+ write_unlock_bh(&ip_set_lock);
+
+ return 0;
-+ } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
++ } else if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_ALL)) {
+ /* Flush all bindings */
+
+ write_lock_bh(&ip_set_lock);
@@ -1850,10 +2695,10 @@
+static int
+ip_set_testbind(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set = ip_set_list[index];
-+ struct ip_set_req_bind *req_bind;
++ const struct ip_set_req_bind *req_bind;
+ ip_set_id_t binding;
+ ip_set_ip_t ip;
+ int res;
@@ -1862,18 +2707,16 @@
+ if (size < sizeof(struct ip_set_req_bind))
+ return -EINVAL;
+
-+ req_bind = (struct ip_set_req_bind *) data;
-+ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
++ req_bind = data;
+
-+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++ if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
+ /* Default binding of set */
-+ char *binding_name;
++ const char *binding_name;
+
+ if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
+ return -EINVAL;
+
-+ binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
-+ binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
++ binding_name = data + sizeof(struct ip_set_req_bind);
+
+ binding = ip_set_find_byname(binding_name);
+ if (binding == IP_SET_INVALID_ID)
@@ -1894,7 +2737,7 @@
+ &ip);
+ DP("set %s, ip: %u.%u.%u.%u, binding %s",
+ set->name, HIPQUAD(ip), ip_set_list[binding]->name);
-+
++
+ if (res >= 0)
+ res = (ip_set_find_in_hash(set->id, ip) == binding)
+ ? -EEXIST : 0;
@@ -1927,7 +2770,7 @@
+ if (ip_set_list[i] == NULL) {
+ if (*id == IP_SET_INVALID_ID)
+ *id = *index = i;
-+ } else if (strcmp(name, ip_set_list[i]->name) == 0)
++ } else if (SETNAME_EQ(name, ip_set_list[i]->name))
+ /* Name clash */
+ return -EEXIST;
+ }
@@ -1954,13 +2797,14 @@
+ const char *typename,
+ ip_set_id_t restore,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set;
+ ip_set_id_t index = 0, id;
+ int res = 0;
+
+ DP("setname: %s, typename: %s, id: %u", name, typename, restore);
++
+ /*
+ * First, and without any locks, allocate and initialize
+ * a normal base set structure.
@@ -1968,7 +2812,7 @@
+ set = kmalloc(sizeof(struct ip_set), GFP_KERNEL);
+ if (!set)
+ return -ENOMEM;
-+ set->lock = RW_LOCK_UNLOCKED;
++ rwlock_init(&set->lock);
+ strncpy(set->name, name, IP_SET_MAXNAMELEN);
+ set->binding = IP_SET_INVALID_ID;
+ atomic_set(&set->ref, 0);
@@ -2004,6 +2848,14 @@
+ }
+ read_unlock_bh(&ip_set_lock);
+
++ /* Check request size */
++ if (size != set->type->header_size) {
++ ip_set_printk("data length wrong (want %lu, have %lu)",
++ (long unsigned)set->type->header_size,
++ (long unsigned)size);
++ goto put_out;
++ }
++
+ /*
+ * Without holding any locks, create private part.
+ */
@@ -2015,7 +2867,7 @@
+
+ /*
+ * Here, we have a valid, constructed set. &ip_set_lock again,
-+ * find free id/index and check that it is not already in
++ * find free id/index and check that it is not already in
+ * ip_set_list.
+ */
+ write_lock_bh(&ip_set_lock);
@@ -2030,7 +2882,7 @@
+ res = -ERANGE;
+ goto cleanup;
+ }
-+
++
+ /*
+ * Finally! Add our shiny new set to the list, and be done.
+ */
@@ -2089,7 +2941,7 @@
+ ip_set_destroy_set(index);
+ } else {
+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] != NULL
++ if (ip_set_list[i] != NULL
+ && (atomic_read(&ip_set_list[i]->ref)))
+ return -EBUSY;
+ }
@@ -2112,7 +2964,7 @@
+ write_unlock_bh(&set->lock);
+}
+
-+/*
++/*
+ * Flush data in a set - or in all sets
+ */
+static int
@@ -2139,9 +2991,7 @@
+ write_lock_bh(&ip_set_lock);
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL
-+ && strncmp(ip_set_list[i]->name,
-+ name,
-+ IP_SET_MAXNAMELEN - 1) == 0) {
++ && SETNAME_EQ(ip_set_list[i]->name, name)) {
+ res = -EEXIST;
+ goto unlock;
+ }
@@ -2165,7 +3015,9 @@
+ u_int32_t from_ref;
+
+ DP("set: %s to %s", from->name, to->name);
-+ /* Features must not change. Artifical restriction. */
++ /* Features must not change.
++	 * Not an artificial restriction anymore, as we must prevent
++ * possible loops created by swapping in setlist type of sets. */
+ if (from->type->features != to->type->features)
+ return -ENOEXEC;
+
@@ -2192,7 +3044,7 @@
+
+static inline void
+__set_hash_bindings_size_list(struct ip_set_hash *set_hash,
-+ ip_set_id_t id, size_t *size)
++ ip_set_id_t id, u_int32_t *size)
+{
+ if (set_hash->id == id)
+ *size += sizeof(struct ip_set_hash_list);
@@ -2200,7 +3052,7 @@
+
+static inline void
+__set_hash_bindings_size_save(struct ip_set_hash *set_hash,
-+ ip_set_id_t id, size_t *size)
++ ip_set_id_t id, u_int32_t *size)
+{
+ if (set_hash->id == id)
+ *size += sizeof(struct ip_set_hash_save);
@@ -2211,8 +3063,7 @@
+ ip_set_id_t id, void *data, int *used)
+{
+ if (set_hash->id == id) {
-+ struct ip_set_hash_list *hash_list =
-+ (struct ip_set_hash_list *)(data + *used);
++ struct ip_set_hash_list *hash_list = data + *used;
+
+ hash_list->ip = set_hash->ip;
+ hash_list->binding = set_hash->binding;
@@ -2229,7 +3080,7 @@
+ struct ip_set_list *set_list;
+
+ /* Pointer to our header */
-+ set_list = (struct ip_set_list *) (data + *used);
++ set_list = data + *used;
+
+ DP("set: %s, used: %d %p %p", set->name, *used, data, data + *used);
+
@@ -2296,7 +3147,7 @@
+ struct ip_set_save *set_save;
+
+ /* Pointer to our header */
-+ set_save = (struct ip_set_save *) (data + *used);
++ set_save = data + *used;
+
+ /* Get and ensure header size */
+ if (*used + sizeof(struct ip_set_save) > len)
@@ -2304,7 +3155,7 @@
+ *used += sizeof(struct ip_set_save);
+
+ set = ip_set_list[index];
-+ DP("set: %s, used: %u(%u) %p %p", set->name, *used, len,
++ DP("set: %s, used: %d(%d) %p %p", set->name, *used, len,
+ data, data + *used);
+
+ read_lock_bh(&set->lock);
@@ -2321,8 +3172,8 @@
+ set->type->list_header(set, data + *used);
+ *used += set_save->header_size;
+
-+ DP("set header filled: %s, used: %u(%u) %p %p", set->name, *used,
-+ set_save->header_size, data, data + *used);
++ DP("set header filled: %s, used: %d(%lu) %p %p", set->name, *used,
++ (unsigned long)set_save->header_size, data, data + *used);
+ /* Get and ensure set specific members size */
+ set_save->members_size = set->type->list_members_size(set);
+ if (*used + set_save->members_size > len)
@@ -2332,8 +3183,8 @@
+ set->type->list_members(set, data + *used);
+ *used += set_save->members_size;
+ read_unlock_bh(&set->lock);
-+ DP("set members filled: %s, used: %u(%u) %p %p", set->name, *used,
-+ set_save->members_size, data, data + *used);
++ DP("set members filled: %s, used: %d(%lu) %p %p", set->name, *used,
++ (unsigned long)set_save->members_size, data, data + *used);
+ return 0;
+
+ unlock_set:
@@ -2353,8 +3204,7 @@
+{
+ if (*res == 0
+ && (id == IP_SET_INVALID_ID || set_hash->id == id)) {
-+ struct ip_set_hash_save *hash_save =
-+ (struct ip_set_hash_save *)(data + *used);
++ struct ip_set_hash_save *hash_save = data + *used;
+ /* Ensure bindings size */
+ if (*used + sizeof(struct ip_set_hash_save) > len) {
+ *res = -ENOMEM;
@@ -2381,7 +3231,7 @@
+ return -ENOMEM;
+
+ /* Marker */
-+ set_save = (struct ip_set_save *) (data + *used);
++ set_save = data + *used;
+ set_save->index = IP_SET_INVALID_ID;
+ set_save->header_size = 0;
+ set_save->members_size = 0;
@@ -2414,16 +3264,16 @@
+ while (1) {
+ line++;
+
-+ DP("%u %u %u", used, sizeof(struct ip_set_restore), len);
++ DP("%d %zu %d", used, sizeof(struct ip_set_restore), len);
+ /* Get and ensure header size */
+ if (used + sizeof(struct ip_set_restore) > len)
+ return line;
-+ set_restore = (struct ip_set_restore *) (data + used);
++ set_restore = data + used;
+ used += sizeof(struct ip_set_restore);
+
+ /* Ensure data size */
-+ if (used
-+ + set_restore->header_size
++ if (used
++ + set_restore->header_size
+ + set_restore->members_size > len)
+ return line;
+
@@ -2452,22 +3302,23 @@
+ /* Try to restore members data */
+ set = ip_set_list[index];
+ members_size = 0;
-+ DP("members_size %u reqsize %u",
-+ set_restore->members_size, set->type->reqsize);
++ DP("members_size %lu reqsize %lu",
++ (unsigned long)set_restore->members_size,
++ (unsigned long)set->type->reqsize);
+ while (members_size + set->type->reqsize <=
+ set_restore->members_size) {
+ line++;
-+ DP("members: %u, line %u", members_size, line);
++ DP("members: %d, line %d", members_size, line);
+ res = __ip_set_addip(index,
+ data + used + members_size,
+ set->type->reqsize);
-+ if (!(res == 0 || res == -EEXIST))
++ if (!(res == 0 || res == -EEXIST))
+ return line;
+ members_size += set->type->reqsize;
+ }
+
-+ DP("members_size %u %u",
-+ set_restore->members_size, members_size);
++ DP("members_size %lu %d",
++ (unsigned long)set_restore->members_size, members_size);
+ if (members_size != set_restore->members_size)
+ return line++;
+ used += set_restore->members_size;
@@ -2482,7 +3333,7 @@
+ /* Get and ensure size */
+ if (used + sizeof(struct ip_set_hash_save) > len)
+ return line;
-+ hash_save = (struct ip_set_hash_save *) (data + used);
++ hash_save = data + used;
+ used += sizeof(struct ip_set_hash_save);
+
+ /* hash_save->id is used to store the index */
@@ -2498,7 +3349,7 @@
+ set = ip_set_list[hash_save->id];
+ /* Null valued IP means default binding */
+ if (hash_save->ip)
-+ res = ip_set_hash_add(set->id,
++ res = ip_set_hash_add(set->id,
+ hash_save->ip,
+ hash_save->binding);
+ else {
@@ -2527,10 +3378,10 @@
+ struct ip_set_req_adt *req_adt;
+ ip_set_id_t index = IP_SET_INVALID_ID;
+ int (*adtfn)(ip_set_id_t index,
-+ const void *data, size_t size);
++ const void *data, u_int32_t size);
+ struct fn_table {
+ int (*fn)(ip_set_id_t index,
-+ const void *data, size_t size);
++ const void *data, u_int32_t size);
+ } adtfn_table[] =
+ { { ip_set_addip }, { ip_set_delip }, { ip_set_testip},
+ { ip_set_bindip}, { ip_set_unbindip }, { ip_set_testbind },
@@ -2565,8 +3416,7 @@
+
+ if (*op < IP_SET_OP_VERSION) {
+ /* Check the version at the beginning of operations */
-+ struct ip_set_req_version *req_version =
-+ (struct ip_set_req_version *) data;
++ struct ip_set_req_version *req_version = data;
+ if (req_version->version != IP_SET_PROTOCOL_VERSION) {
+ res = -EPROTO;
+ goto done;
@@ -2575,8 +3425,7 @@
+
+ switch (*op) {
+ case IP_SET_OP_CREATE:{
-+ struct ip_set_req_create *req_create
-+ = (struct ip_set_req_create *) data;
++ struct ip_set_req_create *req_create = data;
+
+ if (len < sizeof(struct ip_set_req_create)) {
+ ip_set_printk("short CREATE data (want >=%zu, got %u)",
@@ -2594,8 +3443,7 @@
+ goto done;
+ }
+ case IP_SET_OP_DESTROY:{
-+ struct ip_set_req_std *req_destroy
-+ = (struct ip_set_req_std *) data;
++ struct ip_set_req_std *req_destroy = data;
+
+ if (len != sizeof(struct ip_set_req_std)) {
+ ip_set_printk("invalid DESTROY data (want %zu, got %u)",
@@ -2603,7 +3451,7 @@
+ res = -EINVAL;
+ goto done;
+ }
-+ if (strcmp(req_destroy->name, IPSET_TOKEN_ALL) == 0) {
++ if (SETNAME_EQ(req_destroy->name, IPSET_TOKEN_ALL)) {
+ /* Destroy all sets */
+ index = IP_SET_INVALID_ID;
+ } else {
@@ -2620,8 +3468,7 @@
+ goto done;
+ }
+ case IP_SET_OP_FLUSH:{
-+ struct ip_set_req_std *req_flush =
-+ (struct ip_set_req_std *) data;
++ struct ip_set_req_std *req_flush = data;
+
+ if (len != sizeof(struct ip_set_req_std)) {
+ ip_set_printk("invalid FLUSH data (want %zu, got %u)",
@@ -2629,7 +3476,7 @@
+ res = -EINVAL;
+ goto done;
+ }
-+ if (strcmp(req_flush->name, IPSET_TOKEN_ALL) == 0) {
++ if (SETNAME_EQ(req_flush->name, IPSET_TOKEN_ALL)) {
+ /* Flush all sets */
+ index = IP_SET_INVALID_ID;
+ } else {
@@ -2645,8 +3492,7 @@
+ goto done;
+ }
+ case IP_SET_OP_RENAME:{
-+ struct ip_set_req_create *req_rename
-+ = (struct ip_set_req_create *) data;
++ struct ip_set_req_create *req_rename = data;
+
+ if (len != sizeof(struct ip_set_req_create)) {
+ ip_set_printk("invalid RENAME data (want %zu, got %u)",
@@ -2667,8 +3513,7 @@
+ goto done;
+ }
+ case IP_SET_OP_SWAP:{
-+ struct ip_set_req_create *req_swap
-+ = (struct ip_set_req_create *) data;
++ struct ip_set_req_create *req_swap = data;
+ ip_set_id_t to_index;
+
+ if (len != sizeof(struct ip_set_req_create)) {
@@ -2694,7 +3539,7 @@
+ res = ip_set_swap(index, to_index);
+ goto done;
+ }
-+ default:
++ default:
+ break; /* Set identified by id */
+ }
+
@@ -2711,10 +3556,10 @@
+ res = -EINVAL;
+ goto done;
+ }
-+ req_adt = (struct ip_set_req_adt *) data;
++ req_adt = data;
+
+ /* -U :all: :all:|:default: uses IP_SET_INVALID_ID */
-+ if (!(*op == IP_SET_OP_UNBIND_SET
++ if (!(*op == IP_SET_OP_UNBIND_SET
+ && req_adt->index == IP_SET_INVALID_ID)) {
+ index = ip_set_find_byindex(req_adt->index);
+ if (index == IP_SET_INVALID_ID) {
@@ -2733,7 +3578,7 @@
+ return res;
+}
+
-+static int
++static int
+ip_set_sockfn_get(struct sock *sk, int optval, void *user, int *len)
+{
+ int res = 0;
@@ -2771,8 +3616,7 @@
+
+ if (*op < IP_SET_OP_VERSION) {
+ /* Check the version at the beginning of operations */
-+ struct ip_set_req_version *req_version =
-+ (struct ip_set_req_version *) data;
++ struct ip_set_req_version *req_version = data;
+ if (req_version->version != IP_SET_PROTOCOL_VERSION) {
+ res = -EPROTO;
+ goto done;
@@ -2781,8 +3625,7 @@
+
+ switch (*op) {
+ case IP_SET_OP_VERSION: {
-+ struct ip_set_req_version *req_version =
-+ (struct ip_set_req_version *) data;
++ struct ip_set_req_version *req_version = data;
+
+ if (*len != sizeof(struct ip_set_req_version)) {
+ ip_set_printk("invalid VERSION (want %zu, got %d)",
@@ -2798,8 +3641,7 @@
+ goto done;
+ }
+ case IP_SET_OP_GET_BYNAME: {
-+ struct ip_set_req_get_set *req_get
-+ = (struct ip_set_req_get_set *) data;
++ struct ip_set_req_get_set *req_get = data;
+
+ if (*len != sizeof(struct ip_set_req_get_set)) {
+ ip_set_printk("invalid GET_BYNAME (want %zu, got %d)",
@@ -2813,8 +3655,7 @@
+ goto copy;
+ }
+ case IP_SET_OP_GET_BYINDEX: {
-+ struct ip_set_req_get_set *req_get
-+ = (struct ip_set_req_get_set *) data;
++ struct ip_set_req_get_set *req_get = data;
+
+ if (*len != sizeof(struct ip_set_req_get_set)) {
+ ip_set_printk("invalid GET_BYINDEX (want %zu, got %d)",
@@ -2830,8 +3671,7 @@
+ goto copy;
+ }
+ case IP_SET_OP_ADT_GET: {
-+ struct ip_set_req_adt_get *req_get
-+ = (struct ip_set_req_adt_get *) data;
++ struct ip_set_req_adt_get *req_get = data;
+
+ if (*len != sizeof(struct ip_set_req_adt_get)) {
+ ip_set_printk("invalid ADT_GET (want %zu, got %d)",
@@ -2853,8 +3693,7 @@
+ goto copy;
+ }
+ case IP_SET_OP_MAX_SETS: {
-+ struct ip_set_req_max_sets *req_max_sets
-+ = (struct ip_set_req_max_sets *) data;
++ struct ip_set_req_max_sets *req_max_sets = data;
+ ip_set_id_t i;
+
+ if (*len != sizeof(struct ip_set_req_max_sets)) {
@@ -2864,11 +3703,11 @@
+ goto done;
+ }
+
-+ if (strcmp(req_max_sets->set.name, IPSET_TOKEN_ALL) == 0) {
++ if (SETNAME_EQ(req_max_sets->set.name, IPSET_TOKEN_ALL)) {
+ req_max_sets->set.index = IP_SET_INVALID_ID;
+ } else {
+ req_max_sets->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ req_max_sets->set.index =
++ req_max_sets->set.index =
+ ip_set_find_byname(req_max_sets->set.name);
+ if (req_max_sets->set.index == IP_SET_INVALID_ID) {
+ res = -ENOENT;
@@ -2883,10 +3722,9 @@
+ }
+ goto copy;
+ }
-+ case IP_SET_OP_LIST_SIZE:
++ case IP_SET_OP_LIST_SIZE:
+ case IP_SET_OP_SAVE_SIZE: {
-+ struct ip_set_req_setnames *req_setnames
-+ = (struct ip_set_req_setnames *) data;
++ struct ip_set_req_setnames *req_setnames = data;
+ struct ip_set_name_list *name_list;
+ struct ip_set *set;
+ ip_set_id_t i;
@@ -2904,8 +3742,7 @@
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] == NULL)
+ continue;
-+ name_list = (struct ip_set_name_list *)
-+ (data + used);
++ name_list = data + used;
+ used += sizeof(struct ip_set_name_list);
+ if (used > copylen) {
+ res = -EAGAIN;
@@ -2934,7 +3771,7 @@
+ + set->type->header_size
+ + set->type->list_members_size(set);
+ /* Sets are identified by id in the hash */
-+ FOREACH_HASH_DO(__set_hash_bindings_size_list,
++ FOREACH_HASH_DO(__set_hash_bindings_size_list,
+ set->id, &req_setnames->size);
+ break;
+ }
@@ -2957,8 +3794,7 @@
+ goto copy;
+ }
+ case IP_SET_OP_LIST: {
-+ struct ip_set_req_list *req_list
-+ = (struct ip_set_req_list *) data;
++ struct ip_set_req_list *req_list = data;
+ ip_set_id_t i;
+ int used;
+
@@ -2994,8 +3830,7 @@
+ goto copy;
+ }
+ case IP_SET_OP_SAVE: {
-+ struct ip_set_req_list *req_save
-+ = (struct ip_set_req_list *) data;
++ struct ip_set_req_list *req_save = data;
+ ip_set_id_t i;
+ int used;
+
@@ -3011,13 +3846,23 @@
+ res = -ENOENT;
+ goto done;
+ }
++
++#define SETLIST(set) (strcmp(set->type->typename, "setlist") == 0)
++
+ used = 0;
+ if (index == IP_SET_INVALID_ID) {
-+ /* Save all sets */
++ /* Save all sets: ugly setlist type dependency */
++ int setlist = 0;
++ setlists:
+ for (i = 0; i < ip_set_max && res == 0; i++) {
-+ if (ip_set_list[i] != NULL)
++ if (ip_set_list[i] != NULL
++ && !(setlist ^ SETLIST(ip_set_list[i])))
+ res = ip_set_save_set(i, data, &used, *len);
+ }
++ if (!setlist) {
++ setlist = 1;
++ goto setlists;
++ }
+ } else {
+ /* Save an individual set */
+ res = ip_set_save_set(index, data, &used, *len);
@@ -3034,20 +3879,19 @@
+ goto copy;
+ }
+ case IP_SET_OP_RESTORE: {
-+ struct ip_set_req_setnames *req_restore
-+ = (struct ip_set_req_setnames *) data;
++ struct ip_set_req_setnames *req_restore = data;
+ int line;
+
+ if (*len < sizeof(struct ip_set_req_setnames)
+ || *len != req_restore->size) {
-+ ip_set_printk("invalid RESTORE (want =%zu, got %d)",
-+ req_restore->size, *len);
++ ip_set_printk("invalid RESTORE (want =%lu, got %d)",
++ (long unsigned)req_restore->size, *len);
+ res = -EINVAL;
+ goto done;
+ }
+ line = ip_set_restore(data + sizeof(struct ip_set_req_setnames),
+ req_restore->size - sizeof(struct ip_set_req_setnames));
-+ DP("ip_set_restore: %u", line);
++ DP("ip_set_restore: %d", line);
+ if (line != 0) {
+ res = -EAGAIN;
+ req_restore->size = line;
@@ -3062,7 +3906,7 @@
+ } /* end of switch(op) */
+
+ copy:
-+ DP("set %s, copylen %u", index != IP_SET_INVALID_ID
++ DP("set %s, copylen %d", index != IP_SET_INVALID_ID
+ && ip_set_list[index]
+ ? ip_set_list[index]->name
+ : ":all:", copylen);
@@ -3085,12 +3929,15 @@
+ .get_optmin = SO_IP_SET,
+ .get_optmax = SO_IP_SET + 1,
+ .get = &ip_set_sockfn_get,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
++ .use = 0,
++#else
+ .owner = THIS_MODULE,
+#endif
+};
+
+static int max_sets, hash_size;
++
+module_param(max_sets, int, 0600);
+MODULE_PARM_DESC(max_sets, "maximal number of sets");
+module_param(hash_size, int, 0600);
@@ -3133,6 +3980,7 @@
+ vfree(ip_set_hash);
+ return res;
+ }
++
+ return 0;
+}
+
@@ -3150,7 +3998,10 @@
+
+EXPORT_SYMBOL(ip_set_get_byname);
+EXPORT_SYMBOL(ip_set_get_byindex);
-+EXPORT_SYMBOL(ip_set_put);
++EXPORT_SYMBOL(ip_set_put_byindex);
++EXPORT_SYMBOL(ip_set_id);
++EXPORT_SYMBOL(__ip_set_get_byname);
++EXPORT_SYMBOL(__ip_set_put_byindex);
+
+EXPORT_SYMBOL(ip_set_addip_kernel);
+EXPORT_SYMBOL(ip_set_delip_kernel);
@@ -3160,47 +4011,37 @@
+module_exit(ip_set_fini);
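Editor's note: the SAVE handler above dumps all ordinary sets before any "setlist" set, so that a later RESTORE creates every member set before the list that refers to it. The following is a minimal standalone sketch of that two-pass `!(setlist ^ SETLIST(...))` filter; it is not part of the patch and the set names are made up.

/* Toy model of the two-pass save ordering used in IP_SET_OP_SAVE. */
#include <stdio.h>

struct toy_set { const char *name; int is_setlist; };

int main(void)
{
	struct toy_set sets[] = {
		{ "clients", 0 },
		{ "all",     1 },	/* a setlist referencing the others */
		{ "servers", 0 },
	};
	int pass, i;

	/* Pass 0 picks the plain sets, pass 1 the setlist sets, mirroring
	 * the !(setlist ^ SETLIST(ip_set_list[i])) test above. */
	for (pass = 0; pass < 2; pass++)
		for (i = 0; i < 3; i++)
			if (!(pass ^ sets[i].is_setlist))
				printf("save %s\n", sets[i].name);
	return 0;
}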
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_iphash.c
-@@ -0,0 +1,429 @@
-+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+@@ -0,0 +1,166 @@
++/* Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
++ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an ip hash set */
+
+#include <linux/module.h>
++#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+#include <linux/jhash.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
-+#include <linux/vmalloc.h>
+#include <linux/random.h>
+
+#include <net/ip.h>
+
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
+#include <linux/netfilter_ipv4/ip_set_iphash.h>
+
+static int limit = MAX_RANGE;
+
+static inline __u32
-+jhash_ip(const struct ip_set_iphash *map, uint16_t i, ip_set_ip_t ip)
++iphash_id(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
-+}
-+
-+static inline __u32
-+hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++ struct ip_set_iphash *map = set->data;
+ __u32 id;
+ u_int16_t i;
+ ip_set_ip_t *elem;
@@ -3215,198 +4056,81 @@
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
+ if (*elem == *hash_ip)
+ return id;
-+ /* No shortcut at testing - there can be deleted
-+ * entries. */
++ /* No shortcut - there can be deleted entries. */
+ }
+ return UINT_MAX;
+}
+
+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++iphash_test(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
++ return (ip && iphash_id(set, hash_ip, ip) != UINT_MAX);
+}
+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iphash *req =
-+ (struct ip_set_req_iphash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iphash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iphash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testip(set, req->ip, hash_ip);
-+}
++#define KADT_CONDITION
+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT(iphash, test)
++KADT(iphash, test, ipaddr)
+
+static inline int
-+__addip(struct ip_set_iphash *map, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++__iphash_add(struct ip_set_iphash *map, ip_set_ip_t *ip)
+{
+ __u32 probe;
+ u_int16_t i;
-+ ip_set_ip_t *elem;
-+
-+ if (!ip || map->elements >= limit)
-+ return -ERANGE;
-+
-+ *hash_ip = ip & map->netmask;
++ ip_set_ip_t *elem, *slot = NULL;
+
+ for (i = 0; i < map->probes; i++) {
-+ probe = jhash_ip(map, i, *hash_ip) % map->hashsize;
++ probe = jhash_ip(map, i, *ip) % map->hashsize;
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
-+ if (*elem == *hash_ip)
++ if (*elem == *ip)
+ return -EEXIST;
-+ if (!*elem) {
-+ *elem = *hash_ip;
-+ map->elements++;
-+ return 0;
-+ }
++ if (!(slot || *elem))
++ slot = elem;
++ /* There can be deleted entries, must check all slots */
++ }
++ if (slot) {
++ *slot = *ip;
++ map->elements++;
++ return 0;
+ }
+ /* Trigger rehashing */
+ return -EAGAIN;
+}
+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iphash *req =
-+ (struct ip_set_req_iphash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iphash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iphash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __addip((struct ip_set_iphash *) set->data, req->ip, hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __addip((struct ip_set_iphash *) set->data,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
-+
-+static int retry(struct ip_set *set)
++static inline int
++iphash_add(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ ip_set_ip_t hash_ip, *elem;
-+ void *members;
-+ u_int32_t i, hashsize = map->hashsize;
-+ int res;
-+ struct ip_set_iphash *tmp;
++ struct ip_set_iphash *map = set->data;
+
-+ if (map->resize == 0)
++ if (!ip || map->elements >= limit)
+ return -ERANGE;
+
-+ again:
-+ res = 0;
-+
-+ /* Calculate new hash size */
-+ hashsize += (hashsize * map->resize)/100;
-+ if (hashsize == map->hashsize)
-+ hashsize++;
-+
-+ ip_set_printk("rehashing of set %s triggered: "
-+ "hashsize grows from %u to %u",
-+ set->name, map->hashsize, hashsize);
-+
-+ tmp = kmalloc(sizeof(struct ip_set_iphash)
-+ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
-+ if (!tmp) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_iphash)
-+ + map->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
-+ if (!tmp->members) {
-+ DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
-+ kfree(tmp);
-+ return -ENOMEM;
-+ }
-+ tmp->hashsize = hashsize;
-+ tmp->elements = 0;
-+ tmp->probes = map->probes;
-+ tmp->resize = map->resize;
-+ tmp->netmask = map->netmask;
-+ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
-+
-+ write_lock_bh(&set->lock);
-+ map = (struct ip_set_iphash *) set->data; /* Play safe */
-+ for (i = 0; i < map->hashsize && res == 0; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ if (*elem)
-+ res = __addip(tmp, *elem, &hash_ip);
-+ }
-+ if (res) {
-+ /* Failure, try again */
-+ write_unlock_bh(&set->lock);
-+ harray_free(tmp->members);
-+ kfree(tmp);
-+ goto again;
-+ }
++ *hash_ip = ip & map->netmask;
+
-+ /* Success at resizing! */
-+ members = map->members;
-+
-+ map->hashsize = tmp->hashsize;
-+ map->members = tmp->members;
-+ write_unlock_bh(&set->lock);
++ return __iphash_add(map, hash_ip);
++}
+
-+ harray_free(members);
-+ kfree(tmp);
++UADT(iphash, add)
++KADT(iphash, add, ipaddr)
+
-+ return 0;
++static inline void
++__iphash_retry(struct ip_set_iphash *tmp, struct ip_set_iphash *map)
++{
++ tmp->netmask = map->netmask;
+}
+
++HASH_RETRY(iphash, ip_set_ip_t)
++
+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++iphash_del(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++ struct ip_set_iphash *map = set->data;
+ ip_set_ip_t id, *elem;
+
+ if (!ip)
+ return -ERANGE;
+
-+ id = hash_id(set, ip, hash_ip);
++ id = iphash_id(set, hash_ip, ip);
+ if (id == UINT_MAX)
+ return -EEXIST;
+
@@ -3417,159 +4141,35 @@
+ return 0;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iphash *req =
-+ (struct ip_set_req_iphash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iphash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iphash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT(iphash, del)
++KADT(iphash, del, ipaddr)
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static inline int
++__iphash_create(const struct ip_set_req_iphash_create *req,
++ struct ip_set_iphash *map)
+{
-+ struct ip_set_req_iphash_create *req =
-+ (struct ip_set_req_iphash_create *) data;
-+ struct ip_set_iphash *map;
-+ uint16_t i;
-+
-+ if (size != sizeof(struct ip_set_req_iphash_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iphash_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ if (req->hashsize < 1) {
-+ ip_set_printk("hashsize too small");
-+ return -ENOEXEC;
-+ }
-+
-+ if (req->probes < 1) {
-+ ip_set_printk("probes too small");
-+ return -ENOEXEC;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_iphash)
-+ + req->probes * sizeof(uint32_t), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_iphash)
-+ + req->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ for (i = 0; i < req->probes; i++)
-+ get_random_bytes(((uint32_t *) map->initval)+i, 4);
-+ map->elements = 0;
-+ map->hashsize = req->hashsize;
-+ map->probes = req->probes;
-+ map->resize = req->resize;
+ map->netmask = req->netmask;
-+ map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+
-+ set->data = map;
++
+ return 0;
+}
+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+
-+ harray_free(map->members);
-+ kfree(map);
-+
-+ set->data = NULL;
-+}
-+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
-+ map->elements = 0;
-+}
++HASH_CREATE(iphash, ip_set_ip_t)
++HASH_DESTROY(iphash)
+
-+static void list_header(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ struct ip_set_req_iphash_create *header =
-+ (struct ip_set_req_iphash_create *) data;
++HASH_FLUSH(iphash, ip_set_ip_t)
+
-+ header->hashsize = map->hashsize;
-+ header->probes = map->probes;
-+ header->resize = map->resize;
++static inline void
++__iphash_list_header(const struct ip_set_iphash *map,
++ struct ip_set_req_iphash_create *header)
++{
+ header->netmask = map->netmask;
+}
+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++HASH_LIST_HEADER(iphash)
++HASH_LIST_MEMBERS_SIZE(iphash, ip_set_ip_t)
++HASH_LIST_MEMBERS(iphash, ip_set_ip_t)
+
-+ return (map->hashsize * sizeof(ip_set_ip_t));
-+}
-+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ ip_set_ip_t i, *elem;
-+
-+ for (i = 0; i < map->hashsize; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ ((ip_set_ip_t *)data)[i] = *elem;
-+ }
-+}
-+
-+static struct ip_set_type ip_set_iphash = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_iphash),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .retry = &retry,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_iphash_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_RTYPE(iphash, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -3577,29 +4177,17 @@
+module_param(limit, int, 0600);
+MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
+
-+static int __init ip_set_iphash_init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_iphash);
-+}
-+
-+static void __exit ip_set_iphash_fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_iphash);
-+}
-+
-+module_init(ip_set_iphash_init);
-+module_exit(ip_set_iphash_fini);
++REGISTER_MODULE(iphash)
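Editor's note: the add path of iphash (and the other hash types below) probes a small, fixed number of slots per key, remembers the first free slot but still checks every probe because deleted entries leave holes, and returns -EAGAIN to trigger a rehash when all probes are occupied. A standalone sketch of that scheme follows; it is not part of the patch, and mix32() merely stands in for the kernel's jhash_1word().

#include <stdint.h>
#include <stdio.h>

#define HASHSIZE 8
#define PROBES   4

static uint32_t mix32(uint32_t v, uint32_t seed)
{
	v ^= seed; v *= 2654435761u; v ^= v >> 16;
	return v;
}

/* Returns 0 on insert, 1 if already present, -1 if a rehash is needed. */
static int toy_add(uint32_t table[HASHSIZE], const uint32_t seed[PROBES],
		   uint32_t ip)
{
	uint32_t *slot = NULL;
	int i;

	for (i = 0; i < PROBES; i++) {
		uint32_t *elem = &table[mix32(ip, seed[i]) % HASHSIZE];
		if (*elem == ip)
			return 1;
		if (!slot && !*elem)
			slot = elem;	/* remember hole, keep probing */
	}
	if (slot) {
		*slot = ip;
		return 0;
	}
	return -1;			/* all probes occupied: grow and rehash */
}

int main(void)
{
	uint32_t table[HASHSIZE] = { 0 };
	const uint32_t seed[PROBES] = { 1, 2, 3, 4 };

	printf("%d %d\n", toy_add(table, seed, 0x0a000001),
	       toy_add(table, seed, 0x0a000001));	/* prints: 0 1 */
	return 0;
}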
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_ipmap.c
-@@ -0,0 +1,336 @@
+@@ -0,0 +1,142 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
-+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ * Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
++ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the single bitmap type */
@@ -3607,9 +4195,6 @@
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
@@ -3624,9 +4209,9 @@
+}
+
+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++ipmap_test(const struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++ const struct ip_set_ipmap *map = set->data;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
@@ -3637,46 +4222,15 @@
+ return !!test_bit(ip_to_id(map, *hash_ip), map->members);
+}
+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipmap *req =
-+ (struct ip_set_req_ipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_ipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testip(set, req->ip, hash_ip);
-+}
++#define KADT_CONDITION
+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ int res = __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+ return (res < 0 ? 0 : res);
-+}
++UADT(ipmap, test)
++KADT(ipmap, test, ipaddr)
+
+static inline int
-+__addip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++ipmap_add(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++ struct ip_set_ipmap *map = set->data;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
@@ -3689,46 +4243,13 @@
+ return 0;
+}
+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipmap *req =
-+ (struct ip_set_req_ipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_ipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ DP("%u.%u.%u.%u", HIPQUAD(req->ip));
-+ return __addip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __addip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT(ipmap, add)
++KADT(ipmap, add, ipaddr)
+
-+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++static inline int
++ipmap_del(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++ struct ip_set_ipmap *map = set->data;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
@@ -3741,71 +4262,13 @@
+ return 0;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipmap *req =
-+ (struct ip_set_req_ipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_ipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT(ipmap, del)
++KADT(ipmap, del, ipaddr)
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static inline int
++__ipmap_create(const struct ip_set_req_ipmap_create *req,
++ struct ip_set_ipmap *map)
+{
-+ int newbytes;
-+ struct ip_set_req_ipmap_create *req =
-+ (struct ip_set_req_ipmap_create *) data;
-+ struct ip_set_ipmap *map;
-+
-+ if (size != sizeof(struct ip_set_req_ipmap_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipmap_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ DP("from %u.%u.%u.%u to %u.%u.%u.%u",
-+ HIPQUAD(req->from), HIPQUAD(req->to));
-+
-+ if (req->from > req->to) {
-+ DP("bad ip range");
-+ return -ENOEXEC;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_ipmap), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_ipmap));
-+ return -ENOMEM;
-+ }
-+ map->first_ip = req->from;
-+ map->last_ip = req->to;
+ map->netmask = req->netmask;
+
+ if (req->netmask == 0xFFFFFFFF) {
@@ -3814,12 +4277,12 @@
+ } else {
+ unsigned int mask_bits, netmask_bits;
+ ip_set_ip_t mask;
-+
++
+ map->first_ip &= map->netmask; /* Should we better bark? */
-+
++
+ mask = range_to_mask(map->first_ip, map->last_ip, &mask_bits);
+ netmask_bits = mask_to_bits(map->netmask);
-+
++
+ if ((!mask && (map->first_ip || map->last_ip != 0xFFFFFFFF))
+ || netmask_bits <= mask_bits)
+ return -ENOEXEC;
@@ -3830,213 +4293,83 @@
+ map->sizeid = 2 << (netmask_bits - mask_bits - 1);
+ }
+ if (map->sizeid > MAX_RANGE + 1) {
-+ ip_set_printk("range too big (max %d addresses)",
-+ MAX_RANGE+1);
-+ kfree(map);
++ ip_set_printk("range too big, %d elements (max %d)",
++ map->sizeid, MAX_RANGE+1);
+ return -ENOEXEC;
+ }
+ DP("hosts %u, sizeid %u", map->hosts, map->sizeid);
-+ newbytes = bitmap_bytes(0, map->sizeid - 1);
-+ map->members = kmalloc(newbytes, GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", newbytes);
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+ memset(map->members, 0, newbytes);
-+
-+ set->data = map;
-+ return 0;
-+}
-+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+
-+ kfree(map->members);
-+ kfree(map);
-+
-+ set->data = NULL;
++ return bitmap_bytes(0, map->sizeid - 1);
+}
+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+ memset(map->members, 0, bitmap_bytes(0, map->sizeid - 1));
-+}
++BITMAP_CREATE(ipmap)
++BITMAP_DESTROY(ipmap)
++BITMAP_FLUSH(ipmap)
+
-+static void list_header(const struct ip_set *set, void *data)
++static inline void
++__ipmap_list_header(const struct ip_set_ipmap *map,
++ struct ip_set_req_ipmap_create *header)
+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+ struct ip_set_req_ipmap_create *header =
-+ (struct ip_set_req_ipmap_create *) data;
-+
-+ header->from = map->first_ip;
-+ header->to = map->last_ip;
+ header->netmask = map->netmask;
+}
+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++BITMAP_LIST_HEADER(ipmap)
++BITMAP_LIST_MEMBERS_SIZE(ipmap)
++BITMAP_LIST_MEMBERS(ipmap)
+
-+ return bitmap_bytes(0, map->sizeid - 1);
-+}
-+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+ int bytes = bitmap_bytes(0, map->sizeid - 1);
-+
-+ memcpy(data, map->members, bytes);
-+}
-+
-+static struct ip_set_type ip_set_ipmap = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_ipmap),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_ipmap_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_TYPE(ipmap, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("ipmap type of IP sets");
+
-+static int __init ip_set_ipmap_init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_ipmap);
-+}
-+
-+static void __exit ip_set_ipmap_fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_ipmap);
-+}
-+
-+module_init(ip_set_ipmap_init);
-+module_exit(ip_set_ipmap_fini);
++REGISTER_MODULE(ipmap)
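Editor's note: when an ipmap is created with a netmask narrower than /32, the create path above sizes the bitmap as sizeid = 2 << (netmask_bits - mask_bits - 1), i.e. one bit per /netmask_bits subnet inside the /mask_bits range covering first_ip..last_ip. A standalone arithmetic sketch follows; it is not part of the patch and the example prefix lengths are made up.

#include <stdio.h>

int main(void)
{
	unsigned int mask_bits = 16;	/* range covered by a /16, e.g. 10.0.0.0-10.0.255.255 */
	unsigned int netmask_bits = 24;	/* one bit per /24 subnet */
	unsigned int sizeid = 2u << (netmask_bits - mask_bits - 1);

	printf("sizeid = %u\n", sizeid);	/* 256 /24s in a /16 */
	return 0;
}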
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_ipporthash.c
-@@ -0,0 +1,581 @@
-+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+@@ -0,0 +1,203 @@
++/* Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
++ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an ip+port hash set */
+
+#include <linux/module.h>
++#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+#include <linux/jhash.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
-+#include <linux/vmalloc.h>
+#include <linux/random.h>
+
+#include <net/ip.h>
+
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
+#include <linux/netfilter_ipv4/ip_set_ipporthash.h>
++#include <linux/netfilter_ipv4/ip_set_getport.h>
+
+static int limit = MAX_RANGE;
+
-+/* We must handle non-linear skbs */
-+static inline ip_set_ip_t
-+get_port(const struct sk_buff *skb, u_int32_t flags)
-+{
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ struct iphdr *iph = ip_hdr(skb);
-+#else
-+ struct iphdr *iph = skb->nh.iph;
-+#endif
-+ u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
-+
-+ switch (iph->protocol) {
-+ case IPPROTO_TCP: {
-+ struct tcphdr tcph;
-+
-+ /* See comments at tcp_match in ip_tables.c */
-+ if (offset)
-+ return INVALID_PORT;
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
-+#else
-+ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &tcph, sizeof(tcph)) < 0)
-+#endif
-+ /* No choice either */
-+ return INVALID_PORT;
-+
-+ return ntohs(flags & IPSET_SRC ?
-+ tcph.source : tcph.dest);
-+ }
-+ case IPPROTO_UDP: {
-+ struct udphdr udph;
-+
-+ if (offset)
-+ return INVALID_PORT;
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
-+#else
-+ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &udph, sizeof(udph)) < 0)
-+#endif
-+ /* No choice either */
-+ return INVALID_PORT;
-+
-+ return ntohs(flags & IPSET_SRC ?
-+ udph.source : udph.dest);
-+ }
-+ default:
-+ return INVALID_PORT;
-+ }
-+}
-+
+static inline __u32
-+jhash_ip(const struct ip_set_ipporthash *map, uint16_t i, ip_set_ip_t ip)
++ipporthash_id(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port)
+{
-+ return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
-+}
-+
-+#define HASH_IP(map, ip, port) (port + ((ip - ((map)->first_ip)) << 16))
-+
-+static inline __u32
-+hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_ipporthash *map =
-+ (struct ip_set_ipporthash *) set->data;
++ struct ip_set_ipporthash *map = set->data;
+ __u32 id;
+ u_int16_t i;
+ ip_set_ip_t *elem;
+
-+ *hash_ip = HASH_IP(map, ip, port);
++ *hash_ip = pack_ip_port(map, ip, port);
++
+ DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
+ set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
++ if (!*hash_ip)
++ return UINT_MAX;
+
+ for (i = 0; i < map->probes; i++) {
+ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
@@ -4044,522 +4377,723 @@
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
+ if (*elem == *hash_ip)
+ return id;
-+ /* No shortcut at testing - there can be deleted
-+ * entries. */
++ /* No shortcut - there can be deleted entries. */
+ }
+ return UINT_MAX;
+}
+
+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t *hash_ip)
++ipporthash_test(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++ struct ip_set_ipporthash *map = set->data;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
+
-+ return (hash_id(set, ip, port, hash_ip) != UINT_MAX);
++ return (ipporthash_id(set, hash_ip, ip, port) != UINT_MAX);
+}
+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipporthash *req =
-+ (struct ip_set_req_ipporthash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_ipporthash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipporthash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testip(set, req->ip, req->port, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ ip_set_ip_t port;
-+ int res;
-+
-+ if (flags[index+1] == 0)
++#define KADT_CONDITION \
++ ip_set_ip_t port; \
++ \
++ if (flags[index+1] == 0) \
++ return 0; \
++ \
++ port = get_port(skb, flags[index+1]); \
++ \
++ if (port == INVALID_PORT) \
+ return 0;
-+
-+ port = get_port(skb, flags[index+1]);
+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ NIPQUAD(ip_hdr(skb)->saddr),
-+ NIPQUAD(ip_hdr(skb)->daddr));
-+#else
-+ NIPQUAD(skb->nh.iph->saddr),
-+ NIPQUAD(skb->nh.iph->daddr));
-+#endif
-+ DP("flag %s port %u",
-+ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
-+ port);
-+ if (port == INVALID_PORT)
-+ return 0;
-+
-+ res = __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ port,
-+ hash_ip);
-+ return (res < 0 ? 0 : res);
-+
-+}
++UADT(ipporthash, test, req->port)
++KADT(ipporthash, test, ipaddr, port)
+
+static inline int
-+__add_haship(struct ip_set_ipporthash *map, ip_set_ip_t hash_ip)
++__ipporthash_add(struct ip_set_ipporthash *map, ip_set_ip_t *ip)
+{
+ __u32 probe;
+ u_int16_t i;
-+ ip_set_ip_t *elem;
++ ip_set_ip_t *elem, *slot = NULL;
+
+ for (i = 0; i < map->probes; i++) {
-+ probe = jhash_ip(map, i, hash_ip) % map->hashsize;
++ probe = jhash_ip(map, i, *ip) % map->hashsize;
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
-+ if (*elem == hash_ip)
++ if (*elem == *ip)
+ return -EEXIST;
-+ if (!*elem) {
-+ *elem = hash_ip;
-+ map->elements++;
-+ return 0;
-+ }
++ if (!(slot || *elem))
++ slot = elem;
++ /* There can be deleted entries, must check all slots */
++ }
++ if (slot) {
++ *slot = *ip;
++ map->elements++;
++ return 0;
+ }
+ /* Trigger rehashing */
+ return -EAGAIN;
+}
+
+static inline int
-+__addip(struct ip_set_ipporthash *map, ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t *hash_ip)
++ipporthash_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port)
+{
++ struct ip_set_ipporthash *map = set->data;
+ if (map->elements > limit)
+ return -ERANGE;
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
+
-+ *hash_ip = HASH_IP(map, ip, port);
++ *hash_ip = pack_ip_port(map, ip, port);
++
++ if (!*hash_ip)
++ return -ERANGE;
+
-+ return __add_haship(map, *hash_ip);
++ return __ipporthash_add(map, hash_ip);
+}
+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
++UADT(ipporthash, add, req->port)
++KADT(ipporthash, add, ipaddr, port)
++
++static inline void
++__ipporthash_retry(struct ip_set_ipporthash *tmp,
++ struct ip_set_ipporthash *map)
+{
-+ struct ip_set_req_ipporthash *req =
-+ (struct ip_set_req_ipporthash *) data;
++ tmp->first_ip = map->first_ip;
++ tmp->last_ip = map->last_ip;
++}
+
-+ if (size != sizeof(struct ip_set_req_ipporthash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipporthash),
-+ size);
-+ return -EINVAL;
++HASH_RETRY(ipporthash, ip_set_ip_t)
++
++static inline int
++ipporthash_del(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port)
++{
++ struct ip_set_ipporthash *map = set->data;
++ ip_set_ip_t id;
++ ip_set_ip_t *elem;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++
++ id = ipporthash_id(set, hash_ip, ip, port);
++
++ if (id == UINT_MAX)
++ return -EEXIST;
++
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
++ *elem = 0;
++ map->elements--;
++
++ return 0;
++}
++
++UADT(ipporthash, del, req->port)
++KADT(ipporthash, del, ipaddr, port)
++
++static inline int
++__ipporthash_create(const struct ip_set_req_ipporthash_create *req,
++ struct ip_set_ipporthash *map)
++{
++ if (req->to - req->from > MAX_RANGE) {
++ ip_set_printk("range too big, %d elements (max %d)",
++ req->to - req->from + 1, MAX_RANGE+1);
++ return -ENOEXEC;
+ }
-+ return __addip((struct ip_set_ipporthash *) set->data,
-+ req->ip, req->port, hash_ip);
++ map->first_ip = req->from;
++ map->last_ip = req->to;
++ return 0;
+}
+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
++HASH_CREATE(ipporthash, ip_set_ip_t)
++HASH_DESTROY(ipporthash)
++HASH_FLUSH(ipporthash, ip_set_ip_t)
++
++static inline void
++__ipporthash_list_header(const struct ip_set_ipporthash *map,
++ struct ip_set_req_ipporthash_create *header)
+{
-+ ip_set_ip_t port;
++ header->from = map->first_ip;
++ header->to = map->last_ip;
++}
+
-+ if (flags[index+1] == 0)
-+ return -EINVAL;
-+
-+ port = get_port(skb, flags[index+1]);
++HASH_LIST_HEADER(ipporthash)
++HASH_LIST_MEMBERS_SIZE(ipporthash, ip_set_ip_t)
++HASH_LIST_MEMBERS(ipporthash, ip_set_ip_t)
+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ NIPQUAD(ip_hdr(skb)->saddr),
-+ NIPQUAD(ip_hdr(skb)->daddr));
-+#else
-+ NIPQUAD(skb->nh.iph->saddr),
-+ NIPQUAD(skb->nh.iph->daddr));
-+#endif
-+ DP("flag %s port %u",
-+ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
-+ port);
-+ if (port == INVALID_PORT)
-+ return -EINVAL;
++IP_SET_RTYPE(ipporthash, IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_DATA_DOUBLE)
+
-+ return __addip((struct ip_set_ipporthash *) set->data,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ port,
-+ hash_ip);
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("ipporthash type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
++
++REGISTER_MODULE(ipporthash)
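Editor's note: the removed HASH_IP() macro earlier in this file packed an address offset and a port into one 32-bit key as port + ((ip - first_ip) << 16); the pack_ip_port() helper that replaces it lives in a header not shown in this hunk and is assumed here to produce an equivalent packed key. With the offset in the upper 16 bits, a set can only cover MAX_RANGE addresses, which is why the create path above rejects larger from/to ranges. A standalone sketch of the packing, not part of the patch:

#include <stdint.h>
#include <stdio.h>

/* Assumed behaviour, modelled on the removed HASH_IP() macro. */
static uint32_t pack(uint32_t first_ip, uint32_t ip, uint16_t port)
{
	return (uint32_t)port + ((ip - first_ip) << 16);
}

int main(void)
{
	uint32_t first = 0x0a000000;			/* 10.0.0.0 */
	uint32_t key = pack(first, 0x0a000005, 80);	/* 10.0.0.5:80 */

	printf("key = 0x%08x\n", (unsigned int)key);	/* 0x00050050 */
	return 0;
}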
+--- /dev/null
++++ b/net/ipv4/netfilter/ip_set_ipportiphash.c
+@@ -0,0 +1,216 @@
++/* Copyright (C) 2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module implementing an ip+port+ip hash set */
++
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/ip.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++#include <linux/random.h>
++
++#include <net/ip.h>
++
++#include <linux/netfilter_ipv4/ip_set_ipportiphash.h>
++#include <linux/netfilter_ipv4/ip_set_getport.h>
++
++static int limit = MAX_RANGE;
++
++#define jhash_ip2(map, i, ipport, ip1) \
++ jhash_2words(ipport, ip1, *(map->initval + i))
++
++static inline __u32
++ipportiphash_id(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
++{
++ struct ip_set_ipportiphash *map = set->data;
++ __u32 id;
++ u_int16_t i;
++ struct ipportip *elem;
++
++ *hash_ip = pack_ip_port(map, ip, port);
++ DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
++ set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
++ if (!(*hash_ip || ip1))
++ return UINT_MAX;
++
++ for (i = 0; i < map->probes; i++) {
++ id = jhash_ip2(map, i, *hash_ip, ip1) % map->hashsize;
++ DP("hash key: %u", id);
++ elem = HARRAY_ELEM(map->members, struct ipportip *, id);
++ if (elem->ip == *hash_ip && elem->ip1 == ip1)
++ return id;
++ /* No shortcut - there can be deleted entries. */
++ }
++ return UINT_MAX;
+}
+
-+static int retry(struct ip_set *set)
++static inline int
++ipportiphash_test(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ ip_set_ip_t *elem;
-+ void *members;
-+ u_int32_t i, hashsize = map->hashsize;
-+ int res;
-+ struct ip_set_ipporthash *tmp;
++ struct ip_set_ipportiphash *map = set->data;
+
-+ if (map->resize == 0)
++ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
+
-+ again:
-+ res = 0;
-+
-+ /* Calculate new hash size */
-+ hashsize += (hashsize * map->resize)/100;
-+ if (hashsize == map->hashsize)
-+ hashsize++;
-+
-+ ip_set_printk("rehashing of set %s triggered: "
-+ "hashsize grows from %u to %u",
-+ set->name, map->hashsize, hashsize);
-+
-+ tmp = kmalloc(sizeof(struct ip_set_ipporthash)
-+ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
-+ if (!tmp) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_ipporthash)
-+ + map->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
-+ if (!tmp->members) {
-+ DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
-+ kfree(tmp);
-+ return -ENOMEM;
++ return (ipportiphash_id(set, hash_ip, ip, port, ip1) != UINT_MAX);
++}
++
++#define KADT_CONDITION \
++ ip_set_ip_t port, ip1; \
++ \
++ if (flags[index+2] == 0) \
++ return 0; \
++ \
++ port = get_port(skb, flags[index+1]); \
++ ip1 = ipaddr(skb, flags[index+2]); \
++ \
++ if (port == INVALID_PORT) \
++ return 0;
++
++UADT(ipportiphash, test, req->port, req->ip1)
++KADT(ipportiphash, test, ipaddr, port, ip1)
++
++static inline int
++__ipportip_add(struct ip_set_ipportiphash *map,
++ ip_set_ip_t hash_ip, ip_set_ip_t ip1)
++{
++ __u32 probe;
++ u_int16_t i;
++ struct ipportip *elem, *slot = NULL;
++
++ for (i = 0; i < map->probes; i++) {
++ probe = jhash_ip2(map, i, hash_ip, ip1) % map->hashsize;
++ elem = HARRAY_ELEM(map->members, struct ipportip *, probe);
++ if (elem->ip == hash_ip && elem->ip1 == ip1)
++ return -EEXIST;
++ if (!(slot || elem->ip || elem->ip1))
++ slot = elem;
++ /* There can be deleted entries, must check all slots */
+ }
-+ tmp->hashsize = hashsize;
-+ tmp->elements = 0;
-+ tmp->probes = map->probes;
-+ tmp->resize = map->resize;
-+ tmp->first_ip = map->first_ip;
-+ tmp->last_ip = map->last_ip;
-+ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
-+
-+ write_lock_bh(&set->lock);
-+ map = (struct ip_set_ipporthash *) set->data; /* Play safe */
-+ for (i = 0; i < map->hashsize && res == 0; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ if (*elem)
-+ res = __add_haship(tmp, *elem);
-+ }
-+ if (res) {
-+ /* Failure, try again */
-+ write_unlock_bh(&set->lock);
-+ harray_free(tmp->members);
-+ kfree(tmp);
-+ goto again;
++ if (slot) {
++ slot->ip = hash_ip;
++ slot->ip1 = ip1;
++ map->elements++;
++ return 0;
+ }
++ /* Trigger rehashing */
++ return -EAGAIN;
++}
++
++static inline int
++__ipportiphash_add(struct ip_set_ipportiphash *map,
++ struct ipportip *elem)
++{
++ return __ipportip_add(map, elem->ip, elem->ip1);
++}
++
++static inline int
++ipportiphash_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
++{
++ struct ip_set_ipportiphash *map = set->data;
+
-+ /* Success at resizing! */
-+ members = map->members;
++ if (map->elements > limit)
++ return -ERANGE;
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
+
-+ map->hashsize = tmp->hashsize;
-+ map->members = tmp->members;
-+ write_unlock_bh(&set->lock);
++ *hash_ip = pack_ip_port(map, ip, port);
++ if (!(*hash_ip || ip1))
++ return -ERANGE;
++
++ return __ipportip_add(map, *hash_ip, ip1);
++}
+
-+ harray_free(members);
-+ kfree(tmp);
++UADT(ipportiphash, add, req->port, req->ip1)
++KADT(ipportiphash, add, ipaddr, port, ip1)
+
-+ return 0;
++static inline void
++__ipportiphash_retry(struct ip_set_ipportiphash *tmp,
++ struct ip_set_ipportiphash *map)
++{
++ tmp->first_ip = map->first_ip;
++ tmp->last_ip = map->last_ip;
+}
+
++HASH_RETRY2(ipportiphash, struct ipportip)
++
+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t *hash_ip)
++ipportiphash_del(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++ struct ip_set_ipportiphash *map = set->data;
+ ip_set_ip_t id;
-+ ip_set_ip_t *elem;
++ struct ipportip *elem;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
+
-+ id = hash_id(set, ip, port, hash_ip);
++ id = ipportiphash_id(set, hash_ip, ip, port, ip1);
+
+ if (id == UINT_MAX)
+ return -EEXIST;
+
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
-+ *elem = 0;
++ elem = HARRAY_ELEM(map->members, struct ipportip *, id);
++ elem->ip = elem->ip1 = 0;
+ map->elements--;
+
+ return 0;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipporthash *req =
-+ (struct ip_set_req_ipporthash *) data;
++UADT(ipportiphash, del, req->port, req->ip1)
++KADT(ipportiphash, del, ipaddr, port, ip1)
+
-+ if (size != sizeof(struct ip_set_req_ipporthash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipporthash),
-+ size);
-+ return -EINVAL;
++static inline int
++__ipportiphash_create(const struct ip_set_req_ipportiphash_create *req,
++ struct ip_set_ipportiphash *map)
++{
++ if (req->to - req->from > MAX_RANGE) {
++ ip_set_printk("range too big, %d elements (max %d)",
++ req->to - req->from + 1, MAX_RANGE+1);
++ return -ENOEXEC;
+ }
-+ return __delip(set, req->ip, req->port, hash_ip);
++ map->first_ip = req->from;
++ map->last_ip = req->to;
++ return 0;
+}
+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
++HASH_CREATE(ipportiphash, struct ipportip)
++HASH_DESTROY(ipportiphash)
++HASH_FLUSH(ipportiphash, struct ipportip)
++
++static inline void
++__ipportiphash_list_header(const struct ip_set_ipportiphash *map,
++ struct ip_set_req_ipportiphash_create *header)
+{
-+ ip_set_ip_t port;
++ header->from = map->first_ip;
++ header->to = map->last_ip;
++}
+
-+ if (flags[index+1] == 0)
-+ return -EINVAL;
-+
-+ port = get_port(skb, flags[index+1]);
++HASH_LIST_HEADER(ipportiphash)
++HASH_LIST_MEMBERS_SIZE(ipportiphash, struct ipportip)
++HASH_LIST_MEMBERS_MEMCPY(ipportiphash, struct ipportip)
+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ NIPQUAD(ip_hdr(skb)->saddr),
-+ NIPQUAD(ip_hdr(skb)->daddr));
-+#else
-+ NIPQUAD(skb->nh.iph->saddr),
-+ NIPQUAD(skb->nh.iph->daddr));
-+#endif
-+ DP("flag %s port %u",
-+ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
-+ port);
-+ if (port == INVALID_PORT)
-+ return -EINVAL;
++IP_SET_RTYPE(ipportiphash, IPSET_TYPE_IP | IPSET_TYPE_PORT
++ | IPSET_TYPE_IP1 | IPSET_DATA_TRIPLE)
+
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ port,
-+ hash_ip);
-+}
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("ipportiphash type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
++
++REGISTER_MODULE(ipportiphash)
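Editor's note: ipportiphash stores two-field elements, the packed ip:port key plus a second address, hashed together via jhash_2words(); a slot counts as free only when both fields are zero, and a match requires both fields to agree. A standalone sketch follows; it is not part of the patch, mix2() stands in for the kernel's jhash_2words(), and the sample values are made up.

#include <stdint.h>
#include <stdio.h>

#define HASHSIZE 8

struct ipportip { uint32_t ip; uint32_t ip1; };

static uint32_t mix2(uint32_t a, uint32_t b, uint32_t seed)
{
	a ^= seed; a *= 2654435761u; a ^= b; a *= 2246822519u;
	return a ^ (a >> 16);
}

int main(void)
{
	struct ipportip table[HASHSIZE] = { { 0, 0 } };
	uint32_t ipport = 0x00050050, ip1 = 0x0a000009, seed = 7;
	struct ipportip *e = &table[mix2(ipport, ip1, seed) % HASHSIZE];

	if (!(e->ip || e->ip1)) {	/* free only when both fields are zero */
		e->ip = ipport;
		e->ip1 = ip1;
	}
	/* A hit needs both fields to match, as in ipportiphash_id(). */
	printf("stored: %d\n", e->ip == ipport && e->ip1 == ip1);	/* 1 */
	return 0;
}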
+--- /dev/null
++++ b/net/ipv4/netfilter/ip_set_ipportnethash.c
+@@ -0,0 +1,304 @@
++/* Copyright (C) 2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module implementing an ip+port+net hash set */
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/ip.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++#include <linux/random.h>
++
++#include <net/ip.h>
++
++#include <linux/netfilter_ipv4/ip_set_ipportnethash.h>
++#include <linux/netfilter_ipv4/ip_set_getport.h>
++
++static int limit = MAX_RANGE;
++
++#define jhash_ip2(map, i, ipport, ip1) \
++ jhash_2words(ipport, ip1, *(map->initval + i))
++
++static inline __u32
++ipportnethash_id_cidr(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port,
++ ip_set_ip_t ip1, uint8_t cidr)
+{
-+ struct ip_set_req_ipporthash_create *req =
-+ (struct ip_set_req_ipporthash_create *) data;
-+ struct ip_set_ipporthash *map;
-+ uint16_t i;
++ struct ip_set_ipportnethash *map = set->data;
++ __u32 id;
++ u_int16_t i;
++ struct ipportip *elem;
+
-+ if (size != sizeof(struct ip_set_req_ipporthash_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipporthash_create),
-+ size);
-+ return -EINVAL;
++ *hash_ip = pack_ip_port(map, ip, port);
++ DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
++ set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
++ ip1 = pack_ip_cidr(ip1, cidr);
++ if (!(*hash_ip || ip1))
++ return UINT_MAX;
++
++ for (i = 0; i < map->probes; i++) {
++ id = jhash_ip2(map, i, *hash_ip, ip1) % map->hashsize;
++ DP("hash key: %u", id);
++ elem = HARRAY_ELEM(map->members, struct ipportip *, id);
++ if (elem->ip == *hash_ip && elem->ip1 == ip1)
++ return id;
++ /* No shortcut - there can be deleted entries. */
+ }
++ return UINT_MAX;
++}
+
-+ if (req->hashsize < 1) {
-+ ip_set_printk("hashsize too small");
-+ return -ENOEXEC;
-+ }
++static inline __u32
++ipportnethash_id(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
++{
++ struct ip_set_ipportnethash *map = set->data;
++ __u32 id = UINT_MAX;
++ int i;
+
-+ if (req->probes < 1) {
-+ ip_set_printk("probes too small");
-+ return -ENOEXEC;
++ for (i = 0; i < 30 && map->cidr[i]; i++) {
++ id = ipportnethash_id_cidr(set, hash_ip, ip, port, ip1,
++ map->cidr[i]);
++ if (id != UINT_MAX)
++ break;
+ }
++ return id;
++}
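ipportnethash_id() above cannot know which prefix length a packet might match, so it repeats the hash lookup once per prefix length currently stored in the set and stops at the first hit; map->cidr[] holds the active lengths and the loop treats a zero entry as the terminator. Below is a rough userspace sketch of that outer loop. The packing here (network part shifted left, prefix length appended) is only an illustration: the real pack_ip_cidr() lives in a shared header that this hunk does not show, and its encoding may differ.

#include <stdint.h>
#include <stdio.h>

/* simplified stand-in for pack_ip_cidr(): network part plus prefix length */
static uint64_t pack_net(uint32_t ip, uint8_t cidr)
{
	uint32_t mask = cidr ? 0xffffffffu << (32 - cidr) : 0;
	return ((uint64_t)(ip & mask) << 8) | cidr;
}

/* pretend set: active prefix lengths (zero-terminated) and one stored key */
static uint8_t  cidr[30] = { 24, 16, 0 };
static uint64_t stored_key;

static int lookup_one(uint32_t ip, uint8_t c)
{
	return pack_net(ip, c) == stored_key;   /* the real code probes a hash here */
}

static int lookup(uint32_t ip)
{
	for (int i = 0; i < 30 && cidr[i]; i++) /* same loop shape as ipportnethash_id() */
		if (lookup_one(ip, cidr[i]))
			return 1;
	return 0;
}

int main(void)
{
	stored_key = pack_net(0xc0a80100u, 24);  /* 192.168.1.0/24 */
	printf("%d\n", lookup(0xc0a80117u));     /* 192.168.1.23 -> 1 */
	printf("%d\n", lookup(0xc0a80217u));     /* 192.168.2.23 -> 0 */
	return 0;
}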
+
-+ map = kmalloc(sizeof(struct ip_set_ipporthash)
-+ + req->probes * sizeof(uint32_t), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_ipporthash)
-+ + req->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ for (i = 0; i < req->probes; i++)
-+ get_random_bytes(((uint32_t *) map->initval)+i, 4);
-+ map->elements = 0;
-+ map->hashsize = req->hashsize;
-+ map->probes = req->probes;
-+ map->resize = req->resize;
-+ map->first_ip = req->from;
-+ map->last_ip = req->to;
-+ map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
-+ kfree(map);
-+ return -ENOMEM;
-+ }
++static inline int
++ipportnethash_test_cidr(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port,
++ ip_set_ip_t ip1, uint8_t cidr)
++{
++ struct ip_set_ipportnethash *map = set->data;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
+
-+ set->data = map;
-+ return 0;
++ return (ipportnethash_id_cidr(set, hash_ip, ip, port, ip1,
++ cidr) != UINT_MAX);
+}
+
-+static void destroy(struct ip_set *set)
++static inline int
++ipportnethash_test(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+
-+ harray_free(map->members);
-+ kfree(map);
++ struct ip_set_ipportnethash *map = set->data;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
+
-+ set->data = NULL;
++ return (ipportnethash_id(set, hash_ip, ip, port, ip1) != UINT_MAX);
+}
+
-+static void flush(struct ip_set *set)
++static int
++ipportnethash_utest(struct ip_set *set, const void *data, u_int32_t size,
++ ip_set_ip_t *hash_ip)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
-+ map->elements = 0;
++ const struct ip_set_req_ipportnethash *req = data;
++
++ if (req->cidr <= 0 || req->cidr > 32)
++ return -EINVAL;
++ return (req->cidr == 32
++ ? ipportnethash_test(set, hash_ip, req->ip, req->port,
++ req->ip1)
++ : ipportnethash_test_cidr(set, hash_ip, req->ip, req->port,
++ req->ip1, req->cidr));
+}
+
-+static void list_header(const struct ip_set *set, void *data)
++#define KADT_CONDITION \
++ ip_set_ip_t port, ip1; \
++ \
++ if (flags[index+2] == 0) \
++ return 0; \
++ \
++ port = get_port(skb, flags[index+1]); \
++ ip1 = ipaddr(skb, flags[index+2]); \
++ \
++ if (port == INVALID_PORT) \
++ return 0;
++
++KADT(ipportnethash, test, ipaddr, port, ip1)
++
++static inline int
++__ipportnet_add(struct ip_set_ipportnethash *map,
++ ip_set_ip_t hash_ip, ip_set_ip_t ip1)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ struct ip_set_req_ipporthash_create *header =
-+ (struct ip_set_req_ipporthash_create *) data;
++ __u32 probe;
++ u_int16_t i;
++ struct ipportip *elem, *slot = NULL;
+
-+ header->hashsize = map->hashsize;
-+ header->probes = map->probes;
-+ header->resize = map->resize;
-+ header->from = map->first_ip;
-+ header->to = map->last_ip;
++ for (i = 0; i < map->probes; i++) {
++ probe = jhash_ip2(map, i, hash_ip, ip1) % map->hashsize;
++ elem = HARRAY_ELEM(map->members, struct ipportip *, probe);
++ if (elem->ip == hash_ip && elem->ip1 == ip1)
++ return -EEXIST;
++ if (!(slot || elem->ip || elem->ip1))
++ slot = elem;
++ /* There can be deleted entries, must check all slots */
++ }
++ if (slot) {
++ slot->ip = hash_ip;
++ slot->ip1 = ip1;
++ map->elements++;
++ return 0;
++ }
++ /* Trigger rehashing */
++ return -EAGAIN;
+}
+
-+static int list_members_size(const struct ip_set *set)
++static inline int
++__ipportnethash_add(struct ip_set_ipportnethash *map,
++ struct ipportip *elem)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+
-+ return (map->hashsize * sizeof(ip_set_ip_t));
++ return __ipportnet_add(map, elem->ip, elem->ip1);
+}
+
-+static void list_members(const struct ip_set *set, void *data)
++static inline int
++ipportnethash_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port,
++ ip_set_ip_t ip1, uint8_t cidr)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ ip_set_ip_t i, *elem;
++ struct ip_set_ipportnethash *map = set->data;
++ struct ipportip;
++ int ret;
++
++ if (map->elements > limit)
++ return -ERANGE;
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++ if (cidr <= 0 || cidr >= 32)
++ return -EINVAL;
++ if (map->nets[cidr-1] == UINT16_MAX)
++ return -ERANGE;
+
-+ for (i = 0; i < map->hashsize; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ ((ip_set_ip_t *)data)[i] = *elem;
++ *hash_ip = pack_ip_port(map, ip, port);
++ ip1 = pack_ip_cidr(ip1, cidr);
++ if (!(*hash_ip || ip1))
++ return -ERANGE;
++
++ ret =__ipportnet_add(map, *hash_ip, ip1);
++ if (ret == 0) {
++ if (!map->nets[cidr-1]++)
++ add_cidr_size(map->cidr, cidr);
++ map->elements++;
+ }
++ return ret;
+}
+
-+static struct ip_set_type ip_set_ipporthash = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_DATA_DOUBLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_ipporthash),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .retry = &retry,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_ipporthash_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++#undef KADT_CONDITION
++#define KADT_CONDITION \
++ struct ip_set_ipportnethash *map = set->data; \
++ uint8_t cidr = map->cidr[0] ? map->cidr[0] : 31; \
++ ip_set_ip_t port, ip1; \
++ \
++ if (flags[index+2] == 0) \
++ return 0; \
++ \
++ port = get_port(skb, flags[index+1]); \
++ ip1 = ipaddr(skb, flags[index+2]); \
++ \
++ if (port == INVALID_PORT) \
++ return 0;
+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("ipporthash type of IP sets");
-+module_param(limit, int, 0600);
-+MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
++UADT(ipportnethash, add, req->port, req->ip1, req->cidr)
++KADT(ipportnethash, add, ipaddr, port, ip1, cidr)
+
-+static int __init ip_set_ipporthash_init(void)
++static inline void
++__ipportnethash_retry(struct ip_set_ipportnethash *tmp,
++ struct ip_set_ipportnethash *map)
+{
-+ return ip_set_register_set_type(&ip_set_ipporthash);
++ tmp->first_ip = map->first_ip;
++ tmp->last_ip = map->last_ip;
++ memcpy(tmp->cidr, map->cidr, sizeof(tmp->cidr));
++ memcpy(tmp->nets, map->nets, sizeof(tmp->nets));
+}
+
-+static void __exit ip_set_ipporthash_fini(void)
++HASH_RETRY2(ipportnethash, struct ipportip)
++
++static inline int
++ipportnethash_del(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port,
++ ip_set_ip_t ip1, uint8_t cidr)
+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_ipporthash);
++ struct ip_set_ipportnethash *map = set->data;
++ ip_set_ip_t id;
++ struct ipportip *elem;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++ if (!ip)
++ return -ERANGE;
++ if (cidr <= 0 || cidr >= 32)
++ return -EINVAL;
++
++ id = ipportnethash_id_cidr(set, hash_ip, ip, port, ip1, cidr);
++
++ if (id == UINT_MAX)
++ return -EEXIST;
++
++ elem = HARRAY_ELEM(map->members, struct ipportip *, id);
++ elem->ip = elem->ip1 = 0;
++ map->elements--;
++ if (!map->nets[cidr-1]--)
++ del_cidr_size(map->cidr, cidr);
++
++ return 0;
+}
+
-+module_init(ip_set_ipporthash_init);
-+module_exit(ip_set_ipporthash_fini);
++UADT(ipportnethash, del, req->port, req->ip1, req->cidr)
++KADT(ipportnethash, del, ipaddr, port, ip1, cidr)
++
++static inline int
++__ipportnethash_create(const struct ip_set_req_ipportnethash_create *req,
++ struct ip_set_ipportnethash *map)
++{
++ if (req->to - req->from > MAX_RANGE) {
++ ip_set_printk("range too big, %d elements (max %d)",
++ req->to - req->from + 1, MAX_RANGE+1);
++ return -ENOEXEC;
++ }
++ map->first_ip = req->from;
++ map->last_ip = req->to;
++ memset(map->cidr, 0, sizeof(map->cidr));
++ memset(map->nets, 0, sizeof(map->nets));
++ return 0;
++}
++
++HASH_CREATE(ipportnethash, struct ipportip)
++HASH_DESTROY(ipportnethash)
++HASH_FLUSH_CIDR(ipportnethash, struct ipportip);
++
++static inline void
++__ipportnethash_list_header(const struct ip_set_ipportnethash *map,
++ struct ip_set_req_ipportnethash_create *header)
++{
++ header->from = map->first_ip;
++ header->to = map->last_ip;
++}
++
++HASH_LIST_HEADER(ipportnethash)
++
++HASH_LIST_MEMBERS_SIZE(ipportnethash, struct ipportip)
++HASH_LIST_MEMBERS_MEMCPY(ipportnethash, struct ipportip)
++
++IP_SET_RTYPE(ipportnethash, IPSET_TYPE_IP | IPSET_TYPE_PORT
++ | IPSET_TYPE_IP1 | IPSET_DATA_TRIPLE)
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("ipportnethash type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
++
++REGISTER_MODULE(ipportnethash)
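Both *nethash types keep a per-prefix-length reference count (map->nets[cidr-1]) and only update the cidr[] lookup list when a count rises from zero or falls back to zero, through add_cidr_size()/del_cidr_size() from shared code that this hunk does not show. The sketch below illustrates what that bookkeeping amounts to; the ordering used here (most specific prefix first) is an assumption about those helpers, not something visible in the patch.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static uint8_t  cidr[30];      /* active prefix lengths, zero-terminated */
static uint16_t nets[31];      /* reference count per prefix length 1..31 */

/* sketch only: keep the list sorted most-specific (largest prefix) first */
static void add_cidr_size(uint8_t c)
{
	int i;

	for (i = 0; i < 30 && cidr[i] >= c; i++)
		;
	if (i >= 30)
		return;                 /* list full; the sketch just drops it */
	memmove(&cidr[i + 1], &cidr[i], (29 - i) * sizeof(cidr[0]));
	cidr[i] = c;
}

static void del_cidr_size(uint8_t c)
{
	int i;

	for (i = 0; i < 30 && cidr[i] != c; i++)
		;
	if (i >= 30)
		return;
	memmove(&cidr[i], &cidr[i + 1], (29 - i) * sizeof(cidr[0]));
	cidr[29] = 0;
}

/* the list is only touched on a 0 -> 1 or 1 -> 0 transition of the refcount */
static void net_added(uint8_t c)   { if (!nets[c - 1]++) add_cidr_size(c); }
static void net_deleted(uint8_t c) { if (nets[c - 1] && !--nets[c - 1]) del_cidr_size(c); }

int main(void)
{
	net_added(24); net_added(24); net_added(16);
	printf("refs: %d %d  list: /%d /%d\n", nets[23], nets[15], cidr[0], cidr[1]);
	net_deleted(24); net_deleted(24);
	printf("list now starts with /%d\n", cidr[0]);
	return 0;
}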
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_iptree.c
-@@ -0,0 +1,612 @@
-+/* Copyright (C) 2005 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+@@ -0,0 +1,466 @@
++/* Copyright (C) 2005-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
++ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the iptree type */
+
-+#include <linux/version.h>
+#include <linux/module.h>
++#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
++#include <linux/timer.h>
+
-+/* Backward compatibility */
-+#ifndef __nocast
-+#define __nocast
-+#endif
-+
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
+#include <linux/netfilter_ipv4/ip_set_iptree.h>
+
+static int limit = MAX_RANGE;
+
+/* Garbage collection interval in seconds: */
+#define IPTREE_GC_TIME 5*60
-+/* Sleep so many milliseconds before trying again
-+ * to delete the gc timer at destroying/flushing a set */
++/* Sleep so many milliseconds before trying again
++ * to delete the gc timer at destroying/flushing a set */
+#define IPTREE_DESTROY_SLEEP 100
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+static struct kmem_cache *branch_cachep;
-+static struct kmem_cache *leaf_cachep;
-+#else
-+static kmem_cache_t *branch_cachep;
-+static kmem_cache_t *leaf_cachep;
-+#endif
++static __KMEM_CACHE_T__ *branch_cachep;
++static __KMEM_CACHE_T__ *leaf_cachep;
++
+
+#if defined(__LITTLE_ENDIAN)
+#define ABCD(a,b,c,d,addrp) do { \
@@ -4587,9 +5121,9 @@
+} while (0)
+
+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++iptree_test(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptree *map = set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
@@ -4610,53 +5144,10 @@
+ || time_after(dtree->expires[d], jiffies));
+}
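iptree_test() above walks a three-level tree: the first octet of the address selects a b-level node, the second a c-level node, the third a d-level leaf, and the last octet indexes the leaf's expires[] array. A self-contained sketch of the same layout, using plain calloc() instead of the slab caches and a simple membership byte instead of the expiry timestamps:

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

struct leaf  { unsigned char member[256]; };    /* expires[] in the real code */
struct cnode { struct leaf  *d[256]; };
struct bnode { struct cnode *c[256]; };
struct tree  { struct bnode *b[256]; };

#define OCTETS(ip, a, b, c, d) \
	(a = (ip) >> 24, b = ((ip) >> 16) & 0xff, c = ((ip) >> 8) & 0xff, d = (ip) & 0xff)

static int tree_test(const struct tree *t, uint32_t ip)
{
	unsigned a, b, c, d;

	OCTETS(ip, a, b, c, d);
	return t->b[a] && t->b[a]->c[b] && t->b[a]->c[b]->d[c]
	       && t->b[a]->c[b]->d[c]->member[d];
}

static int tree_add(struct tree *t, uint32_t ip)
{
	unsigned a, b, c, d;

	OCTETS(ip, a, b, c, d);
	if (!t->b[a] && !(t->b[a] = calloc(1, sizeof(struct bnode))))
		return -1;
	if (!t->b[a]->c[b] && !(t->b[a]->c[b] = calloc(1, sizeof(struct cnode))))
		return -1;
	if (!t->b[a]->c[b]->d[c] && !(t->b[a]->c[b]->d[c] = calloc(1, sizeof(struct leaf))))
		return -1;
	t->b[a]->c[b]->d[c]->member[d] = 1;
	return 0;
}

int main(void)
{
	static struct tree t;

	tree_add(&t, 0xc0a80001);               /* 192.168.0.1 */
	printf("%d %d\n", tree_test(&t, 0xc0a80001), tree_test(&t, 0x08080808));
	return 0;
}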
+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iptree *req =
-+ (struct ip_set_req_iptree *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptree)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iptree),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ int res;
-+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ NIPQUAD(ip_hdr(skb)->saddr),
-+ NIPQUAD(ip_hdr(skb)->daddr));
-+#else
-+ NIPQUAD(skb->nh.iph->saddr),
-+ NIPQUAD(skb->nh.iph->daddr));
-+#endif
++#define KADT_CONDITION
+
-+ res = __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+ return (res < 0 ? 0 : res);
-+}
++UADT(iptree, test)
++KADT(iptree, test, ipaddr)
+
+#define ADDIP_WALK(map, elem, branch, type, cachep) do { \
+ if ((map)->tree[elem]) { \
@@ -4674,10 +5165,10 @@
+} while (0)
+
+static inline int
-+__addip(struct ip_set *set, ip_set_ip_t ip, unsigned int timeout,
-+ ip_set_ip_t *hash_ip)
++iptree_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, unsigned int timeout)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptree *map = set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
@@ -4698,6 +5189,8 @@
+ if (dtree->expires[d]
+ && (!map->timeout || time_after(dtree->expires[d], jiffies)))
+ ret = -EEXIST;
++ if (map->timeout && timeout == 0)
++ timeout = map->timeout;
+ dtree->expires[d] = map->timeout ? (timeout * HZ + jiffies) : 1;
+ /* Lottery: I won! */
+ if (dtree->expires[d] == 0)
@@ -4708,47 +5201,8 @@
+ return ret;
+}
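Two details of the add path above are easy to miss: a per-entry timeout of 0 now falls back to the per-set default (the newly added lines), and the stored expiry may never end up as 0, since 0 marks an empty slot, which is what the "Lottery: I won!" fix-up guards against. A tiny sketch of the same expiry arithmetic, with a fake tick counter in place of jiffies/HZ:

#include <stdio.h>

#define HZ 100                                  /* ticks per second, stand-in */
static unsigned long jiffies;                   /* fake clock, advanced by hand */

/* value to store in expires[]: 0 is reserved for "no entry" */
static unsigned long entry_expiry(unsigned set_timeout, unsigned timeout)
{
	unsigned long e;

	if (set_timeout && timeout == 0)
		timeout = set_timeout;          /* fall back to the set default */
	e = set_timeout ? timeout * HZ + jiffies : 1;   /* 1 == "set, never expires" */
	if (e == 0)                             /* wrapped exactly onto 0 */
		e = 1;
	return e;
}

static int entry_alive(unsigned set_timeout, unsigned long expires)
{
	return expires && (!set_timeout || expires > jiffies);  /* time_after() analogue */
}

int main(void)
{
	unsigned long e = entry_expiry(30, 0);  /* 30 s set default, no per-entry value */

	printf("alive now: %d\n", entry_alive(30, e));
	jiffies += 31 * HZ;
	printf("alive after 31 s: %d\n", entry_alive(30, e));
	return 0;
}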
+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_req_iptree *req =
-+ (struct ip_set_req_iptree *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptree)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iptree),
-+ size);
-+ return -EINVAL;
-+ }
-+ DP("%u.%u.%u.%u %u", HIPQUAD(req->ip), req->timeout);
-+ return __addip(set, req->ip,
-+ req->timeout ? req->timeout : map->timeout,
-+ hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+
-+ return __addip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ map->timeout,
-+ hash_ip);
-+}
++UADT(iptree, add, req->timeout)
++KADT(iptree, add, ipaddr, 0)
+
+#define DELIP_WALK(map, elem, branch) do { \
+ if ((map)->tree[elem]) { \
@@ -4757,10 +5211,10 @@
+ return -EEXIST; \
+} while (0)
+
-+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++static inline int
++iptree_del(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptree *map = set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
@@ -4783,40 +5237,8 @@
+ return -EEXIST;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iptree *req =
-+ (struct ip_set_req_iptree *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptree)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iptree),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT(iptree, del)
++KADT(iptree, del, ipaddr)
+
+#define LOOP_WALK_BEGIN(map, i, branch) \
+ for (i = 0; i < 256; i++) { \
@@ -4826,10 +5248,11 @@
+
+#define LOOP_WALK_END }
+
-+static void ip_tree_gc(unsigned long ul_set)
++static void
++ip_tree_gc(unsigned long ul_set)
+{
-+ struct ip_set *set = (void *) ul_set;
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set *set = (struct ip_set *) ul_set;
++ struct ip_set_iptree *map = set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
@@ -4896,9 +5319,10 @@
+ add_timer(&map->gc);
+}
+
-+static inline void init_gc_timer(struct ip_set *set)
++static inline void
++init_gc_timer(struct ip_set *set)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptree *map = set->data;
+
+ /* Even if there is no timeout for the entries,
+ * we still have to call gc because delete
@@ -4911,22 +5335,22 @@
+ add_timer(&map->gc);
+}
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static int
++iptree_create(struct ip_set *set, const void *data, u_int32_t size)
+{
-+ struct ip_set_req_iptree_create *req =
-+ (struct ip_set_req_iptree_create *) data;
++ const struct ip_set_req_iptree_create *req = data;
+ struct ip_set_iptree *map;
+
+ if (size != sizeof(struct ip_set_req_iptree_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
++ ip_set_printk("data length wrong (want %zu, have %lu)",
+ sizeof(struct ip_set_req_iptree_create),
-+ size);
++ (unsigned long)size);
+ return -EINVAL;
+ }
+
+ map = kmalloc(sizeof(struct ip_set_iptree), GFP_KERNEL);
+ if (!map) {
-+ DP("out of memory for %d bytes",
++ DP("out of memory for %zu bytes",
+ sizeof(struct ip_set_iptree));
+ return -ENOMEM;
+ }
@@ -4940,7 +5364,8 @@
+ return 0;
+}
+
-+static void __flush(struct ip_set_iptree *map)
++static inline void
++__flush(struct ip_set_iptree *map)
+{
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
@@ -4959,9 +5384,10 @@
+ map->elements = 0;
+}
+
-+static void destroy(struct ip_set *set)
++static void
++iptree_destroy(struct ip_set *set)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptree *map = set->data;
+
+ /* gc might be running */
+ while (!del_timer(&map->gc))
@@ -4971,9 +5397,10 @@
+ set->data = NULL;
+}
+
-+static void flush(struct ip_set *set)
++static void
++iptree_flush(struct ip_set *set)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptree *map = set->data;
+ unsigned int timeout = map->timeout;
+
+ /* gc might be running */
@@ -4986,18 +5413,19 @@
+ init_gc_timer(set);
+}
+
-+static void list_header(const struct ip_set *set, void *data)
++static void
++iptree_list_header(const struct ip_set *set, void *data)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_req_iptree_create *header =
-+ (struct ip_set_req_iptree_create *) data;
++ const struct ip_set_iptree *map = set->data;
++ struct ip_set_req_iptree_create *header = data;
+
+ header->timeout = map->timeout;
+}
+
-+static int list_members_size(const struct ip_set *set)
++static int
++iptree_list_members_size(const struct ip_set *set)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ const struct ip_set_iptree *map = set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
@@ -5020,9 +5448,10 @@
+ return (count * sizeof(struct ip_set_req_iptree));
+}
+
-+static void list_members(const struct ip_set *set, void *data)
++static void
++iptree_list_members(const struct ip_set *set, void *data)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ const struct ip_set_iptree *map = set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
@@ -5036,9 +5465,9 @@
+ for (d = 0; d < 256; d++) {
+ if (dtree->expires[d]
+ && (!map->timeout || time_after(dtree->expires[d], jiffies))) {
-+ entry = (struct ip_set_req_iptree *)(data + offset);
++ entry = data + offset;
+ entry->ip = ((a << 24) | (b << 16) | (c << 8) | d);
-+ entry->timeout = !map->timeout ? 0
++ entry->timeout = !map->timeout ? 0
+ : (dtree->expires[d] - jiffies)/HZ;
+ offset += sizeof(struct ip_set_req_iptree);
+ }
@@ -5048,26 +5477,7 @@
+ LOOP_WALK_END;
+}
+
-+static struct ip_set_type ip_set_iptree = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_iptree),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_iptree_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_TYPE(iptree, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -5079,29 +5489,15 @@
+{
+ int ret;
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ branch_cachep = kmem_cache_create("ip_set_iptreeb",
-+ sizeof(struct ip_set_iptreeb),
-+ 0, 0, NULL);
-+#else
-+ branch_cachep = kmem_cache_create("ip_set_iptreeb",
-+ sizeof(struct ip_set_iptreeb),
-+ 0, 0, NULL, NULL);
-+#endif
++ branch_cachep = KMEM_CACHE_CREATE("ip_set_iptreeb",
++ sizeof(struct ip_set_iptreeb));
+ if (!branch_cachep) {
+ printk(KERN_ERR "Unable to create ip_set_iptreeb slab cache\n");
+ ret = -ENOMEM;
+ goto out;
+ }
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ leaf_cachep = kmem_cache_create("ip_set_iptreed",
-+ sizeof(struct ip_set_iptreed),
-+ 0, 0, NULL);
-+#else
-+ leaf_cachep = kmem_cache_create("ip_set_iptreed",
-+ sizeof(struct ip_set_iptreed),
-+ 0, 0, NULL, NULL);
-+#endif
++ leaf_cachep = KMEM_CACHE_CREATE("ip_set_iptreed",
++ sizeof(struct ip_set_iptreed));
+ if (!leaf_cachep) {
+ printk(KERN_ERR "Unable to create ip_set_iptreed slab cache\n");
+ ret = -ENOMEM;
@@ -5130,7 +5526,7 @@
+module_exit(ip_set_iptree_fini);
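The version-dependent kmem_cache_create() calls removed above are replaced by a KMEM_CACHE_CREATE() wrapper and a __KMEM_CACHE_T__ type, presumably supplied by the reworked compat headers rather than by this hunk. Judging only from the code being removed, the wrapper would amount to something like the following sketch; this is an assumption, not the actual header contents.

#include <linux/version.h>

/* Compat shim consistent with the #if blocks removed above:
 * 2.6.21 renamed kmem_cache_t, 2.6.23 dropped the destructor argument. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
#define __KMEM_CACHE_T__	struct kmem_cache
#else
#define __KMEM_CACHE_T__	kmem_cache_t
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
#define KMEM_CACHE_CREATE(name, size)	\
	kmem_cache_create(name, size, 0, 0, NULL)
#else
#define KMEM_CACHE_CREATE(name, size)	\
	kmem_cache_create(name, size, 0, 0, NULL, NULL)
#endif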
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_iptreemap.c
-@@ -0,0 +1,829 @@
+@@ -0,0 +1,708 @@
+/* Copyright (C) 2007 Sven Wegener <sven.wegener@stealer.net>
+ *
+ * This program is free software; you can redistribute it and/or modify it
@@ -5139,38 +5535,33 @@
+ */
+
+/* This modules implements the iptreemap ipset type. It uses bitmaps to
-+ * represent every single IPv4 address as a single bit. The bitmaps are managed
-+ * in a tree structure, where the first three octets of an addresses are used
-+ * as an index to find the bitmap and the last octet is used as the bit number.
++ * represent every single IPv4 address as a bit. The bitmaps are managed in a
++ * tree structure, where the first three octets of an address are used as an
++ * index to find the bitmap and the last octet is used as the bit number.
+ */
+
-+#include <linux/version.h>
++#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
++#include <linux/timer.h>
+
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
+#include <linux/netfilter_ipv4/ip_set_iptreemap.h>
+
+#define IPTREEMAP_DEFAULT_GC_TIME (5 * 60)
+#define IPTREEMAP_DESTROY_SLEEP (100)
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+static struct kmem_cache *cachep_b;
-+static struct kmem_cache *cachep_c;
-+static struct kmem_cache *cachep_d;
-+#else
-+static kmem_cache_t *cachep_b;
-+static kmem_cache_t *cachep_c;
-+static kmem_cache_t *cachep_d;
-+#endif
++static __KMEM_CACHE_T__ *cachep_b;
++static __KMEM_CACHE_T__ *cachep_c;
++static __KMEM_CACHE_T__ *cachep_d;
+
+static struct ip_set_iptreemap_d *fullbitmap_d;
+static struct ip_set_iptreemap_c *fullbitmap_c;
@@ -5319,9 +5710,6 @@
+#define LOOP_WALK_END_COUNT() \
+ }
+
-+#define MIN(a, b) (a < b ? a : b)
-+#define MAX(a, b) (a > b ? a : b)
-+
+#define GETVALUE1(a, a1, b1, r) \
+ (a == a1 ? b1 : r)
+
@@ -5391,9 +5779,9 @@
+}
+
+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++iptreemap_test(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5410,40 +5798,13 @@
+ return !!test_bit(d, (void *) dtree->bitmap);
+}
+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
++#define KADT_CONDITION
+
-+ if (size != sizeof(struct ip_set_req_iptreemap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
-+ return -EINVAL;
-+ }
-+
-+ return __testip(set, req->start, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
-+{
-+ int res;
-+
-+ res = __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+
-+ return (res < 0 ? 0 : res);
-+}
++UADT(iptreemap, test)
++KADT(iptreemap, test, ipaddr)
+
+static inline int
-+__addip_single(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++__addip_single(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
+ struct ip_set_iptreemap_b *btree;
@@ -5459,18 +5820,19 @@
+ ADDIP_WALK(btree, b, ctree, struct ip_set_iptreemap_c, cachep_c, fullbitmap_c);
+ ADDIP_WALK(ctree, c, dtree, struct ip_set_iptreemap_d, cachep_d, fullbitmap_d);
+
-+ if (test_and_set_bit(d, (void *) dtree->bitmap))
++ if (__test_and_set_bit(d, (void *) dtree->bitmap))
+ return -EEXIST;
+
-+ set_bit(b, (void *) btree->dirty);
++ __set_bit(b, (void *) btree->dirty);
+
+ return 0;
+}
+
+static inline int
-+__addip_range(struct ip_set *set, ip_set_ip_t start, ip_set_ip_t end, ip_set_ip_t *hash_ip)
++iptreemap_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t start, ip_set_ip_t end)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5479,7 +5841,7 @@
+ unsigned char a2, b2, c2, d2;
+
+ if (start == end)
-+ return __addip_single(set, start, hash_ip);
++ return __addip_single(set, hash_ip, start);
+
+ *hash_ip = start;
+
@@ -5491,8 +5853,8 @@
+ ADDIP_RANGE_LOOP(btree, b, GETVALUE1(a, a1, b1, 0), GETVALUE1(a, a2, b2, 255), CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2), ctree, fullbitmap_c, cachep_c, free_c) {
+ ADDIP_RANGE_LOOP(ctree, c, GETVALUE2(a, b, a1, b1, c1, 0), GETVALUE2(a, b, a2, b2, c2, 255), CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2), dtree, fullbitmap_d, cachep_d, free_d) {
+ for (d = GETVALUE3(a, b, c, a1, b1, c1, d1, 0); d <= GETVALUE3(a, b, c, a2, b2, c2, d2, 255); d++)
-+ set_bit(d, (void *) dtree->bitmap);
-+ set_bit(b, (void *) btree->dirty);
++ __set_bit(d, (void *) dtree->bitmap);
++ __set_bit(b, (void *) btree->dirty);
+ } ADDIP_RANGE_LOOP_END();
+ } ADDIP_RANGE_LOOP_END();
+ } ADDIP_RANGE_LOOP_END();
@@ -5500,39 +5862,14 @@
+ return 0;
+}
+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptreemap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
-+ return -EINVAL;
-+ }
-+
-+ return __addip_range(set, MIN(req->start, req->end), MAX(req->start, req->end), hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
-+{
-+
-+ return __addip_single(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT0(iptreemap, add, min(req->ip, req->end), max(req->ip, req->end))
++KADT(iptreemap, add, ipaddr, ip)
+
+static inline int
-+__delip_single(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip, unsigned int __nocast flags)
++__delip_single(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, gfp_t flags)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5546,18 +5883,19 @@
+ DELIP_WALK(btree, b, ctree, cachep_c, fullbitmap_c, flags);
+ DELIP_WALK(ctree, c, dtree, cachep_d, fullbitmap_d, flags);
+
-+ if (!test_and_clear_bit(d, (void *) dtree->bitmap))
++ if (!__test_and_clear_bit(d, (void *) dtree->bitmap))
+ return -EEXIST;
+
-+ set_bit(b, (void *) btree->dirty);
++ __set_bit(b, (void *) btree->dirty);
+
+ return 0;
+}
+
+static inline int
-+__delip_range(struct ip_set *set, ip_set_ip_t start, ip_set_ip_t end, ip_set_ip_t *hash_ip, unsigned int __nocast flags)
++iptreemap_del(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t start, ip_set_ip_t end, gfp_t flags)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5566,7 +5904,7 @@
+ unsigned char a2, b2, c2, d2;
+
+ if (start == end)
-+ return __delip_single(set, start, hash_ip, flags);
++ return __delip_single(set, hash_ip, start, flags);
+
+ *hash_ip = start;
+
@@ -5578,8 +5916,8 @@
+ DELIP_RANGE_LOOP(btree, b, GETVALUE1(a, a1, b1, 0), GETVALUE1(a, a2, b2, 255), CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2), ctree, fullbitmap_c, cachep_c, free_c, flags) {
+ DELIP_RANGE_LOOP(ctree, c, GETVALUE2(a, b, a1, b1, c1, 0), GETVALUE2(a, b, a2, b2, c2, 255), CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2), dtree, fullbitmap_d, cachep_d, free_d, flags) {
+ for (d = GETVALUE3(a, b, c, a1, b1, c1, d1, 0); d <= GETVALUE3(a, b, c, a2, b2, c2, d2, 255); d++)
-+ clear_bit(d, (void *) dtree->bitmap);
-+ set_bit(b, (void *) btree->dirty);
++ __clear_bit(d, (void *) dtree->bitmap);
++ __set_bit(b, (void *) btree->dirty);
+ } DELIP_RANGE_LOOP_END();
+ } DELIP_RANGE_LOOP_END();
+ } DELIP_RANGE_LOOP_END();
@@ -5587,34 +5925,8 @@
+ return 0;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptreemap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
-+ return -EINVAL;
-+ }
-+
-+ return __delip_range(set, MIN(req->start, req->end), MAX(req->start, req->end), hash_ip, GFP_KERNEL);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
-+{
-+ return __delip_single(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip,
-+ GFP_ATOMIC);
-+}
++UADT0(iptreemap, del, min(req->ip, req->end), max(req->ip, req->end), GFP_KERNEL)
++KADT(iptreemap, del, ipaddr, ip, GFP_ATOMIC)
+
+/* Check the status of the bitmap
+ * -1 == all bits cleared
@@ -5638,7 +5950,7 @@
+gc(unsigned long addr)
+{
+ struct ip_set *set = (struct ip_set *) addr;
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5649,7 +5961,7 @@
+
+ LOOP_WALK_BEGIN_GC(map, a, btree, fullbitmap_b, cachep_b, i) {
+ LOOP_WALK_BEGIN_GC(btree, b, ctree, fullbitmap_c, cachep_c, j) {
-+ if (!test_and_clear_bit(b, (void *) btree->dirty))
++ if (!__test_and_clear_bit(b, (void *) btree->dirty))
+ continue;
+ LOOP_WALK_BEGIN_GC(ctree, c, dtree, fullbitmap_d, cachep_d, k) {
+ switch (bitmap_status(dtree)) {
@@ -5677,7 +5989,7 @@
+static inline void
+init_gc_timer(struct ip_set *set)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+
+ init_timer(&map->gc);
+ map->gc.data = (unsigned long) set;
@@ -5686,16 +5998,12 @@
+ add_timer(&map->gc);
+}
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static int
++iptreemap_create(struct ip_set *set, const void *data, u_int32_t size)
+{
-+ struct ip_set_req_iptreemap_create *req = (struct ip_set_req_iptreemap_create *) data;
++ const struct ip_set_req_iptreemap_create *req = data;
+ struct ip_set_iptreemap *map;
+
-+ if (size != sizeof(struct ip_set_req_iptreemap_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap_create), size);
-+ return -EINVAL;
-+ }
-+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
@@ -5708,7 +6016,8 @@
+ return 0;
+}
+
-+static inline void __flush(struct ip_set_iptreemap *map)
++static inline void
++__flush(struct ip_set_iptreemap *map)
+{
+ struct ip_set_iptreemap_b *btree;
+ unsigned int a;
@@ -5719,9 +6028,10 @@
+ LOOP_WALK_END();
+}
+
-+static void destroy(struct ip_set *set)
++static void
++iptreemap_destroy(struct ip_set *set)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+
+ while (!del_timer(&map->gc))
+ msleep(IPTREEMAP_DESTROY_SLEEP);
@@ -5732,9 +6042,10 @@
+ set->data = NULL;
+}
+
-+static void flush(struct ip_set *set)
++static void
++iptreemap_flush(struct ip_set *set)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+
+ while (!del_timer(&map->gc))
+ msleep(IPTREEMAP_DESTROY_SLEEP);
@@ -5746,17 +6057,19 @@
+ init_gc_timer(set);
+}
+
-+static void list_header(const struct ip_set *set, void *data)
++static void
++iptreemap_list_header(const struct ip_set *set, void *data)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
-+ struct ip_set_req_iptreemap_create *header = (struct ip_set_req_iptreemap_create *) data;
++ struct ip_set_iptreemap *map = set->data;
++ struct ip_set_req_iptreemap_create *header = data;
+
+ header->gc_interval = map->gc_interval;
+}
+
-+static int list_members_size(const struct ip_set *set)
++static int
++iptreemap_list_members_size(const struct ip_set *set)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5783,19 +6096,21 @@
+ return (count * sizeof(struct ip_set_req_iptreemap));
+}
+
-+static inline size_t add_member(void *data, size_t offset, ip_set_ip_t start, ip_set_ip_t end)
++static inline u_int32_t
++add_member(void *data, size_t offset, ip_set_ip_t start, ip_set_ip_t end)
+{
-+ struct ip_set_req_iptreemap *entry = (struct ip_set_req_iptreemap *) (data + offset);
++ struct ip_set_req_iptreemap *entry = data + offset;
+
-+ entry->start = start;
++ entry->ip = start;
+ entry->end = end;
+
+ return sizeof(*entry);
+}
+
-+static void list_members(const struct ip_set *set, void *data)
++static void
++iptreemap_list_members(const struct ip_set *set, void *data)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5830,26 +6145,7 @@
+ add_member(data, offset, start, end);
+}
+
-+static struct ip_set_type ip_set_iptreemap = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = create,
-+ .destroy = destroy,
-+ .flush = flush,
-+ .reqsize = sizeof(struct ip_set_req_iptreemap),
-+ .addip = addip,
-+ .addip_kernel = addip_kernel,
-+ .delip = delip,
-+ .delip_kernel = delip_kernel,
-+ .testip = testip,
-+ .testip_kernel = testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_iptreemap_create),
-+ .list_header = list_header,
-+ .list_members_size = list_members_size,
-+ .list_members = list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_TYPE(iptreemap, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sven Wegener <sven.wegener@stealer.net>");
@@ -5860,43 +6156,22 @@
+ int ret = -ENOMEM;
+ int a;
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ cachep_b = kmem_cache_create("ip_set_iptreemap_b",
-+ sizeof(struct ip_set_iptreemap_b),
-+ 0, 0, NULL);
-+#else
-+ cachep_b = kmem_cache_create("ip_set_iptreemap_b",
-+ sizeof(struct ip_set_iptreemap_b),
-+ 0, 0, NULL, NULL);
-+#endif
++ cachep_b = KMEM_CACHE_CREATE("ip_set_iptreemap_b",
++ sizeof(struct ip_set_iptreemap_b));
+ if (!cachep_b) {
+ ip_set_printk("Unable to create ip_set_iptreemap_b slab cache");
+ goto out;
+ }
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ cachep_c = kmem_cache_create("ip_set_iptreemap_c",
-+ sizeof(struct ip_set_iptreemap_c),
-+ 0, 0, NULL);
-+#else
-+ cachep_c = kmem_cache_create("ip_set_iptreemap_c",
-+ sizeof(struct ip_set_iptreemap_c),
-+ 0, 0, NULL, NULL);
-+#endif
++ cachep_c = KMEM_CACHE_CREATE("ip_set_iptreemap_c",
++ sizeof(struct ip_set_iptreemap_c));
+ if (!cachep_c) {
+ ip_set_printk("Unable to create ip_set_iptreemap_c slab cache");
+ goto outb;
+ }
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ cachep_d = kmem_cache_create("ip_set_iptreemap_d",
-+ sizeof(struct ip_set_iptreemap_d),
-+ 0, 0, NULL);
-+#else
-+ cachep_d = kmem_cache_create("ip_set_iptreemap_d",
-+ sizeof(struct ip_set_iptreemap_d),
-+ 0, 0, NULL, NULL);
-+#endif
++ cachep_d = KMEM_CACHE_CREATE("ip_set_iptreemap_d",
++ sizeof(struct ip_set_iptreemap_d));
+ if (!cachep_d) {
+ ip_set_printk("Unable to create ip_set_iptreemap_d slab cache");
+ goto outc;
@@ -5962,15 +6237,15 @@
+module_exit(ip_set_iptreemap_fini);
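iptreemap's garbage collector only descends into subtrees whose dirty bit it can clear, then classifies each 256-bit leaf bitmap as empty, full, or mixed so that empty leaves can be freed and full ones, judging by the fullbitmap_* sentinels set up at init time, replaced by a shared all-ones page (the switch body itself is outside this hunk). A userspace sketch of that classification step over one leaf:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define LEAF_BITS  256
#define LEAF_WORDS (LEAF_BITS / 64)

struct leaf { uint64_t bitmap[LEAF_WORDS]; };

/* -1: all bits cleared, 1: all bits set, 0: anything else
 * (same convention as bitmap_status() in the module above) */
static int leaf_status(const struct leaf *d)
{
	int zeroes = 0, ones = 0;

	for (int i = 0; i < LEAF_WORDS; i++) {
		if (d->bitmap[i] == 0)
			zeroes++;
		else if (d->bitmap[i] == UINT64_MAX)
			ones++;
	}
	if (zeroes == LEAF_WORDS)
		return -1;
	if (ones == LEAF_WORDS)
		return 1;
	return 0;
}

int main(void)
{
	struct leaf d;

	memset(&d, 0, sizeof(d));
	printf("%d\n", leaf_status(&d));        /* -1: free the leaf        */
	d.bitmap[0] = 1;
	printf("%d\n", leaf_status(&d));        /*  0: keep as is           */
	memset(&d, 0xff, sizeof(d));
	printf("%d\n", leaf_status(&d));        /*  1: swap in fullbitmap_d */
	return 0;
}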
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_macipmap.c
-@@ -0,0 +1,375 @@
+@@ -0,0 +1,164 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Martin Josefsson <gandalf@wlug.westbo.se>
-+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ * Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
++ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the macipmap type */
@@ -5978,32 +6253,21 @@
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
+#include <linux/if_ether.h>
-+#include <linux/vmalloc.h>
+
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
+#include <linux/netfilter_ipv4/ip_set_macipmap.h>
+
+static int
-+testip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
++macipmap_utest(struct ip_set *set, const void *data, u_int32_t size,
++ ip_set_ip_t *hash_ip)
+{
-+ struct ip_set_macipmap *map = (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table = (struct ip_set_macip *) map->members;
-+ struct ip_set_req_macipmap *req = (struct ip_set_req_macipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_macipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_macipmap),
-+ size);
-+ return -EINVAL;
-+ }
++ const struct ip_set_macipmap *map = set->data;
++ const struct ip_set_macip *table = map->members;
++ const struct ip_set_req_macipmap *req = data;
+
+ if (req->ip < map->first_ip || req->ip > map->last_ip)
+ return -ERANGE;
@@ -6011,8 +6275,7 @@
+ *hash_ip = req->ip;
+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
+ set->name, HIPQUAD(req->ip), HIPQUAD(*hash_ip));
-+ if (test_bit(IPSET_MACIP_ISSET,
-+ (void *) &table[req->ip - map->first_ip].flags)) {
++ if (table[req->ip - map->first_ip].match) {
+ return (memcmp(req->ethernet,
+ &table[req->ip - map->first_ip].ethernet,
+ ETH_ALEN) == 0);
@@ -6022,26 +6285,17 @@
+}
+
+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table =
-+ (struct ip_set_macip *) map->members;
++macipmap_ktest(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ const struct ip_set_macipmap *map = set->data;
++ const struct ip_set_macip *table = map->members;
+ ip_set_ip_t ip;
+
-+ ip = ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr);
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr);
-+#endif
++ ip = ipaddr(skb, flags[index]);
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return 0;
@@ -6049,17 +6303,11 @@
+ *hash_ip = ip;
+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
+ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
-+ if (test_bit(IPSET_MACIP_ISSET,
-+ (void *) &table[ip - map->first_ip].flags)) {
++ if (table[ip - map->first_ip].match) {
+ /* Is mac pointer valid?
+ * If so, compare... */
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
+ return (skb_mac_header(skb) >= skb->head
+ && (skb_mac_header(skb) + ETH_HLEN) <= skb->data
-+#else
-+ return (skb->mac.raw >= skb->head
-+ && (skb->mac.raw + ETH_HLEN) <= skb->data
-+#endif
+ && (memcmp(eth_hdr(skb)->h_source,
+ &table[ip - map->first_ip].ethernet,
+ ETH_ALEN) == 0));
@@ -6070,324 +6318,132 @@
+
+/* returns 0 on success */
+static inline int
-+__addip(struct ip_set *set,
-+ ip_set_ip_t ip, unsigned char *ethernet, ip_set_ip_t *hash_ip)
++macipmap_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, const unsigned char *ethernet)
+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table =
-+ (struct ip_set_macip *) map->members;
++ struct ip_set_macipmap *map = set->data;
++ struct ip_set_macip *table = map->members;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
-+ if (test_and_set_bit(IPSET_MACIP_ISSET,
-+ (void *) &table[ip - map->first_ip].flags))
++ if (table[ip - map->first_ip].match)
+ return -EEXIST;
+
+ *hash_ip = ip;
+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
+ memcpy(&table[ip - map->first_ip].ethernet, ethernet, ETH_ALEN);
++ table[ip - map->first_ip].match = IPSET_MACIP_ISSET;
+ return 0;
+}
+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_macipmap *req =
-+ (struct ip_set_req_macipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_macipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_macipmap),
-+ size);
++#define KADT_CONDITION \
++ if (!(skb_mac_header(skb) >= skb->head \
++ && (skb_mac_header(skb) + ETH_HLEN) <= skb->data))\
+ return -EINVAL;
-+ }
-+ return __addip(set, req->ip, req->ethernet, hash_ip);
-+}
+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ ip_set_ip_t ip;
-+
-+ ip = ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr);
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr);
-+#endif
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ if (!(skb_mac_header(skb) >= skb->head
-+ && (skb_mac_header(skb) + ETH_HLEN) <= skb->data))
-+#else
-+ if (!(skb->mac.raw >= skb->head
-+ && (skb->mac.raw + ETH_HLEN) <= skb->data))
-+#endif
-+ return -EINVAL;
-+
-+ return __addip(set, ip, eth_hdr(skb)->h_source, hash_ip);
-+}
++UADT(macipmap, add, req->ethernet)
++KADT(macipmap, add, ipaddr, eth_hdr(skb)->h_source)
+
+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++macipmap_del(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table =
-+ (struct ip_set_macip *) map->members;
++ struct ip_set_macipmap *map = set->data;
++ struct ip_set_macip *table = map->members;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
-+ if (!test_and_clear_bit(IPSET_MACIP_ISSET,
-+ (void *)&table[ip - map->first_ip].flags))
++ if (!table[ip - map->first_ip].match)
+ return -EEXIST;
+
+ *hash_ip = ip;
++ table[ip - map->first_ip].match = 0;
+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
+ return 0;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_macipmap *req =
-+ (struct ip_set_req_macipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_macipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_macipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, hash_ip);
-+}
++#undef KADT_CONDITION
++#define KADT_CONDITION
+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT(macipmap, del)
++KADT(macipmap, del, ipaddr)
+
-+static inline size_t members_size(ip_set_id_t from, ip_set_id_t to)
-+{
-+ return (size_t)((to - from + 1) * sizeof(struct ip_set_macip));
-+}
-+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static inline int
++__macipmap_create(const struct ip_set_req_macipmap_create *req,
++ struct ip_set_macipmap *map)
+{
-+ int newbytes;
-+ struct ip_set_req_macipmap_create *req =
-+ (struct ip_set_req_macipmap_create *) data;
-+ struct ip_set_macipmap *map;
-+
-+ if (size != sizeof(struct ip_set_req_macipmap_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_macipmap_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ DP("from %u.%u.%u.%u to %u.%u.%u.%u",
-+ HIPQUAD(req->from), HIPQUAD(req->to));
-+
-+ if (req->from > req->to) {
-+ DP("bad ip range");
-+ return -ENOEXEC;
-+ }
-+
+ if (req->to - req->from > MAX_RANGE) {
-+ ip_set_printk("range too big (max %d addresses)",
-+ MAX_RANGE+1);
++ ip_set_printk("range too big, %d elements (max %d)",
++ req->to - req->from + 1, MAX_RANGE+1);
+ return -ENOEXEC;
+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_macipmap), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_macipmap));
-+ return -ENOMEM;
-+ }
+ map->flags = req->flags;
-+ map->first_ip = req->from;
-+ map->last_ip = req->to;
-+ newbytes = members_size(map->first_ip, map->last_ip);
-+ map->members = ip_set_malloc(newbytes);
-+ DP("members: %u %p", newbytes, map->members);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", newbytes);
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+ memset(map->members, 0, newbytes);
-+
-+ set->data = map;
-+ return 0;
-+}
-+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+
-+ ip_set_free(map->members, members_size(map->first_ip, map->last_ip));
-+ kfree(map);
-+
-+ set->data = NULL;
++ return (req->to - req->from + 1) * sizeof(struct ip_set_macip);
+}
+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ memset(map->members, 0, members_size(map->first_ip, map->last_ip));
-+}
++BITMAP_CREATE(macipmap)
++BITMAP_DESTROY(macipmap)
++BITMAP_FLUSH(macipmap)
+
-+static void list_header(const struct ip_set *set, void *data)
++static inline void
++__macipmap_list_header(const struct ip_set_macipmap *map,
++ struct ip_set_req_macipmap_create *header)
+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ struct ip_set_req_macipmap_create *header =
-+ (struct ip_set_req_macipmap_create *) data;
-+
-+ DP("list_header %x %x %u", map->first_ip, map->last_ip,
-+ map->flags);
-+
-+ header->from = map->first_ip;
-+ header->to = map->last_ip;
+ header->flags = map->flags;
+}
+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+
-+ DP("%u", members_size(map->first_ip, map->last_ip));
-+ return members_size(map->first_ip, map->last_ip);
-+}
-+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+
-+ int bytes = members_size(map->first_ip, map->last_ip);
-+
-+ DP("members: %u %p", bytes, map->members);
-+ memcpy(data, map->members, bytes);
-+}
++BITMAP_LIST_HEADER(macipmap)
++BITMAP_LIST_MEMBERS_SIZE(macipmap)
++BITMAP_LIST_MEMBERS(macipmap)
+
-+static struct ip_set_type ip_set_macipmap = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_macipmap),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_macipmap_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_TYPE(macipmap, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("macipmap type of IP sets");
+
-+static int __init ip_set_macipmap_init(void)
-+{
-+ init_max_malloc_size();
-+ return ip_set_register_set_type(&ip_set_macipmap);
-+}
-+
-+static void __exit ip_set_macipmap_fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_macipmap);
-+}
-+
-+module_init(ip_set_macipmap_init);
-+module_exit(ip_set_macipmap_fini);
++REGISTER_MODULE(macipmap)
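macipmap stores one fixed-size record per address in the covered range, indexed simply by ip - first_ip; the rewrite above drops the old ISSET bit twiddling in favour of a plain match field next to the stored MAC. A minimal userspace sketch of that layout (ETH_ALEN and the memcmp() on the MAC are the only kernel notions reused; return codes and the match value are simplified):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define ETH_ALEN 6

struct macip {
	unsigned char ethernet[ETH_ALEN];
	unsigned char match;                    /* 0 == unused entry */
};

struct macipmap {
	uint32_t first_ip, last_ip;
	struct macip *members;                  /* last_ip - first_ip + 1 entries */
};

static int macip_add(struct macipmap *m, uint32_t ip, const unsigned char *mac)
{
	if (ip < m->first_ip || ip > m->last_ip)
		return -1;                      /* -ERANGE in the module */
	if (m->members[ip - m->first_ip].match)
		return -2;                      /* -EEXIST in the module */
	memcpy(m->members[ip - m->first_ip].ethernet, mac, ETH_ALEN);
	m->members[ip - m->first_ip].match = 1;
	return 0;
}

static int macip_test(const struct macipmap *m, uint32_t ip, const unsigned char *mac)
{
	if (ip < m->first_ip || ip > m->last_ip)
		return 0;
	return m->members[ip - m->first_ip].match
	       && memcmp(m->members[ip - m->first_ip].ethernet, mac, ETH_ALEN) == 0;
}

int main(void)
{
	static struct macip slots[256];
	struct macipmap m = { 0xc0a80100, 0xc0a801ff, slots };  /* 192.168.1.0-255 */
	const unsigned char mac[ETH_ALEN] = { 0, 0x11, 0x22, 0x33, 0x44, 0x55 };

	macip_add(&m, 0xc0a80117, mac);
	printf("%d\n", macip_test(&m, 0xc0a80117, mac));        /* 1 */
	return 0;
}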
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_nethash.c
-@@ -0,0 +1,497 @@
-+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+@@ -0,0 +1,225 @@
++/* Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
++ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing a cidr nethash set */
+
+#include <linux/module.h>
++#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+#include <linux/jhash.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
-+#include <linux/vmalloc.h>
+#include <linux/random.h>
+
+#include <net/ip.h>
+
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
+#include <linux/netfilter_ipv4/ip_set_nethash.h>
+
+static int limit = MAX_RANGE;
+
+static inline __u32
-+jhash_ip(const struct ip_set_nethash *map, uint16_t i, ip_set_ip_t ip)
-+{
-+ return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
-+}
-+
-+static inline __u32
-+hash_id_cidr(struct ip_set_nethash *map,
-+ ip_set_ip_t ip,
-+ unsigned char cidr,
-+ ip_set_ip_t *hash_ip)
++nethash_id_cidr(const struct ip_set_nethash *map,
++ ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip,
++ uint8_t cidr)
+{
+ __u32 id;
+ u_int16_t i;
+ ip_set_ip_t *elem;
+
-+ *hash_ip = pack(ip, cidr);
++ *hash_ip = pack_ip_cidr(ip, cidr);
++ if (!*hash_ip)
++ return MAX_RANGE;
+
+ for (i = 0; i < map->probes; i++) {
+ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
@@ -6395,19 +6451,20 @@
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
+ if (*elem == *hash_ip)
+ return id;
++ /* No shortcut - there can be deleted entries. */
+ }
+ return UINT_MAX;
+}
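Each probe in nethash_id_cidr() hashes with its own initval, so the probes behave like independent hash functions; the old, pre-macro create code shown earlier for ipporthash filled one random 32-bit initval per probe with get_random_bytes(), and the HASH_CREATE macro presumably keeps doing the same (an assumption, since the macro body is not part of this hunk). The effect is that two sets with identical members still get different bucket layouts. A small userspace sketch of that per-set seeding:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

struct toy_set {
	unsigned  probes;
	unsigned  hashsize;
	uint32_t  initval[4];
};

static uint32_t toy_jhash(uint32_t ip, uint32_t initval)   /* stand-in for jhash_1word() */
{
	uint32_t h = ip ^ initval;
	h *= 0x9e3779b1u; h ^= h >> 15; h *= 0x85ebca77u; h ^= h >> 13;
	return h;
}

static void toy_set_init(struct toy_set *set, unsigned probes, unsigned hashsize)
{
	set->probes = probes;
	set->hashsize = hashsize;
	for (unsigned i = 0; i < probes; i++)
		set->initval[i] = (uint32_t)rand();  /* get_random_bytes() in the kernel */
}

int main(void)
{
	struct toy_set set;

	srand((unsigned)time(NULL));
	toy_set_init(&set, 4, 1024);
	for (unsigned i = 0; i < set.probes; i++)
		printf("probe %u -> bucket %u\n", i,
		       toy_jhash(0xc0a80001, set.initval[i]) % set.hashsize);
	return 0;
}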
+
+static inline __u32
-+hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++nethash_id(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++ const struct ip_set_nethash *map = set->data;
+ __u32 id = UINT_MAX;
+ int i;
+
+ for (i = 0; i < 30 && map->cidr[i]; i++) {
-+ id = hash_id_cidr(map, ip, map->cidr[i], hash_ip);
++ id = nethash_id_cidr(map, hash_ip, ip, map->cidr[i]);
+ if (id != UINT_MAX)
+ break;
+ }
@@ -6415,409 +6472,156 @@
+}
+
+static inline int
-+__testip_cidr(struct ip_set *set, ip_set_ip_t ip, unsigned char cidr,
-+ ip_set_ip_t *hash_ip)
++nethash_test_cidr(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, uint8_t cidr)
+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++ const struct ip_set_nethash *map = set->data;
+
-+ return (ip && hash_id_cidr(map, ip, cidr, hash_ip) != UINT_MAX);
++ return (nethash_id_cidr(map, hash_ip, ip, cidr) != UINT_MAX);
+}
+
+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++nethash_test(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
++ return (nethash_id(set, hash_ip, ip) != UINT_MAX);
+}
+
+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
++nethash_utest(struct ip_set *set, const void *data, u_int32_t size,
++ ip_set_ip_t *hash_ip)
+{
-+ struct ip_set_req_nethash *req =
-+ (struct ip_set_req_nethash *) data;
++ const struct ip_set_req_nethash *req = data;
+
-+ if (size != sizeof(struct ip_set_req_nethash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_nethash),
-+ size);
++ if (req->cidr <= 0 || req->cidr > 32)
+ return -EINVAL;
-+ }
-+ return (req->cidr == 32 ? __testip(set, req->ip, hash_ip)
-+ : __testip_cidr(set, req->ip, req->cidr, hash_ip));
++ return (req->cidr == 32 ? nethash_test(set, hash_ip, req->ip)
++ : nethash_test_cidr(set, hash_ip, req->ip, req->cidr));
+}
+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++#define KADT_CONDITION
++
++KADT(nethash, test, ipaddr)
+
+static inline int
-+__addip_base(struct ip_set_nethash *map, ip_set_ip_t ip)
++__nethash_add(struct ip_set_nethash *map, ip_set_ip_t *ip)
+{
+ __u32 probe;
+ u_int16_t i;
-+ ip_set_ip_t *elem;
++ ip_set_ip_t *elem, *slot = NULL;
+
+ for (i = 0; i < map->probes; i++) {
-+ probe = jhash_ip(map, i, ip) % map->hashsize;
++ probe = jhash_ip(map, i, *ip) % map->hashsize;
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
-+ if (*elem == ip)
++ if (*elem == *ip)
+ return -EEXIST;
-+ if (!*elem) {
-+ *elem = ip;
-+ map->elements++;
-+ return 0;
-+ }
++ if (!(slot || *elem))
++ slot = elem;
++ /* There can be deleted entries, must check all slots */
++ }
++ if (slot) {
++ *slot = *ip;
++ map->elements++;
++ return 0;
+ }
+ /* Trigger rehashing */
+ return -EAGAIN;
+}
+
+static inline int
-+__addip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
-+ ip_set_ip_t *hash_ip)
++nethash_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, uint8_t cidr)
+{
-+ if (!ip || map->elements >= limit)
-+ return -ERANGE;
-+
-+ *hash_ip = pack(ip, cidr);
-+ DP("%u.%u.%u.%u/%u, %u.%u.%u.%u", HIPQUAD(ip), cidr, HIPQUAD(*hash_ip));
++ struct ip_set_nethash *map = set->data;
++ int ret;
+
-+ return __addip_base(map, *hash_ip);
-+}
++ if (map->elements >= limit || map->nets[cidr-1] == UINT16_MAX)
++ return -ERANGE;
++ if (cidr <= 0 || cidr >= 32)
++ return -EINVAL;
+
-+static void
-+update_cidr_sizes(struct ip_set_nethash *map, unsigned char cidr)
-+{
-+ unsigned char next;
-+ int i;
++ *hash_ip = pack_ip_cidr(ip, cidr);
++ DP("%u.%u.%u.%u/%u, %u.%u.%u.%u", HIPQUAD(ip), cidr, HIPQUAD(*hash_ip));
++ if (!*hash_ip)
++ return -ERANGE;
+
-+ for (i = 0; i < 30 && map->cidr[i]; i++) {
-+ if (map->cidr[i] == cidr) {
-+ return;
-+ } else if (map->cidr[i] < cidr) {
-+ next = map->cidr[i];
-+ map->cidr[i] = cidr;
-+ cidr = next;
-+ }
-+ }
-+ if (i < 30)
-+ map->cidr[i] = cidr;
-+}
-+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_nethash *req =
-+ (struct ip_set_req_nethash *) data;
-+ int ret;
-+
-+ if (size != sizeof(struct ip_set_req_nethash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_nethash),
-+ size);
-+ return -EINVAL;
++ ret = __nethash_add(map, hash_ip);
++ if (ret == 0) {
++ if (!map->nets[cidr-1]++)
++ add_cidr_size(map->cidr, cidr);
++ map->elements++;
+ }
-+ ret = __addip((struct ip_set_nethash *) set->data,
-+ req->ip, req->cidr, hash_ip);
-+
-+ if (ret == 0)
-+ update_cidr_sizes((struct ip_set_nethash *) set->data,
-+ req->cidr);
+
+ return ret;
+}
+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ int ret = -ERANGE;
-+ ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr);
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr);
-+#endif
-+
-+ if (map->cidr[0])
-+ ret = __addip(map, ip, map->cidr[0], hash_ip);
-+
-+ return ret;
-+}
++#undef KADT_CONDITION
++#define KADT_CONDITION \
++ struct ip_set_nethash *map = set->data; \
++ uint8_t cidr = map->cidr[0] ? map->cidr[0] : 31;
+
-+static int retry(struct ip_set *set)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ ip_set_ip_t *elem;
-+ void *members;
-+ u_int32_t i, hashsize = map->hashsize;
-+ int res;
-+ struct ip_set_nethash *tmp;
-+
-+ if (map->resize == 0)
-+ return -ERANGE;
++UADT(nethash, add, req->cidr)
++KADT(nethash, add, ipaddr, cidr)
+
-+ again:
-+ res = 0;
-+
-+ /* Calculate new parameters */
-+ hashsize += (hashsize * map->resize)/100;
-+ if (hashsize == map->hashsize)
-+ hashsize++;
-+
-+ ip_set_printk("rehashing of set %s triggered: "
-+ "hashsize grows from %u to %u",
-+ set->name, map->hashsize, hashsize);
-+
-+ tmp = kmalloc(sizeof(struct ip_set_nethash)
-+ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
-+ if (!tmp) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_nethash)
-+ + map->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
-+ if (!tmp->members) {
-+ DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
-+ kfree(tmp);
-+ return -ENOMEM;
-+ }
-+ tmp->hashsize = hashsize;
-+ tmp->elements = 0;
-+ tmp->probes = map->probes;
-+ tmp->resize = map->resize;
-+ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
-+ memcpy(tmp->cidr, map->cidr, 30 * sizeof(unsigned char));
-+
-+ write_lock_bh(&set->lock);
-+ map = (struct ip_set_nethash *) set->data; /* Play safe */
-+ for (i = 0; i < map->hashsize && res == 0; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ if (*elem)
-+ res = __addip_base(tmp, *elem);
-+ }
-+ if (res) {
-+ /* Failure, try again */
-+ write_unlock_bh(&set->lock);
-+ harray_free(tmp->members);
-+ kfree(tmp);
-+ goto again;
-+ }
-+
-+ /* Success at resizing! */
-+ members = map->members;
-+
-+ map->hashsize = tmp->hashsize;
-+ map->members = tmp->members;
-+ write_unlock_bh(&set->lock);
-+
-+ harray_free(members);
-+ kfree(tmp);
-+
-+ return 0;
++static inline void
++__nethash_retry(struct ip_set_nethash *tmp, struct ip_set_nethash *map)
++{
++ memcpy(tmp->cidr, map->cidr, sizeof(tmp->cidr));
++ memcpy(tmp->nets, map->nets, sizeof(tmp->nets));
+}
+
++HASH_RETRY(nethash, ip_set_ip_t)
++
+static inline int
-+__delip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
-+ ip_set_ip_t *hash_ip)
++nethash_del(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, uint8_t cidr)
+{
++ struct ip_set_nethash *map = set->data;
+ ip_set_ip_t id, *elem;
+
-+ if (!ip)
-+ return -ERANGE;
++ if (cidr <= 0 || cidr >= 32)
++ return -EINVAL;
+
-+ id = hash_id_cidr(map, ip, cidr, hash_ip);
++ id = nethash_id_cidr(map, hash_ip, ip, cidr);
+ if (id == UINT_MAX)
+ return -EEXIST;
+
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
+ *elem = 0;
+ map->elements--;
++ if (!map->nets[cidr-1]--)
++ del_cidr_size(map->cidr, cidr);
+ return 0;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_nethash *req =
-+ (struct ip_set_req_nethash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_nethash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_nethash),
-+ size);
-+ return -EINVAL;
-+ }
-+ /* TODO: no garbage collection in map->cidr */
-+ return __delip((struct ip_set_nethash *) set->data,
-+ req->ip, req->cidr, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ int ret = -ERANGE;
-+ ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr);
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr);
-+#endif
-+
-+ if (map->cidr[0])
-+ ret = __delip(map, ip, map->cidr[0], hash_ip);
-+
-+ return ret;
-+}
++UADT(nethash, del, req->cidr)
++KADT(nethash, del, ipaddr, cidr)
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static inline int
++__nethash_create(const struct ip_set_req_nethash_create *req,
++ struct ip_set_nethash *map)
+{
-+ struct ip_set_req_nethash_create *req =
-+ (struct ip_set_req_nethash_create *) data;
-+ struct ip_set_nethash *map;
-+ uint16_t i;
-+
-+ if (size != sizeof(struct ip_set_req_nethash_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_nethash_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ if (req->hashsize < 1) {
-+ ip_set_printk("hashsize too small");
-+ return -ENOEXEC;
-+ }
-+ if (req->probes < 1) {
-+ ip_set_printk("probes too small");
-+ return -ENOEXEC;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_nethash)
-+ + req->probes * sizeof(uint32_t), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_nethash)
-+ + req->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ for (i = 0; i < req->probes; i++)
-+ get_random_bytes(((uint32_t *) map->initval)+i, 4);
-+ map->elements = 0;
-+ map->hashsize = req->hashsize;
-+ map->probes = req->probes;
-+ map->resize = req->resize;
-+ memset(map->cidr, 0, 30 * sizeof(unsigned char));
-+ map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
-+ kfree(map);
-+ return -ENOMEM;
-+ }
++ memset(map->cidr, 0, sizeof(map->cidr));
++ memset(map->nets, 0, sizeof(map->nets));
+
-+ set->data = map;
+ return 0;
+}
+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+
-+ harray_free(map->members);
-+ kfree(map);
-+
-+ set->data = NULL;
-+}
-+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
-+ memset(map->cidr, 0, 30 * sizeof(unsigned char));
-+ map->elements = 0;
-+}
-+
-+static void list_header(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ struct ip_set_req_nethash_create *header =
-+ (struct ip_set_req_nethash_create *) data;
-+
-+ header->hashsize = map->hashsize;
-+ header->probes = map->probes;
-+ header->resize = map->resize;
-+}
++HASH_CREATE(nethash, ip_set_ip_t)
++HASH_DESTROY(nethash)
+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++HASH_FLUSH_CIDR(nethash, ip_set_ip_t)
+
-+ return (map->hashsize * sizeof(ip_set_ip_t));
++static inline void
++__nethash_list_header(const struct ip_set_nethash *map,
++ struct ip_set_req_nethash_create *header)
++{
+}
+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ ip_set_ip_t i, *elem;
-+
-+ for (i = 0; i < map->hashsize; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ ((ip_set_ip_t *)data)[i] = *elem;
-+ }
-+}
++HASH_LIST_HEADER(nethash)
++HASH_LIST_MEMBERS_SIZE(nethash, ip_set_ip_t)
++HASH_LIST_MEMBERS(nethash, ip_set_ip_t)
+
-+static struct ip_set_type ip_set_nethash = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_nethash),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .retry = &retry,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_nethash_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_RTYPE(nethash, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -6825,27 +6629,15 @@
+module_param(limit, int, 0600);
+MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
+
-+static int __init ip_set_nethash_init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_nethash);
-+}
-+
-+static void __exit ip_set_nethash_fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_nethash);
-+}
-+
-+module_init(ip_set_nethash_init);
-+module_exit(ip_set_nethash_fini);
++REGISTER_MODULE(nethash)
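
The rewritten add path above (__nethash_add) no longer stops at the first empty bucket: every probe position is checked for a duplicate while the first free slot is remembered, because a slot freed by nethash_del may sit in front of an entry stored at a later probe. The same reasoning is behind the "No shortcut - there can be deleted entries" comment in nethash_id_cidr. A minimal userspace model of that multi-probe behaviour, with a toy hash in place of jhash and a flat array in place of the harray, might look like this:

/* Toy model of the multi-probe hashing used above; the hash function,
 * table layout and error codes are simplified stand-ins. */
#include <stdint.h>
#include <stdio.h>

#define HASHSIZE 8
#define PROBES   4

static uint32_t table[HASHSIZE];	/* 0 means "empty slot" */
static const uint32_t seed[PROBES] = {
	0x9e3779b9, 0x85ebca6b, 0xc2b2ae35, 0x27d4eb2f
};

static unsigned probe_slot(uint32_t ip, int i)
{
	return (ip * seed[i]) % HASHSIZE;	/* stand-in for jhash_ip() */
}

/* Mirrors __nethash_add(): scan every probe, remember the first free slot. */
static int add(uint32_t ip)
{
	uint32_t *slot = NULL;
	int i;

	for (i = 0; i < PROBES; i++) {
		uint32_t *elem = &table[probe_slot(ip, i)];

		if (*elem == ip)
			return -1;	/* duplicate: -EEXIST */
		if (!slot && !*elem)
			slot = elem;
		/* keep scanning: a deleted entry may hide a later duplicate */
	}
	if (slot) {
		*slot = ip;
		return 0;
	}
	return 1;			/* all probes taken: -EAGAIN, rehash */
}

/* Mirrors nethash_id_cidr(): every probe must be tried on lookup, too. */
static int test(uint32_t ip)
{
	int i;

	for (i = 0; i < PROBES; i++)
		if (table[probe_slot(ip, i)] == ip)
			return 1;
	return 0;
}

int main(void)
{
	add(0x0a000100);	/* e.g. a packed network address */
	printf("member: %d, non-member: %d\n",
	       test(0x0a000100), test(0x0a000200));
	return 0;
}
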
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_portmap.c
-@@ -0,0 +1,346 @@
-+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+@@ -0,0 +1,114 @@
++/* Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
++ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing a port set type as a bitmap */
@@ -6855,9 +6647,6 @@
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
@@ -6866,330 +6655,434 @@
+#include <net/ip.h>
+
+#include <linux/netfilter_ipv4/ip_set_portmap.h>
++#include <linux/netfilter_ipv4/ip_set_getport.h>
+
-+/* We must handle non-linear skbs */
-+static inline ip_set_ip_t
-+get_port(const struct sk_buff *skb, u_int32_t flags)
++static inline int
++portmap_test(const struct ip_set *set, ip_set_ip_t *hash_port,
++ ip_set_ip_t port)
+{
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ struct iphdr *iph = ip_hdr(skb);
-+#else
-+ struct iphdr *iph = skb->nh.iph;
-+#endif
-+ u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
-+ switch (iph->protocol) {
-+ case IPPROTO_TCP: {
-+ struct tcphdr tcph;
++ const struct ip_set_portmap *map = set->data;
++
++ if (port < map->first_ip || port > map->last_ip)
++ return -ERANGE;
+
-+ /* See comments at tcp_match in ip_tables.c */
-+ if (offset)
-+ return INVALID_PORT;
++ *hash_port = port;
++ DP("set: %s, port:%u, %u", set->name, port, *hash_port);
++ return !!test_bit(port - map->first_ip, map->members);
++}
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
-+#else
-+ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &tcph, sizeof(tcph)) < 0)
-+#endif
-+ /* No choice either */
-+ return INVALID_PORT;
-+
-+ return ntohs(flags & IPSET_SRC ?
-+ tcph.source : tcph.dest);
-+ }
-+ case IPPROTO_UDP: {
-+ struct udphdr udph;
++#define KADT_CONDITION \
++ if (ip == INVALID_PORT) \
++ return 0;
+
-+ if (offset)
-+ return INVALID_PORT;
++UADT(portmap, test)
++KADT(portmap, test, get_port)
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
-+#else
-+ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &udph, sizeof(udph)) < 0)
-+#endif
-+ /* No choice either */
-+ return INVALID_PORT;
-+
-+ return ntohs(flags & IPSET_SRC ?
-+ udph.source : udph.dest);
-+ }
-+ default:
-+ return INVALID_PORT;
-+ }
++static inline int
++portmap_add(struct ip_set *set, ip_set_ip_t *hash_port, ip_set_ip_t port)
++{
++ struct ip_set_portmap *map = set->data;
++
++ if (port < map->first_ip || port > map->last_ip)
++ return -ERANGE;
++ if (test_and_set_bit(port - map->first_ip, map->members))
++ return -EEXIST;
++
++ *hash_port = port;
++ DP("port %u", port);
++ return 0;
+}
+
++UADT(portmap, add)
++KADT(portmap, add, get_port)
++
+static inline int
-+__testport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
++portmap_del(struct ip_set *set, ip_set_ip_t *hash_port, ip_set_ip_t port)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++ struct ip_set_portmap *map = set->data;
+
-+ if (port < map->first_port || port > map->last_port)
++ if (port < map->first_ip || port > map->last_ip)
+ return -ERANGE;
++ if (!test_and_clear_bit(port - map->first_ip, map->members))
++ return -EEXIST;
+
+ *hash_port = port;
-+ DP("set: %s, port:%u, %u", set->name, port, *hash_port);
-+ return !!test_bit(port - map->first_port, map->members);
++ DP("port %u", port);
++ return 0;
+}
+
-+static int
-+testport(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_port)
-+{
-+ struct ip_set_req_portmap *req =
-+ (struct ip_set_req_portmap *) data;
++UADT(portmap, del)
++KADT(portmap, del, get_port)
+
-+ if (size != sizeof(struct ip_set_req_portmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_portmap),
-+ size);
-+ return -EINVAL;
++static inline int
++__portmap_create(const struct ip_set_req_portmap_create *req,
++ struct ip_set_portmap *map)
++{
++ if (req->to - req->from > MAX_RANGE) {
++ ip_set_printk("range too big, %d elements (max %d)",
++ req->to - req->from + 1, MAX_RANGE+1);
++ return -ENOEXEC;
+ }
-+ return __testport(set, req->port, hash_port);
++ return bitmap_bytes(req->from, req->to);
+}
+
-+static int
-+testport_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_port,
-+ const u_int32_t *flags,
-+ unsigned char index)
++BITMAP_CREATE(portmap)
++BITMAP_DESTROY(portmap)
++BITMAP_FLUSH(portmap)
++
++static inline void
++__portmap_list_header(const struct ip_set_portmap *map,
++ struct ip_set_req_portmap_create *header)
+{
-+ int res;
-+ ip_set_ip_t port = get_port(skb, flags[index]);
++}
+
-+ DP("flag %s port %u", flags[index] & IPSET_SRC ? "SRC" : "DST", port);
-+ if (port == INVALID_PORT)
-+ return 0;
++BITMAP_LIST_HEADER(portmap)
++BITMAP_LIST_MEMBERS_SIZE(portmap)
++BITMAP_LIST_MEMBERS(portmap)
+
-+ res = __testport(set, port, hash_port);
-+
-+ return (res < 0 ? 0 : res);
-+}
++IP_SET_TYPE(portmap, IPSET_TYPE_PORT | IPSET_DATA_SINGLE)
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("portmap type of IP sets");
++
++REGISTER_MODULE(portmap)
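
The portmap type above is reduced to a plain bitmap over the configured port range: membership of a port is the single bit at offset port - first, storage is sized by the bitmap_bytes() helper from ip_set.h, and pulling the port out of the skb is now delegated to ip_set_getport.h instead of the removed open-coded TCP/UDP parsing. A self-contained userspace sketch of just the bitmap arithmetic, with byte/bit masks standing in for the kernel's test_bit/set_bit and simplified field names, could be:

/* Userspace sketch of the portmap bitmap arithmetic; only the
 * offset and size calculations mirror the kernel code above. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct portmap {
	uint16_t first, last;
	unsigned char *bits;
};

/* Same rounding as bitmap_bytes() in ip_set.h: whole bytes, padded to 4. */
static size_t bitmap_bytes(uint32_t a, uint32_t b)
{
	return 4 * ((((b - a + 8) / 8) + 3) / 4);
}

static struct portmap *portmap_create(uint16_t from, uint16_t to)
{
	struct portmap *map = malloc(sizeof(*map));

	map->first = from;
	map->last = to;
	map->bits = calloc(1, bitmap_bytes(from, to));
	return map;
}

static int portmap_add(struct portmap *map, uint16_t port)
{
	uint32_t off;

	if (port < map->first || port > map->last)
		return -1;				/* -ERANGE */
	off = port - map->first;
	if (map->bits[off / 8] & (1u << (off % 8)))
		return -2;				/* -EEXIST */
	map->bits[off / 8] |= 1u << (off % 8);
	return 0;
}

static int portmap_test(const struct portmap *map, uint16_t port)
{
	uint32_t off;

	if (port < map->first || port > map->last)
		return 0;
	off = port - map->first;
	return !!(map->bits[off / 8] & (1u << (off % 8)));
}

int main(void)
{
	struct portmap *map = portmap_create(1024, 2048);

	portmap_add(map, 1194);
	printf("1194: %d, 8080: %d\n",
	       portmap_test(map, 1194), portmap_test(map, 8080));
	return 0;
}
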
+--- /dev/null
++++ b/net/ipv4/netfilter/ip_set_setlist.c
+@@ -0,0 +1,330 @@
++/* Copyright (C) 2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module implementing an IP set type: the setlist type */
++
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/errno.h>
++
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
++#include <linux/netfilter_ipv4/ip_set_setlist.h>
++
++/*
++ * before ==> index, ref
++ * after ==> ref, index
++ */
+
+static inline int
-+__addport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
++next_index_eq(const struct ip_set_setlist *map, int i, ip_set_id_t index)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+
-+ if (port < map->first_port || port > map->last_port)
-+ return -ERANGE;
-+ if (test_and_set_bit(port - map->first_port, map->members))
-+ return -EEXIST;
-+
-+ *hash_port = port;
-+ DP("port %u", port);
-+ return 0;
++ return i < map->size && map->index[i] == index;
+}
+
+static int
-+addport(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_port)
-+{
-+ struct ip_set_req_portmap *req =
-+ (struct ip_set_req_portmap *) data;
++setlist_utest(struct ip_set *set, const void *data, u_int32_t size,
++ ip_set_ip_t *hash_ip)
++{
++ const struct ip_set_setlist *map = set->data;
++ const struct ip_set_req_setlist *req = data;
++ ip_set_id_t index, ref = IP_SET_INVALID_ID;
++ int i, res = 0;
++ struct ip_set *s;
++
++ if (req->before && req->ref[0] == '\0')
++ return 0;
+
-+ if (size != sizeof(struct ip_set_req_portmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_portmap),
-+ size);
-+ return -EINVAL;
++ index = __ip_set_get_byname(req->name, &s);
++ if (index == IP_SET_INVALID_ID)
++ return 0;
++ if (req->ref[0] != '\0') {
++ ref = __ip_set_get_byname(req->ref, &s);
++ if (ref == IP_SET_INVALID_ID)
++ goto finish;
++ }
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID; i++) {
++ if (req->before && map->index[i] == index) {
++ res = next_index_eq(map, i + 1, ref);
++ break;
++ } else if (!req->before) {
++ if ((ref == IP_SET_INVALID_ID
++ && map->index[i] == index)
++ || (map->index[i] == ref
++ && next_index_eq(map, i + 1, index))) {
++ res = 1;
++ break;
++ }
++ }
+ }
-+ return __addport(set, req->port, hash_port);
++ if (ref != IP_SET_INVALID_ID)
++ __ip_set_put_byindex(ref);
++finish:
++ __ip_set_put_byindex(index);
++ return res;
+}
+
+static int
-+addport_kernel(struct ip_set *set,
++setlist_ktest(struct ip_set *set,
+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_port,
++ ip_set_ip_t *hash_ip,
+ const u_int32_t *flags,
+ unsigned char index)
+{
-+ ip_set_ip_t port = get_port(skb, flags[index]);
++ struct ip_set_setlist *map = set->data;
++ int i, res = 0;
+
-+ if (port == INVALID_PORT)
-+ return -EINVAL;
-+
-+ return __addport(set, port, hash_port);
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID
++ && res == 0; i++)
++ res = ip_set_testip_kernel(map->index[i], skb, flags);
++ return res;
+}
+
+static inline int
-+__delport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
++insert_setlist(struct ip_set_setlist *map, int i, ip_set_id_t index)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++ ip_set_id_t tmp;
++ int j;
+
-+ if (port < map->first_port || port > map->last_port)
++ DP("i: %u, last %u\n", i, map->index[map->size - 1]);
++ if (i >= map->size || map->index[map->size - 1] != IP_SET_INVALID_ID)
+ return -ERANGE;
-+ if (!test_and_clear_bit(port - map->first_port, map->members))
-+ return -EEXIST;
-+
-+ *hash_port = port;
-+ DP("port %u", port);
++
++ for (j = i; j < map->size
++ && index != IP_SET_INVALID_ID; j++) {
++ tmp = map->index[j];
++ map->index[j] = index;
++ index = tmp;
++ }
+ return 0;
+}
+
+static int
-+delport(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_port)
++setlist_uadd(struct ip_set *set, const void *data, u_int32_t size,
++ ip_set_ip_t *hash_ip)
+{
-+ struct ip_set_req_portmap *req =
-+ (struct ip_set_req_portmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_portmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_portmap),
-+ size);
++ struct ip_set_setlist *map = set->data;
++ const struct ip_set_req_setlist *req = data;
++ ip_set_id_t index, ref = IP_SET_INVALID_ID;
++ int i, res = -ERANGE;
++ struct ip_set *s;
++
++ if (req->before && req->ref[0] == '\0')
+ return -EINVAL;
++
++ index = __ip_set_get_byname(req->name, &s);
++ if (index == IP_SET_INVALID_ID)
++ return -EEXIST;
++ /* "Loop detection" */
++ if (strcmp(s->type->typename, "setlist") == 0)
++ goto finish;
++
++ if (req->ref[0] != '\0') {
++ ref = __ip_set_get_byname(req->ref, &s);
++ if (ref == IP_SET_INVALID_ID) {
++ res = -EEXIST;
++ goto finish;
++ }
+ }
-+ return __delport(set, req->port, hash_port);
++ for (i = 0; i < map->size; i++) {
++ if (map->index[i] != ref)
++ continue;
++ if (req->before)
++ res = insert_setlist(map, i, index);
++ else
++ res = insert_setlist(map,
++ ref == IP_SET_INVALID_ID ? i : i + 1,
++ index);
++ break;
++ }
++ if (ref != IP_SET_INVALID_ID)
++ __ip_set_put_byindex(ref);
++ /* In case of success, we keep the reference to the set */
++finish:
++ if (res != 0)
++ __ip_set_put_byindex(index);
++ return res;
+}
+
+static int
-+delport_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_port,
-+ const u_int32_t *flags,
-+ unsigned char index)
++setlist_kadd(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
+{
-+ ip_set_ip_t port = get_port(skb, flags[index]);
++ struct ip_set_setlist *map = set->data;
++ int i, res = -EINVAL;
+
-+ if (port == INVALID_PORT)
-+ return -EINVAL;
-+
-+ return __delport(set, port, hash_port);
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID
++ && res != 0; i++)
++ res = ip_set_addip_kernel(map->index[i], skb, flags);
++ return res;
+}
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static inline int
++unshift_setlist(struct ip_set_setlist *map, int i)
+{
-+ int newbytes;
-+ struct ip_set_req_portmap_create *req =
-+ (struct ip_set_req_portmap_create *) data;
-+ struct ip_set_portmap *map;
++ int j;
++
++ for (j = i; j < map->size - 1; j++)
++ map->index[j] = map->index[j+1];
++ map->index[map->size-1] = IP_SET_INVALID_ID;
++ return 0;
++}
+
-+ if (size != sizeof(struct ip_set_req_portmap_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_portmap_create),
-+ size);
++static int
++setlist_udel(struct ip_set *set, const void *data, u_int32_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_setlist *map = set->data;
++ const struct ip_set_req_setlist *req = data;
++ ip_set_id_t index, ref = IP_SET_INVALID_ID;
++ int i, res = -EEXIST;
++ struct ip_set *s;
++
++ if (req->before && req->ref[0] == '\0')
+ return -EINVAL;
-+ }
+
-+ DP("from %u to %u", req->from, req->to);
-+
-+ if (req->from > req->to) {
-+ DP("bad port range");
-+ return -ENOEXEC;
++ index = __ip_set_get_byname(req->name, &s);
++ if (index == IP_SET_INVALID_ID)
++ return -EEXIST;
++ if (req->ref[0] != '\0') {
++ ref = __ip_set_get_byname(req->ref, &s);
++ if (ref == IP_SET_INVALID_ID)
++ goto finish;
++ }
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID; i++) {
++ if (req->before) {
++ if (map->index[i] == index
++ && next_index_eq(map, i + 1, ref)) {
++ res = unshift_setlist(map, i);
++ break;
++ }
++ } else if (ref == IP_SET_INVALID_ID) {
++ if (map->index[i] == index) {
++ res = unshift_setlist(map, i);
++ break;
++ }
++ } else if (map->index[i] == ref
++ && next_index_eq(map, i + 1, index)) {
++ res = unshift_setlist(map, i + 1);
++ break;
++ }
+ }
++ if (ref != IP_SET_INVALID_ID)
++ __ip_set_put_byindex(ref);
++finish:
++ __ip_set_put_byindex(index);
++ /* In case of success, release the reference to the set */
++ if (res == 0)
++ __ip_set_put_byindex(index);
++ return res;
++}
+
-+ if (req->to - req->from > MAX_RANGE) {
-+ ip_set_printk("range too big (max %d ports)",
-+ MAX_RANGE+1);
-+ return -ENOEXEC;
-+ }
++static int
++setlist_kdel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ struct ip_set_setlist *map = set->data;
++ int i, res = -EINVAL;
++
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID
++ && res != 0; i++)
++ res = ip_set_delip_kernel(map->index[i], skb, flags);
++ return res;
++}
+
-+ map = kmalloc(sizeof(struct ip_set_portmap), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_portmap));
-+ return -ENOMEM;
-+ }
-+ map->first_port = req->from;
-+ map->last_port = req->to;
-+ newbytes = bitmap_bytes(req->from, req->to);
-+ map->members = kmalloc(newbytes, GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", newbytes);
-+ kfree(map);
++static int
++setlist_create(struct ip_set *set, const void *data, u_int32_t size)
++{
++ struct ip_set_setlist *map;
++ const struct ip_set_req_setlist_create *req = data;
++ int i;
++
++ map = kmalloc(sizeof(struct ip_set_setlist) +
++ req->size * sizeof(ip_set_id_t), GFP_KERNEL);
++ if (!map)
+ return -ENOMEM;
-+ }
-+ memset(map->members, 0, newbytes);
-+
++ map->size = req->size;
++ for (i = 0; i < map->size; i++)
++ map->index[i] = IP_SET_INVALID_ID;
++
+ set->data = map;
+ return 0;
-+}
++}
+
-+static void destroy(struct ip_set *set)
++static void
++setlist_destroy(struct ip_set *set)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++ struct ip_set_setlist *map = set->data;
++ int i;
++
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID; i++)
++ __ip_set_put_byindex(map->index[i]);
+
-+ kfree(map->members);
+ kfree(map);
-+
+ set->data = NULL;
+}
+
-+static void flush(struct ip_set *set)
++static void
++setlist_flush(struct ip_set *set)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+ memset(map->members, 0, bitmap_bytes(map->first_port, map->last_port));
++ struct ip_set_setlist *map = set->data;
++ int i;
++
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID; i++) {
++ __ip_set_put_byindex(map->index[i]);
++ map->index[i] = IP_SET_INVALID_ID;
++ }
+}
+
-+static void list_header(const struct ip_set *set, void *data)
++static void
++setlist_list_header(const struct ip_set *set, void *data)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+ struct ip_set_req_portmap_create *header =
-+ (struct ip_set_req_portmap_create *) data;
-+
-+ DP("list_header %u %u", map->first_port, map->last_port);
-+
-+ header->from = map->first_port;
-+ header->to = map->last_port;
++ const struct ip_set_setlist *map = set->data;
++ struct ip_set_req_setlist_create *header = data;
++
++ header->size = map->size;
+}
+
-+static int list_members_size(const struct ip_set *set)
++static int
++setlist_list_members_size(const struct ip_set *set)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+
-+ return bitmap_bytes(map->first_port, map->last_port);
++ const struct ip_set_setlist *map = set->data;
++
++ return map->size * sizeof(ip_set_id_t);
+}
+
-+static void list_members(const struct ip_set *set, void *data)
++static void
++setlist_list_members(const struct ip_set *set, void *data)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+ int bytes = bitmap_bytes(map->first_port, map->last_port);
-+
-+ memcpy(data, map->members, bytes);
++ struct ip_set_setlist *map = set->data;
++ int i;
++
++ for (i = 0; i < map->size; i++)
++ *((ip_set_id_t *)data + i) = ip_set_id(map->index[i]);
+}
+
-+static struct ip_set_type ip_set_portmap = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_PORT | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_portmap),
-+ .addip = &addport,
-+ .addip_kernel = &addport_kernel,
-+ .delip = &delport,
-+ .delip_kernel = &delport_kernel,
-+ .testip = &testport,
-+ .testip_kernel = &testport_kernel,
-+ .header_size = sizeof(struct ip_set_req_portmap_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_TYPE(setlist, IPSET_TYPE_SETNAME | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("portmap type of IP sets");
-+
-+static int __init ip_set_portmap_init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_portmap);
-+}
-+
-+static void __exit ip_set_portmap_fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_portmap);
-+}
++MODULE_DESCRIPTION("setlist type of IP sets");
+
-+module_init(ip_set_portmap_init);
-+module_exit(ip_set_portmap_fini);
++REGISTER_MODULE(setlist)
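
setlist keeps an ordered, fixed-size array of set indices, with unused slots holding IP_SET_INVALID_ID: insert_setlist() shifts the tail to the right to place a new reference either before or after an existing one (or at the first free position when no reference set is given), and unshift_setlist() closes the gap on deletion. Stripped of the reference counting and the by-name lookups, the ordering logic can be modelled in a few lines of plain C:

/* Standalone model of insert_setlist()/unshift_setlist(); IDs are plain
 * ints here and there is no kernel refcounting. */
#include <stdio.h>

#define INVALID	(-1)
#define SIZE	8

static int list[SIZE];

static void flush(void)
{
	int i;

	for (i = 0; i < SIZE; i++)
		list[i] = INVALID;
}

/* Shift the tail right to make room at pos; fail if the list is full. */
static int insert_at(int pos, int id)
{
	int i, tmp;

	if (pos >= SIZE || list[SIZE - 1] != INVALID)
		return -1;			/* -ERANGE */
	for (i = pos; i < SIZE && id != INVALID; i++) {
		tmp = list[i];
		list[i] = id;
		id = tmp;
	}
	return 0;
}

/* Close the gap left by the entry at pos. */
static void remove_at(int pos)
{
	int i;

	for (i = pos; i < SIZE - 1; i++)
		list[i] = list[i + 1];
	list[SIZE - 1] = INVALID;
}

int main(void)
{
	int i;

	flush();
	insert_at(0, 10);	/* append 10:            10       */
	insert_at(0, 20);	/* add 20 "before" 10:   20 10    */
	insert_at(1, 30);	/* add 30 "after" 20:    20 30 10 */
	remove_at(1);		/* delete 30:            20 10    */
	for (i = 0; i < SIZE && list[i] != INVALID; i++)
		printf("%d ", list[i]);
	printf("\n");
	return 0;
}
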
--- /dev/null
+++ b/net/ipv4/netfilter/ipt_set.c
-@@ -0,0 +1,160 @@
+@@ -0,0 +1,238 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Martin Josefsson <gandalf@wlug.westbo.se>
@@ -7197,7 +7090,7 @@
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
++ * published by the Free Software Foundation.
+ */
+
+/* Kernel module to match an IP set. */
@@ -7207,7 +7100,14 @@
+#include <linux/skbuff.h>
+#include <linux/version.h>
+
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
+#include <linux/netfilter_ipv4/ip_tables.h>
++#define xt_register_match ipt_register_match
++#define xt_unregister_match ipt_unregister_match
++#define xt_match ipt_match
++#else
++#include <linux/netfilter/x_tables.h>
++#endif
+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/netfilter_ipv4/ipt_set.h>
+
@@ -7221,58 +7121,119 @@
+ return inv;
+}
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+static bool
-+#else
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++static int
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *matchinfo,
++ int offset,
++ const void *hdr,
++ u_int16_t datalen,
++ int *hotdrop)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
++static int
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *matchinfo,
++ int offset,
++ int *hotdrop)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static int
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *matchinfo,
++ int offset,
++ unsigned int protoff,
++ int *hotdrop)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
+static int
-+#endif
+match(const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+ const struct xt_match *match,
-+#endif
+ const void *matchinfo,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ int offset, unsigned int protoff, bool *hotdrop)
-+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-+ int offset, unsigned int protoff, int *hotdrop)
-+#else
-+ int offset, int *hotdrop)
++ int offset,
++ unsigned int protoff,
++ int *hotdrop)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++static bool
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const struct xt_match *match,
++ const void *matchinfo,
++ int offset,
++ unsigned int protoff,
++ bool *hotdrop)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
++static bool
++match(const struct sk_buff *skb,
++ const struct xt_match_param *par)
+#endif
+{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+ const struct ipt_set_info_match *info = matchinfo;
++#else
++ const struct ipt_set_info_match *info = par->matchinfo;
++#endif
+
+ return match_set(&info->match_set,
+ skb,
+ info->match_set.flags[0] & IPSET_MATCH_INV);
+}
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+bool
-+#else
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
+static int
-+#endif
+checkentry(const char *tablename,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-+ const void *inf,
-+#else
+ const struct ipt_ip *ip,
-+#endif
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++ void *matchinfo,
++ unsigned int matchsize,
++ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static int
++checkentry(const char *tablename,
++ const void *inf,
++ void *matchinfo,
++ unsigned int matchsize,
++ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++static int
++checkentry(const char *tablename,
++ const void *inf,
+ const struct xt_match *match,
-+#endif
+ void *matchinfo,
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+ unsigned int matchsize,
-+#endif
+ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
++static int
++checkentry(const char *tablename,
++ const void *inf,
++ const struct xt_match *match,
++ void *matchinfo,
++ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++static bool
++checkentry(const char *tablename,
++ const void *inf,
++ const struct xt_match *match,
++ void *matchinfo,
++ unsigned int hook_mask)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
++static bool
++checkentry(const struct xt_mtchk_param *par)
++#endif
+{
-+ struct ipt_set_info_match *info =
-+ (struct ipt_set_info_match *) matchinfo;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++ struct ipt_set_info_match *info = matchinfo;
++#else
++ struct ipt_set_info_match *info = par->matchinfo;
++#endif
+ ip_set_id_t index;
+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
+ if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
+ ip_set_printk("invalid matchsize %d", matchsize);
+ return 0;
@@ -7294,65 +7255,75 @@
+ return 1;
+}
+
-+static void destroy(
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-+ const struct xt_match *match,
-+#endif
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ void *matchinfo, unsigned int matchsize)
-+#else
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static void destroy(void *matchinfo,
++ unsigned int matchsize)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++static void destroy(const struct xt_match *match,
++ void *matchinfo,
++ unsigned int matchsize)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++static void destroy(const struct xt_match *match,
+ void *matchinfo)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
++static void destroy(const struct xt_mtdtor_param *par)
+#endif
+{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+ struct ipt_set_info_match *info = matchinfo;
++#else
++ struct ipt_set_info_match *info = par->matchinfo;
++#endif
+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
+ if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
+ ip_set_printk("invalid matchsize %d", matchsize);
+ return;
+ }
+#endif
-+ ip_set_put(info->match_set.index);
++ ip_set_put_byindex(info->match_set.index);
+}
+
-+static struct ipt_match set_match = {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static struct xt_match set_match = {
++ .name = "set",
++ .match = &match,
++ .checkentry = &checkentry,
++ .destroy = &destroy,
++ .me = THIS_MODULE
++};
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17) */
++static struct xt_match set_match = {
+ .name = "set",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
+ .family = AF_INET,
-+#endif
+ .match = &match,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+ .matchsize = sizeof(struct ipt_set_info_match),
-+#endif
+ .checkentry = &checkentry,
+ .destroy = &destroy,
+ .me = THIS_MODULE
+};
++#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("iptables IP set match module");
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+#define ipt_register_match xt_register_match
-+#define ipt_unregister_match xt_unregister_match
-+#endif
-+
+static int __init ipt_ipset_init(void)
+{
-+ return ipt_register_match(&set_match);
++ return xt_register_match(&set_match);
+}
+
+static void __exit ipt_ipset_fini(void)
+{
-+ ipt_unregister_match(&set_match);
++ xt_unregister_match(&set_match);
+}
+
+module_init(ipt_ipset_init);
+module_exit(ipt_ipset_fini);
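
Rather than patching #ifdef fragments into a single argument list, the rewritten match module spells out one complete prototype per kernel range and selects it with an #elif chain, mapping the pre-2.6.16 ipt_* registration names onto xt_* where needed. For readability, here is the newest (>= 2.6.28) variant pulled out of those chains on its own; the set_mt* names are made up for this sketch and the bodies are stubs standing in for the real match_set() call and set reference handling:

/* The >= 2.6.28 shape only, extracted as a skeleton; bodies are stubs. */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_set.h>
#include <linux/netfilter_ipv4/ipt_set.h>

static bool
set_mt(const struct sk_buff *skb, const struct xt_match_param *par)
{
	const struct ipt_set_info_match *info = par->matchinfo;

	/* stub: the real module tests skb against info->match_set here */
	return info->match_set.index != IP_SET_INVALID_ID;
}

static bool
set_mt_check(const struct xt_mtchk_param *par)
{
	/* stub: the real module resolves and grabs the set reference here */
	return true;
}

static void
set_mt_destroy(const struct xt_mtdtor_param *par)
{
	/* stub: the real module drops the set reference here */
}

static struct xt_match set_mt_reg = {
	.name		= "set",
	.family		= AF_INET,
	.match		= set_mt,
	.matchsize	= sizeof(struct ipt_set_info_match),
	.checkentry	= set_mt_check,
	.destroy	= set_mt_destroy,
	.me		= THIS_MODULE,
};
/* registered from module init with xt_register_match(&set_mt_reg), as above */
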
--- /dev/null
+++ b/net/ipv4/netfilter/ipt_SET.c
-@@ -0,0 +1,179 @@
+@@ -0,0 +1,242 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Martin Josefsson <gandalf@wlug.westbo.se>
@@ -7360,49 +7331,79 @@
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
++ * published by the Free Software Foundation.
+ */
+
+/* ipt_SET.c - netfilter target to manipulate IP sets */
+
-+#include <linux/types.h>
-+#include <linux/ip.h>
-+#include <linux/timer.h>
+#include <linux/module.h>
-+#include <linux/netfilter.h>
-+#include <linux/netdevice.h>
-+#include <linux/if.h>
-+#include <linux/inetdevice.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
+#include <linux/version.h>
-+#include <net/protocol.h>
-+#include <net/checksum.h>
++
+#include <linux/netfilter_ipv4.h>
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
+#include <linux/netfilter_ipv4/ip_tables.h>
++#define xt_register_target ipt_register_target
++#define xt_unregister_target ipt_unregister_target
++#define xt_target ipt_target
++#define XT_CONTINUE IPT_CONTINUE
++#else
++#include <linux/netfilter/x_tables.h>
++#endif
+#include <linux/netfilter_ipv4/ipt_set.h>
+
+static unsigned int
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
-+target(struct sk_buff *skb,
-+#else
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++target(struct sk_buff **pskb,
++ unsigned int hooknum,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *targinfo,
++ void *userinfo)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++target(struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ unsigned int hooknum,
++ const void *targinfo,
++ void *userinfo)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+target(struct sk_buff **pskb,
-+#endif
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int hooknum,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+ const struct xt_target *target,
-+#endif
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+ const void *targinfo,
+ void *userinfo)
-+#else
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
++target(struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ unsigned int hooknum,
++ const struct xt_target *target,
++ const void *targinfo)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++target(struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ unsigned int hooknum,
++ const struct xt_target *target,
+ const void *targinfo)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
++target(struct sk_buff *skb,
++ const struct xt_target_param *par)
+#endif
+{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+ const struct ipt_set_info_target *info = targinfo;
++#else
++ const struct ipt_set_info_target *info = par->targinfo;
++#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+ struct sk_buff *skb = *pskb;
+#endif
++
+
+ if (info->add_set.index != IP_SET_INVALID_ID)
+ ip_set_addip_kernel(info->add_set.index,
@@ -7413,34 +7414,58 @@
+ skb,
+ info->del_set.flags);
+
-+ return IPT_CONTINUE;
++ return XT_CONTINUE;
+}
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+static bool
-+#else
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
+static int
-+#endif
+checkentry(const char *tablename,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-+ const void *e,
-+#else
+ const struct ipt_entry *e,
-+#endif
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++ void *targinfo,
++ unsigned int targinfosize,
++ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static int
++checkentry(const char *tablename,
++ const void *e,
++ void *targinfo,
++ unsigned int targinfosize,
++ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++static int
++checkentry(const char *tablename,
++ const void *e,
++ const struct xt_target *target,
++ void *targinfo,
++ unsigned int targinfosize,
++ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
++static int
++checkentry(const char *tablename,
++ const void *e,
+ const struct xt_target *target,
-+#endif
+ void *targinfo,
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ unsigned int targinfosize,
-+#endif
+ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++static bool
++checkentry(const char *tablename,
++ const void *e,
++ const struct xt_target *target,
++ void *targinfo,
++ unsigned int hook_mask)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
++static bool
++checkentry(const struct xt_tgchk_param *par)
++#endif
+{
-+ struct ipt_set_info_target *info =
-+ (struct ipt_set_info_target *) targinfo;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++ const struct ipt_set_info_target *info = targinfo;
++#else
++ const struct ipt_set_info_target *info = par->targinfo;
++#endif
+ ip_set_id_t index;
+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
+ if (targinfosize != IPT_ALIGN(sizeof(*info))) {
+ DP("bad target info size %u", targinfosize);
+ return 0;
@@ -7473,68 +7498,77 @@
+ return 1;
+}
+
-+static void destroy(
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-+ const struct xt_target *target,
-+#endif
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ void *targetinfo, unsigned int targetsize)
-+#else
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static void destroy(void *targetinfo,
++ unsigned int targetsize)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++static void destroy(const struct xt_target *target,
++ void *targetinfo,
++ unsigned int targetsize)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++static void destroy(const struct xt_target *target,
+ void *targetinfo)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
++static void destroy(const struct xt_tgdtor_param *par)
+#endif
+{
-+ struct ipt_set_info_target *info = targetinfo;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++ const struct ipt_set_info_target *info = targetinfo;
++#else
++ const struct ipt_set_info_target *info = par->targinfo;
++#endif
+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
+ if (targetsize != IPT_ALIGN(sizeof(struct ipt_set_info_target))) {
+ ip_set_printk("invalid targetsize %d", targetsize);
+ return;
+ }
+#endif
+ if (info->add_set.index != IP_SET_INVALID_ID)
-+ ip_set_put(info->add_set.index);
++ ip_set_put_byindex(info->add_set.index);
+ if (info->del_set.index != IP_SET_INVALID_ID)
-+ ip_set_put(info->del_set.index);
++ ip_set_put_byindex(info->del_set.index);
+}
+
-+static struct ipt_target SET_target = {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static struct xt_target SET_target = {
++ .name = "SET",
++ .target = target,
++ .checkentry = checkentry,
++ .destroy = destroy,
++ .me = THIS_MODULE
++};
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17) */
++static struct xt_target SET_target = {
+ .name = "SET",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
+ .family = AF_INET,
-+#endif
+ .target = target,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+ .targetsize = sizeof(struct ipt_set_info_target),
-+#endif
+ .checkentry = checkentry,
+ .destroy = destroy,
+ .me = THIS_MODULE
+};
++#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("iptables IP set target module");
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+#define ipt_register_target xt_register_target
-+#define ipt_unregister_target xt_unregister_target
-+#endif
-+
+static int __init ipt_SET_init(void)
+{
-+ return ipt_register_target(&SET_target);
++ return xt_register_target(&SET_target);
+}
+
+static void __exit ipt_SET_fini(void)
+{
-+ ipt_unregister_target(&SET_target);
++ xt_unregister_target(&SET_target);
+}
+
+module_init(ipt_SET_init);
+module_exit(ipt_SET_fini);
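
For orientation only: the userspace iptables extensions that drive this target and the companion match above are not part of this patch, so the exact option syntax is not defined here. With the ipset 2.x tools of the same era the two modules were typically used along the following lines; treat the option names as recalled usage, not as something this diff guarantees.

# match: drop packets whose source address is in the set "blacklist"
iptables -A INPUT -m set --set blacklist src -j DROP

# target: add the destination address of matching packets to "tracked"
iptables -A OUTPUT -p tcp --dport 80 -j SET --add-set tracked dst
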
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
-@@ -379,5 +379,122 @@ config IP_NF_ARP_MANGLE
+@@ -379,5 +379,146 @@ config IP_NF_ARP_MANGLE
Allows altering the ARP packet payload: source and destination
hardware and network addresses.
@@ -7619,6 +7653,22 @@
+
+ To compile it as a module, choose M here. If unsure, say N.
+
++config IP_NF_SET_IPPORTIPHASH
++ tristate "ipportiphash set support"
++ depends on IP_NF_SET
++ help
++ This option adds the ipportiphash set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config IP_NF_SET_IPPORTNETHASH
++ tristate "ipportnethash set support"
++ depends on IP_NF_SET
++ help
++ This option adds the ipportnethash set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
+config IP_NF_SET_IPTREE
+ tristate "iptree set support"
+ depends on IP_NF_SET
@@ -7635,6 +7685,14 @@
+
+ To compile it as a module, choose M here. If unsure, say N.
+
++config IP_NF_SET_SETLIST
++ tristate "setlist set support"
++ depends on IP_NF_SET
++ help
++ This option adds the setlist set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
+config IP_NF_MATCH_SET
+ tristate "set match support"
+ depends on IP_NF_SET
@@ -7667,7 +7725,7 @@
# targets
obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
-@@ -57,6 +58,18 @@ obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += i
+@@ -57,6 +58,21 @@ obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += i
obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o
obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
@@ -7681,8 +7739,11 @@
+obj-$(CONFIG_IP_NF_SET_IPHASH) += ip_set_iphash.o
+obj-$(CONFIG_IP_NF_SET_NETHASH) += ip_set_nethash.o
+obj-$(CONFIG_IP_NF_SET_IPPORTHASH) += ip_set_ipporthash.o
++obj-$(CONFIG_IP_NF_SET_IPPORTIPHASH) += ip_set_ipportiphash.o
++obj-$(CONFIG_IP_NF_SET_IPPORTNETHASH) += ip_set_ipportnethash.o
+obj-$(CONFIG_IP_NF_SET_IPTREE) += ip_set_iptree.o
+obj-$(CONFIG_IP_NF_SET_IPTREEMAP) += ip_set_iptreemap.o
++obj-$(CONFIG_IP_NF_SET_SETLIST) += ip_set_setlist.o
# generic ARP tables
obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o
diff --git a/target/linux/generic-2.6/patches-2.6.27/130-netfilter_ipset.patch b/target/linux/generic-2.6/patches-2.6.27/130-netfilter_ipset.patch
index 3ea2a6dd0a..dd8dbd9fea 100644
--- a/target/linux/generic-2.6/patches-2.6.27/130-netfilter_ipset.patch
+++ b/target/linux/generic-2.6/patches-2.6.27/130-netfilter_ipset.patch
@@ -1,23 +1,29 @@
--- a/include/linux/netfilter_ipv4/Kbuild
+++ b/include/linux/netfilter_ipv4/Kbuild
-@@ -45,3 +45,14 @@ header-y += ipt_ttl.h
+@@ -45,3 +45,20 @@ header-y += ipt_ttl.h
unifdef-y += ip_queue.h
unifdef-y += ip_tables.h
+
+unifdef-y += ip_set.h
+header-y += ip_set_iphash.h
++unifdef-y += ip_set_bitmaps.h
++unifdef-y += ip_set_getport.h
++unifdef-y += ip_set_hashes.h
+header-y += ip_set_ipmap.h
+header-y += ip_set_ipporthash.h
++header-y += ip_set_ipportiphash.h
++header-y += ip_set_ipportnethash.h
+unifdef-y += ip_set_iptree.h
+unifdef-y += ip_set_iptreemap.h
+header-y += ip_set_jhash.h
+header-y += ip_set_macipmap.h
-+unifdef-y += ip_set_nethash.h
++header-y += ip_set_nethash.h
+header-y += ip_set_portmap.h
++header-y += ip_set_setlist.h
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set.h
-@@ -0,0 +1,498 @@
+@@ -0,0 +1,574 @@
+#ifndef _IP_SET_H
+#define _IP_SET_H
+
@@ -28,7 +34,7 @@
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
++ * published by the Free Software Foundation.
+ */
+
+#if 0
@@ -57,10 +63,10 @@
+ * - in order to "deal with" backward compatibility, renamed to ipset
+ */
+
-+/*
-+ * Used so that the kernel module and ipset-binary can match their versions
++/*
++ * Used so that the kernel module and ipset-binary can match their versions
+ */
-+#define IP_SET_PROTOCOL_VERSION 2
++#define IP_SET_PROTOCOL_VERSION 3
+
+#define IP_SET_MAXNAMELEN 32 /* set names and set typenames */
+
@@ -69,7 +75,7 @@
+ *
+ * The representation works in HOST byte order, because most set types
+ * will perform arithmetic operations and compare operations.
-+ *
++ *
+ * For now the type is an uint32_t.
+ *
+ * Make sure to ONLY use the functions when translating and parsing
@@ -107,6 +113,9 @@
+#define IPSET_TYPE_PORT 0x02 /* Port type of set */
+#define IPSET_DATA_SINGLE 0x04 /* Single data storage */
+#define IPSET_DATA_DOUBLE 0x08 /* Double data storage */
++#define IPSET_DATA_TRIPLE 0x10 /* Triple data storage */
++#define IPSET_TYPE_IP1 0x20 /* IP address type of set */
++#define IPSET_TYPE_SETNAME 0x40 /* setname type of set */
+
+/* Reserved keywords */
+#define IPSET_TOKEN_DEFAULT ":default:"
@@ -120,8 +129,8 @@
+ * 200-299: list, save, restore
+ */
+
-+/* Single shot operations:
-+ * version, create, destroy, flush, rename and swap
++/* Single shot operations:
++ * version, create, destroy, flush, rename and swap
+ *
+ * Sets are identified by name.
+ */
@@ -172,7 +181,7 @@
+ unsigned version;
+};
+
-+/* Double shots operations:
++/* Double shots operations:
+ * add, del, test, bind and unbind.
+ *
+ * First we query the kernel to get the index and type of the target set,
@@ -214,7 +223,7 @@
+};
+
+#define IP_SET_OP_UNBIND_SET 0x00000105 /* Unbind an IP from a set */
-+/* Uses ip_set_req_bind, with type speficic addage
++/* Uses ip_set_req_bind, with type speficic addage
+ * index = 0 means unbinding for all sets */
+
+#define IP_SET_OP_TEST_BIND_SET 0x00000106 /* Test binding an IP to a set */
@@ -245,7 +254,7 @@
+struct ip_set_req_setnames {
+ unsigned op;
+ ip_set_id_t index; /* set to list/save */
-+ size_t size; /* size to get setdata/bindings */
++ u_int32_t size; /* size to get setdata/bindings */
+ /* followed by sets number of struct ip_set_name_list */
+};
+
@@ -260,16 +269,16 @@
+#define IP_SET_OP_LIST 0x00000203
+struct ip_set_req_list {
+ IP_SET_REQ_BYINDEX;
-+ /* sets number of struct ip_set_list in reply */
++ /* sets number of struct ip_set_list in reply */
+};
+
+struct ip_set_list {
+ ip_set_id_t index;
+ ip_set_id_t binding;
+ u_int32_t ref;
-+ size_t header_size; /* Set header data of header_size */
-+ size_t members_size; /* Set members data of members_size */
-+ size_t bindings_size; /* Set bindings data of bindings_size */
++ u_int32_t header_size; /* Set header data of header_size */
++ u_int32_t members_size; /* Set members data of members_size */
++ u_int32_t bindings_size;/* Set bindings data of bindings_size */
+};
+
+struct ip_set_hash_list {
@@ -286,8 +295,8 @@
+struct ip_set_save {
+ ip_set_id_t index;
+ ip_set_id_t binding;
-+ size_t header_size; /* Set header data of header_size */
-+ size_t members_size; /* Set members data of members_size */
++ u_int32_t header_size; /* Set header data of header_size */
++ u_int32_t members_size; /* Set members data of members_size */
+};
+
+/* At restoring, ip == 0 means default binding for the given set: */
@@ -300,15 +309,15 @@
+/* The restore operation */
+#define IP_SET_OP_RESTORE 0x00000205
+/* Uses ip_set_req_setnames followed by ip_set_restore structures
-+ * plus a marker ip_set_restore, followed by ip_set_hash_save
++ * plus a marker ip_set_restore, followed by ip_set_hash_save
+ * structures.
+ */
+struct ip_set_restore {
+ char name[IP_SET_MAXNAMELEN];
+ char typename[IP_SET_MAXNAMELEN];
+ ip_set_id_t index;
-+ size_t header_size; /* Create data of header_size */
-+ size_t members_size; /* Set members data of members_size */
++ u_int32_t header_size; /* Create data of header_size */
++ u_int32_t members_size; /* Set members data of members_size */
+};
+
+static inline int bitmap_bytes(ip_set_ip_t a, ip_set_ip_t b)
@@ -316,7 +325,12 @@
+ return 4 * ((((b - a + 8) / 8) + 3) / 4);
+}
+
++/* General limit for the elements in a set */
++#define MAX_RANGE 0x0000FFFF
++
+#ifdef __KERNEL__
++#include <linux/netfilter_ipv4/ip_set_compat.h>
++#include <linux/netfilter_ipv4/ip_set_malloc.h>
+
+#define ip_set_printk(format, args...) \
+ do { \
@@ -361,7 +375,7 @@
+ * return 0 if not in set, 1 if in set.
+ */
+ int (*testip_kernel) (struct ip_set *set,
-+ const struct sk_buff * skb,
++ const struct sk_buff * skb,
+ ip_set_ip_t *ip,
+ const u_int32_t *flags,
+ unsigned char index);
@@ -370,22 +384,22 @@
+ * return 0 if not in set, 1 if in set.
+ */
+ int (*testip) (struct ip_set *set,
-+ const void *data, size_t size,
++ const void *data, u_int32_t size,
+ ip_set_ip_t *ip);
+
+ /*
+	 * Size of the data structure passed when
+	 * adding/deleting/testing an entry.
+ */
-+ size_t reqsize;
++ u_int32_t reqsize;
+
+ /* Add IP into set (userspace: ipset -A set IP)
+ * Return -EEXIST if the address is already in the set,
+ * and -ERANGE if the address lies outside the set bounds.
+ * If the address was not already in the set, 0 is returned.
+ */
-+ int (*addip) (struct ip_set *set,
-+ const void *data, size_t size,
++ int (*addip) (struct ip_set *set,
++ const void *data, u_int32_t size,
+ ip_set_ip_t *ip);
+
+ /* Add IP into set (kernel: iptables ... -j SET set src|dst)
@@ -394,7 +408,7 @@
+ * If the address was not already in the set, 0 is returned.
+ */
+ int (*addip_kernel) (struct ip_set *set,
-+ const struct sk_buff * skb,
++ const struct sk_buff * skb,
+ ip_set_ip_t *ip,
+ const u_int32_t *flags,
+ unsigned char index);
@@ -404,8 +418,8 @@
+ * and -ERANGE if the address lies outside the set bounds.
+ * If the address really was in the set, 0 is returned.
+ */
-+ int (*delip) (struct ip_set *set,
-+ const void *data, size_t size,
++ int (*delip) (struct ip_set *set,
++ const void *data, u_int32_t size,
+ ip_set_ip_t *ip);
+
+ /* remove IP from set (kernel: iptables ... -j SET --entry x)
@@ -414,7 +428,7 @@
+ * If the address really was in the set, 0 is returned.
+ */
+ int (*delip_kernel) (struct ip_set *set,
-+ const struct sk_buff * skb,
++ const struct sk_buff * skb,
+ ip_set_ip_t *ip,
+ const u_int32_t *flags,
+ unsigned char index);
@@ -422,7 +436,7 @@
+ /* new set creation - allocated type specific items
+ */
+ int (*create) (struct ip_set *set,
-+ const void *data, size_t size);
++ const void *data, u_int32_t size);
+
+ /* retry the operation after successfully tweaking the set
+ */
@@ -441,16 +455,16 @@
+
+ /* Listing: size needed for header
+ */
-+ size_t header_size;
++ u_int32_t header_size;
+
+ /* Listing: Get the header
+ *
+ * Fill in the information in "data".
-+ * This function is always run after list_header_size() under a
-+	 * writelock on the set. Therefore the length of "data" is always
-+	 * correct.
++	 * This function is always run after list_header_size() under a
++	 * writelock on the set. Therefore the length of "data" is always
++	 * correct.
+ */
-+ void (*list_header) (const struct ip_set *set,
++ void (*list_header) (const struct ip_set *set,
+ void *data);
+
+ /* Listing: Get the size for the set members
@@ -460,9 +474,9 @@
+ /* Listing: Get the set members
+ *
+ * Fill in the information in "data".
-+ * This function is always run after list_member_size() under a
-+	 * writelock on the set. Therefore the length of "data" is always
-+	 * correct.
++	 * This function is always run after list_member_size() under a
++	 * writelock on the set. Therefore the length of "data" is always
++	 * correct.
+ */
+ void (*list_members) (const struct ip_set *set,
+ void *data);
@@ -499,33 +513,659 @@
+
+/* register and unregister set references */
+extern ip_set_id_t ip_set_get_byname(const char name[IP_SET_MAXNAMELEN]);
-+extern ip_set_id_t ip_set_get_byindex(ip_set_id_t id);
-+extern void ip_set_put(ip_set_id_t id);
++extern ip_set_id_t ip_set_get_byindex(ip_set_id_t index);
++extern void ip_set_put_byindex(ip_set_id_t index);
++extern ip_set_id_t ip_set_id(ip_set_id_t index);
++extern ip_set_id_t __ip_set_get_byname(const char name[IP_SET_MAXNAMELEN],
++ struct ip_set **set);
++extern void __ip_set_put_byindex(ip_set_id_t index);
+
+/* API for iptables set match, and SET target */
-+extern void ip_set_addip_kernel(ip_set_id_t id,
-+ const struct sk_buff *skb,
-+ const u_int32_t *flags);
-+extern void ip_set_delip_kernel(ip_set_id_t id,
-+ const struct sk_buff *skb,
-+ const u_int32_t *flags);
++extern int ip_set_addip_kernel(ip_set_id_t id,
++ const struct sk_buff *skb,
++ const u_int32_t *flags);
++extern int ip_set_delip_kernel(ip_set_id_t id,
++ const struct sk_buff *skb,
++ const u_int32_t *flags);
+extern int ip_set_testip_kernel(ip_set_id_t id,
+ const struct sk_buff *skb,
+ const u_int32_t *flags);
+
++/* Macros to generate functions */
++
++#define STRUCT(pre, type) CONCAT2(pre, type)
++#define CONCAT2(pre, type) struct pre##type
++
++#define FNAME(pre, mid, post) CONCAT3(pre, mid, post)
++#define CONCAT3(pre, mid, post) pre##mid##post
++
++#define UADT0(type, adt, args...) \
++static int \
++FNAME(type,_u,adt)(struct ip_set *set, const void *data, u_int32_t size,\
++ ip_set_ip_t *hash_ip) \
++{ \
++ const STRUCT(ip_set_req_,type) *req = data; \
++ \
++ return FNAME(type,_,adt)(set, hash_ip , ## args); \
++}
++
++#define UADT(type, adt, args...) \
++ UADT0(type, adt, req->ip , ## args)
++
++#define KADT(type, adt, getfn, args...) \
++static int \
++FNAME(type,_k,adt)(struct ip_set *set, \
++ const struct sk_buff *skb, \
++ ip_set_ip_t *hash_ip, \
++ const u_int32_t *flags, \
++ unsigned char index) \
++{ \
++ ip_set_ip_t ip = getfn(skb, flags[index]); \
++ \
++ KADT_CONDITION \
++ return FNAME(type,_,adt)(set, hash_ip, ip , ##args); \
++}
++
++#define REGISTER_MODULE(type) \
++static int __init ip_set_##type##_init(void) \
++{ \
++ init_max_page_size(); \
++ return ip_set_register_set_type(&ip_set_##type); \
++} \
++ \
++static void __exit ip_set_##type##_fini(void) \
++{ \
++ /* FIXME: possible race with ip_set_create() */ \
++ ip_set_unregister_set_type(&ip_set_##type); \
++} \
++ \
++module_init(ip_set_##type##_init); \
++module_exit(ip_set_##type##_fini);
++
++/* Common functions */
++
++static inline ip_set_ip_t
++ipaddr(const struct sk_buff *skb, u_int32_t flag)
++{
++ return ntohl(flag & IPSET_SRC ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr);
++}
++
++#define jhash_ip(map, i, ip) jhash_1word(ip, *(map->initval + i))
++
++#define pack_ip_port(map, ip, port) \
++ (port + ((ip - ((map)->first_ip)) << 16))
++
+#endif /* __KERNEL__ */
+
+#endif /*_IP_SET_H*/
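
To put the kernel-facing declarations above in context, the following is a rough sketch of how a netfilter extension could consume them. It is illustrative only and assumes a kernel build environment; the example_* names are invented here, and in practice the set name is resolved once when the rule is installed (ip_set_get_byname() takes the app mutex), while packets are only tested against the stored index.

/* Illustrative sketch, not part of the patch: kernel-side consumers of the
 * API above (names prefixed example_ are invented). */

/* Resolve the set once, e.g. from a checkentry hook; this takes a reference
 * or returns IP_SET_INVALID_ID if no such set exists. */
static ip_set_id_t example_resolve(const char *setname)
{
	return ip_set_get_byname(setname);
}

/* Test a packet against the stored index.  The flags array is walked until
 * a zero entry, so a two element {IPSET_SRC, 0} means "source address only". */
static int example_match(ip_set_id_t index, const struct sk_buff *skb)
{
	static const u_int32_t flags[2] = { IPSET_SRC, 0 };

	return ip_set_testip_kernel(index, skb, flags);
}

/* Drop the reference again when the rule goes away. */
static void example_release(ip_set_id_t index)
{
	if (index != IP_SET_INVALID_ID)
		ip_set_put_byindex(index);
}
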
--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_bitmaps.h
+@@ -0,0 +1,121 @@
++#ifndef __IP_SET_BITMAPS_H
++#define __IP_SET_BITMAPS_H
++
++/* Macros to generate functions */
++
++#ifdef __KERNEL__
++#define BITMAP_CREATE(type) \
++static int \
++type##_create(struct ip_set *set, const void *data, u_int32_t size) \
++{ \
++ int newbytes; \
++ const struct ip_set_req_##type##_create *req = data; \
++ struct ip_set_##type *map; \
++ \
++ if (req->from > req->to) { \
++ DP("bad range"); \
++ return -ENOEXEC; \
++ } \
++ \
++ map = kmalloc(sizeof(struct ip_set_##type), GFP_KERNEL); \
++ if (!map) { \
++ DP("out of memory for %zu bytes", \
++ sizeof(struct ip_set_##type)); \
++ return -ENOMEM; \
++ } \
++ map->first_ip = req->from; \
++ map->last_ip = req->to; \
++ \
++ newbytes = __##type##_create(req, map); \
++ if (newbytes < 0) { \
++ kfree(map); \
++ return newbytes; \
++ } \
++ \
++ map->size = newbytes; \
++ map->members = ip_set_malloc(newbytes); \
++ if (!map->members) { \
++ DP("out of memory for %i bytes", newbytes); \
++ kfree(map); \
++ return -ENOMEM; \
++ } \
++ memset(map->members, 0, newbytes); \
++ \
++ set->data = map; \
++ return 0; \
++}
++
++#define BITMAP_DESTROY(type) \
++static void \
++type##_destroy(struct ip_set *set) \
++{ \
++ struct ip_set_##type *map = set->data; \
++ \
++ ip_set_free(map->members, map->size); \
++ kfree(map); \
++ \
++ set->data = NULL; \
++}
++
++#define BITMAP_FLUSH(type) \
++static void \
++type##_flush(struct ip_set *set) \
++{ \
++ struct ip_set_##type *map = set->data; \
++ memset(map->members, 0, map->size); \
++}
++
++#define BITMAP_LIST_HEADER(type) \
++static void \
++type##_list_header(const struct ip_set *set, void *data) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ struct ip_set_req_##type##_create *header = data; \
++ \
++ header->from = map->first_ip; \
++ header->to = map->last_ip; \
++ __##type##_list_header(map, header); \
++}
++
++#define BITMAP_LIST_MEMBERS_SIZE(type) \
++static int \
++type##_list_members_size(const struct ip_set *set) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ \
++ return map->size; \
++}
++
++#define BITMAP_LIST_MEMBERS(type) \
++static void \
++type##_list_members(const struct ip_set *set, void *data) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ \
++ memcpy(data, map->members, map->size); \
++}
++
++#define IP_SET_TYPE(type, __features) \
++struct ip_set_type ip_set_##type = { \
++ .typename = #type, \
++ .features = __features, \
++ .protocol_version = IP_SET_PROTOCOL_VERSION, \
++ .create = &type##_create, \
++ .destroy = &type##_destroy, \
++ .flush = &type##_flush, \
++ .reqsize = sizeof(struct ip_set_req_##type), \
++ .addip = &type##_uadd, \
++ .addip_kernel = &type##_kadd, \
++ .delip = &type##_udel, \
++ .delip_kernel = &type##_kdel, \
++ .testip = &type##_utest, \
++ .testip_kernel = &type##_ktest, \
++ .header_size = sizeof(struct ip_set_req_##type##_create),\
++ .list_header = &type##_list_header, \
++ .list_members_size = &type##_list_members_size, \
++ .list_members = &type##_list_members, \
++ .me = THIS_MODULE, \
++};
++#endif /* __KERNEL */
++
++#endif /* __IP_SET_BITMAPS_H */
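
The macros above concentrate the boilerplate that every bitmap-style set type needs; a concrete type only supplies its map structure, its request structures and two small __type helpers. The outline below shows that for a hypothetical type called foomap. It is an illustration only and does not compile on its own: foomap and its helpers are invented here, and the add/del/test workers plus their foomap_uadd, foomap_kadd, ... wrappers would come from the UADT/KADT generators in ip_set.h.

/* Hypothetical bitmap type, for illustration only. */
struct ip_set_foomap {
	void *members;			/* the bitmap proper */
	ip_set_ip_t first_ip;		/* host byte order, included in range */
	ip_set_ip_t last_ip;		/* host byte order, included in range */
	u_int32_t size;			/* size of members in bytes */
};

struct ip_set_req_foomap_create {
	ip_set_ip_t from;
	ip_set_ip_t to;
};

struct ip_set_req_foomap {
	ip_set_ip_t ip;
};

/* The two hooks BITMAP_CREATE()/BITMAP_LIST_HEADER() expect: */
static int
__foomap_create(const struct ip_set_req_foomap_create *req,
		struct ip_set_foomap *map)
{
	return bitmap_bytes(req->from, req->to);	/* bytes to allocate */
}

static void
__foomap_list_header(const struct ip_set_foomap *map,
		     struct ip_set_req_foomap_create *header)
{
	/* from/to are already filled in by BITMAP_LIST_HEADER() */
}

/* foomap_add/foomap_del/foomap_test and the generated foomap_uadd,
 * foomap_kadd, ... wrappers are assumed to exist; then the rest is: */
BITMAP_CREATE(foomap)
BITMAP_DESTROY(foomap)
BITMAP_FLUSH(foomap)
BITMAP_LIST_HEADER(foomap)
BITMAP_LIST_MEMBERS_SIZE(foomap)
BITMAP_LIST_MEMBERS(foomap)
IP_SET_TYPE(foomap, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
REGISTER_MODULE(foomap)
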
+--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_compat.h
+@@ -0,0 +1,71 @@
++#ifndef _IP_SET_COMPAT_H
++#define _IP_SET_COMPAT_H
++
++#ifdef __KERNEL__
++#include <linux/version.h>
++
++/* Arrgh */
++#ifdef MODULE
++#define __MOD_INC(foo) __MOD_INC_USE_COUNT(foo)
++#define __MOD_DEC(foo) __MOD_DEC_USE_COUNT(foo)
++#else
++#define __MOD_INC(foo) 1
++#define __MOD_DEC(foo)
++#endif
++
++/* Backward compatibility */
++#ifndef __nocast
++#define __nocast
++#endif
++#ifndef __bitwise__
++#define __bitwise__
++#endif
++
++/* Compatibility glue code */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++#include <linux/interrupt.h>
++#define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED
++#define try_module_get(x) __MOD_INC(x)
++#define module_put(x) __MOD_DEC(x)
++#define __clear_bit(nr, addr) clear_bit(nr, addr)
++#define __set_bit(nr, addr) set_bit(nr, addr)
++#define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)
++#define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)
++
++typedef unsigned __bitwise__ gfp_t;
++
++static inline void *kzalloc(size_t size, gfp_t flags)
++{
++ void *data = kmalloc(size, flags);
++
++ if (data)
++ memset(data, 0, size);
++
++ return data;
++}
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++#define __KMEM_CACHE_T__ kmem_cache_t
++#else
++#define __KMEM_CACHE_T__ struct kmem_cache
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
++#define ip_hdr(skb) ((skb)->nh.iph)
++#define skb_mac_header(skb) ((skb)->mac.raw)
++#define eth_hdr(skb) ((struct ethhdr *)skb_mac_header(skb))
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
++#include <linux/netfilter.h>
++#define KMEM_CACHE_CREATE(name, size) \
++ kmem_cache_create(name, size, 0, 0, NULL, NULL)
++#else
++#define KMEM_CACHE_CREATE(name, size) \
++ kmem_cache_create(name, size, 0, 0, NULL)
++#endif
++
++
++#endif /* __KERNEL__ */
++#endif /* _IP_SET_COMPAT_H */
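
As a small illustration of what this glue buys: cache creation can be written once against the wrappers and then builds both before and after the 2.6.23 change that dropped the destructor argument from kmem_cache_create(). The foo_entry structure and the cache name below are invented for the example, and a kernel build environment with the surrounding ipset headers is assumed.

/* Illustration only: a cache set up through the compatibility wrappers. */
struct foo_entry {			/* hypothetical cached object */
	ip_set_ip_t ip;
	unsigned long expires;
};

static __KMEM_CACHE_T__ *foo_cachep;

static int foo_cache_init(void)
{
	foo_cachep = KMEM_CACHE_CREATE("ip_set_foo", sizeof(struct foo_entry));
	return foo_cachep != NULL ? 0 : -ENOMEM;
}
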
+--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_getport.h
+@@ -0,0 +1,48 @@
++#ifndef _IP_SET_GETPORT_H
++#define _IP_SET_GETPORT_H
++
++#ifdef __KERNEL__
++
++#define INVALID_PORT (MAX_RANGE + 1)
++
++/* We must handle non-linear skbs */
++static inline ip_set_ip_t
++get_port(const struct sk_buff *skb, u_int32_t flags)
++{
++ struct iphdr *iph = ip_hdr(skb);
++ u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
++ switch (iph->protocol) {
++ case IPPROTO_TCP: {
++ struct tcphdr tcph;
++
++ /* See comments at tcp_match in ip_tables.c */
++ if (offset)
++ return INVALID_PORT;
++
++ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
++ /* No choice either */
++ return INVALID_PORT;
++
++ return ntohs(flags & IPSET_SRC ?
++ tcph.source : tcph.dest);
++ }
++ case IPPROTO_UDP: {
++ struct udphdr udph;
++
++ if (offset)
++ return INVALID_PORT;
++
++ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
++ /* No choice either */
++ return INVALID_PORT;
++
++ return ntohs(flags & IPSET_SRC ?
++ udph.source : udph.dest);
++ }
++ default:
++ return INVALID_PORT;
++ }
++}
++#endif /* __KERNEL__ */
++
++#endif /*_IP_SET_GETPORT_H*/
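
get_port() above copies the transport header out of the skb with skb_copy_bits(), so it also works on non-linear skbs, and it refuses to guess a port for non-first fragments. The small userspace function below mirrors the same decision logic on a raw IPv4 packet buffer, purely to make the control flow easy to follow; it is not part of the patch, and EXAMPLE_INVALID_PORT merely stands in for INVALID_PORT.

#include <stdint.h>
#include <stddef.h>

#define EXAMPLE_INVALID_PORT 0x10000u	/* stand-in for INVALID_PORT */

/* pkt points at an IPv4 header; src selects source (1) or destination (0) port. */
uint32_t example_get_port(const unsigned char *pkt, size_t len, int src)
{
	size_t hlen, at;
	unsigned int frag_off, proto;

	if (len < 20)
		return EXAMPLE_INVALID_PORT;
	hlen = (pkt[0] & 0x0F) * 4;			/* IHL, in 32-bit words */
	if (hlen < 20 || len < hlen)
		return EXAMPLE_INVALID_PORT;
	frag_off = ((pkt[6] << 8) | pkt[7]) & 0x1FFF;	/* fragment offset */
	proto = pkt[9];

	/* As in get_port(): only the first fragment carries the ports. */
	if (frag_off)
		return EXAMPLE_INVALID_PORT;
	if (proto != 6 /* TCP */ && proto != 17 /* UDP */)
		return EXAMPLE_INVALID_PORT;

	at = hlen + (src ? 0 : 2);			/* offset of the port field */
	if (len < at + 2)
		return EXAMPLE_INVALID_PORT;
	return (uint32_t)((pkt[at] << 8) | pkt[at + 1]);
}
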
+--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_hashes.h
+@@ -0,0 +1,306 @@
++#ifndef __IP_SET_HASHES_H
++#define __IP_SET_HASHES_H
++
++#define initval_t uint32_t
++
++/* Macros to generate functions */
++
++#ifdef __KERNEL__
++#define HASH_RETRY0(type, dtype, cond) \
++static int \
++type##_retry(struct ip_set *set) \
++{ \
++ struct ip_set_##type *map = set->data, *tmp; \
++ dtype *elem; \
++ void *members; \
++ u_int32_t i, hashsize = map->hashsize; \
++ int res; \
++ \
++ if (map->resize == 0) \
++ return -ERANGE; \
++ \
++ again: \
++ res = 0; \
++ \
++ /* Calculate new hash size */ \
++ hashsize += (hashsize * map->resize)/100; \
++ if (hashsize == map->hashsize) \
++ hashsize++; \
++ \
++ ip_set_printk("rehashing of set %s triggered: " \
++ "hashsize grows from %lu to %lu", \
++ set->name, \
++ (long unsigned)map->hashsize, \
++ (long unsigned)hashsize); \
++ \
++ tmp = kmalloc(sizeof(struct ip_set_##type) \
++ + map->probes * sizeof(initval_t), GFP_ATOMIC); \
++ if (!tmp) { \
++ DP("out of memory for %zu bytes", \
++ sizeof(struct ip_set_##type) \
++ + map->probes * sizeof(initval_t)); \
++ return -ENOMEM; \
++ } \
++ tmp->members = harray_malloc(hashsize, sizeof(dtype), GFP_ATOMIC);\
++ if (!tmp->members) { \
++ DP("out of memory for %zu bytes", hashsize * sizeof(dtype));\
++ kfree(tmp); \
++ return -ENOMEM; \
++ } \
++ tmp->hashsize = hashsize; \
++ tmp->elements = 0; \
++ tmp->probes = map->probes; \
++ tmp->resize = map->resize; \
++ memcpy(tmp->initval, map->initval, map->probes * sizeof(initval_t));\
++ __##type##_retry(tmp, map); \
++ \
++ write_lock_bh(&set->lock); \
++ map = set->data; /* Play safe */ \
++ for (i = 0; i < map->hashsize && res == 0; i++) { \
++ elem = HARRAY_ELEM(map->members, dtype *, i); \
++ if (cond) \
++ res = __##type##_add(tmp, elem); \
++ } \
++ if (res) { \
++ /* Failure, try again */ \
++ write_unlock_bh(&set->lock); \
++ harray_free(tmp->members); \
++ kfree(tmp); \
++ goto again; \
++ } \
++ \
++ /* Success at resizing! */ \
++ members = map->members; \
++ \
++ map->hashsize = tmp->hashsize; \
++ map->members = tmp->members; \
++ write_unlock_bh(&set->lock); \
++ \
++ harray_free(members); \
++ kfree(tmp); \
++ \
++ return 0; \
++}
++
++#define HASH_RETRY(type, dtype) \
++ HASH_RETRY0(type, dtype, *elem)
++
++#define HASH_RETRY2(type, dtype) \
++ HASH_RETRY0(type, dtype, elem->ip || elem->ip1)
++
++#define HASH_CREATE(type, dtype) \
++static int \
++type##_create(struct ip_set *set, const void *data, u_int32_t size) \
++{ \
++ const struct ip_set_req_##type##_create *req = data; \
++ struct ip_set_##type *map; \
++ uint16_t i; \
++ \
++ if (req->hashsize < 1) { \
++ ip_set_printk("hashsize too small"); \
++ return -ENOEXEC; \
++ } \
++ \
++ if (req->probes < 1) { \
++ ip_set_printk("probes too small"); \
++ return -ENOEXEC; \
++ } \
++ \
++ map = kmalloc(sizeof(struct ip_set_##type) \
++ + req->probes * sizeof(initval_t), GFP_KERNEL); \
++ if (!map) { \
++ DP("out of memory for %zu bytes", \
++ sizeof(struct ip_set_##type) \
++ + req->probes * sizeof(initval_t)); \
++ return -ENOMEM; \
++ } \
++ for (i = 0; i < req->probes; i++) \
++ get_random_bytes(((initval_t *) map->initval)+i, 4); \
++ map->elements = 0; \
++ map->hashsize = req->hashsize; \
++ map->probes = req->probes; \
++ map->resize = req->resize; \
++ if (__##type##_create(req, map)) { \
++ kfree(map); \
++ return -ENOEXEC; \
++ } \
++ map->members = harray_malloc(map->hashsize, sizeof(dtype), GFP_KERNEL);\
++ if (!map->members) { \
++ DP("out of memory for %zu bytes", map->hashsize * sizeof(dtype));\
++ kfree(map); \
++ return -ENOMEM; \
++ } \
++ \
++ set->data = map; \
++ return 0; \
++}
++
++#define HASH_DESTROY(type) \
++static void \
++type##_destroy(struct ip_set *set) \
++{ \
++ struct ip_set_##type *map = set->data; \
++ \
++ harray_free(map->members); \
++ kfree(map); \
++ \
++ set->data = NULL; \
++}
++
++#define HASH_FLUSH(type, dtype) \
++static void \
++type##_flush(struct ip_set *set) \
++{ \
++ struct ip_set_##type *map = set->data; \
++ harray_flush(map->members, map->hashsize, sizeof(dtype)); \
++ map->elements = 0; \
++}
++
++#define HASH_FLUSH_CIDR(type, dtype) \
++static void \
++type##_flush(struct ip_set *set) \
++{ \
++ struct ip_set_##type *map = set->data; \
++ harray_flush(map->members, map->hashsize, sizeof(dtype)); \
++ memset(map->cidr, 0, sizeof(map->cidr)); \
++ memset(map->nets, 0, sizeof(map->nets)); \
++ map->elements = 0; \
++}
++
++#define HASH_LIST_HEADER(type) \
++static void \
++type##_list_header(const struct ip_set *set, void *data) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ struct ip_set_req_##type##_create *header = data; \
++ \
++ header->hashsize = map->hashsize; \
++ header->probes = map->probes; \
++ header->resize = map->resize; \
++ __##type##_list_header(map, header); \
++}
++
++#define HASH_LIST_MEMBERS_SIZE(type, dtype) \
++static int \
++type##_list_members_size(const struct ip_set *set) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ \
++ return (map->hashsize * sizeof(dtype)); \
++}
++
++#define HASH_LIST_MEMBERS(type, dtype) \
++static void \
++type##_list_members(const struct ip_set *set, void *data) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ dtype *elem; \
++ uint32_t i; \
++ \
++ for (i = 0; i < map->hashsize; i++) { \
++ elem = HARRAY_ELEM(map->members, dtype *, i); \
++ ((dtype *)data)[i] = *elem; \
++ } \
++}
++
++#define HASH_LIST_MEMBERS_MEMCPY(type, dtype) \
++static void \
++type##_list_members(const struct ip_set *set, void *data) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ dtype *elem; \
++ uint32_t i; \
++ \
++ for (i = 0; i < map->hashsize; i++) { \
++ elem = HARRAY_ELEM(map->members, dtype *, i); \
++ memcpy((((dtype *)data)+i), elem, sizeof(dtype)); \
++ } \
++}
++
++#define IP_SET_RTYPE(type, __features) \
++struct ip_set_type ip_set_##type = { \
++ .typename = #type, \
++ .features = __features, \
++ .protocol_version = IP_SET_PROTOCOL_VERSION, \
++ .create = &type##_create, \
++ .retry = &type##_retry, \
++ .destroy = &type##_destroy, \
++ .flush = &type##_flush, \
++ .reqsize = sizeof(struct ip_set_req_##type), \
++ .addip = &type##_uadd, \
++ .addip_kernel = &type##_kadd, \
++ .delip = &type##_udel, \
++ .delip_kernel = &type##_kdel, \
++ .testip = &type##_utest, \
++ .testip_kernel = &type##_ktest, \
++ .header_size = sizeof(struct ip_set_req_##type##_create),\
++ .list_header = &type##_list_header, \
++ .list_members_size = &type##_list_members_size, \
++ .list_members = &type##_list_members, \
++ .me = THIS_MODULE, \
++};
++
++/* Helper functions */
++static inline void
++add_cidr_size(uint8_t *cidr, uint8_t size)
++{
++ uint8_t next;
++ int i;
++
++ for (i = 0; i < 30 && cidr[i]; i++) {
++ if (cidr[i] < size) {
++ next = cidr[i];
++ cidr[i] = size;
++ size = next;
++ }
++ }
++ if (i < 30)
++ cidr[i] = size;
++}
++
++static inline void
++del_cidr_size(uint8_t *cidr, uint8_t size)
++{
++ int i;
++
++ for (i = 0; i < 29 && cidr[i]; i++) {
++ if (cidr[i] == size)
++ cidr[i] = size = cidr[i+1];
++ }
++ cidr[29] = 0;
++}
++#else
++#include <arpa/inet.h>
++#endif /* __KERNEL */
++
++#ifndef UINT16_MAX
++#define UINT16_MAX 65535
++#endif
++
++static unsigned char shifts[] = {255, 253, 249, 241, 225, 193, 129, 1};
++
++static inline ip_set_ip_t
++pack_ip_cidr(ip_set_ip_t ip, unsigned char cidr)
++{
++ ip_set_ip_t addr, *paddr = &addr;
++ unsigned char n, t, *a;
++
++ addr = htonl(ip & (0xFFFFFFFF << (32 - (cidr))));
++#ifdef __KERNEL__
++ DP("ip:%u.%u.%u.%u/%u", NIPQUAD(addr), cidr);
++#endif
++ n = cidr / 8;
++ t = cidr % 8;
++ a = &((unsigned char *)paddr)[n];
++ *a = *a /(1 << (8 - t)) + shifts[t];
++#ifdef __KERNEL__
++ DP("n: %u, t: %u, a: %u", n, t, *a);
++ DP("ip:%u.%u.%u.%u/%u, %u.%u.%u.%u",
++ HIPQUAD(ip), cidr, NIPQUAD(addr));
++#endif
++
++ return ntohl(addr);
++}
++
++
++#endif /* __IP_SET_HASHES_H */
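
pack_ip_cidr() above folds the prefix length into the stored address, so a single ip_set_ip_t can describe a whole network. Because the helper has no kernel dependencies apart from the DP() debug prints guarded by __KERNEL__, it can be exercised verbatim from userspace; the little harness below does just that and is not part of the patch.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

typedef uint32_t ip_set_ip_t;

static unsigned char shifts[] = {255, 253, 249, 241, 225, 193, 129, 1};

/* Same as pack_ip_cidr() in ip_set_hashes.h, minus the DP() debugging. */
static ip_set_ip_t
pack_ip_cidr(ip_set_ip_t ip, unsigned char cidr)
{
	ip_set_ip_t addr, *paddr = &addr;
	unsigned char n, t, *a;

	addr = htonl(ip & (0xFFFFFFFF << (32 - cidr)));
	n = cidr / 8;
	t = cidr % 8;
	a = &((unsigned char *)paddr)[n];
	*a = *a / (1 << (8 - t)) + shifts[t];

	return ntohl(addr);
}

int main(void)
{
	/* 192.168.1.128/24: the network part is kept, the /24 is encoded
	 * into the byte that follows it. */
	printf("packed: 0x%08X\n", pack_ip_cidr(0xC0A80180, 24));
	return 0;
}
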
+--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_iphash.h
@@ -0,0 +1,30 @@
+#ifndef __IP_SET_IPHASH_H
+#define __IP_SET_IPHASH_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_hashes.h>
+
+#define SETTYPE_NAME "iphash"
-+#define MAX_RANGE 0x0000FFFF
+
+struct ip_set_iphash {
+ ip_set_ip_t *members; /* the iphash proper */
@@ -534,7 +1174,7 @@
+ uint16_t probes; /* max number of probes */
+ uint16_t resize; /* resize factor in percent */
+ ip_set_ip_t netmask; /* netmask */
-+ void *initval[0]; /* initvals for jhash_1word */
++ initval_t initval[0]; /* initvals for jhash_1word */
+};
+
+struct ip_set_req_iphash_create {
@@ -551,14 +1191,14 @@
+#endif /* __IP_SET_IPHASH_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_ipmap.h
-@@ -0,0 +1,56 @@
+@@ -0,0 +1,57 @@
+#ifndef __IP_SET_IPMAP_H
+#define __IP_SET_IPMAP_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
+
+#define SETTYPE_NAME "ipmap"
-+#define MAX_RANGE 0x0000FFFF
+
+struct ip_set_ipmap {
+ void *members; /* the ipmap proper */
@@ -567,6 +1207,7 @@
+ ip_set_ip_t netmask; /* subnet netmask */
+ ip_set_ip_t sizeid; /* size of set in IPs */
+ ip_set_ip_t hosts; /* number of hosts in a subnet */
++ u_int32_t size; /* size of the ipmap proper */
+};
+
+struct ip_set_req_ipmap_create {
@@ -579,46 +1220,45 @@
+ ip_set_ip_t ip;
+};
+
-+unsigned int
++static inline unsigned int
+mask_to_bits(ip_set_ip_t mask)
+{
+ unsigned int bits = 32;
+ ip_set_ip_t maskaddr;
-+
++
+ if (mask == 0xFFFFFFFF)
+ return bits;
-+
++
+ maskaddr = 0xFFFFFFFE;
-+ while (--bits >= 0 && maskaddr != mask)
++ while (--bits > 0 && maskaddr != mask)
+ maskaddr <<= 1;
-+
++
+ return bits;
+}
+
-+ip_set_ip_t
++static inline ip_set_ip_t
+range_to_mask(ip_set_ip_t from, ip_set_ip_t to, unsigned int *bits)
+{
+ ip_set_ip_t mask = 0xFFFFFFFE;
-+
++
+ *bits = 32;
-+ while (--(*bits) >= 0 && mask && (to & mask) != from)
++ while (--(*bits) > 0 && mask && (to & mask) != from)
+ mask <<= 1;
-+
++
+ return mask;
+}
-+
++
+#endif /* __IP_SET_IPMAP_H */
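
range_to_mask() above derives the narrowest netmask that maps the range [from, to] onto a single prefix, and mask_to_bits() converts such a mask back to a prefix length; note that the loop conditions are also tightened here from >= 0 to > 0, since bits is unsigned. Both helpers are plain C, so the short userspace harness below (not part of the patch) can call copies of them directly; for 192.168.0.0-192.168.0.255 it reports a /24.

#include <stdio.h>
#include <stdint.h>

typedef uint32_t ip_set_ip_t;

/* Copies of the two helpers from ip_set_ipmap.h. */
static inline unsigned int
mask_to_bits(ip_set_ip_t mask)
{
	unsigned int bits = 32;
	ip_set_ip_t maskaddr;

	if (mask == 0xFFFFFFFF)
		return bits;

	maskaddr = 0xFFFFFFFE;
	while (--bits > 0 && maskaddr != mask)
		maskaddr <<= 1;

	return bits;
}

static inline ip_set_ip_t
range_to_mask(ip_set_ip_t from, ip_set_ip_t to, unsigned int *bits)
{
	ip_set_ip_t mask = 0xFFFFFFFE;

	*bits = 32;
	while (--(*bits) > 0 && mask && (to & mask) != from)
		mask <<= 1;

	return mask;
}

int main(void)
{
	unsigned int bits;
	/* 192.168.0.0 - 192.168.0.255, host byte order */
	ip_set_ip_t mask = range_to_mask(0xC0A80000, 0xC0A800FF, &bits);

	printf("mask 0x%08X -> /%u (round trip: /%u)\n",
	       mask, bits, mask_to_bits(mask));
	return 0;
}
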
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_ipporthash.h
-@@ -0,0 +1,34 @@
+@@ -0,0 +1,33 @@
+#ifndef __IP_SET_IPPORTHASH_H
+#define __IP_SET_IPPORTHASH_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_hashes.h>
+
+#define SETTYPE_NAME "ipporthash"
-+#define MAX_RANGE 0x0000FFFF
-+#define INVALID_PORT (MAX_RANGE + 1)
+
+struct ip_set_ipporthash {
+ ip_set_ip_t *members; /* the ipporthash proper */
@@ -628,7 +1268,7 @@
+ uint16_t resize; /* resize factor in percent */
+ ip_set_ip_t first_ip; /* host byte order, included in range */
+ ip_set_ip_t last_ip; /* host byte order, included in range */
-+ void *initval[0]; /* initvals for jhash_1word */
++ initval_t initval[0]; /* initvals for jhash_1word */
+};
+
+struct ip_set_req_ipporthash_create {
@@ -646,15 +1286,101 @@
+
+#endif /* __IP_SET_IPPORTHASH_H */
--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_ipportiphash.h
+@@ -0,0 +1,39 @@
++#ifndef __IP_SET_IPPORTIPHASH_H
++#define __IP_SET_IPPORTIPHASH_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_hashes.h>
++
++#define SETTYPE_NAME "ipportiphash"
++
++struct ipportip {
++ ip_set_ip_t ip;
++ ip_set_ip_t ip1;
++};
++
++struct ip_set_ipportiphash {
++ struct ipportip *members; /* the ipportip proper */
++ uint32_t elements; /* number of elements */
++ uint32_t hashsize; /* hash size */
++ uint16_t probes; /* max number of probes */
++ uint16_t resize; /* resize factor in percent */
++ ip_set_ip_t first_ip; /* host byte order, included in range */
++ ip_set_ip_t last_ip; /* host byte order, included in range */
++ initval_t initval[0]; /* initvals for jhash_1word */
++};
++
++struct ip_set_req_ipportiphash_create {
++ uint32_t hashsize;
++ uint16_t probes;
++ uint16_t resize;
++ ip_set_ip_t from;
++ ip_set_ip_t to;
++};
++
++struct ip_set_req_ipportiphash {
++ ip_set_ip_t ip;
++ ip_set_ip_t port;
++ ip_set_ip_t ip1;
++};
++
++#endif /* __IP_SET_IPPORTIPHASH_H */
+--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_ipportnethash.h
+@@ -0,0 +1,42 @@
++#ifndef __IP_SET_IPPORTNETHASH_H
++#define __IP_SET_IPPORTNETHASH_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_hashes.h>
++
++#define SETTYPE_NAME "ipportnethash"
++
++struct ipportip {
++ ip_set_ip_t ip;
++ ip_set_ip_t ip1;
++};
++
++struct ip_set_ipportnethash {
++ struct ipportip *members; /* the ipportip proper */
++ uint32_t elements; /* number of elements */
++ uint32_t hashsize; /* hash size */
++ uint16_t probes; /* max number of probes */
++ uint16_t resize; /* resize factor in percent */
++ ip_set_ip_t first_ip; /* host byte order, included in range */
++ ip_set_ip_t last_ip; /* host byte order, included in range */
++ uint8_t cidr[30]; /* CIDR sizes */
++ uint16_t nets[30]; /* nr of nets by CIDR sizes */
++ initval_t initval[0]; /* initvals for jhash_1word */
++};
++
++struct ip_set_req_ipportnethash_create {
++ uint32_t hashsize;
++ uint16_t probes;
++ uint16_t resize;
++ ip_set_ip_t from;
++ ip_set_ip_t to;
++};
++
++struct ip_set_req_ipportnethash {
++ ip_set_ip_t ip;
++ ip_set_ip_t port;
++ ip_set_ip_t ip1;
++ uint8_t cidr;
++};
++
++#endif /* __IP_SET_IPPORTNETHASH_H */
+--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_iptree.h
-@@ -0,0 +1,40 @@
+@@ -0,0 +1,39 @@
+#ifndef __IP_SET_IPTREE_H
+#define __IP_SET_IPTREE_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
+
+#define SETTYPE_NAME "iptree"
-+#define MAX_RANGE 0x0000FFFF
+
+struct ip_set_iptreed {
+ unsigned long expires[256]; /* x.x.x.ADDR */
@@ -726,172 +1452,181 @@
+};
+
+struct ip_set_req_iptreemap {
-+ ip_set_ip_t start;
++ ip_set_ip_t ip;
+ ip_set_ip_t end;
+};
+
+#endif /* __IP_SET_IPTREEMAP_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_jhash.h
-@@ -0,0 +1,148 @@
-+#ifndef _LINUX_IPSET_JHASH_H
-+#define _LINUX_IPSET_JHASH_H
-+
-+/* This is a copy of linux/jhash.h but the types u32/u8 are changed
-+ * to __u32/__u8 so that the header file can be included into
-+ * userspace code as well. Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
-+ */
+@@ -0,0 +1,157 @@
++#ifndef _LINUX_JHASH_H
++#define _LINUX_JHASH_H
+
+/* jhash.h: Jenkins hash support.
+ *
-+ * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)
++ * Copyright (C) 2006. Bob Jenkins (bob_jenkins@burtleburtle.net)
+ *
+ * http://burtleburtle.net/bob/hash/
+ *
+ * These are the credits from Bob's sources:
+ *
-+ * lookup2.c, by Bob Jenkins, December 1996, Public Domain.
-+ * hash(), hash2(), hash3, and mix() are externally useful functions.
-+ * Routines to test the hash are included if SELF_TEST is defined.
-+ * You can use this free for any purpose. It has no warranty.
++ * lookup3.c, by Bob Jenkins, May 2006, Public Domain.
+ *
-+ * Copyright (C) 2003 David S. Miller (davem@redhat.com)
++ * These are functions for producing 32-bit hashes for hash table lookup.
++ * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
++ * are externally useful functions. Routines to test the hash are included
++ * if SELF_TEST is defined. You can use this free for any purpose. It's in
++ * the public domain. It has no warranty.
++ *
++ * Copyright (C) 2009 Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
+ *
+ * I've modified Bob's hash to be useful in the Linux kernel, and
-+ * any bugs present are surely my fault. -DaveM
++ * any bugs present are my fault. Jozsef
+ */
+
-+/* NOTE: Arguments are modified. */
-+#define __jhash_mix(a, b, c) \
++#define __rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))
++
++/* __jhash_mix - mix 3 32-bit values reversibly. */
++#define __jhash_mix(a,b,c) \
+{ \
-+ a -= b; a -= c; a ^= (c>>13); \
-+ b -= c; b -= a; b ^= (a<<8); \
-+ c -= a; c -= b; c ^= (b>>13); \
-+ a -= b; a -= c; a ^= (c>>12); \
-+ b -= c; b -= a; b ^= (a<<16); \
-+ c -= a; c -= b; c ^= (b>>5); \
-+ a -= b; a -= c; a ^= (c>>3); \
-+ b -= c; b -= a; b ^= (a<<10); \
-+ c -= a; c -= b; c ^= (b>>15); \
++ a -= c; a ^= __rot(c, 4); c += b; \
++ b -= a; b ^= __rot(a, 6); a += c; \
++ c -= b; c ^= __rot(b, 8); b += a; \
++ a -= c; a ^= __rot(c,16); c += b; \
++ b -= a; b ^= __rot(a,19); a += c; \
++ c -= b; c ^= __rot(b, 4); b += a; \
++}
++
++/* __jhash_final - final mixing of 3 32-bit values (a,b,c) into c */
++#define __jhash_final(a,b,c) \
++{ \
++ c ^= b; c -= __rot(b,14); \
++ a ^= c; a -= __rot(c,11); \
++ b ^= a; b -= __rot(a,25); \
++ c ^= b; c -= __rot(b,16); \
++ a ^= c; a -= __rot(c,4); \
++ b ^= a; b -= __rot(a,14); \
++ c ^= b; c -= __rot(b,24); \
+}
+
+/* The golden ratio: an arbitrary value */
-+#define JHASH_GOLDEN_RATIO 0x9e3779b9
++#define JHASH_GOLDEN_RATIO 0xdeadbeef
+
+/* The most generic version, hashes an arbitrary sequence
+ * of bytes. No alignment or length assumptions are made about
-+ * the input key.
++ * the input key. The result depends on endianness.
+ */
-+static inline __u32 jhash(void *key, __u32 length, __u32 initval)
++static inline u32 jhash(const void *key, u32 length, u32 initval)
+{
-+ __u32 a, b, c, len;
-+ __u8 *k = key;
-+
-+ len = length;
-+ a = b = JHASH_GOLDEN_RATIO;
-+ c = initval;
++ u32 a,b,c;
++ const u8 *k = key;
+
-+ while (len >= 12) {
-+ a += (k[0] +((__u32)k[1]<<8) +((__u32)k[2]<<16) +((__u32)k[3]<<24));
-+ b += (k[4] +((__u32)k[5]<<8) +((__u32)k[6]<<16) +((__u32)k[7]<<24));
-+ c += (k[8] +((__u32)k[9]<<8) +((__u32)k[10]<<16)+((__u32)k[11]<<24));
-+
-+ __jhash_mix(a,b,c);
++ /* Set up the internal state */
++ a = b = c = JHASH_GOLDEN_RATIO + length + initval;
+
++ /* all but the last block: affect some 32 bits of (a,b,c) */
++ while (length > 12) {
++ a += (k[0] + ((u32)k[1]<<8) + ((u32)k[2]<<16) + ((u32)k[3]<<24));
++ b += (k[4] + ((u32)k[5]<<8) + ((u32)k[6]<<16) + ((u32)k[7]<<24));
++ c += (k[8] + ((u32)k[9]<<8) + ((u32)k[10]<<16) + ((u32)k[11]<<24));
++ __jhash_mix(a, b, c);
++ length -= 12;
+ k += 12;
-+ len -= 12;
+ }
+
-+ c += length;
-+ switch (len) {
-+ case 11: c += ((__u32)k[10]<<24);
-+ case 10: c += ((__u32)k[9]<<16);
-+ case 9 : c += ((__u32)k[8]<<8);
-+ case 8 : b += ((__u32)k[7]<<24);
-+ case 7 : b += ((__u32)k[6]<<16);
-+ case 6 : b += ((__u32)k[5]<<8);
++ /* last block: affect all 32 bits of (c) */
++ /* all the case statements fall through */
++ switch (length) {
++ case 12: c += (u32)k[11]<<24;
++ case 11: c += (u32)k[10]<<16;
++ case 10: c += (u32)k[9]<<8;
++ case 9 : c += k[8];
++ case 8 : b += (u32)k[7]<<24;
++ case 7 : b += (u32)k[6]<<16;
++ case 6 : b += (u32)k[5]<<8;
+ case 5 : b += k[4];
-+ case 4 : a += ((__u32)k[3]<<24);
-+ case 3 : a += ((__u32)k[2]<<16);
-+ case 2 : a += ((__u32)k[1]<<8);
++ case 4 : a += (u32)k[3]<<24;
++ case 3 : a += (u32)k[2]<<16;
++ case 2 : a += (u32)k[1]<<8;
+ case 1 : a += k[0];
-+ };
-+
-+ __jhash_mix(a,b,c);
++ __jhash_final(a, b, c);
++ case 0 :
++ break;
++ }
+
+ return c;
+}
+
-+/* A special optimized version that handles 1 or more of __u32s.
-+ * The length parameter here is the number of __u32s in the key.
++/* A special optimized version that handles 1 or more of u32s.
++ * The length parameter here is the number of u32s in the key.
+ */
-+static inline __u32 jhash2(__u32 *k, __u32 length, __u32 initval)
++static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
+{
-+ __u32 a, b, c, len;
++ u32 a, b, c;
+
-+ a = b = JHASH_GOLDEN_RATIO;
-+ c = initval;
-+ len = length;
++ /* Set up the internal state */
++ a = b = c = JHASH_GOLDEN_RATIO + (length<<2) + initval;
+
-+ while (len >= 3) {
++ /* handle most of the key */
++ while (length > 3) {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ __jhash_mix(a, b, c);
-+ k += 3; len -= 3;
++ length -= 3;
++ k += 3;
+ }
+
-+ c += length * 4;
-+
-+ switch (len) {
-+ case 2 : b += k[1];
-+ case 1 : a += k[0];
-+ };
-+
-+ __jhash_mix(a,b,c);
++ /* handle the last 3 u32's */
++ /* all the case statements fall through */
++ switch (length) {
++ case 3: c += k[2];
++ case 2: b += k[1];
++ case 1: a += k[0];
++ __jhash_final(a, b, c);
++ case 0: /* case 0: nothing left to add */
++ break;
++ }
+
+ return c;
+}
+
-+
+/* A special ultra-optimized versions that knows they are hashing exactly
+ * 3, 2 or 1 word(s).
-+ *
-+ * NOTE: In particular the "c += length; __jhash_mix(a,b,c);" normally
-+ * done at the end is not done here.
+ */
-+static inline __u32 jhash_3words(__u32 a, __u32 b, __u32 c, __u32 initval)
++static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
+{
-+ a += JHASH_GOLDEN_RATIO;
-+ b += JHASH_GOLDEN_RATIO;
-+ c += initval;
++ a += JHASH_GOLDEN_RATIO + initval;
++ b += JHASH_GOLDEN_RATIO + initval;
++ c += JHASH_GOLDEN_RATIO + initval;
+
-+ __jhash_mix(a, b, c);
++ __jhash_final(a, b, c);
+
+ return c;
+}
+
-+static inline __u32 jhash_2words(__u32 a, __u32 b, __u32 initval)
++static inline u32 jhash_2words(u32 a, u32 b, u32 initval)
+{
-+ return jhash_3words(a, b, 0, initval);
++ return jhash_3words(0, a, b, initval);
+}
+
-+static inline __u32 jhash_1word(__u32 a, __u32 initval)
++static inline u32 jhash_1word(u32 a, u32 initval)
+{
-+ return jhash_3words(a, 0, 0, initval);
++ return jhash_3words(0, 0, a, initval);
+}
+
-+#endif /* _LINUX_IPSET_JHASH_H */
++#endif /* _LINUX_JHASH_H */
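
The replacement above switches ip_set_jhash.h from the old lookup2-based code to Bob Jenkins' 2006 lookup3 functions, so hash values change but the calling convention stays the same. The header only needs the u32/u8 types, which the kernel gets from <linux/types.h>; the userspace harness below (not part of the patch, and assuming the patched header is reachable on the include path) typedefs them and calls the hash directly.

#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;
typedef uint8_t u8;

#include <linux/netfilter_ipv4/ip_set_jhash.h>

int main(void)
{
	const char key[] = "192.168.1.1";
	u32 initval = 0x12345678;	/* per-set random seed in practice */

	printf("jhash(\"%s\") = 0x%08x\n", key,
	       jhash(key, sizeof(key) - 1, initval));
	printf("jhash_1word(0xC0A80101) = 0x%08x\n",
	       jhash_1word(0xC0A80101, initval));
	return 0;
}
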
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_macipmap.h
-@@ -0,0 +1,38 @@
+@@ -0,0 +1,39 @@
+#ifndef __IP_SET_MACIPMAP_H
+#define __IP_SET_MACIPMAP_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
+
+#define SETTYPE_NAME "macipmap"
-+#define MAX_RANGE 0x0000FFFF
+
+/* general flags */
+#define IPSET_MACIP_MATCHUNSET 1
@@ -904,6 +1639,7 @@
+ ip_set_ip_t first_ip; /* host byte order, included in range */
+ ip_set_ip_t last_ip; /* host byte order, included in range */
+ u_int32_t flags;
++ u_int32_t size; /* size of the ipmap proper */
+};
+
+struct ip_set_req_macipmap_create {
@@ -918,43 +1654,48 @@
+};
+
+struct ip_set_macip {
-+ unsigned short flags;
++ unsigned short match;
+ unsigned char ethernet[ETH_ALEN];
+};
+
+#endif /* __IP_SET_MACIPMAP_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_malloc.h
-@@ -0,0 +1,116 @@
+@@ -0,0 +1,153 @@
+#ifndef _IP_SET_MALLOC_H
+#define _IP_SET_MALLOC_H
+
+#ifdef __KERNEL__
++#include <linux/vmalloc.h>
+
-+/* Memory allocation and deallocation */
-+static size_t max_malloc_size = 0;
++static size_t max_malloc_size = 0, max_page_size = 0;
++static size_t default_max_malloc_size = 131072; /* Guaranteed: slab.c */
+
-+static inline void init_max_malloc_size(void)
++static inline int init_max_page_size(void)
+{
-+#define CACHE(x) max_malloc_size = x;
++/* Compatibility glues to support 2.4.36 */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++#define __GFP_NOWARN 0
++
++ /* Guaranteed: slab.c */
++ max_malloc_size = max_page_size = default_max_malloc_size;
++#else
++ size_t page_size = 0;
++
++#define CACHE(x) if (max_page_size == 0 || x < max_page_size) \
++ page_size = x;
+#include <linux/kmalloc_sizes.h>
+#undef CACHE
-+}
++ if (page_size) {
++ if (max_malloc_size == 0)
++ max_malloc_size = page_size;
+
-+static inline void * ip_set_malloc(size_t bytes)
-+{
-+ if (bytes > max_malloc_size)
-+ return vmalloc(bytes);
-+ else
-+ return kmalloc(bytes, GFP_KERNEL);
-+}
++ max_page_size = page_size;
+
-+static inline void ip_set_free(void * data, size_t bytes)
-+{
-+ if (bytes > max_malloc_size)
-+ vfree(data);
-+ else
-+ kfree(data);
++ return 1;
++ }
++#endif
++ return 0;
+}
+
+struct harray {
@@ -962,37 +1703,36 @@
+ void *arrays[0];
+};
+
-+static inline void *
-+harray_malloc(size_t hashsize, size_t typesize, int flags)
++static inline void *
++__harray_malloc(size_t hashsize, size_t typesize, gfp_t flags)
+{
+ struct harray *harray;
+ size_t max_elements, size, i, j;
+
-+ if (!max_malloc_size)
-+ init_max_malloc_size();
++ BUG_ON(max_page_size == 0);
+
-+ if (typesize > max_malloc_size)
++ if (typesize > max_page_size)
+ return NULL;
+
-+ max_elements = max_malloc_size/typesize;
++ max_elements = max_page_size/typesize;
+ size = hashsize/max_elements;
+ if (hashsize % max_elements)
+ size++;
-+
++
+ /* Last pointer signals end of arrays */
+ harray = kmalloc(sizeof(struct harray) + (size + 1) * sizeof(void *),
+ flags);
+
+ if (!harray)
+ return NULL;
-+
++
+ for (i = 0; i < size - 1; i++) {
+ harray->arrays[i] = kmalloc(max_elements * typesize, flags);
+ if (!harray->arrays[i])
+ goto undo;
+ memset(harray->arrays[i], 0, max_elements * typesize);
+ }
-+ harray->arrays[i] = kmalloc((hashsize - i * max_elements) * typesize,
++ harray->arrays[i] = kmalloc((hashsize - i * max_elements) * typesize,
+ flags);
+ if (!harray->arrays[i])
+ goto undo;
@@ -1000,7 +1740,7 @@
+
+ harray->max_elements = max_elements;
+ harray->arrays[size] = NULL;
-+
++
+ return (void *)harray;
+
+ undo:
@@ -1011,11 +1751,23 @@
+ return NULL;
+}
+
++static inline void *
++harray_malloc(size_t hashsize, size_t typesize, gfp_t flags)
++{
++ void *harray;
++
++ do {
++ harray = __harray_malloc(hashsize, typesize, flags|__GFP_NOWARN);
++ } while (harray == NULL && init_max_page_size());
++
++ return harray;
++}
++
+static inline void harray_free(void *h)
+{
+ struct harray *harray = (struct harray *) h;
+ size_t i;
-+
++
+ for (i = 0; harray->arrays[i] != NULL; i++)
+ kfree(harray->arrays[i]);
+ kfree(harray);
@@ -1025,10 +1777,10 @@
+{
+ struct harray *harray = (struct harray *) h;
+ size_t i;
-+
++
+ for (i = 0; harray->arrays[i+1] != NULL; i++)
+ memset(harray->arrays[i], 0, harray->max_elements * typesize);
-+ memset(harray->arrays[i], 0,
++ memset(harray->arrays[i], 0,
+ (hashsize - i * harray->max_elements) * typesize);
+}
+
@@ -1039,19 +1791,40 @@
+ + (which)%(__h)->max_elements); \
+})
+
++/* General memory allocation and deallocation */
++static inline void * ip_set_malloc(size_t bytes)
++{
++ BUG_ON(max_malloc_size == 0);
++
++ if (bytes > default_max_malloc_size)
++ return vmalloc(bytes);
++ else
++ return kmalloc(bytes, GFP_KERNEL | __GFP_NOWARN);
++}
++
++static inline void ip_set_free(void * data, size_t bytes)
++{
++ BUG_ON(max_malloc_size == 0);
++
++ if (bytes > default_max_malloc_size)
++ vfree(data);
++ else
++ kfree(data);
++}
++
+#endif /* __KERNEL__ */
+
+#endif /*_IP_SET_MALLOC_H*/
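
The harray ("hash array") machinery above exists because a large hash table may not fit into a single kmalloc() allocation: the table is split into chunks of at most max_page_size bytes, and HARRAY_ELEM() translates a logical slot number into a chunk and an offset. The simplified userspace model below is not part of the patch; everything prefixed ex_ is invented and plain malloc/calloc stand in for the kernel allocators. It shows only the chunking and indexing idea.

#include <stdio.h>
#include <stdlib.h>

/* One logical array split into fixed-size chunks. */
struct ex_harray {
	size_t max_elements;		/* elements per chunk */
	size_t chunks;
	void **arrays;
};

static struct ex_harray *
ex_harray_malloc(size_t hashsize, size_t typesize, size_t chunk_bytes)
{
	struct ex_harray *h = malloc(sizeof(*h));
	size_t i;

	if (!h)
		return NULL;
	h->max_elements = chunk_bytes / typesize;
	h->chunks = (hashsize + h->max_elements - 1) / h->max_elements;
	h->arrays = calloc(h->chunks, sizeof(void *));
	if (!h->arrays) {
		free(h);
		return NULL;
	}
	for (i = 0; i < h->chunks; i++) {
		h->arrays[i] = calloc(h->max_elements, typesize);
		if (!h->arrays[i])	/* a real version would unwind here */
			return NULL;
	}
	return h;
}

/* Counterpart of HARRAY_ELEM(): pick the chunk, then the slot inside it. */
#define EX_HARRAY_ELEM(h, type, which) \
	(((type)(h)->arrays[(which) / (h)->max_elements]) \
	 + (which) % (h)->max_elements)

int main(void)
{
	struct ex_harray *h = ex_harray_malloc(100000, sizeof(unsigned), 4096);

	if (!h)
		return 1;
	*EX_HARRAY_ELEM(h, unsigned *, 99999) = 42;
	printf("slot 99999 holds %u\n", *EX_HARRAY_ELEM(h, unsigned *, 99999));
	return 0;
}
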
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_nethash.h
-@@ -0,0 +1,55 @@
+@@ -0,0 +1,31 @@
+#ifndef __IP_SET_NETHASH_H
+#define __IP_SET_NETHASH_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_hashes.h>
+
+#define SETTYPE_NAME "nethash"
-+#define MAX_RANGE 0x0000FFFF
+
+struct ip_set_nethash {
+ ip_set_ip_t *members; /* the nethash proper */
@@ -1059,8 +1832,9 @@
+ uint32_t hashsize; /* hash size */
+ uint16_t probes; /* max number of probes */
+ uint16_t resize; /* resize factor in percent */
-+ unsigned char cidr[30]; /* CIDR sizes */
-+ void *initval[0]; /* initvals for jhash_1word */
++ uint8_t cidr[30]; /* CIDR sizes */
++ uint16_t nets[30]; /* nr of nets by CIDR sizes */
++ initval_t initval[0]; /* initvals for jhash_1word */
+};
+
+struct ip_set_req_nethash_create {
@@ -1071,34 +1845,9 @@
+
+struct ip_set_req_nethash {
+ ip_set_ip_t ip;
-+ unsigned char cidr;
++ uint8_t cidr;
+};
+
-+static unsigned char shifts[] = {255, 253, 249, 241, 225, 193, 129, 1};
-+
-+static inline ip_set_ip_t
-+pack(ip_set_ip_t ip, unsigned char cidr)
-+{
-+ ip_set_ip_t addr, *paddr = &addr;
-+ unsigned char n, t, *a;
-+
-+ addr = htonl(ip & (0xFFFFFFFF << (32 - (cidr))));
-+#ifdef __KERNEL__
-+ DP("ip:%u.%u.%u.%u/%u", NIPQUAD(addr), cidr);
-+#endif
-+ n = cidr / 8;
-+ t = cidr % 8;
-+ a = &((unsigned char *)paddr)[n];
-+ *a = *a /(1 << (8 - t)) + shifts[t];
-+#ifdef __KERNEL__
-+ DP("n: %u, t: %u, a: %u", n, t, *a);
-+ DP("ip:%u.%u.%u.%u/%u, %u.%u.%u.%u",
-+ HIPQUAD(ip), cidr, NIPQUAD(addr));
-+#endif
-+
-+ return ntohl(addr);
-+}
-+
+#endif /* __IP_SET_NETHASH_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_portmap.h
@@ -1107,15 +1856,15 @@
+#define __IP_SET_PORTMAP_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
+
+#define SETTYPE_NAME "portmap"
-+#define MAX_RANGE 0x0000FFFF
-+#define INVALID_PORT (MAX_RANGE + 1)
+
+struct ip_set_portmap {
+ void *members; /* the portmap proper */
-+ ip_set_ip_t first_port; /* host byte order, included in range */
-+ ip_set_ip_t last_port; /* host byte order, included in range */
++ ip_set_ip_t first_ip; /* host byte order, included in range */
++ ip_set_ip_t last_ip; /* host byte order, included in range */
++ u_int32_t size; /* size of the ipmap proper */
+};
+
+struct ip_set_req_portmap_create {
@@ -1124,11 +1873,40 @@
+};
+
+struct ip_set_req_portmap {
-+ ip_set_ip_t port;
++ ip_set_ip_t ip;
+};
+
+#endif /* __IP_SET_PORTMAP_H */
--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_setlist.h
+@@ -0,0 +1,26 @@
++#ifndef __IP_SET_SETLIST_H
++#define __IP_SET_SETLIST_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "setlist"
++
++#define IP_SET_SETLIST_ADD_AFTER 0
++#define IP_SET_SETLIST_ADD_BEFORE 1
++
++struct ip_set_setlist {
++ uint8_t size;
++ ip_set_id_t index[0];
++};
++
++struct ip_set_req_setlist_create {
++ uint8_t size;
++};
++
++struct ip_set_req_setlist {
++ char name[IP_SET_MAXNAMELEN];
++ char ref[IP_SET_MAXNAMELEN];
++ uint8_t before;
++};
++
++#endif /* __IP_SET_SETLIST_H */
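
The new setlist type stores an ordered list of references to other sets, and the request structure above carries the name to insert, an optional reference entry, and a before/after flag. The fragment below only sketches how a userspace tool might fill such a request to insert one set before another; the set names are made up, the include path is assumed, and the actual submission through the usual ipset socket options is omitted.

#include <string.h>
#include <linux/netfilter_ipv4/ip_set_setlist.h>

/* Fill a request meaning: add "blacklist4" before the existing
 * member "blacklist6" (names invented for the example). */
static void fill_setlist_req(struct ip_set_req_setlist *req)
{
	memset(req, 0, sizeof(*req));
	strncpy(req->name, "blacklist4", IP_SET_MAXNAMELEN - 1);
	strncpy(req->ref, "blacklist6", IP_SET_MAXNAMELEN - 1);
	req->before = IP_SET_SETLIST_ADD_BEFORE;
}
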
+--- /dev/null
+++ b/include/linux/netfilter_ipv4/ipt_set.h
@@ -0,0 +1,21 @@
+#ifndef _IPT_SET_H
@@ -1154,7 +1932,7 @@
+#endif /*_IPT_SET_H*/
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set.c
-@@ -0,0 +1,2003 @@
+@@ -0,0 +1,2076 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
@@ -1176,17 +1954,21 @@
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/random.h>
-+#include <linux/jhash.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
+#include <linux/errno.h>
-+#include <linux/semaphore.h>
++#include <linux/capability.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
++#include <asm/semaphore.h>
++#else
++#include <linux/semaphore.h>
++#endif
+#include <linux/spinlock.h>
-+#include <linux/vmalloc.h>
+
+#define ASSERT_READ_LOCK(x)
+#define ASSERT_WRITE_LOCK(x)
++#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4/ip_set.h>
+
+static struct list_head set_type_list; /* all registered sets */
@@ -1198,6 +1980,8 @@
+static struct list_head *ip_set_hash; /* hash of bindings */
+static unsigned int ip_set_hash_random; /* random seed */
+
++#define SETNAME_EQ(a,b) (strncmp(a,b,IP_SET_MAXNAMELEN) == 0)
++
+/*
+ * Sets are identified either by the index in ip_set_list or by id.
+ * The id never changes and is used to find a key in the hash.
@@ -1236,7 +2020,7 @@
+ list_for_each_entry(set_hash, &ip_set_hash[key], list)
+ if (set_hash->id == id && set_hash->ip == ip)
+ return set_hash;
-+
++
+ return NULL;
+}
+
@@ -1249,10 +2033,10 @@
+
+ ASSERT_READ_LOCK(&ip_set_lock);
+ IP_SET_ASSERT(ip_set_list[id]);
-+ DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
-+
++ DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
++
+ set_hash = __ip_set_find(key, id, ip);
-+
++
+ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
+ HIPQUAD(ip),
+ set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
@@ -1264,7 +2048,7 @@
+__set_hash_del(struct ip_set_hash *set_hash)
+{
+ ASSERT_WRITE_LOCK(&ip_set_lock);
-+ IP_SET_ASSERT(ip_set_list[set_hash->binding]);
++ IP_SET_ASSERT(ip_set_list[set_hash->binding]);
+
+ __ip_set_put(set_hash->binding);
+ list_del(&set_hash->list);
@@ -1277,9 +2061,9 @@
+ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
+ % ip_set_bindings_hash_size;
+ struct ip_set_hash *set_hash;
-+
++
+ IP_SET_ASSERT(ip_set_list[id]);
-+ DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
++ DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
+ write_lock_bh(&ip_set_lock);
+ set_hash = __ip_set_find(key, id, ip);
+ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
@@ -1288,7 +2072,7 @@
+
+ if (set_hash != NULL)
+ __set_hash_del(set_hash);
-+ write_unlock_bh(&ip_set_lock);
++ write_unlock_bh(&ip_set_lock);
+ return 0;
+}
+
@@ -1299,7 +2083,7 @@
+ % ip_set_bindings_hash_size;
+ struct ip_set_hash *set_hash;
+ int ret = 0;
-+
++
+ IP_SET_ASSERT(ip_set_list[id]);
+ IP_SET_ASSERT(ip_set_list[binding]);
+ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
@@ -1317,7 +2101,7 @@
+ set_hash->ip = ip;
+ list_add(&set_hash->list, &ip_set_hash[key]);
+ } else {
-+ IP_SET_ASSERT(ip_set_list[set_hash->binding]);
++ IP_SET_ASSERT(ip_set_list[set_hash->binding]);
+ DP("overwrite binding: %s",
+ ip_set_list[set_hash->binding]->name);
+ __ip_set_put(set_hash->binding);
@@ -1370,7 +2154,7 @@
+ ip_set_ip_t ip;
+ int res;
+ unsigned char i = 0;
-+
++
+ IP_SET_ASSERT(flags[i]);
+ read_lock_bh(&ip_set_lock);
+ do {
@@ -1386,10 +2170,10 @@
+ && follow_bindings(index, set, ip));
+ read_unlock_bh(&ip_set_lock);
+
-+ return res;
++ return (res < 0 ? 0 : res);
+}
+
-+void
++int
+ip_set_addip_kernel(ip_set_id_t index,
+ const struct sk_buff *skb,
+ const u_int32_t *flags)
@@ -1419,9 +2203,11 @@
+ && set->type->retry
+ && (res = set->type->retry(set)) == 0)
+ goto retry;
++
++ return res;
+}
+
-+void
++int
+ip_set_delip_kernel(ip_set_id_t index,
+ const struct sk_buff *skb,
+ const u_int32_t *flags)
@@ -1445,6 +2231,8 @@
+ && flags[i]
+ && follow_bindings(index, set, ip));
+ read_unlock_bh(&ip_set_lock);
++
++ return res;
+}
+
+/* Register and deregister settype */
@@ -1464,7 +2252,7 @@
+ip_set_register_set_type(struct ip_set_type *set_type)
+{
+ int ret = 0;
-+
++
+ if (set_type->protocol_version != IP_SET_PROTOCOL_VERSION) {
+ ip_set_printk("'%s' uses wrong protocol version %u (want %u)",
+ set_type->typename,
@@ -1509,6 +2297,29 @@
+
+}
+
++ip_set_id_t
++__ip_set_get_byname(const char *name, struct ip_set **set)
++{
++ ip_set_id_t i, index = IP_SET_INVALID_ID;
++
++ for (i = 0; i < ip_set_max; i++) {
++ if (ip_set_list[i] != NULL
++ && SETNAME_EQ(ip_set_list[i]->name, name)) {
++ __ip_set_get(i);
++ index = i;
++ *set = ip_set_list[i];
++ break;
++ }
++ }
++ return index;
++}
++
++void __ip_set_put_byindex(ip_set_id_t index)
++{
++ if (ip_set_list[index])
++ __ip_set_put(index);
++}
++
+/*
+ * Userspace routines
+ */
@@ -1522,11 +2333,11 @@
+ip_set_get_byname(const char *name)
+{
+ ip_set_id_t i, index = IP_SET_INVALID_ID;
-+
++
+ down(&ip_set_app_mutex);
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL
-+ && strcmp(ip_set_list[i]->name, name) == 0) {
++ && SETNAME_EQ(ip_set_list[i]->name, name)) {
+ __ip_set_get(i);
+ index = i;
+ break;
@@ -1548,22 +2359,36 @@
+
+ if (index >= ip_set_max)
+ return IP_SET_INVALID_ID;
-+
++
+ if (ip_set_list[index])
+ __ip_set_get(index);
+ else
+ index = IP_SET_INVALID_ID;
-+
++
+ up(&ip_set_app_mutex);
+ return index;
+}
+
+/*
++ * Find the set id belonging to the index.
++ * We are protected by the mutex, so we do not need to use
++ * ip_set_lock. There is no need to reference the sets either.
++ */
++ip_set_id_t
++ip_set_id(ip_set_id_t index)
++{
++ if (index >= ip_set_max || !ip_set_list[index])
++ return IP_SET_INVALID_ID;
++
++ return ip_set_list[index]->id;
++}
++
++/*
+ * If the given set pointer points to a valid set, decrement
+ * reference count by 1. The caller shall not assume the index
+ * to be valid, after calling this function.
+ */
-+void ip_set_put(ip_set_id_t index)
++void ip_set_put_byindex(ip_set_id_t index)
+{
+ down(&ip_set_app_mutex);
+ if (ip_set_list[index])
@@ -1576,10 +2401,10 @@
+ip_set_find_byname(const char *name)
+{
+ ip_set_id_t i, index = IP_SET_INVALID_ID;
-+
++
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL
-+ && strcmp(ip_set_list[i]->name, name) == 0) {
++ && SETNAME_EQ(ip_set_list[i]->name, name)) {
+ index = i;
+ break;
+ }
@@ -1592,7 +2417,7 @@
+{
+ if (index >= ip_set_max || ip_set_list[index] == NULL)
+ index = IP_SET_INVALID_ID;
-+
++
+ return index;
+}
+
@@ -1603,7 +2428,7 @@
+static inline int
+__ip_set_testip(struct ip_set *set,
+ const void *data,
-+ size_t size,
++ u_int32_t size,
+ ip_set_ip_t *ip)
+{
+ int res;
@@ -1618,12 +2443,12 @@
+static int
+__ip_set_addip(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set = ip_set_list[index];
+ ip_set_ip_t ip;
+ int res;
-+
++
+ IP_SET_ASSERT(set);
+ do {
+ write_lock_bh(&set->lock);
@@ -1639,9 +2464,18 @@
+static int
+ip_set_addip(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
++ struct ip_set *set = ip_set_list[index];
++
++ IP_SET_ASSERT(set);
+
++ if (size - sizeof(struct ip_set_req_adt) != set->type->reqsize) {
++ ip_set_printk("data length wrong (want %lu, have %zu)",
++ (long unsigned)set->type->reqsize,
++ size - sizeof(struct ip_set_req_adt));
++ return -EINVAL;
++ }
+ return __ip_set_addip(index,
+ data + sizeof(struct ip_set_req_adt),
+ size - sizeof(struct ip_set_req_adt));
@@ -1650,13 +2484,20 @@
+static int
+ip_set_delip(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set = ip_set_list[index];
+ ip_set_ip_t ip;
+ int res;
-+
++
+ IP_SET_ASSERT(set);
++
++ if (size - sizeof(struct ip_set_req_adt) != set->type->reqsize) {
++ ip_set_printk("data length wrong (want %lu, have %zu)",
++ (long unsigned)set->type->reqsize,
++ size - sizeof(struct ip_set_req_adt));
++ return -EINVAL;
++ }
+ write_lock_bh(&set->lock);
+ res = set->type->delip(set,
+ data + sizeof(struct ip_set_req_adt),
@@ -1670,13 +2511,20 @@
+static int
+ip_set_testip(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set = ip_set_list[index];
+ ip_set_ip_t ip;
+ int res;
+
+ IP_SET_ASSERT(set);
++
++ if (size - sizeof(struct ip_set_req_adt) != set->type->reqsize) {
++ ip_set_printk("data length wrong (want %lu, have %zu)",
++ (long unsigned)set->type->reqsize,
++ size - sizeof(struct ip_set_req_adt));
++ return -EINVAL;
++ }
+ res = __ip_set_testip(set,
+ data + sizeof(struct ip_set_req_adt),
+ size - sizeof(struct ip_set_req_adt),
@@ -1688,10 +2536,10 @@
+static int
+ip_set_bindip(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set = ip_set_list[index];
-+ struct ip_set_req_bind *req_bind;
++ const struct ip_set_req_bind *req_bind;
+ ip_set_id_t binding;
+ ip_set_ip_t ip;
+ int res;
@@ -1699,19 +2547,17 @@
+ IP_SET_ASSERT(set);
+ if (size < sizeof(struct ip_set_req_bind))
+ return -EINVAL;
++
++ req_bind = data;
+
-+ req_bind = (struct ip_set_req_bind *) data;
-+ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
-+
-+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++ if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
+ /* Default binding of a set */
-+ char *binding_name;
-+
++ const char *binding_name;
++
+ if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
+ return -EINVAL;
+
-+ binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
-+ binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
++ binding_name = data + sizeof(struct ip_set_req_bind);
+
+ binding = ip_set_find_byname(binding_name);
+ if (binding == IP_SET_INVALID_ID)
@@ -1737,7 +2583,7 @@
+ &ip);
+ DP("set %s, ip: %u.%u.%u.%u, binding %s",
+ set->name, HIPQUAD(ip), ip_set_list[binding]->name);
-+
++
+ if (res >= 0)
+ res = ip_set_hash_add(set->id, ip, binding);
+
@@ -1776,30 +2622,29 @@
+static int
+ip_set_unbindip(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set;
-+ struct ip_set_req_bind *req_bind;
++ const struct ip_set_req_bind *req_bind;
+ ip_set_ip_t ip;
+ int res;
+
+ DP("");
+ if (size < sizeof(struct ip_set_req_bind))
+ return -EINVAL;
-+
-+ req_bind = (struct ip_set_req_bind *) data;
-+ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
-+
++
++ req_bind = data;
++
+ DP("%u %s", index, req_bind->binding);
+ if (index == IP_SET_INVALID_ID) {
+ /* unbind :all: */
-+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++ if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
+ /* Default binding of sets */
+ write_lock_bh(&ip_set_lock);
+ FOREACH_SET_DO(__unbind_default);
+ write_unlock_bh(&ip_set_lock);
+ return 0;
-+ } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
++ } else if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_ALL)) {
+ /* Flush all bindings of all sets*/
+ write_lock_bh(&ip_set_lock);
+ FOREACH_HASH_RW_DO(__set_hash_del);
@@ -1809,16 +2654,16 @@
+ DP("unreachable reached!");
+ return -EINVAL;
+ }
-+
++
+ set = ip_set_list[index];
+ IP_SET_ASSERT(set);
-+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++ if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
+ /* Default binding of set */
+ ip_set_id_t binding = ip_set_find_byindex(set->binding);
+
+ if (binding == IP_SET_INVALID_ID)
+ return -ENOENT;
-+
++
+ write_lock_bh(&ip_set_lock);
+ /* Sets in hash values are referenced */
+ __ip_set_put(set->binding);
@@ -1826,7 +2671,7 @@
+ write_unlock_bh(&ip_set_lock);
+
+ return 0;
-+ } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
++ } else if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_ALL)) {
+ /* Flush all bindings */
+
+ write_lock_bh(&ip_set_lock);
@@ -1834,7 +2679,7 @@
+ write_unlock_bh(&ip_set_lock);
+ return 0;
+ }
-+
++
+ res = __ip_set_testip(set,
+ data + sizeof(struct ip_set_req_bind),
+ size - sizeof(struct ip_set_req_bind),
@@ -1850,10 +2695,10 @@
+static int
+ip_set_testbind(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set = ip_set_list[index];
-+ struct ip_set_req_bind *req_bind;
++ const struct ip_set_req_bind *req_bind;
+ ip_set_id_t binding;
+ ip_set_ip_t ip;
+ int res;
@@ -1861,24 +2706,22 @@
+ IP_SET_ASSERT(set);
+ if (size < sizeof(struct ip_set_req_bind))
+ return -EINVAL;
++
++ req_bind = data;
+
-+ req_bind = (struct ip_set_req_bind *) data;
-+ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
-+
-+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++ if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
+ /* Default binding of set */
-+ char *binding_name;
-+
++ const char *binding_name;
++
+ if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
+ return -EINVAL;
+
-+ binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
-+ binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
++ binding_name = data + sizeof(struct ip_set_req_bind);
+
+ binding = ip_set_find_byname(binding_name);
+ if (binding == IP_SET_INVALID_ID)
+ return -ENOENT;
-+
++
+ res = (set->binding == binding) ? -EEXIST : 0;
+
+ return res;
@@ -1886,15 +2729,15 @@
+ binding = ip_set_find_byname(req_bind->binding);
+ if (binding == IP_SET_INVALID_ID)
+ return -ENOENT;
-+
-+
++
++
+ res = __ip_set_testip(set,
+ data + sizeof(struct ip_set_req_bind),
+ size - sizeof(struct ip_set_req_bind),
+ &ip);
+ DP("set %s, ip: %u.%u.%u.%u, binding %s",
+ set->name, HIPQUAD(ip), ip_set_list[binding]->name);
-+
++
+ if (res >= 0)
+ res = (ip_set_find_in_hash(set->id, ip) == binding)
+ ? -EEXIST : 0;
@@ -1906,7 +2749,7 @@
+find_set_type_rlock(const char *typename)
+{
+ struct ip_set_type *type;
-+
++
+ read_lock_bh(&ip_set_lock);
+ type = find_set_type(typename);
+ if (type == NULL)
@@ -1927,7 +2770,7 @@
+ if (ip_set_list[i] == NULL) {
+ if (*id == IP_SET_INVALID_ID)
+ *id = *index = i;
-+ } else if (strcmp(name, ip_set_list[i]->name) == 0)
++ } else if (SETNAME_EQ(name, ip_set_list[i]->name))
+ /* Name clash */
+ return -EEXIST;
+ }
@@ -1935,7 +2778,7 @@
+ /* No free slot remained */
+ return -ERANGE;
+ /* Check that index is usable as id (swapping) */
-+ check:
++ check:
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL
+ && ip_set_list[i]->id == *id) {
@@ -1954,13 +2797,14 @@
+ const char *typename,
+ ip_set_id_t restore,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set;
+ ip_set_id_t index = 0, id;
+ int res = 0;
+
+ DP("setname: %s, typename: %s, id: %u", name, typename, restore);
++
+ /*
+ * First, and without any locks, allocate and initialize
+ * a normal base set structure.
@@ -1968,7 +2812,7 @@
+ set = kmalloc(sizeof(struct ip_set), GFP_KERNEL);
+ if (!set)
+ return -ENOMEM;
-+ set->lock = RW_LOCK_UNLOCKED;
++ rwlock_init(&set->lock);
+ strncpy(set->name, name, IP_SET_MAXNAMELEN);
+ set->binding = IP_SET_INVALID_ID;
+ atomic_set(&set->ref, 0);
@@ -2004,6 +2848,14 @@
+ }
+ read_unlock_bh(&ip_set_lock);
+
++ /* Check request size */
++ if (size != set->type->header_size) {
++ ip_set_printk("data length wrong (want %lu, have %lu)",
++ (long unsigned)set->type->header_size,
++ (long unsigned)size);
++ goto put_out;
++ }
++
+ /*
+ * Without holding any locks, create private part.
+ */
@@ -2030,7 +2882,7 @@
+ res = -ERANGE;
+ goto cleanup;
+ }
-+
++
+ /*
+ * Finally! Add our shiny new set to the list, and be done.
+ */
@@ -2039,7 +2891,7 @@
+ ip_set_list[index] = set;
+ write_unlock_bh(&ip_set_lock);
+ return res;
-+
++
+ cleanup:
+ write_unlock_bh(&ip_set_lock);
+ set->type->destroy(set);
@@ -2139,9 +2991,7 @@
+ write_lock_bh(&ip_set_lock);
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL
-+ && strncmp(ip_set_list[i]->name,
-+ name,
-+ IP_SET_MAXNAMELEN - 1) == 0) {
++ && SETNAME_EQ(ip_set_list[i]->name, name)) {
+ res = -EEXIST;
+ goto unlock;
+ }
@@ -2165,11 +3015,13 @@
+ u_int32_t from_ref;
+
+ DP("set: %s to %s", from->name, to->name);
-+ /* Features must not change. Artifical restriction. */
++ /* Features must not change.
++ * Not an artifical restriction anymore, as we must prevent
++ * possible loops created by swapping in setlist type of sets. */
+ if (from->type->features != to->type->features)
+ return -ENOEXEC;
+
-+ /* No magic here: ref munging protected by the mutex */
++ /* No magic here: ref munging protected by the mutex */
+ write_lock_bh(&ip_set_lock);
+ strncpy(from_name, from->name, IP_SET_MAXNAMELEN);
+ from_ref = atomic_read(&from->ref);
@@ -2178,10 +3030,10 @@
+ atomic_set(&from->ref, atomic_read(&to->ref));
+ strncpy(to->name, from_name, IP_SET_MAXNAMELEN);
+ atomic_set(&to->ref, from_ref);
-+
++
+ ip_set_list[from_index] = to;
+ ip_set_list[to_index] = from;
-+
++
+ write_unlock_bh(&ip_set_lock);
+ return 0;
+}
@@ -2192,7 +3044,7 @@
+
+static inline void
+__set_hash_bindings_size_list(struct ip_set_hash *set_hash,
-+ ip_set_id_t id, size_t *size)
++ ip_set_id_t id, u_int32_t *size)
+{
+ if (set_hash->id == id)
+ *size += sizeof(struct ip_set_hash_list);
@@ -2200,7 +3052,7 @@
+
+static inline void
+__set_hash_bindings_size_save(struct ip_set_hash *set_hash,
-+ ip_set_id_t id, size_t *size)
++ ip_set_id_t id, u_int32_t *size)
+{
+ if (set_hash->id == id)
+ *size += sizeof(struct ip_set_hash_save);
@@ -2211,8 +3063,7 @@
+ ip_set_id_t id, void *data, int *used)
+{
+ if (set_hash->id == id) {
-+ struct ip_set_hash_list *hash_list =
-+ (struct ip_set_hash_list *)(data + *used);
++ struct ip_set_hash_list *hash_list = data + *used;
+
+ hash_list->ip = set_hash->ip;
+ hash_list->binding = set_hash->binding;
@@ -2229,7 +3080,7 @@
+ struct ip_set_list *set_list;
+
+ /* Pointer to our header */
-+ set_list = (struct ip_set_list *) (data + *used);
++ set_list = data + *used;
+
+ DP("set: %s, used: %d %p %p", set->name, *used, data, data + *used);
+
@@ -2274,7 +3125,7 @@
+
+ /* Fill in set spefific bindings data */
+ FOREACH_HASH_DO(__set_hash_bindings, set->id, data, used);
-+
++
+ return 0;
+
+ unlock_set:
@@ -2296,7 +3147,7 @@
+ struct ip_set_save *set_save;
+
+ /* Pointer to our header */
-+ set_save = (struct ip_set_save *) (data + *used);
++ set_save = data + *used;
+
+ /* Get and ensure header size */
+ if (*used + sizeof(struct ip_set_save) > len)
@@ -2304,7 +3155,7 @@
+ *used += sizeof(struct ip_set_save);
+
+ set = ip_set_list[index];
-+ DP("set: %s, used: %u(%u) %p %p", set->name, *used, len,
++ DP("set: %s, used: %d(%d) %p %p", set->name, *used, len,
+ data, data + *used);
+
+ read_lock_bh(&set->lock);
@@ -2321,8 +3172,8 @@
+ set->type->list_header(set, data + *used);
+ *used += set_save->header_size;
+
-+ DP("set header filled: %s, used: %u(%u) %p %p", set->name, *used,
-+ set_save->header_size, data, data + *used);
++ DP("set header filled: %s, used: %d(%lu) %p %p", set->name, *used,
++ (unsigned long)set_save->header_size, data, data + *used);
+ /* Get and ensure set specific members size */
+ set_save->members_size = set->type->list_members_size(set);
+ if (*used + set_save->members_size > len)
@@ -2332,8 +3183,8 @@
+ set->type->list_members(set, data + *used);
+ *used += set_save->members_size;
+ read_unlock_bh(&set->lock);
-+ DP("set members filled: %s, used: %u(%u) %p %p", set->name, *used,
-+ set_save->members_size, data, data + *used);
++ DP("set members filled: %s, used: %d(%lu) %p %p", set->name, *used,
++ (unsigned long)set_save->members_size, data, data + *used);
+ return 0;
+
+ unlock_set:
@@ -2353,8 +3204,7 @@
+{
+ if (*res == 0
+ && (id == IP_SET_INVALID_ID || set_hash->id == id)) {
-+ struct ip_set_hash_save *hash_save =
-+ (struct ip_set_hash_save *)(data + *used);
++ struct ip_set_hash_save *hash_save = data + *used;
+ /* Ensure bindings size */
+ if (*used + sizeof(struct ip_set_hash_save) > len) {
+ *res = -ENOMEM;
@@ -2381,7 +3231,7 @@
+ return -ENOMEM;
+
+ /* Marker */
-+ set_save = (struct ip_set_save *) (data + *used);
++ set_save = data + *used;
+ set_save->index = IP_SET_INVALID_ID;
+ set_save->header_size = 0;
+ set_save->members_size = 0;
@@ -2394,7 +3244,7 @@
+ index = ip_set_list[index]->id;
+ FOREACH_HASH_DO(__set_hash_save_bindings, index, data, used, len, &res);
+
-+ return res;
++ return res;
+}
+
+/*
@@ -2413,12 +3263,12 @@
+ /* Loop to restore sets */
+ while (1) {
+ line++;
-+
-+ DP("%u %u %u", used, sizeof(struct ip_set_restore), len);
++
++ DP("%d %zu %d", used, sizeof(struct ip_set_restore), len);
+ /* Get and ensure header size */
+ if (used + sizeof(struct ip_set_restore) > len)
+ return line;
-+ set_restore = (struct ip_set_restore *) (data + used);
++ set_restore = data + used;
+ used += sizeof(struct ip_set_restore);
+
+ /* Ensure data size */
@@ -2432,7 +3282,7 @@
+ line--;
+ goto bindings;
+ }
-+
++
+ /* Try to create the set */
+ DP("restore %s %s", set_restore->name, set_restore->typename);
+ res = ip_set_create(set_restore->name,
@@ -2440,7 +3290,7 @@
+ set_restore->index,
+ data + used,
+ set_restore->header_size);
-+
++
+ if (res != 0)
+ return line;
+ used += set_restore->header_size;
@@ -2452,12 +3302,13 @@
+ /* Try to restore members data */
+ set = ip_set_list[index];
+ members_size = 0;
-+ DP("members_size %u reqsize %u",
-+ set_restore->members_size, set->type->reqsize);
++ DP("members_size %lu reqsize %lu",
++ (unsigned long)set_restore->members_size,
++ (unsigned long)set->type->reqsize);
+ while (members_size + set->type->reqsize <=
+ set_restore->members_size) {
+ line++;
-+ DP("members: %u, line %u", members_size, line);
++ DP("members: %d, line %d", members_size, line);
+ res = __ip_set_addip(index,
+ data + used + members_size,
+ set->type->reqsize);
@@ -2466,29 +3317,29 @@
+ members_size += set->type->reqsize;
+ }
+
-+ DP("members_size %u %u",
-+ set_restore->members_size, members_size);
++ DP("members_size %lu %d",
++ (unsigned long)set_restore->members_size, members_size);
+ if (members_size != set_restore->members_size)
+ return line++;
-+ used += set_restore->members_size;
++ used += set_restore->members_size;
+ }
-+
++
+ bindings:
+ /* Loop to restore bindings */
+ while (used < len) {
+ line++;
+
-+ DP("restore binding, line %u", line);
++ DP("restore binding, line %u", line);
+ /* Get and ensure size */
+ if (used + sizeof(struct ip_set_hash_save) > len)
+ return line;
-+ hash_save = (struct ip_set_hash_save *) (data + used);
++ hash_save = data + used;
+ used += sizeof(struct ip_set_hash_save);
-+
++
+ /* hash_save->id is used to store the index */
+ index = ip_set_find_byindex(hash_save->id);
+ DP("restore binding index %u, id %u, %u -> %u",
-+ index, hash_save->id, hash_save->ip, hash_save->binding);
++ index, hash_save->id, hash_save->ip, hash_save->binding);
+ if (index != hash_save->id)
+ return line;
+ if (ip_set_find_byindex(hash_save->binding) == IP_SET_INVALID_ID) {
@@ -2514,8 +3365,8 @@
+ }
+ if (used != len)
+ return line;
-+
-+ return 0;
++
++ return 0;
+}
+
+static int
@@ -2527,10 +3378,10 @@
+ struct ip_set_req_adt *req_adt;
+ ip_set_id_t index = IP_SET_INVALID_ID;
+ int (*adtfn)(ip_set_id_t index,
-+ const void *data, size_t size);
++ const void *data, u_int32_t size);
+ struct fn_table {
+ int (*fn)(ip_set_id_t index,
-+ const void *data, size_t size);
++ const void *data, u_int32_t size);
+ } adtfn_table[] =
+ { { ip_set_addip }, { ip_set_delip }, { ip_set_testip},
+ { ip_set_bindip}, { ip_set_unbindip }, { ip_set_testbind },
@@ -2562,11 +3413,10 @@
+
+ op = (unsigned *)data;
+ DP("op=%x", *op);
-+
++
+ if (*op < IP_SET_OP_VERSION) {
+ /* Check the version at the beginning of operations */
-+ struct ip_set_req_version *req_version =
-+ (struct ip_set_req_version *) data;
++ struct ip_set_req_version *req_version = data;
+ if (req_version->version != IP_SET_PROTOCOL_VERSION) {
+ res = -EPROTO;
+ goto done;
@@ -2575,9 +3425,8 @@
+
+ switch (*op) {
+ case IP_SET_OP_CREATE:{
-+ struct ip_set_req_create *req_create
-+ = (struct ip_set_req_create *) data;
-+
++ struct ip_set_req_create *req_create = data;
++
+ if (len < sizeof(struct ip_set_req_create)) {
+ ip_set_printk("short CREATE data (want >=%zu, got %u)",
+ sizeof(struct ip_set_req_create), len);
@@ -2594,16 +3443,15 @@
+ goto done;
+ }
+ case IP_SET_OP_DESTROY:{
-+ struct ip_set_req_std *req_destroy
-+ = (struct ip_set_req_std *) data;
-+
++ struct ip_set_req_std *req_destroy = data;
++
+ if (len != sizeof(struct ip_set_req_std)) {
+ ip_set_printk("invalid DESTROY data (want %zu, got %u)",
+ sizeof(struct ip_set_req_std), len);
+ res = -EINVAL;
+ goto done;
+ }
-+ if (strcmp(req_destroy->name, IPSET_TOKEN_ALL) == 0) {
++ if (SETNAME_EQ(req_destroy->name, IPSET_TOKEN_ALL)) {
+ /* Destroy all sets */
+ index = IP_SET_INVALID_ID;
+ } else {
@@ -2615,13 +3463,12 @@
+ goto done;
+ }
+ }
-+
++
+ res = ip_set_destroy(index);
+ goto done;
+ }
+ case IP_SET_OP_FLUSH:{
-+ struct ip_set_req_std *req_flush =
-+ (struct ip_set_req_std *) data;
++ struct ip_set_req_std *req_flush = data;
+
+ if (len != sizeof(struct ip_set_req_std)) {
+ ip_set_printk("invalid FLUSH data (want %zu, got %u)",
@@ -2629,7 +3476,7 @@
+ res = -EINVAL;
+ goto done;
+ }
-+ if (strcmp(req_flush->name, IPSET_TOKEN_ALL) == 0) {
++ if (SETNAME_EQ(req_flush->name, IPSET_TOKEN_ALL)) {
+ /* Flush all sets */
+ index = IP_SET_INVALID_ID;
+ } else {
@@ -2645,8 +3492,7 @@
+ goto done;
+ }
+ case IP_SET_OP_RENAME:{
-+ struct ip_set_req_create *req_rename
-+ = (struct ip_set_req_create *) data;
++ struct ip_set_req_create *req_rename = data;
+
+ if (len != sizeof(struct ip_set_req_create)) {
+ ip_set_printk("invalid RENAME data (want %zu, got %u)",
@@ -2657,7 +3503,7 @@
+
+ req_rename->name[IP_SET_MAXNAMELEN - 1] = '\0';
+ req_rename->typename[IP_SET_MAXNAMELEN - 1] = '\0';
-+
++
+ index = ip_set_find_byname(req_rename->name);
+ if (index == IP_SET_INVALID_ID) {
+ res = -ENOENT;
@@ -2667,8 +3513,7 @@
+ goto done;
+ }
+ case IP_SET_OP_SWAP:{
-+ struct ip_set_req_create *req_swap
-+ = (struct ip_set_req_create *) data;
++ struct ip_set_req_create *req_swap = data;
+ ip_set_id_t to_index;
+
+ if (len != sizeof(struct ip_set_req_create)) {
@@ -2697,7 +3542,7 @@
+ default:
+ break; /* Set identified by id */
+ }
-+
++
+ /* There we may have add/del/test/bind/unbind/test_bind operations */
+ if (*op < IP_SET_OP_ADD_IP || *op > IP_SET_OP_TEST_BIND_SET) {
+ res = -EBADMSG;
@@ -2711,7 +3556,7 @@
+ res = -EINVAL;
+ goto done;
+ }
-+ req_adt = (struct ip_set_req_adt *) data;
++ req_adt = data;
+
+ /* -U :all: :all:|:default: uses IP_SET_INVALID_ID */
+ if (!(*op == IP_SET_OP_UNBIND_SET
@@ -2771,8 +3616,7 @@
+
+ if (*op < IP_SET_OP_VERSION) {
+ /* Check the version at the beginning of operations */
-+ struct ip_set_req_version *req_version =
-+ (struct ip_set_req_version *) data;
++ struct ip_set_req_version *req_version = data;
+ if (req_version->version != IP_SET_PROTOCOL_VERSION) {
+ res = -EPROTO;
+ goto done;
@@ -2781,8 +3625,7 @@
+
+ switch (*op) {
+ case IP_SET_OP_VERSION: {
-+ struct ip_set_req_version *req_version =
-+ (struct ip_set_req_version *) data;
++ struct ip_set_req_version *req_version = data;
+
+ if (*len != sizeof(struct ip_set_req_version)) {
+ ip_set_printk("invalid VERSION (want %zu, got %d)",
@@ -2798,8 +3641,7 @@
+ goto done;
+ }
+ case IP_SET_OP_GET_BYNAME: {
-+ struct ip_set_req_get_set *req_get
-+ = (struct ip_set_req_get_set *) data;
++ struct ip_set_req_get_set *req_get = data;
+
+ if (*len != sizeof(struct ip_set_req_get_set)) {
+ ip_set_printk("invalid GET_BYNAME (want %zu, got %d)",
@@ -2813,8 +3655,7 @@
+ goto copy;
+ }
+ case IP_SET_OP_GET_BYINDEX: {
-+ struct ip_set_req_get_set *req_get
-+ = (struct ip_set_req_get_set *) data;
++ struct ip_set_req_get_set *req_get = data;
+
+ if (*len != sizeof(struct ip_set_req_get_set)) {
+ ip_set_printk("invalid GET_BYINDEX (want %zu, got %d)",
@@ -2830,8 +3671,7 @@
+ goto copy;
+ }
+ case IP_SET_OP_ADT_GET: {
-+ struct ip_set_req_adt_get *req_get
-+ = (struct ip_set_req_adt_get *) data;
++ struct ip_set_req_adt_get *req_get = data;
+
+ if (*len != sizeof(struct ip_set_req_adt_get)) {
+ ip_set_printk("invalid ADT_GET (want %zu, got %d)",
@@ -2853,8 +3693,7 @@
+ goto copy;
+ }
+ case IP_SET_OP_MAX_SETS: {
-+ struct ip_set_req_max_sets *req_max_sets
-+ = (struct ip_set_req_max_sets *) data;
++ struct ip_set_req_max_sets *req_max_sets = data;
+ ip_set_id_t i;
+
+ if (*len != sizeof(struct ip_set_req_max_sets)) {
@@ -2864,7 +3703,7 @@
+ goto done;
+ }
+
-+ if (strcmp(req_max_sets->set.name, IPSET_TOKEN_ALL) == 0) {
++ if (SETNAME_EQ(req_max_sets->set.name, IPSET_TOKEN_ALL)) {
+ req_max_sets->set.index = IP_SET_INVALID_ID;
+ } else {
+ req_max_sets->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
@@ -2885,8 +3724,7 @@
+ }
+ case IP_SET_OP_LIST_SIZE:
+ case IP_SET_OP_SAVE_SIZE: {
-+ struct ip_set_req_setnames *req_setnames
-+ = (struct ip_set_req_setnames *) data;
++ struct ip_set_req_setnames *req_setnames = data;
+ struct ip_set_name_list *name_list;
+ struct ip_set *set;
+ ip_set_id_t i;
@@ -2904,8 +3742,7 @@
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] == NULL)
+ continue;
-+ name_list = (struct ip_set_name_list *)
-+ (data + used);
++ name_list = data + used;
+ used += sizeof(struct ip_set_name_list);
+ if (used > copylen) {
+ res = -EAGAIN;
@@ -2957,8 +3794,7 @@
+ goto copy;
+ }
+ case IP_SET_OP_LIST: {
-+ struct ip_set_req_list *req_list
-+ = (struct ip_set_req_list *) data;
++ struct ip_set_req_list *req_list = data;
+ ip_set_id_t i;
+ int used;
+
@@ -2994,8 +3830,7 @@
+ goto copy;
+ }
+ case IP_SET_OP_SAVE: {
-+ struct ip_set_req_list *req_save
-+ = (struct ip_set_req_list *) data;
++ struct ip_set_req_list *req_save = data;
+ ip_set_id_t i;
+ int used;
+
@@ -3011,20 +3846,30 @@
+ res = -ENOENT;
+ goto done;
+ }
++
++#define SETLIST(set) (strcmp(set->type->typename, "setlist") == 0)
++
+ used = 0;
+ if (index == IP_SET_INVALID_ID) {
-+ /* Save all sets */
++ /* Save all sets: ugly setlist type dependency */
++ int setlist = 0;
++ setlists:
+ for (i = 0; i < ip_set_max && res == 0; i++) {
-+ if (ip_set_list[i] != NULL)
++ if (ip_set_list[i] != NULL
++ && !(setlist ^ SETLIST(ip_set_list[i])))
+ res = ip_set_save_set(i, data, &used, *len);
+ }
++ if (!setlist) {
++ setlist = 1;
++ goto setlists;
++ }
+ } else {
+ /* Save an individual set */
+ res = ip_set_save_set(index, data, &used, *len);
+ }
+ if (res == 0)
+ res = ip_set_save_bindings(index, data, &used, *len);
-+
++
+ if (res != 0)
+ goto done;
+ else if (copylen != used) {
@@ -3034,20 +3879,19 @@
+ goto copy;
+ }
+ case IP_SET_OP_RESTORE: {
-+ struct ip_set_req_setnames *req_restore
-+ = (struct ip_set_req_setnames *) data;
++ struct ip_set_req_setnames *req_restore = data;
+ int line;
+
+ if (*len < sizeof(struct ip_set_req_setnames)
+ || *len != req_restore->size) {
-+ ip_set_printk("invalid RESTORE (want =%zu, got %d)",
-+ req_restore->size, *len);
++ ip_set_printk("invalid RESTORE (want =%lu, got %d)",
++ (long unsigned)req_restore->size, *len);
+ res = -EINVAL;
+ goto done;
+ }
+ line = ip_set_restore(data + sizeof(struct ip_set_req_setnames),
+ req_restore->size - sizeof(struct ip_set_req_setnames));
-+ DP("ip_set_restore: %u", line);
++ DP("ip_set_restore: %d", line);
+ if (line != 0) {
+ res = -EAGAIN;
+ req_restore->size = line;
@@ -3062,12 +3906,12 @@
+ } /* end of switch(op) */
+
+ copy:
-+ DP("set %s, copylen %u", index != IP_SET_INVALID_ID
++ DP("set %s, copylen %d", index != IP_SET_INVALID_ID
+ && ip_set_list[index]
+ ? ip_set_list[index]->name
+ : ":all:", copylen);
+ res = copy_to_user(user, data, copylen);
-+
++
+ done:
+ up(&ip_set_app_mutex);
+ vfree(data);
@@ -3085,12 +3929,15 @@
+ .get_optmin = SO_IP_SET,
+ .get_optmax = SO_IP_SET + 1,
+ .get = &ip_set_sockfn_get,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
++ .use = 0,
++#else
+ .owner = THIS_MODULE,
+#endif
+};
+
+static int max_sets, hash_size;
++
+module_param(max_sets, int, 0600);
+MODULE_PARM_DESC(max_sets, "maximal number of sets");
+module_param(hash_size, int, 0600);
@@ -3133,6 +3980,7 @@
+ vfree(ip_set_hash);
+ return res;
+ }
++
+ return 0;
+}
+
@@ -3150,7 +3998,10 @@
+
+EXPORT_SYMBOL(ip_set_get_byname);
+EXPORT_SYMBOL(ip_set_get_byindex);
-+EXPORT_SYMBOL(ip_set_put);
++EXPORT_SYMBOL(ip_set_put_byindex);
++EXPORT_SYMBOL(ip_set_id);
++EXPORT_SYMBOL(__ip_set_get_byname);
++EXPORT_SYMBOL(__ip_set_put_byindex);
+
+EXPORT_SYMBOL(ip_set_addip_kernel);
+EXPORT_SYMBOL(ip_set_delip_kernel);
@@ -3160,8 +4011,8 @@
+module_exit(ip_set_fini);
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_iphash.c
-@@ -0,0 +1,429 @@
-+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+@@ -0,0 +1,166 @@
++/* Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
@@ -3171,36 +4022,26 @@
+/* Kernel module implementing an ip hash set */
+
+#include <linux/module.h>
++#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+#include <linux/jhash.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
-+#include <linux/vmalloc.h>
+#include <linux/random.h>
+
+#include <net/ip.h>
+
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
+#include <linux/netfilter_ipv4/ip_set_iphash.h>
+
+static int limit = MAX_RANGE;
+
+static inline __u32
-+jhash_ip(const struct ip_set_iphash *map, uint16_t i, ip_set_ip_t ip)
-+{
-+ return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
-+}
-+
-+static inline __u32
-+hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++iphash_id(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++ struct ip_set_iphash *map = set->data;
+ __u32 id;
+ u_int16_t i;
+ ip_set_ip_t *elem;
@@ -3208,208 +4049,91 @@
+ *hash_ip = ip & map->netmask;
+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u, %u.%u.%u.%u",
+ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip), HIPQUAD(map->netmask));
-+
++
+ for (i = 0; i < map->probes; i++) {
+ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
+ DP("hash key: %u", id);
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
+ if (*elem == *hash_ip)
+ return id;
-+ /* No shortcut at testing - there can be deleted
-+ * entries. */
++ /* No shortcut - there can be deleted entries. */
+ }
+ return UINT_MAX;
+}
+
+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++iphash_test(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
++ return (ip && iphash_id(set, hash_ip, ip) != UINT_MAX);
+}
+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iphash *req =
-+ (struct ip_set_req_iphash *) data;
++#define KADT_CONDITION
+
-+ if (size != sizeof(struct ip_set_req_iphash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iphash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT(iphash, test)
++KADT(iphash, test, ipaddr)
+
+static inline int
-+__addip(struct ip_set_iphash *map, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++__iphash_add(struct ip_set_iphash *map, ip_set_ip_t *ip)
+{
+ __u32 probe;
+ u_int16_t i;
-+ ip_set_ip_t *elem;
-+
-+ if (!ip || map->elements >= limit)
-+ return -ERANGE;
-+
-+ *hash_ip = ip & map->netmask;
-+
++ ip_set_ip_t *elem, *slot = NULL;
++
+ for (i = 0; i < map->probes; i++) {
-+ probe = jhash_ip(map, i, *hash_ip) % map->hashsize;
++ probe = jhash_ip(map, i, *ip) % map->hashsize;
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
-+ if (*elem == *hash_ip)
++ if (*elem == *ip)
+ return -EEXIST;
-+ if (!*elem) {
-+ *elem = *hash_ip;
-+ map->elements++;
-+ return 0;
-+ }
++ if (!(slot || *elem))
++ slot = elem;
++ /* There can be deleted entries, must check all slots */
++ }
++ if (slot) {
++ *slot = *ip;
++ map->elements++;
++ return 0;
+ }
+ /* Trigger rehashing */
+ return -EAGAIN;
+}
+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
++static inline int
++iphash_add(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_req_iphash *req =
-+ (struct ip_set_req_iphash *) data;
++ struct ip_set_iphash *map = set->data;
++
++ if (!ip || map->elements >= limit)
++ return -ERANGE;
+
-+ if (size != sizeof(struct ip_set_req_iphash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iphash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __addip((struct ip_set_iphash *) set->data, req->ip, hash_ip);
++ *hash_ip = ip & map->netmask;
++
++ return __iphash_add(map, hash_ip);
+}
+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __addip((struct ip_set_iphash *) set->data,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT(iphash, add)
++KADT(iphash, add, ipaddr)
+
-+static int retry(struct ip_set *set)
++static inline void
++__iphash_retry(struct ip_set_iphash *tmp, struct ip_set_iphash *map)
+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ ip_set_ip_t hash_ip, *elem;
-+ void *members;
-+ u_int32_t i, hashsize = map->hashsize;
-+ int res;
-+ struct ip_set_iphash *tmp;
-+
-+ if (map->resize == 0)
-+ return -ERANGE;
-+
-+ again:
-+ res = 0;
-+
-+ /* Calculate new hash size */
-+ hashsize += (hashsize * map->resize)/100;
-+ if (hashsize == map->hashsize)
-+ hashsize++;
-+
-+ ip_set_printk("rehashing of set %s triggered: "
-+ "hashsize grows from %u to %u",
-+ set->name, map->hashsize, hashsize);
-+
-+ tmp = kmalloc(sizeof(struct ip_set_iphash)
-+ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
-+ if (!tmp) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_iphash)
-+ + map->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
-+ if (!tmp->members) {
-+ DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
-+ kfree(tmp);
-+ return -ENOMEM;
-+ }
-+ tmp->hashsize = hashsize;
-+ tmp->elements = 0;
-+ tmp->probes = map->probes;
-+ tmp->resize = map->resize;
+ tmp->netmask = map->netmask;
-+ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
-+
-+ write_lock_bh(&set->lock);
-+ map = (struct ip_set_iphash *) set->data; /* Play safe */
-+ for (i = 0; i < map->hashsize && res == 0; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ if (*elem)
-+ res = __addip(tmp, *elem, &hash_ip);
-+ }
-+ if (res) {
-+ /* Failure, try again */
-+ write_unlock_bh(&set->lock);
-+ harray_free(tmp->members);
-+ kfree(tmp);
-+ goto again;
-+ }
-+
-+ /* Success at resizing! */
-+ members = map->members;
-+
-+ map->hashsize = tmp->hashsize;
-+ map->members = tmp->members;
-+ write_unlock_bh(&set->lock);
-+
-+ harray_free(members);
-+ kfree(tmp);
-+
-+ return 0;
+}
+
++HASH_RETRY(iphash, ip_set_ip_t)
++
+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++iphash_del(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++ struct ip_set_iphash *map = set->data;
+ ip_set_ip_t id, *elem;
+
+ if (!ip)
+ return -ERANGE;
+
-+ id = hash_id(set, ip, hash_ip);
++ id = iphash_id(set, hash_ip, ip);
+ if (id == UINT_MAX)
+ return -EEXIST;
-+
++
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
+ *elem = 0;
+ map->elements--;
@@ -3417,159 +4141,35 @@
+ return 0;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iphash *req =
-+ (struct ip_set_req_iphash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iphash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iphash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT(iphash, del)
++KADT(iphash, del, ipaddr)
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static inline int
++__iphash_create(const struct ip_set_req_iphash_create *req,
++ struct ip_set_iphash *map)
+{
-+ struct ip_set_req_iphash_create *req =
-+ (struct ip_set_req_iphash_create *) data;
-+ struct ip_set_iphash *map;
-+ uint16_t i;
-+
-+ if (size != sizeof(struct ip_set_req_iphash_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iphash_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ if (req->hashsize < 1) {
-+ ip_set_printk("hashsize too small");
-+ return -ENOEXEC;
-+ }
-+
-+ if (req->probes < 1) {
-+ ip_set_printk("probes too small");
-+ return -ENOEXEC;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_iphash)
-+ + req->probes * sizeof(uint32_t), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_iphash)
-+ + req->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ for (i = 0; i < req->probes; i++)
-+ get_random_bytes(((uint32_t *) map->initval)+i, 4);
-+ map->elements = 0;
-+ map->hashsize = req->hashsize;
-+ map->probes = req->probes;
-+ map->resize = req->resize;
+ map->netmask = req->netmask;
-+ map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+
-+ set->data = map;
++
+ return 0;
+}
+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+
-+ harray_free(map->members);
-+ kfree(map);
-+
-+ set->data = NULL;
-+}
-+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
-+ map->elements = 0;
-+}
++HASH_CREATE(iphash, ip_set_ip_t)
++HASH_DESTROY(iphash)
+
-+static void list_header(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ struct ip_set_req_iphash_create *header =
-+ (struct ip_set_req_iphash_create *) data;
++HASH_FLUSH(iphash, ip_set_ip_t)
+
-+ header->hashsize = map->hashsize;
-+ header->probes = map->probes;
-+ header->resize = map->resize;
++static inline void
++__iphash_list_header(const struct ip_set_iphash *map,
++ struct ip_set_req_iphash_create *header)
++{
+ header->netmask = map->netmask;
+}
+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+
-+ return (map->hashsize * sizeof(ip_set_ip_t));
-+}
-+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ ip_set_ip_t i, *elem;
-+
-+ for (i = 0; i < map->hashsize; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ ((ip_set_ip_t *)data)[i] = *elem;
-+ }
-+}
++HASH_LIST_HEADER(iphash)
++HASH_LIST_MEMBERS_SIZE(iphash, ip_set_ip_t)
++HASH_LIST_MEMBERS(iphash, ip_set_ip_t)
+
-+static struct ip_set_type ip_set_iphash = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_iphash),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .retry = &retry,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_iphash_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_RTYPE(iphash, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -3577,25 +4177,13 @@
+module_param(limit, int, 0600);
+MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
+
-+static int __init ip_set_iphash_init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_iphash);
-+}
-+
-+static void __exit ip_set_iphash_fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_iphash);
-+}
-+
-+module_init(ip_set_iphash_init);
-+module_exit(ip_set_iphash_fini);
++REGISTER_MODULE(iphash)
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_ipmap.c
-@@ -0,0 +1,336 @@
+@@ -0,0 +1,142 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
-+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ * Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
@@ -3607,9 +4195,6 @@
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
@@ -3624,10 +4209,10 @@
+}
+
+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++ipmap_test(const struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+
++ const struct ip_set_ipmap *map = set->data;
++
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
+
@@ -3637,46 +4222,15 @@
+ return !!test_bit(ip_to_id(map, *hash_ip), map->members);
+}
+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipmap *req =
-+ (struct ip_set_req_ipmap *) data;
++#define KADT_CONDITION
+
-+ if (size != sizeof(struct ip_set_req_ipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ int res = __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+ return (res < 0 ? 0 : res);
-+}
++UADT(ipmap, test)
++KADT(ipmap, test, ipaddr)
+
+static inline int
-+__addip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++ipmap_add(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++ struct ip_set_ipmap *map = set->data;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
@@ -3689,46 +4243,13 @@
+ return 0;
+}
+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipmap *req =
-+ (struct ip_set_req_ipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_ipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ DP("%u.%u.%u.%u", HIPQUAD(req->ip));
-+ return __addip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __addip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT(ipmap, add)
++KADT(ipmap, add, ipaddr)
+
+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++ipmap_del(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++ struct ip_set_ipmap *map = set->data;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
@@ -3737,75 +4258,17 @@
+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
+ if (!test_and_clear_bit(ip_to_id(map, *hash_ip), map->members))
+ return -EEXIST;
-+
++
+ return 0;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipmap *req =
-+ (struct ip_set_req_ipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_ipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT(ipmap, del)
++KADT(ipmap, del, ipaddr)
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static inline int
++__ipmap_create(const struct ip_set_req_ipmap_create *req,
++ struct ip_set_ipmap *map)
+{
-+ int newbytes;
-+ struct ip_set_req_ipmap_create *req =
-+ (struct ip_set_req_ipmap_create *) data;
-+ struct ip_set_ipmap *map;
-+
-+ if (size != sizeof(struct ip_set_req_ipmap_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipmap_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ DP("from %u.%u.%u.%u to %u.%u.%u.%u",
-+ HIPQUAD(req->from), HIPQUAD(req->to));
-+
-+ if (req->from > req->to) {
-+ DP("bad ip range");
-+ return -ENOEXEC;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_ipmap), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_ipmap));
-+ return -ENOMEM;
-+ }
-+ map->first_ip = req->from;
-+ map->last_ip = req->to;
+ map->netmask = req->netmask;
+
+ if (req->netmask == 0xFFFFFFFF) {
@@ -3830,109 +4293,40 @@
+ map->sizeid = 2 << (netmask_bits - mask_bits - 1);
+ }
+ if (map->sizeid > MAX_RANGE + 1) {
-+ ip_set_printk("range too big (max %d addresses)",
-+ MAX_RANGE+1);
-+ kfree(map);
++ ip_set_printk("range too big, %d elements (max %d)",
++ map->sizeid, MAX_RANGE+1);
+ return -ENOEXEC;
+ }
+ DP("hosts %u, sizeid %u", map->hosts, map->sizeid);
-+ newbytes = bitmap_bytes(0, map->sizeid - 1);
-+ map->members = kmalloc(newbytes, GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", newbytes);
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+ memset(map->members, 0, newbytes);
-+
-+ set->data = map;
-+ return 0;
-+}
-+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+
-+ kfree(map->members);
-+ kfree(map);
-+
-+ set->data = NULL;
++ return bitmap_bytes(0, map->sizeid - 1);
+}
+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+ memset(map->members, 0, bitmap_bytes(0, map->sizeid - 1));
-+}
++BITMAP_CREATE(ipmap)
++BITMAP_DESTROY(ipmap)
++BITMAP_FLUSH(ipmap)
+
-+static void list_header(const struct ip_set *set, void *data)
++static inline void
++__ipmap_list_header(const struct ip_set_ipmap *map,
++ struct ip_set_req_ipmap_create *header)
+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+ struct ip_set_req_ipmap_create *header =
-+ (struct ip_set_req_ipmap_create *) data;
-+
-+ header->from = map->first_ip;
-+ header->to = map->last_ip;
+ header->netmask = map->netmask;
+}
+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+
-+ return bitmap_bytes(0, map->sizeid - 1);
-+}
++BITMAP_LIST_HEADER(ipmap)
++BITMAP_LIST_MEMBERS_SIZE(ipmap)
++BITMAP_LIST_MEMBERS(ipmap)
+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+ int bytes = bitmap_bytes(0, map->sizeid - 1);
-+
-+ memcpy(data, map->members, bytes);
-+}
-+
-+static struct ip_set_type ip_set_ipmap = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_ipmap),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_ipmap_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_TYPE(ipmap, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("ipmap type of IP sets");
+
-+static int __init ip_set_ipmap_init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_ipmap);
-+}
-+
-+static void __exit ip_set_ipmap_fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_ipmap);
-+}
-+
-+module_init(ip_set_ipmap_init);
-+module_exit(ip_set_ipmap_fini);
++REGISTER_MODULE(ipmap)
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_ipporthash.c
-@@ -0,0 +1,581 @@
-+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+@@ -0,0 +1,203 @@
++/* Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
@@ -3942,581 +4336,729 @@
+/* Kernel module implementing an ip+port hash set */
+
+#include <linux/module.h>
++#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+#include <linux/jhash.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
-+#include <linux/vmalloc.h>
+#include <linux/random.h>
+
+#include <net/ip.h>
+
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
+#include <linux/netfilter_ipv4/ip_set_ipporthash.h>
++#include <linux/netfilter_ipv4/ip_set_getport.h>
+
+static int limit = MAX_RANGE;
+
-+/* We must handle non-linear skbs */
-+static inline ip_set_ip_t
-+get_port(const struct sk_buff *skb, u_int32_t flags)
-+{
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ struct iphdr *iph = ip_hdr(skb);
-+#else
-+ struct iphdr *iph = skb->nh.iph;
-+#endif
-+ u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
-+
-+ switch (iph->protocol) {
-+ case IPPROTO_TCP: {
-+ struct tcphdr tcph;
-+
-+ /* See comments at tcp_match in ip_tables.c */
-+ if (offset)
-+ return INVALID_PORT;
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
-+#else
-+ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &tcph, sizeof(tcph)) < 0)
-+#endif
-+ /* No choice either */
-+ return INVALID_PORT;
-+
-+ return ntohs(flags & IPSET_SRC ?
-+ tcph.source : tcph.dest);
-+ }
-+ case IPPROTO_UDP: {
-+ struct udphdr udph;
-+
-+ if (offset)
-+ return INVALID_PORT;
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
-+#else
-+ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &udph, sizeof(udph)) < 0)
-+#endif
-+ /* No choice either */
-+ return INVALID_PORT;
-+
-+ return ntohs(flags & IPSET_SRC ?
-+ udph.source : udph.dest);
-+ }
-+ default:
-+ return INVALID_PORT;
-+ }
-+}
-+
+static inline __u32
-+jhash_ip(const struct ip_set_ipporthash *map, uint16_t i, ip_set_ip_t ip)
++ipporthash_id(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port)
+{
-+ return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
-+}
-+
-+#define HASH_IP(map, ip, port) (port + ((ip - ((map)->first_ip)) << 16))
-+
-+static inline __u32
-+hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_ipporthash *map =
-+ (struct ip_set_ipporthash *) set->data;
++ struct ip_set_ipporthash *map = set->data;
+ __u32 id;
+ u_int16_t i;
+ ip_set_ip_t *elem;
+
-+ *hash_ip = HASH_IP(map, ip, port);
++ *hash_ip = pack_ip_port(map, ip, port);
++
+ DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
+ set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
-+
++ if (!*hash_ip)
++ return UINT_MAX;
++
+ for (i = 0; i < map->probes; i++) {
+ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
+ DP("hash key: %u", id);
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
+ if (*elem == *hash_ip)
+ return id;
-+ /* No shortcut at testing - there can be deleted
-+ * entries. */
++ /* No shortcut - there can be deleted entries. */
+ }
+ return UINT_MAX;
+}
+
+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t *hash_ip)
++ipporthash_test(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+
++ struct ip_set_ipporthash *map = set->data;
++
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
+
-+ return (hash_id(set, ip, port, hash_ip) != UINT_MAX);
++ return (ipporthash_id(set, hash_ip, ip, port) != UINT_MAX);
+}
+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipporthash *req =
-+ (struct ip_set_req_ipporthash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_ipporthash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipporthash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testip(set, req->ip, req->port, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ ip_set_ip_t port;
-+ int res;
-+
-+ if (flags[index+1] == 0)
-+ return 0;
-+
-+ port = get_port(skb, flags[index+1]);
-+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ NIPQUAD(ip_hdr(skb)->saddr),
-+ NIPQUAD(ip_hdr(skb)->daddr));
-+#else
-+ NIPQUAD(skb->nh.iph->saddr),
-+ NIPQUAD(skb->nh.iph->daddr));
-+#endif
-+ DP("flag %s port %u",
-+ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
-+ port);
-+ if (port == INVALID_PORT)
++#define KADT_CONDITION \
++ ip_set_ip_t port; \
++ \
++ if (flags[index+1] == 0) \
++ return 0; \
++ \
++ port = get_port(skb, flags[index+1]); \
++ \
++ if (port == INVALID_PORT) \
+ return 0;
+
-+ res = __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ port,
-+ hash_ip);
-+ return (res < 0 ? 0 : res);
-+
-+}
++UADT(ipporthash, test, req->port)
++KADT(ipporthash, test, ipaddr, port)
+
+static inline int
-+__add_haship(struct ip_set_ipporthash *map, ip_set_ip_t hash_ip)
++__ipporthash_add(struct ip_set_ipporthash *map, ip_set_ip_t *ip)
+{
+ __u32 probe;
+ u_int16_t i;
-+ ip_set_ip_t *elem;
++ ip_set_ip_t *elem, *slot = NULL;
+
+ for (i = 0; i < map->probes; i++) {
-+ probe = jhash_ip(map, i, hash_ip) % map->hashsize;
++ probe = jhash_ip(map, i, *ip) % map->hashsize;
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
-+ if (*elem == hash_ip)
++ if (*elem == *ip)
+ return -EEXIST;
-+ if (!*elem) {
-+ *elem = hash_ip;
-+ map->elements++;
-+ return 0;
-+ }
++ if (!(slot || *elem))
++ slot = elem;
++ /* There can be deleted entries, must check all slots */
++ }
++ if (slot) {
++ *slot = *ip;
++ map->elements++;
++ return 0;
+ }
+ /* Trigger rehashing */
+ return -EAGAIN;
+}
+
+static inline int
-+__addip(struct ip_set_ipporthash *map, ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t *hash_ip)
++ipporthash_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port)
+{
++ struct ip_set_ipporthash *map = set->data;
+ if (map->elements > limit)
+ return -ERANGE;
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
+
-+ *hash_ip = HASH_IP(map, ip, port);
++ *hash_ip = pack_ip_port(map, ip, port);
+
-+ return __add_haship(map, *hash_ip);
++ if (!*hash_ip)
++ return -ERANGE;
++
++ return __ipporthash_add(map, hash_ip);
+}
+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
++UADT(ipporthash, add, req->port)
++KADT(ipporthash, add, ipaddr, port)
++
++static inline void
++__ipporthash_retry(struct ip_set_ipporthash *tmp,
++ struct ip_set_ipporthash *map)
+{
-+ struct ip_set_req_ipporthash *req =
-+ (struct ip_set_req_ipporthash *) data;
++ tmp->first_ip = map->first_ip;
++ tmp->last_ip = map->last_ip;
++}
+
-+ if (size != sizeof(struct ip_set_req_ipporthash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipporthash),
-+ size);
-+ return -EINVAL;
++HASH_RETRY(ipporthash, ip_set_ip_t)
++
++static inline int
++ipporthash_del(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port)
++{
++ struct ip_set_ipporthash *map = set->data;
++ ip_set_ip_t id;
++ ip_set_ip_t *elem;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++
++ id = ipporthash_id(set, hash_ip, ip, port);
++
++ if (id == UINT_MAX)
++ return -EEXIST;
++
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
++ *elem = 0;
++ map->elements--;
++
++ return 0;
++}
++
++UADT(ipporthash, del, req->port)
++KADT(ipporthash, del, ipaddr, port)
++
++static inline int
++__ipporthash_create(const struct ip_set_req_ipporthash_create *req,
++ struct ip_set_ipporthash *map)
++{
++ if (req->to - req->from > MAX_RANGE) {
++ ip_set_printk("range too big, %d elements (max %d)",
++ req->to - req->from + 1, MAX_RANGE+1);
++ return -ENOEXEC;
+ }
-+ return __addip((struct ip_set_ipporthash *) set->data,
-+ req->ip, req->port, hash_ip);
++ map->first_ip = req->from;
++ map->last_ip = req->to;
++ return 0;
+}
+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
++HASH_CREATE(ipporthash, ip_set_ip_t)
++HASH_DESTROY(ipporthash)
++HASH_FLUSH(ipporthash, ip_set_ip_t)
++
++static inline void
++__ipporthash_list_header(const struct ip_set_ipporthash *map,
++ struct ip_set_req_ipporthash_create *header)
+{
-+ ip_set_ip_t port;
++ header->from = map->first_ip;
++ header->to = map->last_ip;
++}
+
-+ if (flags[index+1] == 0)
-+ return -EINVAL;
++HASH_LIST_HEADER(ipporthash)
++HASH_LIST_MEMBERS_SIZE(ipporthash, ip_set_ip_t)
++HASH_LIST_MEMBERS(ipporthash, ip_set_ip_t)
+
-+ port = get_port(skb, flags[index+1]);
++IP_SET_RTYPE(ipporthash, IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_DATA_DOUBLE)
+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ NIPQUAD(ip_hdr(skb)->saddr),
-+ NIPQUAD(ip_hdr(skb)->daddr));
-+#else
-+ NIPQUAD(skb->nh.iph->saddr),
-+ NIPQUAD(skb->nh.iph->daddr));
-+#endif
-+ DP("flag %s port %u",
-+ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
-+ port);
-+ if (port == INVALID_PORT)
-+ return -EINVAL;
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("ipporthash type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
+
-+ return __addip((struct ip_set_ipporthash *) set->data,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ port,
-+ hash_ip);
-+}
++REGISTER_MODULE(ipporthash)
+--- /dev/null
++++ b/net/ipv4/netfilter/ip_set_ipportiphash.c
+@@ -0,0 +1,216 @@
++/* Copyright (C) 2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module implementing an ip+port+ip hash set */
++
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/ip.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++#include <linux/random.h>
+
-+static int retry(struct ip_set *set)
++#include <net/ip.h>
++
++#include <linux/netfilter_ipv4/ip_set_ipportiphash.h>
++#include <linux/netfilter_ipv4/ip_set_getport.h>
++
++static int limit = MAX_RANGE;
++
++#define jhash_ip2(map, i, ipport, ip1) \
++ jhash_2words(ipport, ip1, *(map->initval + i))
++
++static inline __u32
++ipportiphash_id(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ ip_set_ip_t *elem;
-+ void *members;
-+ u_int32_t i, hashsize = map->hashsize;
-+ int res;
-+ struct ip_set_ipporthash *tmp;
++ struct ip_set_ipportiphash *map = set->data;
++ __u32 id;
++ u_int16_t i;
++ struct ipportip *elem;
+
-+ if (map->resize == 0)
++ *hash_ip = pack_ip_port(map, ip, port);
++ DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
++ set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
++ if (!(*hash_ip || ip1))
++ return UINT_MAX;
++
++ for (i = 0; i < map->probes; i++) {
++ id = jhash_ip2(map, i, *hash_ip, ip1) % map->hashsize;
++ DP("hash key: %u", id);
++ elem = HARRAY_ELEM(map->members, struct ipportip *, id);
++ if (elem->ip == *hash_ip && elem->ip1 == ip1)
++ return id;
++ /* No shortcut - there can be deleted entries. */
++ }
++ return UINT_MAX;
++}
++
++static inline int
++ipportiphash_test(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
++{
++ struct ip_set_ipportiphash *map = set->data;
++
++ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
+
-+ again:
-+ res = 0;
++ return (ipportiphash_id(set, hash_ip, ip, port, ip1) != UINT_MAX);
++}
+
-+ /* Calculate new hash size */
-+ hashsize += (hashsize * map->resize)/100;
-+ if (hashsize == map->hashsize)
-+ hashsize++;
++#define KADT_CONDITION \
++ ip_set_ip_t port, ip1; \
++ \
++ if (flags[index+2] == 0) \
++ return 0; \
++ \
++ port = get_port(skb, flags[index+1]); \
++ ip1 = ipaddr(skb, flags[index+2]); \
++ \
++ if (port == INVALID_PORT) \
++ return 0;
+
-+ ip_set_printk("rehashing of set %s triggered: "
-+ "hashsize grows from %u to %u",
-+ set->name, map->hashsize, hashsize);
++UADT(ipportiphash, test, req->port, req->ip1)
++KADT(ipportiphash, test, ipaddr, port, ip1)
+
-+ tmp = kmalloc(sizeof(struct ip_set_ipporthash)
-+ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
-+ if (!tmp) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_ipporthash)
-+ + map->probes * sizeof(uint32_t));
-+ return -ENOMEM;
++static inline int
++__ipportip_add(struct ip_set_ipportiphash *map,
++ ip_set_ip_t hash_ip, ip_set_ip_t ip1)
++{
++ __u32 probe;
++ u_int16_t i;
++ struct ipportip *elem, *slot = NULL;
++
++ for (i = 0; i < map->probes; i++) {
++ probe = jhash_ip2(map, i, hash_ip, ip1) % map->hashsize;
++ elem = HARRAY_ELEM(map->members, struct ipportip *, probe);
++ if (elem->ip == hash_ip && elem->ip1 == ip1)
++ return -EEXIST;
++ if (!(slot || elem->ip || elem->ip1))
++ slot = elem;
++ /* There can be deleted entries, must check all slots */
+ }
-+ tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
-+ if (!tmp->members) {
-+ DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
-+ kfree(tmp);
-+ return -ENOMEM;
++ if (slot) {
++ slot->ip = hash_ip;
++ slot->ip1 = ip1;
++ map->elements++;
++ return 0;
+ }
-+ tmp->hashsize = hashsize;
-+ tmp->elements = 0;
-+ tmp->probes = map->probes;
-+ tmp->resize = map->resize;
-+ tmp->first_ip = map->first_ip;
-+ tmp->last_ip = map->last_ip;
-+ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
++ /* Trigger rehashing */
++ return -EAGAIN;
++}
+
-+ write_lock_bh(&set->lock);
-+ map = (struct ip_set_ipporthash *) set->data; /* Play safe */
-+ for (i = 0; i < map->hashsize && res == 0; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ if (*elem)
-+ res = __add_haship(tmp, *elem);
-+ }
-+ if (res) {
-+ /* Failure, try again */
-+ write_unlock_bh(&set->lock);
-+ harray_free(tmp->members);
-+ kfree(tmp);
-+ goto again;
-+ }
++static inline int
++__ipportiphash_add(struct ip_set_ipportiphash *map,
++ struct ipportip *elem)
++{
++ return __ipportip_add(map, elem->ip, elem->ip1);
++}
+
-+ /* Success at resizing! */
-+ members = map->members;
++static inline int
++ipportiphash_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
++{
++ struct ip_set_ipportiphash *map = set->data;
++
++ if (map->elements > limit)
++ return -ERANGE;
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
+
-+ map->hashsize = tmp->hashsize;
-+ map->members = tmp->members;
-+ write_unlock_bh(&set->lock);
++ *hash_ip = pack_ip_port(map, ip, port);
++ if (!(*hash_ip || ip1))
++ return -ERANGE;
++
++ return __ipportip_add(map, *hash_ip, ip1);
++}
+
-+ harray_free(members);
-+ kfree(tmp);
++UADT(ipportiphash, add, req->port, req->ip1)
++KADT(ipportiphash, add, ipaddr, port, ip1)
+
-+ return 0;
++static inline void
++__ipportiphash_retry(struct ip_set_ipportiphash *tmp,
++ struct ip_set_ipportiphash *map)
++{
++ tmp->first_ip = map->first_ip;
++ tmp->last_ip = map->last_ip;
+}
+
++HASH_RETRY2(ipportiphash, struct ipportip)
++
+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t *hash_ip)
++ipportiphash_del(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++ struct ip_set_ipportiphash *map = set->data;
+ ip_set_ip_t id;
-+ ip_set_ip_t *elem;
++ struct ipportip *elem;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
+
-+ id = hash_id(set, ip, port, hash_ip);
++ id = ipportiphash_id(set, hash_ip, ip, port, ip1);
+
+ if (id == UINT_MAX)
+ return -EEXIST;
-+
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
-+ *elem = 0;
++
++ elem = HARRAY_ELEM(map->members, struct ipportip *, id);
++ elem->ip = elem->ip1 = 0;
+ map->elements--;
+
+ return 0;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipporthash *req =
-+ (struct ip_set_req_ipporthash *) data;
++UADT(ipportiphash, del, req->port, req->ip1)
++KADT(ipportiphash, del, ipaddr, port, ip1)
+
-+ if (size != sizeof(struct ip_set_req_ipporthash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipporthash),
-+ size);
-+ return -EINVAL;
++static inline int
++__ipportiphash_create(const struct ip_set_req_ipportiphash_create *req,
++ struct ip_set_ipportiphash *map)
++{
++ if (req->to - req->from > MAX_RANGE) {
++ ip_set_printk("range too big, %d elements (max %d)",
++ req->to - req->from + 1, MAX_RANGE+1);
++ return -ENOEXEC;
+ }
-+ return __delip(set, req->ip, req->port, hash_ip);
++ map->first_ip = req->from;
++ map->last_ip = req->to;
++ return 0;
+}
+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
++HASH_CREATE(ipportiphash, struct ipportip)
++HASH_DESTROY(ipportiphash)
++HASH_FLUSH(ipportiphash, struct ipportip)
++
++static inline void
++__ipportiphash_list_header(const struct ip_set_ipportiphash *map,
++ struct ip_set_req_ipportiphash_create *header)
+{
-+ ip_set_ip_t port;
++ header->from = map->first_ip;
++ header->to = map->last_ip;
++}
+
-+ if (flags[index+1] == 0)
-+ return -EINVAL;
++HASH_LIST_HEADER(ipportiphash)
++HASH_LIST_MEMBERS_SIZE(ipportiphash, struct ipportip)
++HASH_LIST_MEMBERS_MEMCPY(ipportiphash, struct ipportip)
+
-+ port = get_port(skb, flags[index+1]);
++IP_SET_RTYPE(ipportiphash, IPSET_TYPE_IP | IPSET_TYPE_PORT
++ | IPSET_TYPE_IP1 | IPSET_DATA_TRIPLE)
+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ NIPQUAD(ip_hdr(skb)->saddr),
-+ NIPQUAD(ip_hdr(skb)->daddr));
-+#else
-+ NIPQUAD(skb->nh.iph->saddr),
-+ NIPQUAD(skb->nh.iph->daddr));
-+#endif
-+ DP("flag %s port %u",
-+ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
-+ port);
-+ if (port == INVALID_PORT)
-+ return -EINVAL;
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("ipportiphash type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
+
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ port,
-+ hash_ip);
-+}
++REGISTER_MODULE(ipportiphash)
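
/*
 * A standalone userspace sketch of the probing scheme used by
 * __ipportip_add() above (and shared by the other hash types in this
 * patch): every element gets "probes" candidate slots, one per random
 * seed; an add has to scan all of them before reusing an empty slot,
 * because deletions leave holes behind which a duplicate could still be
 * hiding, and exhausting the probe list tells the caller to rehash.
 * The toy_* names and the hash mix are simplified stand-ins, not the
 * kernel's jhash-based code.
 */
#include <stdint.h>

struct toy_elem { uint32_t ip, ip1; };	/* an all-zero pair marks an empty slot */

struct toy_map {
	uint32_t hashsize;		/* number of slots */
	uint16_t probes;		/* candidate slots per element */
	const uint32_t *initval;	/* one random seed per probe */
	struct toy_elem *members;
};

enum { TOY_ADDED = 0, TOY_EXISTS = -1, TOY_NEED_REHASH = -2 };

static uint32_t toy_hash(uint32_t a, uint32_t b, uint32_t seed)
{
	uint32_t h = (seed ^ a) * 0x9e3779b1u;

	return (h ^ b ^ (h >> 16)) * 0x85ebca77u;
}

static int toy_add(struct toy_map *map, uint32_t ip, uint32_t ip1)
{
	struct toy_elem *slot = NULL;

	for (uint16_t i = 0; i < map->probes; i++) {
		uint32_t id = toy_hash(ip, ip1, map->initval[i]) % map->hashsize;
		struct toy_elem *e = &map->members[id];

		if (e->ip == ip && e->ip1 == ip1)
			return TOY_EXISTS;	/* already stored */
		if (!slot && !e->ip && !e->ip1)
			slot = e;		/* remember the first hole... */
		/* ...but keep probing: deleted entries may hide a duplicate */
	}
	if (slot) {
		slot->ip = ip;
		slot->ip1 = ip1;
		return TOY_ADDED;
	}
	return TOY_NEED_REHASH;			/* caller grows and rehashes */
}

int main(void)
{
	uint32_t seeds[4] = { 0x2545f491u, 0x9e3779b9u, 0x85ebca6bu, 0xc2b2ae35u };
	struct toy_elem slots[64] = { { 0, 0 } };
	struct toy_map map = { 64, 4, seeds, slots };

	return toy_add(&map, 0x0a000150u, 80) == TOY_ADDED ? 0 : 1;
}
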
+--- /dev/null
++++ b/net/ipv4/netfilter/ip_set_ipportnethash.c
+@@ -0,0 +1,304 @@
++/* Copyright (C) 2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module implementing an ip+port+net hash set */
++
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/ip.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++#include <linux/random.h>
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++#include <net/ip.h>
++
++#include <linux/netfilter_ipv4/ip_set_ipportnethash.h>
++#include <linux/netfilter_ipv4/ip_set_getport.h>
++
++static int limit = MAX_RANGE;
++
++#define jhash_ip2(map, i, ipport, ip1) \
++ jhash_2words(ipport, ip1, *(map->initval + i))
++
++static inline __u32
++ipportnethash_id_cidr(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port,
++ ip_set_ip_t ip1, uint8_t cidr)
+{
-+ struct ip_set_req_ipporthash_create *req =
-+ (struct ip_set_req_ipporthash_create *) data;
-+ struct ip_set_ipporthash *map;
-+ uint16_t i;
++ struct ip_set_ipportnethash *map = set->data;
++ __u32 id;
++ u_int16_t i;
++ struct ipportip *elem;
+
-+ if (size != sizeof(struct ip_set_req_ipporthash_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipporthash_create),
-+ size);
-+ return -EINVAL;
++ *hash_ip = pack_ip_port(map, ip, port);
++ DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
++ set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
++ ip1 = pack_ip_cidr(ip1, cidr);
++ if (!(*hash_ip || ip1))
++ return UINT_MAX;
++
++ for (i = 0; i < map->probes; i++) {
++ id = jhash_ip2(map, i, *hash_ip, ip1) % map->hashsize;
++ DP("hash key: %u", id);
++ elem = HARRAY_ELEM(map->members, struct ipportip *, id);
++ if (elem->ip == *hash_ip && elem->ip1 == ip1)
++ return id;
++ /* No shortcut - there can be deleted entries. */
+ }
++ return UINT_MAX;
++}
+
-+ if (req->hashsize < 1) {
-+ ip_set_printk("hashsize too small");
-+ return -ENOEXEC;
-+ }
++static inline __u32
++ipportnethash_id(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
++{
++ struct ip_set_ipportnethash *map = set->data;
++ __u32 id = UINT_MAX;
++ int i;
+
-+ if (req->probes < 1) {
-+ ip_set_printk("probes too small");
-+ return -ENOEXEC;
++ for (i = 0; i < 30 && map->cidr[i]; i++) {
++ id = ipportnethash_id_cidr(set, hash_ip, ip, port, ip1,
++ map->cidr[i]);
++ if (id != UINT_MAX)
++ break;
+ }
++ return id;
++}
+
-+ map = kmalloc(sizeof(struct ip_set_ipporthash)
-+ + req->probes * sizeof(uint32_t), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_ipporthash)
-+ + req->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ for (i = 0; i < req->probes; i++)
-+ get_random_bytes(((uint32_t *) map->initval)+i, 4);
-+ map->elements = 0;
-+ map->hashsize = req->hashsize;
-+ map->probes = req->probes;
-+ map->resize = req->resize;
-+ map->first_ip = req->from;
-+ map->last_ip = req->to;
-+ map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
-+ kfree(map);
-+ return -ENOMEM;
-+ }
++static inline int
++ipportnethash_test_cidr(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port,
++ ip_set_ip_t ip1, uint8_t cidr)
++{
++ struct ip_set_ipportnethash *map = set->data;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
+
-+ set->data = map;
-+ return 0;
++ return (ipportnethash_id_cidr(set, hash_ip, ip, port, ip1,
++ cidr) != UINT_MAX);
+}
+
-+static void destroy(struct ip_set *set)
++static inline int
++ipportnethash_test(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+
-+ harray_free(map->members);
-+ kfree(map);
++ struct ip_set_ipportnethash *map = set->data;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
+
-+ set->data = NULL;
++ return (ipportnethash_id(set, hash_ip, ip, port, ip1) != UINT_MAX);
+}
+
-+static void flush(struct ip_set *set)
++static int
++ipportnethash_utest(struct ip_set *set, const void *data, u_int32_t size,
++ ip_set_ip_t *hash_ip)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
-+ map->elements = 0;
++ const struct ip_set_req_ipportnethash *req = data;
++
++ if (req->cidr <= 0 || req->cidr > 32)
++ return -EINVAL;
++ return (req->cidr == 32
++ ? ipportnethash_test(set, hash_ip, req->ip, req->port,
++ req->ip1)
++ : ipportnethash_test_cidr(set, hash_ip, req->ip, req->port,
++ req->ip1, req->cidr));
+}
+
-+static void list_header(const struct ip_set *set, void *data)
++#define KADT_CONDITION \
++ ip_set_ip_t port, ip1; \
++ \
++ if (flags[index+2] == 0) \
++ return 0; \
++ \
++ port = get_port(skb, flags[index+1]); \
++ ip1 = ipaddr(skb, flags[index+2]); \
++ \
++ if (port == INVALID_PORT) \
++ return 0;
++
++KADT(ipportnethash, test, ipaddr, port, ip1)
++
++static inline int
++__ipportnet_add(struct ip_set_ipportnethash *map,
++ ip_set_ip_t hash_ip, ip_set_ip_t ip1)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ struct ip_set_req_ipporthash_create *header =
-+ (struct ip_set_req_ipporthash_create *) data;
++ __u32 probe;
++ u_int16_t i;
++ struct ipportip *elem, *slot = NULL;
+
-+ header->hashsize = map->hashsize;
-+ header->probes = map->probes;
-+ header->resize = map->resize;
-+ header->from = map->first_ip;
-+ header->to = map->last_ip;
++ for (i = 0; i < map->probes; i++) {
++ probe = jhash_ip2(map, i, hash_ip, ip1) % map->hashsize;
++ elem = HARRAY_ELEM(map->members, struct ipportip *, probe);
++ if (elem->ip == hash_ip && elem->ip1 == ip1)
++ return -EEXIST;
++ if (!(slot || elem->ip || elem->ip1))
++ slot = elem;
++ /* There can be deleted entries, must check all slots */
++ }
++ if (slot) {
++ slot->ip = hash_ip;
++ slot->ip1 = ip1;
++ map->elements++;
++ return 0;
++ }
++ /* Trigger rehashing */
++ return -EAGAIN;
+}
+
-+static int list_members_size(const struct ip_set *set)
++static inline int
++__ipportnethash_add(struct ip_set_ipportnethash *map,
++ struct ipportip *elem)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+
-+ return (map->hashsize * sizeof(ip_set_ip_t));
++ return __ipportnet_add(map, elem->ip, elem->ip1);
+}
+
-+static void list_members(const struct ip_set *set, void *data)
++static inline int
++ipportnethash_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port,
++ ip_set_ip_t ip1, uint8_t cidr)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ ip_set_ip_t i, *elem;
++ struct ip_set_ipportnethash *map = set->data;
++	int ret;
++
++ if (map->elements > limit)
++ return -ERANGE;
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++ if (cidr <= 0 || cidr >= 32)
++ return -EINVAL;
++ if (map->nets[cidr-1] == UINT16_MAX)
++ return -ERANGE;
+
-+ for (i = 0; i < map->hashsize; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ ((ip_set_ip_t *)data)[i] = *elem;
++ *hash_ip = pack_ip_port(map, ip, port);
++ ip1 = pack_ip_cidr(ip1, cidr);
++ if (!(*hash_ip || ip1))
++ return -ERANGE;
++
++	ret = __ipportnet_add(map, *hash_ip, ip1);
++	if (ret == 0) {
++		if (!map->nets[cidr-1]++)
++			add_cidr_size(map->cidr, cidr);
++		/* map->elements is already updated in __ipportnet_add() */
+ }
++ return ret;
+}
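
/*
 * The nets[]/cidr[] bookkeeping in ipportnethash_add() above (mirrored in
 * the delete path below) leans on two helpers that are not defined in this
 * hunk, add_cidr_size() and del_cidr_size().  The sketch below is one
 * plausible, purely illustrative shape for them: cidr[] lists the prefix
 * lengths currently in use (kept most-specific-first so lookups try longer
 * prefixes before shorter ones) and nets[c-1] counts how many stored
 * elements use prefix length c, so the list only changes when a counter
 * moves between zero and one.
 */
#include <stdint.h>
#include <string.h>

#define TOY_CIDRS 30

/* insert prefix length 'len' into the in-use list, keeping descending order */
static void toy_add_cidr_size(uint8_t *cidr, uint8_t len)
{
	for (int i = 0; i < TOY_CIDRS; i++) {
		if (!cidr[i]) {			/* empty tail: just append */
			cidr[i] = len;
			return;
		}
		if (cidr[i] < len) {		/* shift the tail right, insert here */
			memmove(&cidr[i + 1], &cidr[i], TOY_CIDRS - 1 - i);
			cidr[i] = len;
			return;
		}
	}
}

/* drop prefix length 'len' from the list, closing the gap it leaves */
static void toy_del_cidr_size(uint8_t *cidr, uint8_t len)
{
	for (int i = 0; i < TOY_CIDRS; i++)
		if (cidr[i] == len) {
			memmove(&cidr[i], &cidr[i + 1], TOY_CIDRS - 1 - i);
			cidr[TOY_CIDRS - 1] = 0;
			return;
		}
}

/*
 * Caller pattern mirroring the hunk above:
 *
 *	if (!map->nets[cidr - 1]++)
 *		toy_add_cidr_size(map->cidr, cidr);
 *
 * and, once a delete drops the counter back to zero,
 *
 *	toy_del_cidr_size(map->cidr, cidr);
 */
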
+
-+static struct ip_set_type ip_set_ipporthash = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_DATA_DOUBLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_ipporthash),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .retry = &retry,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_ipporthash_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++#undef KADT_CONDITION
++#define KADT_CONDITION \
++ struct ip_set_ipportnethash *map = set->data; \
++ uint8_t cidr = map->cidr[0] ? map->cidr[0] : 31; \
++ ip_set_ip_t port, ip1; \
++ \
++ if (flags[index+2] == 0) \
++ return 0; \
++ \
++ port = get_port(skb, flags[index+1]); \
++ ip1 = ipaddr(skb, flags[index+2]); \
++ \
++ if (port == INVALID_PORT) \
++ return 0;
+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("ipporthash type of IP sets");
-+module_param(limit, int, 0600);
-+MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
++UADT(ipportnethash, add, req->port, req->ip1, req->cidr)
++KADT(ipportnethash, add, ipaddr, port, ip1, cidr)
+
-+static int __init ip_set_ipporthash_init(void)
++static inline void
++__ipportnethash_retry(struct ip_set_ipportnethash *tmp,
++ struct ip_set_ipportnethash *map)
+{
-+ return ip_set_register_set_type(&ip_set_ipporthash);
++ tmp->first_ip = map->first_ip;
++ tmp->last_ip = map->last_ip;
++ memcpy(tmp->cidr, map->cidr, sizeof(tmp->cidr));
++ memcpy(tmp->nets, map->nets, sizeof(tmp->nets));
+}
+
-+static void __exit ip_set_ipporthash_fini(void)
++HASH_RETRY2(ipportnethash, struct ipportip)
++
++static inline int
++ipportnethash_del(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port,
++ ip_set_ip_t ip1, uint8_t cidr)
+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_ipporthash);
++ struct ip_set_ipportnethash *map = set->data;
++ ip_set_ip_t id;
++ struct ipportip *elem;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++ if (!ip)
++ return -ERANGE;
++ if (cidr <= 0 || cidr >= 32)
++ return -EINVAL;
++
++ id = ipportnethash_id_cidr(set, hash_ip, ip, port, ip1, cidr);
++
++ if (id == UINT_MAX)
++ return -EEXIST;
++
++ elem = HARRAY_ELEM(map->members, struct ipportip *, id);
++ elem->ip = elem->ip1 = 0;
++ map->elements--;
++	if (!--map->nets[cidr-1])
++ del_cidr_size(map->cidr, cidr);
++
++ return 0;
++}
++
++UADT(ipportnethash, del, req->port, req->ip1, req->cidr)
++KADT(ipportnethash, del, ipaddr, port, ip1, cidr)
++
++static inline int
++__ipportnethash_create(const struct ip_set_req_ipportnethash_create *req,
++ struct ip_set_ipportnethash *map)
++{
++ if (req->to - req->from > MAX_RANGE) {
++ ip_set_printk("range too big, %d elements (max %d)",
++ req->to - req->from + 1, MAX_RANGE+1);
++ return -ENOEXEC;
++ }
++ map->first_ip = req->from;
++ map->last_ip = req->to;
++ memset(map->cidr, 0, sizeof(map->cidr));
++ memset(map->nets, 0, sizeof(map->nets));
++ return 0;
+}
+
-+module_init(ip_set_ipporthash_init);
-+module_exit(ip_set_ipporthash_fini);
++HASH_CREATE(ipportnethash, struct ipportip)
++HASH_DESTROY(ipportnethash)
++HASH_FLUSH_CIDR(ipportnethash, struct ipportip);
++
++static inline void
++__ipportnethash_list_header(const struct ip_set_ipportnethash *map,
++ struct ip_set_req_ipportnethash_create *header)
++{
++ header->from = map->first_ip;
++ header->to = map->last_ip;
++}
++
++HASH_LIST_HEADER(ipportnethash)
++
++HASH_LIST_MEMBERS_SIZE(ipportnethash, struct ipportip)
++HASH_LIST_MEMBERS_MEMCPY(ipportnethash, struct ipportip)
++
++IP_SET_RTYPE(ipportnethash, IPSET_TYPE_IP | IPSET_TYPE_PORT
++ | IPSET_TYPE_IP1 | IPSET_DATA_TRIPLE)
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("ipportnethash type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
++
++REGISTER_MODULE(ipportnethash)
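
/*
 * A compact userspace illustration of the lookup strategy in
 * ipportnethash_id() above: there is no way to know which prefix length a
 * matching network was stored with, so the code masks the address once per
 * prefix length that is actually in use (map->cidr[]) and asks the hash for
 * each candidate, stopping at the first hit.  The real code folds the prefix
 * length into the stored word with pack_ip_cidr(); the sketch keeps network
 * and prefix length as an explicit pair for clarity, and toy_lookup() stands
 * in for the hash probe.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_net { uint32_t net; uint8_t cidr; };

/* mask a host-order IPv4 address down to its /cidr network */
static uint32_t toy_mask(uint32_t ip, uint8_t cidr)
{
	return cidr ? ip & ~((1u << (32 - cidr)) - 1) : 0;
}

/* stand-in for the hash probe: linear scan of the stored networks */
static int toy_lookup(const struct toy_net *stored, int n,
		      uint32_t ip, uint8_t cidr)
{
	for (int i = 0; i < n; i++)
		if (stored[i].cidr == cidr && stored[i].net == toy_mask(ip, cidr))
			return 1;
	return 0;
}

int main(void)
{
	struct toy_net stored[] = { { 0xc0a80100u, 24 } };	/* 192.168.1.0/24 */
	uint8_t in_use[30] = { 24 };	/* like map->cidr[]: prefix lengths in use */
	uint32_t ip = 0xc0a80137u;	/* 192.168.1.55 */

	for (int i = 0; i < 30 && in_use[i]; i++)
		if (toy_lookup(stored, 1, ip, in_use[i])) {
			printf("matched /%d\n", (int)in_use[i]);
			break;
		}
	return 0;
}
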
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_iptree.c
-@@ -0,0 +1,612 @@
-+/* Copyright (C) 2005 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+@@ -0,0 +1,466 @@
++/* Copyright (C) 2005-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
@@ -4525,24 +5067,20 @@
+
+/* Kernel module implementing an IP set type: the iptree type */
+
-+#include <linux/version.h>
+#include <linux/module.h>
++#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
++#include <linux/timer.h>
+
-+/* Backward compatibility */
-+#ifndef __nocast
-+#define __nocast
-+#endif
-+
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
+#include <linux/netfilter_ipv4/ip_set_iptree.h>
+
+static int limit = MAX_RANGE;
@@ -4553,13 +5091,9 @@
+ * to delete the gc timer at destroying/flushing a set */
+#define IPTREE_DESTROY_SLEEP 100
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+static struct kmem_cache *branch_cachep;
-+static struct kmem_cache *leaf_cachep;
-+#else
-+static kmem_cache_t *branch_cachep;
-+static kmem_cache_t *leaf_cachep;
-+#endif
++static __KMEM_CACHE_T__ *branch_cachep;
++static __KMEM_CACHE_T__ *leaf_cachep;
++
+
+#if defined(__LITTLE_ENDIAN)
+#define ABCD(a,b,c,d,addrp) do { \
@@ -4587,9 +5121,9 @@
+} while (0)
+
+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++iptree_test(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptree *map = set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
@@ -4597,7 +5131,7 @@
+
+ if (!ip)
+ return -ERANGE;
-+
++
+ *hash_ip = ip;
+ ABCD(a, b, c, d, hash_ip);
+ DP("%u %u %u %u timeout %u", a, b, c, d, map->timeout);
@@ -4610,53 +5144,10 @@
+ || time_after(dtree->expires[d], jiffies));
+}
+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iptree *req =
-+ (struct ip_set_req_iptree *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptree)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iptree),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ int res;
-+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ NIPQUAD(ip_hdr(skb)->saddr),
-+ NIPQUAD(ip_hdr(skb)->daddr));
-+#else
-+ NIPQUAD(skb->nh.iph->saddr),
-+ NIPQUAD(skb->nh.iph->daddr));
-+#endif
++#define KADT_CONDITION
+
-+ res = __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+ return (res < 0 ? 0 : res);
-+}
++UADT(iptree, test)
++KADT(iptree, test, ipaddr)
+
+#define ADDIP_WALK(map, elem, branch, type, cachep) do { \
+ if ((map)->tree[elem]) { \
@@ -4671,24 +5162,24 @@
+ (map)->tree[elem] = branch; \
+ DP("alloc %u", elem); \
+ } \
-+} while (0)
++} while (0)
+
+static inline int
-+__addip(struct ip_set *set, ip_set_ip_t ip, unsigned int timeout,
-+ ip_set_ip_t *hash_ip)
++iptree_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, unsigned int timeout)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptree *map = set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
+ unsigned char a,b,c,d;
+ int ret = 0;
-+
++
+ if (!ip || map->elements >= limit)
+ /* We could call the garbage collector
+ * but it's probably overkill */
+ return -ERANGE;
-+
++
+ *hash_ip = ip;
+ ABCD(a, b, c, d, hash_ip);
+ DP("%u %u %u %u timeout %u", a, b, c, d, timeout);
@@ -4698,6 +5189,8 @@
+ if (dtree->expires[d]
+ && (!map->timeout || time_after(dtree->expires[d], jiffies)))
+ ret = -EEXIST;
++ if (map->timeout && timeout == 0)
++ timeout = map->timeout;
+ dtree->expires[d] = map->timeout ? (timeout * HZ + jiffies) : 1;
+ /* Lottery: I won! */
+ if (dtree->expires[d] == 0)
@@ -4708,47 +5201,8 @@
+ return ret;
+}
+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_req_iptree *req =
-+ (struct ip_set_req_iptree *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptree)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iptree),
-+ size);
-+ return -EINVAL;
-+ }
-+ DP("%u.%u.%u.%u %u", HIPQUAD(req->ip), req->timeout);
-+ return __addip(set, req->ip,
-+ req->timeout ? req->timeout : map->timeout,
-+ hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+
-+ return __addip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ map->timeout,
-+ hash_ip);
-+}
++UADT(iptree, add, req->timeout)
++KADT(iptree, add, ipaddr, 0)
+
+#define DELIP_WALK(map, elem, branch) do { \
+ if ((map)->tree[elem]) { \
@@ -4758,17 +5212,17 @@
+} while (0)
+
+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++iptree_del(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptree *map = set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
+ unsigned char a,b,c,d;
-+
++
+ if (!ip)
+ return -ERANGE;
-+
++
+ *hash_ip = ip;
+ ABCD(a, b, c, d, hash_ip);
+ DELIP_WALK(map, a, btree);
@@ -4783,40 +5237,8 @@
+ return -EEXIST;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iptree *req =
-+ (struct ip_set_req_iptree *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptree)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iptree),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT(iptree, del)
++KADT(iptree, del, ipaddr)
+
+#define LOOP_WALK_BEGIN(map, i, branch) \
+ for (i = 0; i < 256; i++) { \
@@ -4826,10 +5248,11 @@
+
+#define LOOP_WALK_END }
+
-+static void ip_tree_gc(unsigned long ul_set)
++static void
++ip_tree_gc(unsigned long ul_set)
+{
-+ struct ip_set *set = (void *) ul_set;
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set *set = (struct ip_set *) ul_set;
++ struct ip_set_iptree *map = set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
@@ -4891,14 +5314,15 @@
+ }
+ LOOP_WALK_END;
+ write_unlock_bh(&set->lock);
-+
++
+ map->gc.expires = jiffies + map->gc_interval * HZ;
+ add_timer(&map->gc);
+}
+
-+static inline void init_gc_timer(struct ip_set *set)
++static inline void
++init_gc_timer(struct ip_set *set)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptree *map = set->data;
+
+ /* Even if there is no timeout for the entries,
+ * we still have to call gc because delete
@@ -4911,22 +5335,22 @@
+ add_timer(&map->gc);
+}
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static int
++iptree_create(struct ip_set *set, const void *data, u_int32_t size)
+{
-+ struct ip_set_req_iptree_create *req =
-+ (struct ip_set_req_iptree_create *) data;
++ const struct ip_set_req_iptree_create *req = data;
+ struct ip_set_iptree *map;
+
+ if (size != sizeof(struct ip_set_req_iptree_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
++ ip_set_printk("data length wrong (want %zu, have %lu)",
+ sizeof(struct ip_set_req_iptree_create),
-+ size);
++ (unsigned long)size);
+ return -EINVAL;
+ }
+
+ map = kmalloc(sizeof(struct ip_set_iptree), GFP_KERNEL);
+ if (!map) {
-+ DP("out of memory for %d bytes",
++ DP("out of memory for %zu bytes",
+ sizeof(struct ip_set_iptree));
+ return -ENOMEM;
+ }
@@ -4940,7 +5364,8 @@
+ return 0;
+}
+
-+static void __flush(struct ip_set_iptree *map)
++static inline void
++__flush(struct ip_set_iptree *map)
+{
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
@@ -4959,9 +5384,10 @@
+ map->elements = 0;
+}
+
-+static void destroy(struct ip_set *set)
++static void
++iptree_destroy(struct ip_set *set)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptree *map = set->data;
+
+ /* gc might be running */
+ while (!del_timer(&map->gc))
@@ -4971,11 +5397,12 @@
+ set->data = NULL;
+}
+
-+static void flush(struct ip_set *set)
++static void
++iptree_flush(struct ip_set *set)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptree *map = set->data;
+ unsigned int timeout = map->timeout;
-+
++
+ /* gc might be running */
+ while (!del_timer(&map->gc))
+ msleep(IPTREE_DESTROY_SLEEP);
@@ -4986,18 +5413,19 @@
+ init_gc_timer(set);
+}
+
-+static void list_header(const struct ip_set *set, void *data)
++static void
++iptree_list_header(const struct ip_set *set, void *data)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_req_iptree_create *header =
-+ (struct ip_set_req_iptree_create *) data;
++ const struct ip_set_iptree *map = set->data;
++ struct ip_set_req_iptree_create *header = data;
+
+ header->timeout = map->timeout;
+}
+
-+static int list_members_size(const struct ip_set *set)
++static int
++iptree_list_members_size(const struct ip_set *set)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ const struct ip_set_iptree *map = set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
@@ -5020,9 +5448,10 @@
+ return (count * sizeof(struct ip_set_req_iptree));
+}
+
-+static void list_members(const struct ip_set *set, void *data)
++static void
++iptree_list_members(const struct ip_set *set, void *data)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ const struct ip_set_iptree *map = set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
@@ -5036,7 +5465,7 @@
+ for (d = 0; d < 256; d++) {
+ if (dtree->expires[d]
+ && (!map->timeout || time_after(dtree->expires[d], jiffies))) {
-+ entry = (struct ip_set_req_iptree *)(data + offset);
++ entry = data + offset;
+ entry->ip = ((a << 24) | (b << 16) | (c << 8) | d);
+ entry->timeout = !map->timeout ? 0
+ : (dtree->expires[d] - jiffies)/HZ;
@@ -5048,26 +5477,7 @@
+ LOOP_WALK_END;
+}
+
-+static struct ip_set_type ip_set_iptree = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_iptree),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_iptree_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_TYPE(iptree, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -5078,30 +5488,16 @@
+static int __init ip_set_iptree_init(void)
+{
+ int ret;
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ branch_cachep = kmem_cache_create("ip_set_iptreeb",
-+ sizeof(struct ip_set_iptreeb),
-+ 0, 0, NULL);
-+#else
-+ branch_cachep = kmem_cache_create("ip_set_iptreeb",
-+ sizeof(struct ip_set_iptreeb),
-+ 0, 0, NULL, NULL);
-+#endif
++
++ branch_cachep = KMEM_CACHE_CREATE("ip_set_iptreeb",
++ sizeof(struct ip_set_iptreeb));
+ if (!branch_cachep) {
+ printk(KERN_ERR "Unable to create ip_set_iptreeb slab cache\n");
+ ret = -ENOMEM;
+ goto out;
+ }
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ leaf_cachep = kmem_cache_create("ip_set_iptreed",
-+ sizeof(struct ip_set_iptreed),
-+ 0, 0, NULL);
-+#else
-+ leaf_cachep = kmem_cache_create("ip_set_iptreed",
-+ sizeof(struct ip_set_iptreed),
-+ 0, 0, NULL, NULL);
-+#endif
++ leaf_cachep = KMEM_CACHE_CREATE("ip_set_iptreed",
++ sizeof(struct ip_set_iptreed));
+ if (!leaf_cachep) {
+ printk(KERN_ERR "Unable to create ip_set_iptreed slab cache\n");
+ ret = -ENOMEM;
@@ -5112,7 +5508,7 @@
+ goto out;
+
+ kmem_cache_destroy(leaf_cachep);
-+ free_branch:
++ free_branch:
+ kmem_cache_destroy(branch_cachep);
+ out:
+ return ret;
@@ -5130,7 +5526,7 @@
+module_exit(ip_set_iptree_fini);
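
/*
 * A small userspace sketch of the per-address expiry convention used by the
 * iptree type above: expires[d] == 0 means "not in the set", the special
 * value 1 means "member of a set created without timeouts", and anything
 * else is an absolute deadline (jiffies in the kernel, seconds here).  The
 * garbage collector then only has to walk the leaves and zero slots whose
 * deadline has passed.  toy_* names are illustrative; only one leaf (one
 * /24 worth of addresses) is shown.
 */
#include <stdint.h>
#include <time.h>

struct toy_leaf { time_t expires[256]; };	/* like ip_set_iptreed */

static void toy_add(struct toy_leaf *leaf, uint8_t d, unsigned timeout)
{
	/* timeout == 0 marks a permanent entry, mirroring "expires[d] = 1" */
	leaf->expires[d] = timeout ? time(NULL) + timeout : 1;
}

static int toy_test(const struct toy_leaf *leaf, uint8_t d, int set_has_timeout)
{
	return leaf->expires[d] &&
	       (!set_has_timeout || leaf->expires[d] > time(NULL));
}

static void toy_gc(struct toy_leaf *leaf, int set_has_timeout)
{
	if (!set_has_timeout)
		return;			/* nothing can ever expire */
	for (int d = 0; d < 256; d++)
		if (leaf->expires[d] && leaf->expires[d] <= time(NULL))
			leaf->expires[d] = 0;
	/* the kernel version also frees leaves that became fully empty */
}
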
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_iptreemap.c
-@@ -0,0 +1,829 @@
+@@ -0,0 +1,708 @@
+/* Copyright (C) 2007 Sven Wegener <sven.wegener@stealer.net>
+ *
+ * This program is free software; you can redistribute it and/or modify it
@@ -5139,38 +5535,33 @@
+ */
+
+/* This module implements the iptreemap ipset type. It uses bitmaps to
-+ * represent every single IPv4 address as a single bit. The bitmaps are managed
-+ * in a tree structure, where the first three octets of an addresses are used
-+ * as an index to find the bitmap and the last octet is used as the bit number.
++ * represent every single IPv4 address as a bit. The bitmaps are managed in a
++ * tree structure, where the first three octets of an address are used as an
++ * index to find the bitmap and the last octet is used as the bit number.
+ */
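
/*
 * The comment above summarises the whole data structure; a minimal
 * userspace sketch of that indexing (first three octets select the bitmap,
 * last octet selects the bit) might look like the following.  The toy_*
 * names and the simple allocation strategy are illustrative only: the real
 * module allocates the b/c/d levels lazily from slab caches and also tracks
 * dirty and completely-full bitmaps for garbage collection.
 */
#include <stdint.h>
#include <stdlib.h>

struct toy_d { uint32_t bitmap[256 / 32]; };	/* one bit per last octet */
struct toy_c { struct toy_d *d[256]; };
struct toy_b { struct toy_c *c[256]; };
struct toy_map { struct toy_b *b[256]; };

static int toy_set(struct toy_map *m, uint32_t ip)
{
	unsigned a = ip >> 24 & 255, b = ip >> 16 & 255,
		 c = ip >> 8 & 255, d = ip & 255;

	if (!m->b[a] && !(m->b[a] = calloc(1, sizeof(struct toy_b))))
		return -1;
	if (!m->b[a]->c[b] && !(m->b[a]->c[b] = calloc(1, sizeof(struct toy_c))))
		return -1;
	if (!m->b[a]->c[b]->d[c] &&
	    !(m->b[a]->c[b]->d[c] = calloc(1, sizeof(struct toy_d))))
		return -1;
	m->b[a]->c[b]->d[c]->bitmap[d / 32] |= 1u << (d % 32);
	return 0;
}

static int toy_test(const struct toy_map *m, uint32_t ip)
{
	unsigned a = ip >> 24 & 255, b = ip >> 16 & 255,
		 c = ip >> 8 & 255, d = ip & 255;

	return m->b[a] && m->b[a]->c[b] && m->b[a]->c[b]->d[c] &&
	       (m->b[a]->c[b]->d[c]->bitmap[d / 32] >> (d % 32) & 1);
}
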
+
-+#include <linux/version.h>
++#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
++#include <linux/timer.h>
+
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
+#include <linux/netfilter_ipv4/ip_set_iptreemap.h>
+
+#define IPTREEMAP_DEFAULT_GC_TIME (5 * 60)
+#define IPTREEMAP_DESTROY_SLEEP (100)
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+static struct kmem_cache *cachep_b;
-+static struct kmem_cache *cachep_c;
-+static struct kmem_cache *cachep_d;
-+#else
-+static kmem_cache_t *cachep_b;
-+static kmem_cache_t *cachep_c;
-+static kmem_cache_t *cachep_d;
-+#endif
++static __KMEM_CACHE_T__ *cachep_b;
++static __KMEM_CACHE_T__ *cachep_c;
++static __KMEM_CACHE_T__ *cachep_d;
+
+static struct ip_set_iptreemap_d *fullbitmap_d;
+static struct ip_set_iptreemap_c *fullbitmap_c;
@@ -5319,9 +5710,6 @@
+#define LOOP_WALK_END_COUNT() \
+ }
+
-+#define MIN(a, b) (a < b ? a : b)
-+#define MAX(a, b) (a > b ? a : b)
-+
+#define GETVALUE1(a, a1, b1, r) \
+ (a == a1 ? b1 : r)
+
@@ -5391,9 +5779,9 @@
+}
+
+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++iptreemap_test(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5410,40 +5798,13 @@
+ return !!test_bit(d, (void *) dtree->bitmap);
+}
+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptreemap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
-+ return -EINVAL;
-+ }
-+
-+ return __testip(set, req->start, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
-+{
-+ int res;
++#define KADT_CONDITION
+
-+ res = __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+
-+ return (res < 0 ? 0 : res);
-+}
++UADT(iptreemap, test)
++KADT(iptreemap, test, ipaddr)
+
+static inline int
-+__addip_single(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++__addip_single(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
+ struct ip_set_iptreemap_b *btree;
@@ -5459,18 +5820,19 @@
+ ADDIP_WALK(btree, b, ctree, struct ip_set_iptreemap_c, cachep_c, fullbitmap_c);
+ ADDIP_WALK(ctree, c, dtree, struct ip_set_iptreemap_d, cachep_d, fullbitmap_d);
+
-+ if (test_and_set_bit(d, (void *) dtree->bitmap))
++ if (__test_and_set_bit(d, (void *) dtree->bitmap))
+ return -EEXIST;
+
-+ set_bit(b, (void *) btree->dirty);
++ __set_bit(b, (void *) btree->dirty);
+
+ return 0;
+}
+
+static inline int
-+__addip_range(struct ip_set *set, ip_set_ip_t start, ip_set_ip_t end, ip_set_ip_t *hash_ip)
++iptreemap_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t start, ip_set_ip_t end)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5479,7 +5841,7 @@
+ unsigned char a2, b2, c2, d2;
+
+ if (start == end)
-+ return __addip_single(set, start, hash_ip);
++ return __addip_single(set, hash_ip, start);
+
+ *hash_ip = start;
+
@@ -5491,8 +5853,8 @@
+ ADDIP_RANGE_LOOP(btree, b, GETVALUE1(a, a1, b1, 0), GETVALUE1(a, a2, b2, 255), CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2), ctree, fullbitmap_c, cachep_c, free_c) {
+ ADDIP_RANGE_LOOP(ctree, c, GETVALUE2(a, b, a1, b1, c1, 0), GETVALUE2(a, b, a2, b2, c2, 255), CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2), dtree, fullbitmap_d, cachep_d, free_d) {
+ for (d = GETVALUE3(a, b, c, a1, b1, c1, d1, 0); d <= GETVALUE3(a, b, c, a2, b2, c2, d2, 255); d++)
-+ set_bit(d, (void *) dtree->bitmap);
-+ set_bit(b, (void *) btree->dirty);
++ __set_bit(d, (void *) dtree->bitmap);
++ __set_bit(b, (void *) btree->dirty);
+ } ADDIP_RANGE_LOOP_END();
+ } ADDIP_RANGE_LOOP_END();
+ } ADDIP_RANGE_LOOP_END();
@@ -5500,39 +5862,14 @@
+ return 0;
+}
+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptreemap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
-+ return -EINVAL;
-+ }
-+
-+ return __addip_range(set, MIN(req->start, req->end), MAX(req->start, req->end), hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
-+{
-+
-+ return __addip_single(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT0(iptreemap, add, min(req->ip, req->end), max(req->ip, req->end))
++KADT(iptreemap, add, ipaddr, ip)
+
+static inline int
-+__delip_single(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip, unsigned int __nocast flags)
++__delip_single(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, gfp_t flags)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5546,18 +5883,19 @@
+ DELIP_WALK(btree, b, ctree, cachep_c, fullbitmap_c, flags);
+ DELIP_WALK(ctree, c, dtree, cachep_d, fullbitmap_d, flags);
+
-+ if (!test_and_clear_bit(d, (void *) dtree->bitmap))
++ if (!__test_and_clear_bit(d, (void *) dtree->bitmap))
+ return -EEXIST;
+
-+ set_bit(b, (void *) btree->dirty);
++ __set_bit(b, (void *) btree->dirty);
+
+ return 0;
+}
+
+static inline int
-+__delip_range(struct ip_set *set, ip_set_ip_t start, ip_set_ip_t end, ip_set_ip_t *hash_ip, unsigned int __nocast flags)
++iptreemap_del(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t start, ip_set_ip_t end, gfp_t flags)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5566,7 +5904,7 @@
+ unsigned char a2, b2, c2, d2;
+
+ if (start == end)
-+ return __delip_single(set, start, hash_ip, flags);
++ return __delip_single(set, hash_ip, start, flags);
+
+ *hash_ip = start;
+
@@ -5578,8 +5916,8 @@
+ DELIP_RANGE_LOOP(btree, b, GETVALUE1(a, a1, b1, 0), GETVALUE1(a, a2, b2, 255), CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2), ctree, fullbitmap_c, cachep_c, free_c, flags) {
+ DELIP_RANGE_LOOP(ctree, c, GETVALUE2(a, b, a1, b1, c1, 0), GETVALUE2(a, b, a2, b2, c2, 255), CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2), dtree, fullbitmap_d, cachep_d, free_d, flags) {
+ for (d = GETVALUE3(a, b, c, a1, b1, c1, d1, 0); d <= GETVALUE3(a, b, c, a2, b2, c2, d2, 255); d++)
-+ clear_bit(d, (void *) dtree->bitmap);
-+ set_bit(b, (void *) btree->dirty);
++ __clear_bit(d, (void *) dtree->bitmap);
++ __set_bit(b, (void *) btree->dirty);
+ } DELIP_RANGE_LOOP_END();
+ } DELIP_RANGE_LOOP_END();
+ } DELIP_RANGE_LOOP_END();
@@ -5587,34 +5925,8 @@
+ return 0;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptreemap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
-+ return -EINVAL;
-+ }
-+
-+ return __delip_range(set, MIN(req->start, req->end), MAX(req->start, req->end), hash_ip, GFP_KERNEL);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
-+{
-+ return __delip_single(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip,
-+ GFP_ATOMIC);
-+}
++UADT0(iptreemap, del, min(req->ip, req->end), max(req->ip, req->end), GFP_KERNEL)
++KADT(iptreemap, del, ipaddr, ip, GFP_ATOMIC)
+
+/* Check the status of the bitmap
+ * -1 == all bits cleared
@@ -5638,7 +5950,7 @@
+gc(unsigned long addr)
+{
+ struct ip_set *set = (struct ip_set *) addr;
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5649,7 +5961,7 @@
+
+ LOOP_WALK_BEGIN_GC(map, a, btree, fullbitmap_b, cachep_b, i) {
+ LOOP_WALK_BEGIN_GC(btree, b, ctree, fullbitmap_c, cachep_c, j) {
-+ if (!test_and_clear_bit(b, (void *) btree->dirty))
++ if (!__test_and_clear_bit(b, (void *) btree->dirty))
+ continue;
+ LOOP_WALK_BEGIN_GC(ctree, c, dtree, fullbitmap_d, cachep_d, k) {
+ switch (bitmap_status(dtree)) {
@@ -5677,7 +5989,7 @@
+static inline void
+init_gc_timer(struct ip_set *set)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+
+ init_timer(&map->gc);
+ map->gc.data = (unsigned long) set;
@@ -5686,16 +5998,12 @@
+ add_timer(&map->gc);
+}
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static int
++iptreemap_create(struct ip_set *set, const void *data, u_int32_t size)
+{
-+ struct ip_set_req_iptreemap_create *req = (struct ip_set_req_iptreemap_create *) data;
++ const struct ip_set_req_iptreemap_create *req = data;
+ struct ip_set_iptreemap *map;
+
-+ if (size != sizeof(struct ip_set_req_iptreemap_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap_create), size);
-+ return -EINVAL;
-+ }
-+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
@@ -5708,7 +6016,8 @@
+ return 0;
+}
+
-+static inline void __flush(struct ip_set_iptreemap *map)
++static inline void
++__flush(struct ip_set_iptreemap *map)
+{
+ struct ip_set_iptreemap_b *btree;
+ unsigned int a;
@@ -5719,9 +6028,10 @@
+ LOOP_WALK_END();
+}
+
-+static void destroy(struct ip_set *set)
++static void
++iptreemap_destroy(struct ip_set *set)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+
+ while (!del_timer(&map->gc))
+ msleep(IPTREEMAP_DESTROY_SLEEP);
@@ -5732,9 +6042,10 @@
+ set->data = NULL;
+}
+
-+static void flush(struct ip_set *set)
++static void
++iptreemap_flush(struct ip_set *set)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+
+ while (!del_timer(&map->gc))
+ msleep(IPTREEMAP_DESTROY_SLEEP);
@@ -5746,17 +6057,19 @@
+ init_gc_timer(set);
+}
+
-+static void list_header(const struct ip_set *set, void *data)
++static void
++iptreemap_list_header(const struct ip_set *set, void *data)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
-+ struct ip_set_req_iptreemap_create *header = (struct ip_set_req_iptreemap_create *) data;
++ struct ip_set_iptreemap *map = set->data;
++ struct ip_set_req_iptreemap_create *header = data;
+
+ header->gc_interval = map->gc_interval;
+}
+
-+static int list_members_size(const struct ip_set *set)
++static int
++iptreemap_list_members_size(const struct ip_set *set)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5783,19 +6096,21 @@
+ return (count * sizeof(struct ip_set_req_iptreemap));
+}
+
-+static inline size_t add_member(void *data, size_t offset, ip_set_ip_t start, ip_set_ip_t end)
++static inline u_int32_t
++add_member(void *data, size_t offset, ip_set_ip_t start, ip_set_ip_t end)
+{
-+ struct ip_set_req_iptreemap *entry = (struct ip_set_req_iptreemap *) (data + offset);
++ struct ip_set_req_iptreemap *entry = data + offset;
+
-+ entry->start = start;
++ entry->ip = start;
+ entry->end = end;
+
+ return sizeof(*entry);
+}
+
-+static void list_members(const struct ip_set *set, void *data)
++static void
++iptreemap_list_members(const struct ip_set *set, void *data)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5830,26 +6145,7 @@
+ add_member(data, offset, start, end);
+}
+
-+static struct ip_set_type ip_set_iptreemap = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = create,
-+ .destroy = destroy,
-+ .flush = flush,
-+ .reqsize = sizeof(struct ip_set_req_iptreemap),
-+ .addip = addip,
-+ .addip_kernel = addip_kernel,
-+ .delip = delip,
-+ .delip_kernel = delip_kernel,
-+ .testip = testip,
-+ .testip_kernel = testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_iptreemap_create),
-+ .list_header = list_header,
-+ .list_members_size = list_members_size,
-+ .list_members = list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_TYPE(iptreemap, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sven Wegener <sven.wegener@stealer.net>");
@@ -5860,43 +6156,22 @@
+ int ret = -ENOMEM;
+ int a;
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ cachep_b = kmem_cache_create("ip_set_iptreemap_b",
-+ sizeof(struct ip_set_iptreemap_b),
-+ 0, 0, NULL);
-+#else
-+ cachep_b = kmem_cache_create("ip_set_iptreemap_b",
-+ sizeof(struct ip_set_iptreemap_b),
-+ 0, 0, NULL, NULL);
-+#endif
++ cachep_b = KMEM_CACHE_CREATE("ip_set_iptreemap_b",
++ sizeof(struct ip_set_iptreemap_b));
+ if (!cachep_b) {
+ ip_set_printk("Unable to create ip_set_iptreemap_b slab cache");
+ goto out;
+ }
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ cachep_c = kmem_cache_create("ip_set_iptreemap_c",
-+ sizeof(struct ip_set_iptreemap_c),
-+ 0, 0, NULL);
-+#else
-+ cachep_c = kmem_cache_create("ip_set_iptreemap_c",
-+ sizeof(struct ip_set_iptreemap_c),
-+ 0, 0, NULL, NULL);
-+#endif
++ cachep_c = KMEM_CACHE_CREATE("ip_set_iptreemap_c",
++ sizeof(struct ip_set_iptreemap_c));
+ if (!cachep_c) {
+ ip_set_printk("Unable to create ip_set_iptreemap_c slab cache");
+ goto outb;
+ }
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ cachep_d = kmem_cache_create("ip_set_iptreemap_d",
-+ sizeof(struct ip_set_iptreemap_d),
-+ 0, 0, NULL);
-+#else
-+ cachep_d = kmem_cache_create("ip_set_iptreemap_d",
-+ sizeof(struct ip_set_iptreemap_d),
-+ 0, 0, NULL, NULL);
-+#endif
++ cachep_d = KMEM_CACHE_CREATE("ip_set_iptreemap_d",
++ sizeof(struct ip_set_iptreemap_d));
+ if (!cachep_d) {
+ ip_set_printk("Unable to create ip_set_iptreemap_d slab cache");
+ goto outc;
@@ -5962,11 +6237,11 @@
+module_exit(ip_set_iptreemap_fini);
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_macipmap.c
-@@ -0,0 +1,375 @@
+@@ -0,0 +1,164 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Martin Josefsson <gandalf@wlug.westbo.se>
-+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ * Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
@@ -5978,41 +6253,29 @@
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
+#include <linux/if_ether.h>
-+#include <linux/vmalloc.h>
+
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
+#include <linux/netfilter_ipv4/ip_set_macipmap.h>
+
+static int
-+testip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
++macipmap_utest(struct ip_set *set, const void *data, u_int32_t size,
++ ip_set_ip_t *hash_ip)
+{
-+ struct ip_set_macipmap *map = (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table = (struct ip_set_macip *) map->members;
-+ struct ip_set_req_macipmap *req = (struct ip_set_req_macipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_macipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_macipmap),
-+ size);
-+ return -EINVAL;
-+ }
++ const struct ip_set_macipmap *map = set->data;
++ const struct ip_set_macip *table = map->members;
++ const struct ip_set_req_macipmap *req = data;
+
+ if (req->ip < map->first_ip || req->ip > map->last_ip)
+ return -ERANGE;
+
+ *hash_ip = req->ip;
+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
-+ set->name, HIPQUAD(req->ip), HIPQUAD(*hash_ip));
-+ if (test_bit(IPSET_MACIP_ISSET,
-+ (void *) &table[req->ip - map->first_ip].flags)) {
++ set->name, HIPQUAD(req->ip), HIPQUAD(*hash_ip));
++ if (table[req->ip - map->first_ip].match) {
+ return (memcmp(req->ethernet,
+ &table[req->ip - map->first_ip].ethernet,
+ ETH_ALEN) == 0);
@@ -6022,44 +6285,29 @@
+}
+
+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table =
-+ (struct ip_set_macip *) map->members;
++macipmap_ktest(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ const struct ip_set_macipmap *map = set->data;
++ const struct ip_set_macip *table = map->members;
+ ip_set_ip_t ip;
-+
-+ ip = ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr);
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr);
-+#endif
++
++ ip = ipaddr(skb, flags[index]);
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return 0;
+
-+ *hash_ip = ip;
++ *hash_ip = ip;
+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
-+ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
-+ if (test_bit(IPSET_MACIP_ISSET,
-+ (void *) &table[ip - map->first_ip].flags)) {
++ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
++ if (table[ip - map->first_ip].match) {
+ /* Is mac pointer valid?
+ * If so, compare... */
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
+ return (skb_mac_header(skb) >= skb->head
+ && (skb_mac_header(skb) + ETH_HLEN) <= skb->data
-+#else
-+ return (skb->mac.raw >= skb->head
-+ && (skb->mac.raw + ETH_HLEN) <= skb->data
-+#endif
+ && (memcmp(eth_hdr(skb)->h_source,
+ &table[ip - map->first_ip].ethernet,
+ ETH_ALEN) == 0));
@@ -6070,278 +6318,94 @@
+
+/* returns 0 on success */
+static inline int
-+__addip(struct ip_set *set,
-+ ip_set_ip_t ip, unsigned char *ethernet, ip_set_ip_t *hash_ip)
++macipmap_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, const unsigned char *ethernet)
+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table =
-+ (struct ip_set_macip *) map->members;
++ struct ip_set_macipmap *map = set->data;
++ struct ip_set_macip *table = map->members;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
-+ if (test_and_set_bit(IPSET_MACIP_ISSET,
-+ (void *) &table[ip - map->first_ip].flags))
++ if (table[ip - map->first_ip].match)
+ return -EEXIST;
+
+ *hash_ip = ip;
+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
+ memcpy(&table[ip - map->first_ip].ethernet, ethernet, ETH_ALEN);
++ table[ip - map->first_ip].match = IPSET_MACIP_ISSET;
+ return 0;
+}
+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_macipmap *req =
-+ (struct ip_set_req_macipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_macipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_macipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __addip(set, req->ip, req->ethernet, hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ ip_set_ip_t ip;
-+
-+ ip = ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr);
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr);
-+#endif
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ if (!(skb_mac_header(skb) >= skb->head
-+ && (skb_mac_header(skb) + ETH_HLEN) <= skb->data))
-+#else
-+ if (!(skb->mac.raw >= skb->head
-+ && (skb->mac.raw + ETH_HLEN) <= skb->data))
-+#endif
++#define KADT_CONDITION \
++ if (!(skb_mac_header(skb) >= skb->head \
++ && (skb_mac_header(skb) + ETH_HLEN) <= skb->data))\
+ return -EINVAL;
+
-+ return __addip(set, ip, eth_hdr(skb)->h_source, hash_ip);
-+}
++UADT(macipmap, add, req->ethernet)
++KADT(macipmap, add, ipaddr, eth_hdr(skb)->h_source)
+
+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++macipmap_del(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table =
-+ (struct ip_set_macip *) map->members;
++ struct ip_set_macipmap *map = set->data;
++ struct ip_set_macip *table = map->members;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
-+ if (!test_and_clear_bit(IPSET_MACIP_ISSET,
-+ (void *)&table[ip - map->first_ip].flags))
++ if (!table[ip - map->first_ip].match)
+ return -EEXIST;
+
+ *hash_ip = ip;
++ table[ip - map->first_ip].match = 0;
+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
+ return 0;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_macipmap *req =
-+ (struct ip_set_req_macipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_macipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_macipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++#undef KADT_CONDITION
++#define KADT_CONDITION
+
-+static inline size_t members_size(ip_set_id_t from, ip_set_id_t to)
-+{
-+ return (size_t)((to - from + 1) * sizeof(struct ip_set_macip));
-+}
++UADT(macipmap, del)
++KADT(macipmap, del, ipaddr)
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static inline int
++__macipmap_create(const struct ip_set_req_macipmap_create *req,
++ struct ip_set_macipmap *map)
+{
-+ int newbytes;
-+ struct ip_set_req_macipmap_create *req =
-+ (struct ip_set_req_macipmap_create *) data;
-+ struct ip_set_macipmap *map;
-+
-+ if (size != sizeof(struct ip_set_req_macipmap_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_macipmap_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ DP("from %u.%u.%u.%u to %u.%u.%u.%u",
-+ HIPQUAD(req->from), HIPQUAD(req->to));
-+
-+ if (req->from > req->to) {
-+ DP("bad ip range");
-+ return -ENOEXEC;
-+ }
-+
+ if (req->to - req->from > MAX_RANGE) {
-+ ip_set_printk("range too big (max %d addresses)",
-+ MAX_RANGE+1);
++ ip_set_printk("range too big, %d elements (max %d)",
++ req->to - req->from + 1, MAX_RANGE+1);
+ return -ENOEXEC;
+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_macipmap), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_macipmap));
-+ return -ENOMEM;
-+ }
+ map->flags = req->flags;
-+ map->first_ip = req->from;
-+ map->last_ip = req->to;
-+ newbytes = members_size(map->first_ip, map->last_ip);
-+ map->members = ip_set_malloc(newbytes);
-+ DP("members: %u %p", newbytes, map->members);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", newbytes);
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+ memset(map->members, 0, newbytes);
-+
-+ set->data = map;
-+ return 0;
-+}
-+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+
-+ ip_set_free(map->members, members_size(map->first_ip, map->last_ip));
-+ kfree(map);
-+
-+ set->data = NULL;
++ return (req->to - req->from + 1) * sizeof(struct ip_set_macip);
+}
+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ memset(map->members, 0, members_size(map->first_ip, map->last_ip));
-+}
++BITMAP_CREATE(macipmap)
++BITMAP_DESTROY(macipmap)
++BITMAP_FLUSH(macipmap)
+
-+static void list_header(const struct ip_set *set, void *data)
++static inline void
++__macipmap_list_header(const struct ip_set_macipmap *map,
++ struct ip_set_req_macipmap_create *header)
+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ struct ip_set_req_macipmap_create *header =
-+ (struct ip_set_req_macipmap_create *) data;
-+
-+ DP("list_header %x %x %u", map->first_ip, map->last_ip,
-+ map->flags);
-+
-+ header->from = map->first_ip;
-+ header->to = map->last_ip;
+ header->flags = map->flags;
+}
+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+
-+ DP("%u", members_size(map->first_ip, map->last_ip));
-+ return members_size(map->first_ip, map->last_ip);
-+}
-+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+
-+ int bytes = members_size(map->first_ip, map->last_ip);
-+
-+ DP("members: %u %p", bytes, map->members);
-+ memcpy(data, map->members, bytes);
-+}
++BITMAP_LIST_HEADER(macipmap)
++BITMAP_LIST_MEMBERS_SIZE(macipmap)
++BITMAP_LIST_MEMBERS(macipmap)
+
-+static struct ip_set_type ip_set_macipmap = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_macipmap),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_macipmap_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_TYPE(macipmap, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("macipmap type of IP sets");
+
-+static int __init ip_set_macipmap_init(void)
-+{
-+ init_max_malloc_size();
-+ return ip_set_register_set_type(&ip_set_macipmap);
-+}
-+
-+static void __exit ip_set_macipmap_fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_macipmap);
-+}
-+
-+module_init(ip_set_macipmap_init);
-+module_exit(ip_set_macipmap_fini);
++REGISTER_MODULE(macipmap)
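
The refreshed macipmap code above keeps only the type-specific pieces (__macipmap_create(), __macipmap_list_header(), the range check) and generates the rest — create/destroy/flush, the list_* callbacks, the struct ip_set_type definition and the module init/exit glue — from shared macros such as BITMAP_CREATE(), UADT()/KADT(), IP_SET_TYPE() and REGISTER_MODULE() that this revision of the patch pulls in via the new ip_set_bitmaps.h and related headers. The bodies of those macros are not visible in this hunk, so the stand-alone C sketch below only illustrates the token-pasting pattern they appear to rely on; SET_CREATE, struct foo_req and __foo_create are invented names, not the kernel ones.

/* Illustrative sketch: one macro stamps out the size-checking wrapper that
 * the previous version of the patch wrote by hand for every set type.
 */
#include <stdio.h>

struct foo_req { int from, to; };

/* per-type "core" logic keeps its own double-underscore name ... */
static int __foo_create(const struct foo_req *req)
{
	return req->to - req->from + 1;	/* number of elements */
}

/* ... and a shared template generates the request-validating wrapper */
#define SET_CREATE(type)						\
static int type##_create(const void *data, unsigned size)		\
{									\
	if (size != sizeof(struct type##_req)) {			\
		fprintf(stderr, "%s: bad request size %u\n", #type, size); \
		return -1;						\
	}								\
	return __##type##_create(data);					\
}

SET_CREATE(foo)			/* expands to foo_create() */

int main(void)
{
	struct foo_req req = { .from = 1, .to = 10 };

	printf("elements: %d\n", foo_create(&req, sizeof(req)));
	return 0;
}

The same template approach is what lets the nethash, portmap and setlist files below shrink to a fraction of their previous line counts while keeping identical userspace/kernel entry points.
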
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_nethash.c
-@@ -0,0 +1,497 @@
-+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+@@ -0,0 +1,225 @@
++/* Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
@@ -6351,63 +6415,56 @@
+/* Kernel module implementing a cidr nethash set */
+
+#include <linux/module.h>
++#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+#include <linux/jhash.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
-+#include <linux/vmalloc.h>
+#include <linux/random.h>
+
+#include <net/ip.h>
+
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
+#include <linux/netfilter_ipv4/ip_set_nethash.h>
+
+static int limit = MAX_RANGE;
+
+static inline __u32
-+jhash_ip(const struct ip_set_nethash *map, uint16_t i, ip_set_ip_t ip)
-+{
-+ return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
-+}
-+
-+static inline __u32
-+hash_id_cidr(struct ip_set_nethash *map,
-+ ip_set_ip_t ip,
-+ unsigned char cidr,
-+ ip_set_ip_t *hash_ip)
++nethash_id_cidr(const struct ip_set_nethash *map,
++ ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip,
++ uint8_t cidr)
+{
+ __u32 id;
+ u_int16_t i;
+ ip_set_ip_t *elem;
+
-+ *hash_ip = pack(ip, cidr);
-+
++ *hash_ip = pack_ip_cidr(ip, cidr);
++ if (!*hash_ip)
++ return MAX_RANGE;
++
+ for (i = 0; i < map->probes; i++) {
+ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
+ DP("hash key: %u", id);
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
+ if (*elem == *hash_ip)
+ return id;
++ /* No shortcut - there can be deleted entries. */
+ }
+ return UINT_MAX;
+}
+
+static inline __u32
-+hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++nethash_id(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++ const struct ip_set_nethash *map = set->data;
+ __u32 id = UINT_MAX;
+ int i;
+
+ for (i = 0; i < 30 && map->cidr[i]; i++) {
-+ id = hash_id_cidr(map, ip, map->cidr[i], hash_ip);
++ id = nethash_id_cidr(map, hash_ip, ip, map->cidr[i]);
+ if (id != UINT_MAX)
+ break;
+ }
@@ -6415,409 +6472,156 @@
+}
+
+static inline int
-+__testip_cidr(struct ip_set *set, ip_set_ip_t ip, unsigned char cidr,
-+ ip_set_ip_t *hash_ip)
++nethash_test_cidr(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, uint8_t cidr)
+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++ const struct ip_set_nethash *map = set->data;
+
-+ return (ip && hash_id_cidr(map, ip, cidr, hash_ip) != UINT_MAX);
++ return (nethash_id_cidr(map, hash_ip, ip, cidr) != UINT_MAX);
+}
+
+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++nethash_test(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
++ return (nethash_id(set, hash_ip, ip) != UINT_MAX);
+}
+
+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
++nethash_utest(struct ip_set *set, const void *data, u_int32_t size,
++ ip_set_ip_t *hash_ip)
+{
-+ struct ip_set_req_nethash *req =
-+ (struct ip_set_req_nethash *) data;
++ const struct ip_set_req_nethash *req = data;
+
-+ if (size != sizeof(struct ip_set_req_nethash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_nethash),
-+ size);
++ if (req->cidr <= 0 || req->cidr > 32)
+ return -EINVAL;
-+ }
-+ return (req->cidr == 32 ? __testip(set, req->ip, hash_ip)
-+ : __testip_cidr(set, req->ip, req->cidr, hash_ip));
++ return (req->cidr == 32 ? nethash_test(set, hash_ip, req->ip)
++ : nethash_test_cidr(set, hash_ip, req->ip, req->cidr));
+}
+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++#define KADT_CONDITION
++
++KADT(nethash, test, ipaddr)
+
+static inline int
-+__addip_base(struct ip_set_nethash *map, ip_set_ip_t ip)
++__nethash_add(struct ip_set_nethash *map, ip_set_ip_t *ip)
+{
+ __u32 probe;
+ u_int16_t i;
-+ ip_set_ip_t *elem;
-+
++ ip_set_ip_t *elem, *slot = NULL;
++
+ for (i = 0; i < map->probes; i++) {
-+ probe = jhash_ip(map, i, ip) % map->hashsize;
++ probe = jhash_ip(map, i, *ip) % map->hashsize;
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
-+ if (*elem == ip)
++ if (*elem == *ip)
+ return -EEXIST;
-+ if (!*elem) {
-+ *elem = ip;
-+ map->elements++;
-+ return 0;
-+ }
++ if (!(slot || *elem))
++ slot = elem;
++ /* There can be deleted entries, must check all slots */
++ }
++ if (slot) {
++ *slot = *ip;
++ map->elements++;
++ return 0;
+ }
+ /* Trigger rehashing */
+ return -EAGAIN;
+}
+
+static inline int
-+__addip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
-+ ip_set_ip_t *hash_ip)
-+{
-+ if (!ip || map->elements >= limit)
-+ return -ERANGE;
-+
-+ *hash_ip = pack(ip, cidr);
-+ DP("%u.%u.%u.%u/%u, %u.%u.%u.%u", HIPQUAD(ip), cidr, HIPQUAD(*hash_ip));
-+
-+ return __addip_base(map, *hash_ip);
-+}
-+
-+static void
-+update_cidr_sizes(struct ip_set_nethash *map, unsigned char cidr)
-+{
-+ unsigned char next;
-+ int i;
-+
-+ for (i = 0; i < 30 && map->cidr[i]; i++) {
-+ if (map->cidr[i] == cidr) {
-+ return;
-+ } else if (map->cidr[i] < cidr) {
-+ next = map->cidr[i];
-+ map->cidr[i] = cidr;
-+ cidr = next;
-+ }
-+ }
-+ if (i < 30)
-+ map->cidr[i] = cidr;
-+}
-+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
++nethash_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, uint8_t cidr)
+{
-+ struct ip_set_req_nethash *req =
-+ (struct ip_set_req_nethash *) data;
++ struct ip_set_nethash *map = set->data;
+ int ret;
-+
-+ if (size != sizeof(struct ip_set_req_nethash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_nethash),
-+ size);
++
++ if (map->elements >= limit || map->nets[cidr-1] == UINT16_MAX)
++ return -ERANGE;
++ if (cidr <= 0 || cidr >= 32)
+ return -EINVAL;
-+ }
-+ ret = __addip((struct ip_set_nethash *) set->data,
-+ req->ip, req->cidr, hash_ip);
-+
-+ if (ret == 0)
-+ update_cidr_sizes((struct ip_set_nethash *) set->data,
-+ req->cidr);
+
++ *hash_ip = pack_ip_cidr(ip, cidr);
++ DP("%u.%u.%u.%u/%u, %u.%u.%u.%u", HIPQUAD(ip), cidr, HIPQUAD(*hash_ip));
++ if (!*hash_ip)
++ return -ERANGE;
++
++ ret = __nethash_add(map, hash_ip);
++ if (ret == 0) {
++ if (!map->nets[cidr-1]++)
++ add_cidr_size(map->cidr, cidr);
++ map->elements++;
++ }
++
+ return ret;
+}
+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ int ret = -ERANGE;
-+ ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr);
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr);
-+#endif
++#undef KADT_CONDITION
++#define KADT_CONDITION \
++ struct ip_set_nethash *map = set->data; \
++ uint8_t cidr = map->cidr[0] ? map->cidr[0] : 31;
+
-+ if (map->cidr[0])
-+ ret = __addip(map, ip, map->cidr[0], hash_ip);
++UADT(nethash, add, req->cidr)
++KADT(nethash, add, ipaddr, cidr)
+
-+ return ret;
-+}
-+
-+static int retry(struct ip_set *set)
++static inline void
++__nethash_retry(struct ip_set_nethash *tmp, struct ip_set_nethash *map)
+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ ip_set_ip_t *elem;
-+ void *members;
-+ u_int32_t i, hashsize = map->hashsize;
-+ int res;
-+ struct ip_set_nethash *tmp;
-+
-+ if (map->resize == 0)
-+ return -ERANGE;
-+
-+ again:
-+ res = 0;
-+
-+ /* Calculate new parameters */
-+ hashsize += (hashsize * map->resize)/100;
-+ if (hashsize == map->hashsize)
-+ hashsize++;
-+
-+ ip_set_printk("rehashing of set %s triggered: "
-+ "hashsize grows from %u to %u",
-+ set->name, map->hashsize, hashsize);
-+
-+ tmp = kmalloc(sizeof(struct ip_set_nethash)
-+ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
-+ if (!tmp) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_nethash)
-+ + map->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
-+ if (!tmp->members) {
-+ DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
-+ kfree(tmp);
-+ return -ENOMEM;
-+ }
-+ tmp->hashsize = hashsize;
-+ tmp->elements = 0;
-+ tmp->probes = map->probes;
-+ tmp->resize = map->resize;
-+ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
-+ memcpy(tmp->cidr, map->cidr, 30 * sizeof(unsigned char));
-+
-+ write_lock_bh(&set->lock);
-+ map = (struct ip_set_nethash *) set->data; /* Play safe */
-+ for (i = 0; i < map->hashsize && res == 0; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ if (*elem)
-+ res = __addip_base(tmp, *elem);
-+ }
-+ if (res) {
-+ /* Failure, try again */
-+ write_unlock_bh(&set->lock);
-+ harray_free(tmp->members);
-+ kfree(tmp);
-+ goto again;
-+ }
-+
-+ /* Success at resizing! */
-+ members = map->members;
-+
-+ map->hashsize = tmp->hashsize;
-+ map->members = tmp->members;
-+ write_unlock_bh(&set->lock);
-+
-+ harray_free(members);
-+ kfree(tmp);
-+
-+ return 0;
++ memcpy(tmp->cidr, map->cidr, sizeof(tmp->cidr));
++ memcpy(tmp->nets, map->nets, sizeof(tmp->nets));
+}
+
++HASH_RETRY(nethash, ip_set_ip_t)
++
+static inline int
-+__delip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
-+ ip_set_ip_t *hash_ip)
++nethash_del(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, uint8_t cidr)
+{
++ struct ip_set_nethash *map = set->data;
+ ip_set_ip_t id, *elem;
+
-+ if (!ip)
-+ return -ERANGE;
-+
-+ id = hash_id_cidr(map, ip, cidr, hash_ip);
++ if (cidr <= 0 || cidr >= 32)
++ return -EINVAL;
++
++ id = nethash_id_cidr(map, hash_ip, ip, cidr);
+ if (id == UINT_MAX)
+ return -EEXIST;
-+
++
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
+ *elem = 0;
+ map->elements--;
++ if (!map->nets[cidr-1]--)
++ del_cidr_size(map->cidr, cidr);
+ return 0;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_nethash *req =
-+ (struct ip_set_req_nethash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_nethash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_nethash),
-+ size);
-+ return -EINVAL;
-+ }
-+ /* TODO: no garbage collection in map->cidr */
-+ return __delip((struct ip_set_nethash *) set->data,
-+ req->ip, req->cidr, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ int ret = -ERANGE;
-+ ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr);
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr);
-+#endif
-+
-+ if (map->cidr[0])
-+ ret = __delip(map, ip, map->cidr[0], hash_ip);
-+
-+ return ret;
-+}
++UADT(nethash, del, req->cidr)
++KADT(nethash, del, ipaddr, cidr)
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static inline int
++__nethash_create(const struct ip_set_req_nethash_create *req,
++ struct ip_set_nethash *map)
+{
-+ struct ip_set_req_nethash_create *req =
-+ (struct ip_set_req_nethash_create *) data;
-+ struct ip_set_nethash *map;
-+ uint16_t i;
-+
-+ if (size != sizeof(struct ip_set_req_nethash_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_nethash_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ if (req->hashsize < 1) {
-+ ip_set_printk("hashsize too small");
-+ return -ENOEXEC;
-+ }
-+ if (req->probes < 1) {
-+ ip_set_printk("probes too small");
-+ return -ENOEXEC;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_nethash)
-+ + req->probes * sizeof(uint32_t), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_nethash)
-+ + req->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ for (i = 0; i < req->probes; i++)
-+ get_random_bytes(((uint32_t *) map->initval)+i, 4);
-+ map->elements = 0;
-+ map->hashsize = req->hashsize;
-+ map->probes = req->probes;
-+ map->resize = req->resize;
-+ memset(map->cidr, 0, 30 * sizeof(unsigned char));
-+ map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+
-+ set->data = map;
++ memset(map->cidr, 0, sizeof(map->cidr));
++ memset(map->nets, 0, sizeof(map->nets));
++
+ return 0;
+}
+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+
-+ harray_free(map->members);
-+ kfree(map);
-+
-+ set->data = NULL;
-+}
-+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
-+ memset(map->cidr, 0, 30 * sizeof(unsigned char));
-+ map->elements = 0;
-+}
-+
-+static void list_header(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ struct ip_set_req_nethash_create *header =
-+ (struct ip_set_req_nethash_create *) data;
-+
-+ header->hashsize = map->hashsize;
-+ header->probes = map->probes;
-+ header->resize = map->resize;
-+}
++HASH_CREATE(nethash, ip_set_ip_t)
++HASH_DESTROY(nethash)
+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++HASH_FLUSH_CIDR(nethash, ip_set_ip_t)
+
-+ return (map->hashsize * sizeof(ip_set_ip_t));
++static inline void
++__nethash_list_header(const struct ip_set_nethash *map,
++ struct ip_set_req_nethash_create *header)
++{
+}
+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ ip_set_ip_t i, *elem;
++HASH_LIST_HEADER(nethash)
++HASH_LIST_MEMBERS_SIZE(nethash, ip_set_ip_t)
++HASH_LIST_MEMBERS(nethash, ip_set_ip_t)
+
-+ for (i = 0; i < map->hashsize; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ ((ip_set_ip_t *)data)[i] = *elem;
-+ }
-+}
-+
-+static struct ip_set_type ip_set_nethash = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_nethash),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .retry = &retry,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_nethash_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_RTYPE(nethash, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -6825,23 +6629,11 @@
+module_param(limit, int, 0600);
+MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
+
-+static int __init ip_set_nethash_init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_nethash);
-+}
-+
-+static void __exit ip_set_nethash_fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_nethash);
-+}
-+
-+module_init(ip_set_nethash_init);
-+module_exit(ip_set_nethash_fini);
++REGISTER_MODULE(nethash)
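
Compared with the old __addip_base(), the rewritten __nethash_add() above no longer stops at the first empty bucket: it probes every hash function so a duplicate hidden behind a deleted slot is still detected, remembers the first free bucket it saw so slots emptied by deletions get reused, and only requests a rehash (-EAGAIN) when all probes are occupied. The minimal userspace sketch below mirrors just that probing scheme; toy_hash, hash_add and seed are invented stand-ins, while the kernel code uses jhash with per-probe random seeds and harray storage.

/* Stand-alone sketch of multi-probe insertion with deleted-slot reuse. */
#include <stdint.h>
#include <stdio.h>
#include <errno.h>

#define HASHSIZE 8
#define PROBES   4

static uint32_t table[HASHSIZE];	/* 0 marks an empty bucket */
static const uint32_t seed[PROBES] = {
	0x9e3779b9, 0x7f4a7c15, 0x85ebca6b, 0xc2b2ae35
};

static uint32_t toy_hash(uint32_t ip, uint32_t s)
{
	ip ^= s;
	ip *= 0x45d9f3b;
	return ip % HASHSIZE;
}

static int hash_add(uint32_t ip)
{
	uint32_t *slot = NULL;
	int i;

	for (i = 0; i < PROBES; i++) {
		uint32_t *elem = &table[toy_hash(ip, seed[i])];

		if (*elem == ip)
			return -EEXIST;
		if (!slot && !*elem)
			slot = elem;	/* remember first free bucket */
		/* keep probing: a later bucket may hold a duplicate */
	}
	if (slot) {
		*slot = ip;
		return 0;
	}
	return -EAGAIN;			/* all probes taken: rehash needed */
}

int main(void)
{
	printf("%d %d %d\n", hash_add(0x0a000001),
	       hash_add(0x0a000001), hash_add(0x0a000002));
	return 0;
}
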
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_portmap.c
-@@ -0,0 +1,346 @@
-+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+@@ -0,0 +1,114 @@
++/* Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
@@ -6855,9 +6647,6 @@
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
@@ -6866,330 +6655,434 @@
+#include <net/ip.h>
+
+#include <linux/netfilter_ipv4/ip_set_portmap.h>
++#include <linux/netfilter_ipv4/ip_set_getport.h>
+
-+/* We must handle non-linear skbs */
-+static inline ip_set_ip_t
-+get_port(const struct sk_buff *skb, u_int32_t flags)
++static inline int
++portmap_test(const struct ip_set *set, ip_set_ip_t *hash_port,
++ ip_set_ip_t port)
+{
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ struct iphdr *iph = ip_hdr(skb);
-+#else
-+ struct iphdr *iph = skb->nh.iph;
-+#endif
-+ u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
-+ switch (iph->protocol) {
-+ case IPPROTO_TCP: {
-+ struct tcphdr tcph;
-+
-+ /* See comments at tcp_match in ip_tables.c */
-+ if (offset)
-+ return INVALID_PORT;
++ const struct ip_set_portmap *map = set->data;
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
-+#else
-+ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &tcph, sizeof(tcph)) < 0)
-+#endif
-+ /* No choice either */
-+ return INVALID_PORT;
++ if (port < map->first_ip || port > map->last_ip)
++ return -ERANGE;
++
++ *hash_port = port;
++ DP("set: %s, port:%u, %u", set->name, port, *hash_port);
++ return !!test_bit(port - map->first_ip, map->members);
++}
+
-+ return ntohs(flags & IPSET_SRC ?
-+ tcph.source : tcph.dest);
-+ }
-+ case IPPROTO_UDP: {
-+ struct udphdr udph;
++#define KADT_CONDITION \
++ if (ip == INVALID_PORT) \
++ return 0;
+
-+ if (offset)
-+ return INVALID_PORT;
++UADT(portmap, test)
++KADT(portmap, test, get_port)
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
-+#else
-+ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &udph, sizeof(udph)) < 0)
-+#endif
-+ /* No choice either */
-+ return INVALID_PORT;
++static inline int
++portmap_add(struct ip_set *set, ip_set_ip_t *hash_port, ip_set_ip_t port)
++{
++ struct ip_set_portmap *map = set->data;
+
-+ return ntohs(flags & IPSET_SRC ?
-+ udph.source : udph.dest);
-+ }
-+ default:
-+ return INVALID_PORT;
-+ }
++ if (port < map->first_ip || port > map->last_ip)
++ return -ERANGE;
++ if (test_and_set_bit(port - map->first_ip, map->members))
++ return -EEXIST;
++
++ *hash_port = port;
++ DP("port %u", port);
++ return 0;
+}
+
++UADT(portmap, add)
++KADT(portmap, add, get_port)
++
+static inline int
-+__testport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
++portmap_del(struct ip_set *set, ip_set_ip_t *hash_port, ip_set_ip_t port)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++ struct ip_set_portmap *map = set->data;
+
-+ if (port < map->first_port || port > map->last_port)
++ if (port < map->first_ip || port > map->last_ip)
+ return -ERANGE;
-+
++ if (!test_and_clear_bit(port - map->first_ip, map->members))
++ return -EEXIST;
++
+ *hash_port = port;
-+ DP("set: %s, port:%u, %u", set->name, port, *hash_port);
-+ return !!test_bit(port - map->first_port, map->members);
++ DP("port %u", port);
++ return 0;
+}
+
-+static int
-+testport(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_port)
-+{
-+ struct ip_set_req_portmap *req =
-+ (struct ip_set_req_portmap *) data;
++UADT(portmap, del)
++KADT(portmap, del, get_port)
+
-+ if (size != sizeof(struct ip_set_req_portmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_portmap),
-+ size);
-+ return -EINVAL;
++static inline int
++__portmap_create(const struct ip_set_req_portmap_create *req,
++ struct ip_set_portmap *map)
++{
++ if (req->to - req->from > MAX_RANGE) {
++ ip_set_printk("range too big, %d elements (max %d)",
++ req->to - req->from + 1, MAX_RANGE+1);
++ return -ENOEXEC;
+ }
-+ return __testport(set, req->port, hash_port);
++ return bitmap_bytes(req->from, req->to);
+}
+
-+static int
-+testport_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_port,
-+ const u_int32_t *flags,
-+ unsigned char index)
++BITMAP_CREATE(portmap)
++BITMAP_DESTROY(portmap)
++BITMAP_FLUSH(portmap)
++
++static inline void
++__portmap_list_header(const struct ip_set_portmap *map,
++ struct ip_set_req_portmap_create *header)
+{
-+ int res;
-+ ip_set_ip_t port = get_port(skb, flags[index]);
++}
+
-+ DP("flag %s port %u", flags[index] & IPSET_SRC ? "SRC" : "DST", port);
-+ if (port == INVALID_PORT)
-+ return 0;
++BITMAP_LIST_HEADER(portmap)
++BITMAP_LIST_MEMBERS_SIZE(portmap)
++BITMAP_LIST_MEMBERS(portmap)
+
-+ res = __testport(set, port, hash_port);
++IP_SET_TYPE(portmap, IPSET_TYPE_PORT | IPSET_DATA_SINGLE)
+
-+ return (res < 0 ? 0 : res);
-+}
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("portmap type of IP sets");
+
-+static inline int
-+__addport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
-+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++REGISTER_MODULE(portmap)
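
The portmap type above is now a plain bitmap over the configured port range: membership is one bit per port, add/del derive their -EEXIST result from the previous bit value via test_and_set_bit()/test_and_clear_bit(), and the non-linear-skb port extraction has moved into the shared ip_set_getport.h helper. The small stand-alone approximation below mirrors only that bitmap logic; struct portmap here, pm_add and pm_test are invented names, not the kernel structures.

/* Stand-alone sketch of bitmap membership over a port range. */
#include <stdint.h>
#include <stdio.h>
#include <errno.h>

struct portmap {
	uint16_t first, last;
	unsigned char bits[0x10000 / 8];	/* oversized for simplicity */
};

static int pm_add(struct portmap *m, uint16_t port)
{
	unsigned idx, mask, old;

	if (port < m->first || port > m->last)
		return -ERANGE;
	idx  = (port - m->first) / 8;
	mask = 1u << ((port - m->first) % 8);
	old  = m->bits[idx] & mask;
	m->bits[idx] |= mask;
	return old ? -EEXIST : 0;	/* bit already set -> duplicate */
}

static int pm_test(const struct portmap *m, uint16_t port)
{
	if (port < m->first || port > m->last)
		return 0;
	return !!(m->bits[(port - m->first) / 8] &
		  (1u << ((port - m->first) % 8)));
}

int main(void)
{
	static struct portmap m = { .first = 1024, .last = 2048 };

	printf("%d %d %d %d\n", pm_add(&m, 1080), pm_add(&m, 1080),
	       pm_test(&m, 1080), pm_test(&m, 1081));
	return 0;
}
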
+--- /dev/null
++++ b/net/ipv4/netfilter/ip_set_setlist.c
+@@ -0,0 +1,330 @@
++/* Copyright (C) 2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
+
-+ if (port < map->first_port || port > map->last_port)
-+ return -ERANGE;
-+ if (test_and_set_bit(port - map->first_port, map->members))
-+ return -EEXIST;
++/* Kernel module implementing an IP set type: the setlist type */
+
-+ *hash_port = port;
-+ DP("port %u", port);
-+ return 0;
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/errno.h>
++
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
++#include <linux/netfilter_ipv4/ip_set_setlist.h>
++
++/*
++ * before ==> index, ref
++ * after ==> ref, index
++ */
++
++static inline int
++next_index_eq(const struct ip_set_setlist *map, int i, ip_set_id_t index)
++{
++ return i < map->size && map->index[i] == index;
+}
+
+static int
-+addport(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_port)
-+{
-+ struct ip_set_req_portmap *req =
-+ (struct ip_set_req_portmap *) data;
++setlist_utest(struct ip_set *set, const void *data, u_int32_t size,
++ ip_set_ip_t *hash_ip)
++{
++ const struct ip_set_setlist *map = set->data;
++ const struct ip_set_req_setlist *req = data;
++ ip_set_id_t index, ref = IP_SET_INVALID_ID;
++ int i, res = 0;
++ struct ip_set *s;
++
++ if (req->before && req->ref[0] == '\0')
++ return 0;
+
-+ if (size != sizeof(struct ip_set_req_portmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_portmap),
-+ size);
-+ return -EINVAL;
++ index = __ip_set_get_byname(req->name, &s);
++ if (index == IP_SET_INVALID_ID)
++ return 0;
++ if (req->ref[0] != '\0') {
++ ref = __ip_set_get_byname(req->ref, &s);
++ if (ref == IP_SET_INVALID_ID)
++ goto finish;
++ }
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID; i++) {
++ if (req->before && map->index[i] == index) {
++ res = next_index_eq(map, i + 1, ref);
++ break;
++ } else if (!req->before) {
++ if ((ref == IP_SET_INVALID_ID
++ && map->index[i] == index)
++ || (map->index[i] == ref
++ && next_index_eq(map, i + 1, index))) {
++ res = 1;
++ break;
++ }
++ }
+ }
-+ return __addport(set, req->port, hash_port);
++ if (ref != IP_SET_INVALID_ID)
++ __ip_set_put_byindex(ref);
++finish:
++ __ip_set_put_byindex(index);
++ return res;
+}
+
+static int
-+addport_kernel(struct ip_set *set,
++setlist_ktest(struct ip_set *set,
+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_port,
++ ip_set_ip_t *hash_ip,
+ const u_int32_t *flags,
+ unsigned char index)
+{
-+ ip_set_ip_t port = get_port(skb, flags[index]);
-+
-+ if (port == INVALID_PORT)
-+ return -EINVAL;
-+
-+ return __addport(set, port, hash_port);
++ struct ip_set_setlist *map = set->data;
++ int i, res = 0;
++
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID
++ && res == 0; i++)
++ res = ip_set_testip_kernel(map->index[i], skb, flags);
++ return res;
+}
+
+static inline int
-+__delport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
++insert_setlist(struct ip_set_setlist *map, int i, ip_set_id_t index)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++ ip_set_id_t tmp;
++ int j;
+
-+ if (port < map->first_port || port > map->last_port)
++ DP("i: %u, last %u\n", i, map->index[map->size - 1]);
++ if (i >= map->size || map->index[map->size - 1] != IP_SET_INVALID_ID)
+ return -ERANGE;
-+ if (!test_and_clear_bit(port - map->first_port, map->members))
-+ return -EEXIST;
-+
-+ *hash_port = port;
-+ DP("port %u", port);
++
++ for (j = i; j < map->size
++ && index != IP_SET_INVALID_ID; j++) {
++ tmp = map->index[j];
++ map->index[j] = index;
++ index = tmp;
++ }
+ return 0;
+}
+
+static int
-+delport(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_port)
++setlist_uadd(struct ip_set *set, const void *data, u_int32_t size,
++ ip_set_ip_t *hash_ip)
+{
-+ struct ip_set_req_portmap *req =
-+ (struct ip_set_req_portmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_portmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_portmap),
-+ size);
++ struct ip_set_setlist *map = set->data;
++ const struct ip_set_req_setlist *req = data;
++ ip_set_id_t index, ref = IP_SET_INVALID_ID;
++ int i, res = -ERANGE;
++ struct ip_set *s;
++
++ if (req->before && req->ref[0] == '\0')
+ return -EINVAL;
++
++ index = __ip_set_get_byname(req->name, &s);
++ if (index == IP_SET_INVALID_ID)
++ return -EEXIST;
++ /* "Loop detection" */
++ if (strcmp(s->type->typename, "setlist") == 0)
++ goto finish;
++
++ if (req->ref[0] != '\0') {
++ ref = __ip_set_get_byname(req->ref, &s);
++ if (ref == IP_SET_INVALID_ID) {
++ res = -EEXIST;
++ goto finish;
++ }
+ }
-+ return __delport(set, req->port, hash_port);
++ for (i = 0; i < map->size; i++) {
++ if (map->index[i] != ref)
++ continue;
++ if (req->before)
++ res = insert_setlist(map, i, index);
++ else
++ res = insert_setlist(map,
++ ref == IP_SET_INVALID_ID ? i : i + 1,
++ index);
++ break;
++ }
++ if (ref != IP_SET_INVALID_ID)
++ __ip_set_put_byindex(ref);
++ /* In case of success, we keep the reference to the set */
++finish:
++ if (res != 0)
++ __ip_set_put_byindex(index);
++ return res;
+}
+
+static int
-+delport_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_port,
-+ const u_int32_t *flags,
-+ unsigned char index)
++setlist_kadd(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
+{
-+ ip_set_ip_t port = get_port(skb, flags[index]);
-+
-+ if (port == INVALID_PORT)
-+ return -EINVAL;
-+
-+ return __delport(set, port, hash_port);
++ struct ip_set_setlist *map = set->data;
++ int i, res = -EINVAL;
++
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID
++ && res != 0; i++)
++ res = ip_set_addip_kernel(map->index[i], skb, flags);
++ return res;
+}
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static inline int
++unshift_setlist(struct ip_set_setlist *map, int i)
+{
-+ int newbytes;
-+ struct ip_set_req_portmap_create *req =
-+ (struct ip_set_req_portmap_create *) data;
-+ struct ip_set_portmap *map;
++ int j;
++
++ for (j = i; j < map->size - 1; j++)
++ map->index[j] = map->index[j+1];
++ map->index[map->size-1] = IP_SET_INVALID_ID;
++ return 0;
++}
+
-+ if (size != sizeof(struct ip_set_req_portmap_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_portmap_create),
-+ size);
++static int
++setlist_udel(struct ip_set *set, const void *data, u_int32_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_setlist *map = set->data;
++ const struct ip_set_req_setlist *req = data;
++ ip_set_id_t index, ref = IP_SET_INVALID_ID;
++ int i, res = -EEXIST;
++ struct ip_set *s;
++
++ if (req->before && req->ref[0] == '\0')
+ return -EINVAL;
-+ }
+
-+ DP("from %u to %u", req->from, req->to);
-+
-+ if (req->from > req->to) {
-+ DP("bad port range");
-+ return -ENOEXEC;
++ index = __ip_set_get_byname(req->name, &s);
++ if (index == IP_SET_INVALID_ID)
++ return -EEXIST;
++ if (req->ref[0] != '\0') {
++ ref = __ip_set_get_byname(req->ref, &s);
++ if (ref == IP_SET_INVALID_ID)
++ goto finish;
++ }
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID; i++) {
++ if (req->before) {
++ if (map->index[i] == index
++ && next_index_eq(map, i + 1, ref)) {
++ res = unshift_setlist(map, i);
++ break;
++ }
++ } else if (ref == IP_SET_INVALID_ID) {
++ if (map->index[i] == index) {
++ res = unshift_setlist(map, i);
++ break;
++ }
++ } else if (map->index[i] == ref
++ && next_index_eq(map, i + 1, index)) {
++ res = unshift_setlist(map, i + 1);
++ break;
++ }
+ }
++ if (ref != IP_SET_INVALID_ID)
++ __ip_set_put_byindex(ref);
++finish:
++ __ip_set_put_byindex(index);
++ /* In case of success, release the reference to the set */
++ if (res == 0)
++ __ip_set_put_byindex(index);
++ return res;
++}
+
-+ if (req->to - req->from > MAX_RANGE) {
-+ ip_set_printk("range too big (max %d ports)",
-+ MAX_RANGE+1);
-+ return -ENOEXEC;
-+ }
++static int
++setlist_kdel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ struct ip_set_setlist *map = set->data;
++ int i, res = -EINVAL;
++
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID
++ && res != 0; i++)
++ res = ip_set_delip_kernel(map->index[i], skb, flags);
++ return res;
++}
+
-+ map = kmalloc(sizeof(struct ip_set_portmap), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_portmap));
-+ return -ENOMEM;
-+ }
-+ map->first_port = req->from;
-+ map->last_port = req->to;
-+ newbytes = bitmap_bytes(req->from, req->to);
-+ map->members = kmalloc(newbytes, GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", newbytes);
-+ kfree(map);
++static int
++setlist_create(struct ip_set *set, const void *data, u_int32_t size)
++{
++ struct ip_set_setlist *map;
++ const struct ip_set_req_setlist_create *req = data;
++ int i;
++
++ map = kmalloc(sizeof(struct ip_set_setlist) +
++ req->size * sizeof(ip_set_id_t), GFP_KERNEL);
++ if (!map)
+ return -ENOMEM;
-+ }
-+ memset(map->members, 0, newbytes);
-+
++ map->size = req->size;
++ for (i = 0; i < map->size; i++)
++ map->index[i] = IP_SET_INVALID_ID;
++
+ set->data = map;
+ return 0;
-+}
++}
+
-+static void destroy(struct ip_set *set)
++static void
++setlist_destroy(struct ip_set *set)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++ struct ip_set_setlist *map = set->data;
++ int i;
++
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID; i++)
++ __ip_set_put_byindex(map->index[i]);
+
-+ kfree(map->members);
+ kfree(map);
-+
+ set->data = NULL;
+}
+
-+static void flush(struct ip_set *set)
++static void
++setlist_flush(struct ip_set *set)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+ memset(map->members, 0, bitmap_bytes(map->first_port, map->last_port));
++ struct ip_set_setlist *map = set->data;
++ int i;
++
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID; i++) {
++ __ip_set_put_byindex(map->index[i]);
++ map->index[i] = IP_SET_INVALID_ID;
++ }
+}
+
-+static void list_header(const struct ip_set *set, void *data)
++static void
++setlist_list_header(const struct ip_set *set, void *data)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+ struct ip_set_req_portmap_create *header =
-+ (struct ip_set_req_portmap_create *) data;
-+
-+ DP("list_header %u %u", map->first_port, map->last_port);
-+
-+ header->from = map->first_port;
-+ header->to = map->last_port;
++ const struct ip_set_setlist *map = set->data;
++ struct ip_set_req_setlist_create *header = data;
++
++ header->size = map->size;
+}
+
-+static int list_members_size(const struct ip_set *set)
++static int
++setlist_list_members_size(const struct ip_set *set)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+
-+ return bitmap_bytes(map->first_port, map->last_port);
++ const struct ip_set_setlist *map = set->data;
++
++ return map->size * sizeof(ip_set_id_t);
+}
+
-+static void list_members(const struct ip_set *set, void *data)
++static void
++setlist_list_members(const struct ip_set *set, void *data)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+ int bytes = bitmap_bytes(map->first_port, map->last_port);
-+
-+ memcpy(data, map->members, bytes);
++ struct ip_set_setlist *map = set->data;
++ int i;
++
++ for (i = 0; i < map->size; i++)
++ *((ip_set_id_t *)data + i) = ip_set_id(map->index[i]);
+}
+
-+static struct ip_set_type ip_set_portmap = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_PORT | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_portmap),
-+ .addip = &addport,
-+ .addip_kernel = &addport_kernel,
-+ .delip = &delport,
-+ .delip_kernel = &delport_kernel,
-+ .testip = &testport,
-+ .testip_kernel = &testport_kernel,
-+ .header_size = sizeof(struct ip_set_req_portmap_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_TYPE(setlist, IPSET_TYPE_SETNAME | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("portmap type of IP sets");
++MODULE_DESCRIPTION("setlist type of IP sets");
+
-+static int __init ip_set_portmap_init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_portmap);
-+}
-+
-+static void __exit ip_set_portmap_fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_portmap);
-+}
-+
-+module_init(ip_set_portmap_init);
-+module_exit(ip_set_portmap_fini);
++REGISTER_MODULE(setlist)
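
The new setlist type keeps an ordered, fixed-size array of set indices: adding "before" or "after" a reference set shifts the tail of the array one slot to the right (insert_setlist(), which fails with -ERANGE when the last slot is already taken), deleting shifts it back to the left (unshift_setlist()), and the kernel-side test/add/del simply walk the members until one of them answers. The following stand-alone sketch mirrors only that array bookkeeping; list_insert, list_remove and INVALID are invented names standing in for the kernel helpers and IP_SET_INVALID_ID.

/* Stand-alone sketch of ordered insertion/removal in a fixed-size list. */
#include <stdio.h>
#include <errno.h>

#define SIZE    4
#define INVALID 0xffffu		/* marks an unused slot */

static unsigned list[SIZE] = { INVALID, INVALID, INVALID, INVALID };

static int list_insert(int i, unsigned id)
{
	int j;

	if (i >= SIZE || list[SIZE - 1] != INVALID)
		return -ERANGE;		/* no room left */
	for (j = i; j < SIZE && id != INVALID; j++) {
		unsigned tmp = list[j];

		list[j] = id;
		id = tmp;		/* push the old entry one slot down */
	}
	return 0;
}

static void list_remove(int i)
{
	int j;

	for (j = i; j < SIZE - 1; j++)
		list[j] = list[j + 1];
	list[SIZE - 1] = INVALID;
}

int main(void)
{
	int j;

	list_insert(0, 10);		/* [10, -, -, -]            */
	list_insert(0, 20);		/* [20, 10, -, -]  before 10 */
	list_insert(2, 30);		/* [20, 10, 30, -] after 10  */
	list_remove(1);			/* [20, 30, -, -]            */
	for (j = 0; j < SIZE; j++)
		printf("%u ", list[j]);
	printf("\n");
	return 0;
}
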
--- /dev/null
+++ b/net/ipv4/netfilter/ipt_set.c
-@@ -0,0 +1,160 @@
+@@ -0,0 +1,238 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Martin Josefsson <gandalf@wlug.westbo.se>
@@ -7207,7 +7100,14 @@
+#include <linux/skbuff.h>
+#include <linux/version.h>
+
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
+#include <linux/netfilter_ipv4/ip_tables.h>
++#define xt_register_match ipt_register_match
++#define xt_unregister_match ipt_unregister_match
++#define xt_match ipt_match
++#else
++#include <linux/netfilter/x_tables.h>
++#endif
+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/netfilter_ipv4/ipt_set.h>
+
@@ -7215,64 +7115,125 @@
+match_set(const struct ipt_set_info *info,
+ const struct sk_buff *skb,
+ int inv)
-+{
++{
+ if (ip_set_testip_kernel(info->index, skb, info->flags))
+ inv = !inv;
+ return inv;
+}
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+static bool
-+#else
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++static int
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *matchinfo,
++ int offset,
++ const void *hdr,
++ u_int16_t datalen,
++ int *hotdrop)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
++static int
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *matchinfo,
++ int offset,
++ int *hotdrop)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static int
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *matchinfo,
++ int offset,
++ unsigned int protoff,
++ int *hotdrop)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
+static int
-+#endif
+match(const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+ const struct xt_match *match,
-+#endif
+ const void *matchinfo,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ int offset, unsigned int protoff, bool *hotdrop)
-+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-+ int offset, unsigned int protoff, int *hotdrop)
-+#else
-+ int offset, int *hotdrop)
++ int offset,
++ unsigned int protoff,
++ int *hotdrop)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++static bool
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const struct xt_match *match,
++ const void *matchinfo,
++ int offset,
++ unsigned int protoff,
++ bool *hotdrop)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
++static bool
++match(const struct sk_buff *skb,
++ const struct xt_match_param *par)
+#endif
+{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+ const struct ipt_set_info_match *info = matchinfo;
-+
++#else
++ const struct ipt_set_info_match *info = par->matchinfo;
++#endif
++
+ return match_set(&info->match_set,
+ skb,
+ info->match_set.flags[0] & IPSET_MATCH_INV);
+}
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+bool
-+#else
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
+static int
-+#endif
+checkentry(const char *tablename,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-+ const void *inf,
-+#else
+ const struct ipt_ip *ip,
-+#endif
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++ void *matchinfo,
++ unsigned int matchsize,
++ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static int
++checkentry(const char *tablename,
++ const void *inf,
++ void *matchinfo,
++ unsigned int matchsize,
++ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++static int
++checkentry(const char *tablename,
++ const void *inf,
+ const struct xt_match *match,
-+#endif
+ void *matchinfo,
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+ unsigned int matchsize,
-+#endif
+ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
++static int
++checkentry(const char *tablename,
++ const void *inf,
++ const struct xt_match *match,
++ void *matchinfo,
++ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++static bool
++checkentry(const char *tablename,
++ const void *inf,
++ const struct xt_match *match,
++ void *matchinfo,
++ unsigned int hook_mask)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
++static bool
++checkentry(const struct xt_mtchk_param *par)
++#endif
+{
-+ struct ipt_set_info_match *info =
-+ (struct ipt_set_info_match *) matchinfo;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++ struct ipt_set_info_match *info = matchinfo;
++#else
++ struct ipt_set_info_match *info = par->matchinfo;
++#endif
+ ip_set_id_t index;
+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
+ if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
+ ip_set_printk("invalid matchsize %d", matchsize);
+ return 0;
@@ -7280,7 +7241,7 @@
+#endif
+
+ index = ip_set_get_byindex(info->match_set.index);
-+
++
+ if (index == IP_SET_INVALID_ID) {
+ ip_set_printk("Cannot find set indentified by id %u to match",
+ info->match_set.index);
@@ -7294,65 +7255,75 @@
+ return 1;
+}
+
-+static void destroy(
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-+ const struct xt_match *match,
-+#endif
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ void *matchinfo, unsigned int matchsize)
-+#else
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static void destroy(void *matchinfo,
++ unsigned int matchsize)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++static void destroy(const struct xt_match *match,
++ void *matchinfo,
++ unsigned int matchsize)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++static void destroy(const struct xt_match *match,
+ void *matchinfo)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
++static void destroy(const struct xt_mtdtor_param *par)
+#endif
+{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+ struct ipt_set_info_match *info = matchinfo;
++#else
++ struct ipt_set_info_match *info = par->matchinfo;
++#endif
+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
+ if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
+ ip_set_printk("invalid matchsize %d", matchsize);
+ return;
+ }
+#endif
-+ ip_set_put(info->match_set.index);
++ ip_set_put_byindex(info->match_set.index);
+}
+
-+static struct ipt_match set_match = {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static struct xt_match set_match = {
++ .name = "set",
++ .match = &match,
++ .checkentry = &checkentry,
++ .destroy = &destroy,
++ .me = THIS_MODULE
++};
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17) */
++static struct xt_match set_match = {
+ .name = "set",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
+ .family = AF_INET,
-+#endif
+ .match = &match,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+ .matchsize = sizeof(struct ipt_set_info_match),
-+#endif
+ .checkentry = &checkentry,
+ .destroy = &destroy,
+ .me = THIS_MODULE
+};
++#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("iptables IP set match module");
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+#define ipt_register_match xt_register_match
-+#define ipt_unregister_match xt_unregister_match
-+#endif
-+
+static int __init ipt_ipset_init(void)
+{
-+ return ipt_register_match(&set_match);
++ return xt_register_match(&set_match);
+}
+
+static void __exit ipt_ipset_fini(void)
+{
-+ ipt_unregister_match(&set_match);
++ xt_unregister_match(&set_match);
+}
+
+module_init(ipt_ipset_init);
+module_exit(ipt_ipset_fini);
--- /dev/null
+++ b/net/ipv4/netfilter/ipt_SET.c
-@@ -0,0 +1,179 @@
+@@ -0,0 +1,242 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Martin Josefsson <gandalf@wlug.westbo.se>
@@ -7365,45 +7336,75 @@
+
+/* ipt_SET.c - netfilter target to manipulate IP sets */
+
-+#include <linux/types.h>
-+#include <linux/ip.h>
-+#include <linux/timer.h>
+#include <linux/module.h>
-+#include <linux/netfilter.h>
-+#include <linux/netdevice.h>
-+#include <linux/if.h>
-+#include <linux/inetdevice.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
+#include <linux/version.h>
-+#include <net/protocol.h>
-+#include <net/checksum.h>
++
+#include <linux/netfilter_ipv4.h>
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
+#include <linux/netfilter_ipv4/ip_tables.h>
++#define xt_register_target ipt_register_target
++#define xt_unregister_target ipt_unregister_target
++#define xt_target ipt_target
++#define XT_CONTINUE IPT_CONTINUE
++#else
++#include <linux/netfilter/x_tables.h>
++#endif
+#include <linux/netfilter_ipv4/ipt_set.h>
+
+static unsigned int
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
-+target(struct sk_buff *skb,
-+#else
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++target(struct sk_buff **pskb,
++ unsigned int hooknum,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *targinfo,
++ void *userinfo)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++target(struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ unsigned int hooknum,
++ const void *targinfo,
++ void *userinfo)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+target(struct sk_buff **pskb,
-+#endif
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int hooknum,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+ const struct xt_target *target,
-+#endif
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+ const void *targinfo,
+ void *userinfo)
-+#else
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
++target(struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ unsigned int hooknum,
++ const struct xt_target *target,
++ const void *targinfo)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++target(struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ unsigned int hooknum,
++ const struct xt_target *target,
+ const void *targinfo)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
++target(struct sk_buff *skb,
++ const struct xt_target_param *par)
+#endif
+{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+ const struct ipt_set_info_target *info = targinfo;
++#else
++ const struct ipt_set_info_target *info = par->targinfo;
++#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+ struct sk_buff *skb = *pskb;
+#endif
+
++
+ if (info->add_set.index != IP_SET_INVALID_ID)
+ ip_set_addip_kernel(info->add_set.index,
+ skb,
@@ -7413,34 +7414,58 @@
+ skb,
+ info->del_set.flags);
+
-+ return IPT_CONTINUE;
++ return XT_CONTINUE;
+}
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+static bool
-+#else
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
+static int
-+#endif
+checkentry(const char *tablename,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-+ const void *e,
-+#else
+ const struct ipt_entry *e,
-+#endif
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++ void *targinfo,
++ unsigned int targinfosize,
++ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static int
++checkentry(const char *tablename,
++ const void *e,
++ void *targinfo,
++ unsigned int targinfosize,
++ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++static int
++checkentry(const char *tablename,
++ const void *e,
+ const struct xt_target *target,
-+#endif
+ void *targinfo,
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+ unsigned int targinfosize,
-+#endif
+ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
++static int
++checkentry(const char *tablename,
++ const void *e,
++ const struct xt_target *target,
++ void *targinfo,
++ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++static bool
++checkentry(const char *tablename,
++ const void *e,
++ const struct xt_target *target,
++ void *targinfo,
++ unsigned int hook_mask)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
++static bool
++checkentry(const struct xt_tgchk_param *par)
++#endif
+{
-+ struct ipt_set_info_target *info =
-+ (struct ipt_set_info_target *) targinfo;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++ const struct ipt_set_info_target *info = targinfo;
++#else
++ const struct ipt_set_info_target *info = par->targinfo;
++#endif
+ ip_set_id_t index;
+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
+ if (targinfosize != IPT_ALIGN(sizeof(*info))) {
+ DP("bad target info size %u", targinfosize);
+ return 0;
@@ -7473,68 +7498,77 @@
+ return 1;
+}
+
-+static void destroy(
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-+ const struct xt_target *target,
-+#endif
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ void *targetinfo, unsigned int targetsize)
-+#else
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static void destroy(void *targetinfo,
++ unsigned int targetsize)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++static void destroy(const struct xt_target *target,
++ void *targetinfo,
++ unsigned int targetsize)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++static void destroy(const struct xt_target *target,
+ void *targetinfo)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
++static void destroy(const struct xt_tgdtor_param *par)
+#endif
+{
-+ struct ipt_set_info_target *info = targetinfo;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++ const struct ipt_set_info_target *info = targetinfo;
++#else
++ const struct ipt_set_info_target *info = par->targinfo;
++#endif
+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
+ if (targetsize != IPT_ALIGN(sizeof(struct ipt_set_info_target))) {
+ ip_set_printk("invalid targetsize %d", targetsize);
+ return;
+ }
+#endif
+ if (info->add_set.index != IP_SET_INVALID_ID)
-+ ip_set_put(info->add_set.index);
++ ip_set_put_byindex(info->add_set.index);
+ if (info->del_set.index != IP_SET_INVALID_ID)
-+ ip_set_put(info->del_set.index);
++ ip_set_put_byindex(info->del_set.index);
+}
+
-+static struct ipt_target SET_target = {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static struct xt_target SET_target = {
++ .name = "SET",
++ .target = target,
++ .checkentry = checkentry,
++ .destroy = destroy,
++ .me = THIS_MODULE
++};
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17) */
++static struct xt_target SET_target = {
+ .name = "SET",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
+ .family = AF_INET,
-+#endif
+ .target = target,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+ .targetsize = sizeof(struct ipt_set_info_target),
-+#endif
+ .checkentry = checkentry,
+ .destroy = destroy,
+ .me = THIS_MODULE
+};
++#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("iptables IP set target module");
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+#define ipt_register_target xt_register_target
-+#define ipt_unregister_target xt_unregister_target
-+#endif
-+
+static int __init ipt_SET_init(void)
+{
-+ return ipt_register_target(&SET_target);
++ return xt_register_target(&SET_target);
+}
+
+static void __exit ipt_SET_fini(void)
+{
-+ ipt_unregister_target(&SET_target);
++ xt_unregister_target(&SET_target);
+}
+
+module_init(ipt_SET_init);
+module_exit(ipt_SET_fini);
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
-@@ -406,5 +406,122 @@ config IP_NF_ARP_MANGLE
+@@ -406,5 +406,146 @@ config IP_NF_ARP_MANGLE
Allows altering the ARP packet payload: source and destination
hardware and network addresses.
@@ -7619,6 +7653,22 @@
+
+ To compile it as a module, choose M here. If unsure, say N.
+
++config IP_NF_SET_IPPORTIPHASH
++ tristate "ipportiphash set support"
++ depends on IP_NF_SET
++ help
++ This option adds the ipportiphash set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config IP_NF_SET_IPPORTNETHASH
++ tristate "ipportnethash set support"
++ depends on IP_NF_SET
++ help
++ This option adds the ipportnethash set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
+config IP_NF_SET_IPTREE
+ tristate "iptree set support"
+ depends on IP_NF_SET
@@ -7635,6 +7685,14 @@
+
+ To compile it as a module, choose M here. If unsure, say N.
+
++config IP_NF_SET_SETLIST
++ tristate "setlist set support"
++ depends on IP_NF_SET
++ help
++ This option adds the setlist set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
+config IP_NF_MATCH_SET
+ tristate "set match support"
+ depends on IP_NF_SET
@@ -7667,7 +7725,7 @@
# targets
obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
-@@ -61,6 +62,18 @@ obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += i
+@@ -61,6 +62,21 @@ obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += i
obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o
obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
@@ -7681,8 +7739,11 @@
+obj-$(CONFIG_IP_NF_SET_IPHASH) += ip_set_iphash.o
+obj-$(CONFIG_IP_NF_SET_NETHASH) += ip_set_nethash.o
+obj-$(CONFIG_IP_NF_SET_IPPORTHASH) += ip_set_ipporthash.o
++obj-$(CONFIG_IP_NF_SET_IPPORTIPHASH) += ip_set_ipportiphash.o
++obj-$(CONFIG_IP_NF_SET_IPPORTNETHASH) += ip_set_ipportnethash.o
+obj-$(CONFIG_IP_NF_SET_IPTREE) += ip_set_iptree.o
+obj-$(CONFIG_IP_NF_SET_IPTREEMAP) += ip_set_iptreemap.o
++obj-$(CONFIG_IP_NF_SET_SETLIST) += ip_set_setlist.o
# generic ARP tables
obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o
diff --git a/target/linux/generic-2.6/patches-2.6.28/130-netfilter_ipset.patch b/target/linux/generic-2.6/patches-2.6.28/130-netfilter_ipset.patch
index 87bf3c25f9..f127533d2b 100644
--- a/target/linux/generic-2.6/patches-2.6.28/130-netfilter_ipset.patch
+++ b/target/linux/generic-2.6/patches-2.6.28/130-netfilter_ipset.patch
@@ -1,23 +1,29 @@
--- a/include/linux/netfilter_ipv4/Kbuild
+++ b/include/linux/netfilter_ipv4/Kbuild
-@@ -45,3 +45,14 @@ header-y += ipt_ttl.h
+@@ -45,3 +45,20 @@ header-y += ipt_ttl.h
unifdef-y += ip_queue.h
unifdef-y += ip_tables.h
+
+unifdef-y += ip_set.h
+header-y += ip_set_iphash.h
++unifdef-y += ip_set_bitmaps.h
++unifdef-y += ip_set_getport.h
++unifdef-y += ip_set_hashes.h
+header-y += ip_set_ipmap.h
+header-y += ip_set_ipporthash.h
++header-y += ip_set_ipportiphash.h
++header-y += ip_set_ipportnethash.h
+unifdef-y += ip_set_iptree.h
+unifdef-y += ip_set_iptreemap.h
+header-y += ip_set_jhash.h
+header-y += ip_set_macipmap.h
-+unifdef-y += ip_set_nethash.h
++header-y += ip_set_nethash.h
+header-y += ip_set_portmap.h
++header-y += ip_set_setlist.h
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set.h
-@@ -0,0 +1,498 @@
+@@ -0,0 +1,574 @@
+#ifndef _IP_SET_H
+#define _IP_SET_H
+
@@ -28,7 +34,7 @@
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
++ * published by the Free Software Foundation.
+ */
+
+#if 0
@@ -57,10 +63,10 @@
+ * - in order to "deal with" backward compatibility, renamed to ipset
+ */
+
-+/*
-+ * Used so that the kernel module and ipset-binary can match their versions
++/*
++ * Used so that the kernel module and ipset-binary can match their versions
+ */
-+#define IP_SET_PROTOCOL_VERSION 2
++#define IP_SET_PROTOCOL_VERSION 3
+
+#define IP_SET_MAXNAMELEN 32 /* set names and set typenames */
+
@@ -69,7 +75,7 @@
+ *
+ * The representation works in HOST byte order, because most set types
+ * will perform arithmetic operations and compare operations.
-+ *
++ *
+ * For now the type is an uint32_t.
+ *
+ * Make sure to ONLY use the functions when translating and parsing
@@ -107,6 +113,9 @@
+#define IPSET_TYPE_PORT 0x02 /* Port type of set */
+#define IPSET_DATA_SINGLE 0x04 /* Single data storage */
+#define IPSET_DATA_DOUBLE 0x08 /* Double data storage */
++#define IPSET_DATA_TRIPLE 0x10 /* Triple data storage */
++#define IPSET_TYPE_IP1 0x20 /* IP address type of set */
++#define IPSET_TYPE_SETNAME 0x40 /* setname type of set */
+
+/* Reserved keywords */
+#define IPSET_TOKEN_DEFAULT ":default:"
@@ -120,8 +129,8 @@
+ * 200-299: list, save, restore
+ */
+
-+/* Single shot operations:
-+ * version, create, destroy, flush, rename and swap
++/* Single shot operations:
++ * version, create, destroy, flush, rename and swap
+ *
+ * Sets are identified by name.
+ */
@@ -172,7 +181,7 @@
+ unsigned version;
+};
+
-+/* Double shots operations:
++/* Double shots operations:
+ * add, del, test, bind and unbind.
+ *
+ * First we query the kernel to get the index and type of the target set,
@@ -214,7 +223,7 @@
+};
+
+#define IP_SET_OP_UNBIND_SET 0x00000105 /* Unbind an IP from a set */
-+/* Uses ip_set_req_bind, with type speficic addage
++/* Uses ip_set_req_bind, with type specific data appended
+ * index = 0 means unbinding for all sets */
+
+#define IP_SET_OP_TEST_BIND_SET 0x00000106 /* Test binding an IP to a set */
@@ -245,7 +254,7 @@
+struct ip_set_req_setnames {
+ unsigned op;
+ ip_set_id_t index; /* set to list/save */
-+ size_t size; /* size to get setdata/bindings */
++ u_int32_t size; /* size to get setdata/bindings */
+ /* followed by sets number of struct ip_set_name_list */
+};
+
@@ -260,16 +269,16 @@
+#define IP_SET_OP_LIST 0x00000203
+struct ip_set_req_list {
+ IP_SET_REQ_BYINDEX;
-+ /* sets number of struct ip_set_list in reply */
++ /* sets number of struct ip_set_list in reply */
+};
+
+struct ip_set_list {
+ ip_set_id_t index;
+ ip_set_id_t binding;
+ u_int32_t ref;
-+ size_t header_size; /* Set header data of header_size */
-+ size_t members_size; /* Set members data of members_size */
-+ size_t bindings_size; /* Set bindings data of bindings_size */
++ u_int32_t header_size; /* Set header data of header_size */
++ u_int32_t members_size; /* Set members data of members_size */
++ u_int32_t bindings_size;/* Set bindings data of bindings_size */
+};
+
+struct ip_set_hash_list {
@@ -286,8 +295,8 @@
+struct ip_set_save {
+ ip_set_id_t index;
+ ip_set_id_t binding;
-+ size_t header_size; /* Set header data of header_size */
-+ size_t members_size; /* Set members data of members_size */
++ u_int32_t header_size; /* Set header data of header_size */
++ u_int32_t members_size; /* Set members data of members_size */
+};
+
+/* At restoring, ip == 0 means default binding for the given set: */
@@ -300,15 +309,15 @@
+/* The restore operation */
+#define IP_SET_OP_RESTORE 0x00000205
+/* Uses ip_set_req_setnames followed by ip_set_restore structures
-+ * plus a marker ip_set_restore, followed by ip_set_hash_save
++ * plus a marker ip_set_restore, followed by ip_set_hash_save
+ * structures.
+ */
+struct ip_set_restore {
+ char name[IP_SET_MAXNAMELEN];
+ char typename[IP_SET_MAXNAMELEN];
+ ip_set_id_t index;
-+ size_t header_size; /* Create data of header_size */
-+ size_t members_size; /* Set members data of members_size */
++ u_int32_t header_size; /* Create data of header_size */
++ u_int32_t members_size; /* Set members data of members_size */
+};
+
+static inline int bitmap_bytes(ip_set_ip_t a, ip_set_ip_t b)
@@ -316,7 +325,12 @@
+ return 4 * ((((b - a + 8) / 8) + 3) / 4);
+}
+
++/* General limit for the elements in a set */
++#define MAX_RANGE 0x0000FFFF
++
+#ifdef __KERNEL__
++#include <linux/netfilter_ipv4/ip_set_compat.h>
++#include <linux/netfilter_ipv4/ip_set_malloc.h>
+
+#define ip_set_printk(format, args...) \
+ do { \
@@ -361,7 +375,7 @@
+ * return 0 if not in set, 1 if in set.
+ */
+ int (*testip_kernel) (struct ip_set *set,
-+ const struct sk_buff * skb,
++ const struct sk_buff * skb,
+ ip_set_ip_t *ip,
+ const u_int32_t *flags,
+ unsigned char index);
@@ -370,22 +384,22 @@
+ * return 0 if not in set, 1 if in set.
+ */
+ int (*testip) (struct ip_set *set,
-+ const void *data, size_t size,
++ const void *data, u_int32_t size,
+ ip_set_ip_t *ip);
+
+ /*
+ * Size of the data structure passed by when
+ * adding/deletin/testing an entry.
+ */
-+ size_t reqsize;
++ u_int32_t reqsize;
+
+ /* Add IP into set (userspace: ipset -A set IP)
+ * Return -EEXIST if the address is already in the set,
+ * and -ERANGE if the address lies outside the set bounds.
+ * If the address was not already in the set, 0 is returned.
+ */
-+ int (*addip) (struct ip_set *set,
-+ const void *data, size_t size,
++ int (*addip) (struct ip_set *set,
++ const void *data, u_int32_t size,
+ ip_set_ip_t *ip);
+
+ /* Add IP into set (kernel: iptables ... -j SET set src|dst)
@@ -394,7 +408,7 @@
+ * If the address was not already in the set, 0 is returned.
+ */
+ int (*addip_kernel) (struct ip_set *set,
-+ const struct sk_buff * skb,
++ const struct sk_buff * skb,
+ ip_set_ip_t *ip,
+ const u_int32_t *flags,
+ unsigned char index);
@@ -404,8 +418,8 @@
+ * and -ERANGE if the address lies outside the set bounds.
+ * If the address really was in the set, 0 is returned.
+ */
-+ int (*delip) (struct ip_set *set,
-+ const void *data, size_t size,
++ int (*delip) (struct ip_set *set,
++ const void *data, u_int32_t size,
+ ip_set_ip_t *ip);
+
+ /* remove IP from set (kernel: iptables ... -j SET --entry x)
@@ -414,7 +428,7 @@
+ * If the address really was in the set, 0 is returned.
+ */
+ int (*delip_kernel) (struct ip_set *set,
-+ const struct sk_buff * skb,
++ const struct sk_buff * skb,
+ ip_set_ip_t *ip,
+ const u_int32_t *flags,
+ unsigned char index);
@@ -422,7 +436,7 @@
+ /* new set creation - allocated type specific items
+ */
+ int (*create) (struct ip_set *set,
-+ const void *data, size_t size);
++ const void *data, u_int32_t size);
+
+ /* retry the operation after successfully tweaking the set
+ */
@@ -441,16 +455,16 @@
+
+ /* Listing: size needed for header
+ */
-+ size_t header_size;
++ u_int32_t header_size;
+
+ /* Listing: Get the header
+ *
+ * Fill in the information in "data".
-+ * This function is always run after list_header_size() under a
-+ * writelock on the set. Therefor is the length of "data" always
-+ * correct.
++	 * This function is always run after list_header_size() under a
++	 * writelock on the set. Therefore the length of "data" is always
++	 * correct.
+ */
-+ void (*list_header) (const struct ip_set *set,
++ void (*list_header) (const struct ip_set *set,
+ void *data);
+
+ /* Listing: Get the size for the set members
@@ -460,9 +474,9 @@
+ /* Listing: Get the set members
+ *
+ * Fill in the information in "data".
-+ * This function is always run after list_member_size() under a
-+ * writelock on the set. Therefor is the length of "data" always
-+ * correct.
++	 * This function is always run after list_member_size() under a
++	 * writelock on the set. Therefore the length of "data" is always
++	 * correct.
+ */
+ void (*list_members) (const struct ip_set *set,
+ void *data);
@@ -499,33 +513,659 @@
+
+/* register and unregister set references */
+extern ip_set_id_t ip_set_get_byname(const char name[IP_SET_MAXNAMELEN]);
-+extern ip_set_id_t ip_set_get_byindex(ip_set_id_t id);
-+extern void ip_set_put(ip_set_id_t id);
++extern ip_set_id_t ip_set_get_byindex(ip_set_id_t index);
++extern void ip_set_put_byindex(ip_set_id_t index);
++extern ip_set_id_t ip_set_id(ip_set_id_t index);
++extern ip_set_id_t __ip_set_get_byname(const char name[IP_SET_MAXNAMELEN],
++ struct ip_set **set);
++extern void __ip_set_put_byindex(ip_set_id_t index);
+
+/* API for iptables set match, and SET target */
-+extern void ip_set_addip_kernel(ip_set_id_t id,
-+ const struct sk_buff *skb,
-+ const u_int32_t *flags);
-+extern void ip_set_delip_kernel(ip_set_id_t id,
-+ const struct sk_buff *skb,
-+ const u_int32_t *flags);
++extern int ip_set_addip_kernel(ip_set_id_t id,
++ const struct sk_buff *skb,
++ const u_int32_t *flags);
++extern int ip_set_delip_kernel(ip_set_id_t id,
++ const struct sk_buff *skb,
++ const u_int32_t *flags);
+extern int ip_set_testip_kernel(ip_set_id_t id,
+ const struct sk_buff *skb,
+ const u_int32_t *flags);
+
++/* Macros to generate functions */
++
++#define STRUCT(pre, type) CONCAT2(pre, type)
++#define CONCAT2(pre, type) struct pre##type
++
++#define FNAME(pre, mid, post) CONCAT3(pre, mid, post)
++#define CONCAT3(pre, mid, post) pre##mid##post
++
++#define UADT0(type, adt, args...) \
++static int \
++FNAME(type,_u,adt)(struct ip_set *set, const void *data, u_int32_t size,\
++ ip_set_ip_t *hash_ip) \
++{ \
++ const STRUCT(ip_set_req_,type) *req = data; \
++ \
++ return FNAME(type,_,adt)(set, hash_ip , ## args); \
++}
++
++#define UADT(type, adt, args...) \
++ UADT0(type, adt, req->ip , ## args)
++
++#define KADT(type, adt, getfn, args...) \
++static int \
++FNAME(type,_k,adt)(struct ip_set *set, \
++ const struct sk_buff *skb, \
++ ip_set_ip_t *hash_ip, \
++ const u_int32_t *flags, \
++ unsigned char index) \
++{ \
++ ip_set_ip_t ip = getfn(skb, flags[index]); \
++ \
++ KADT_CONDITION \
++ return FNAME(type,_,adt)(set, hash_ip, ip , ##args); \
++}
++
++#define REGISTER_MODULE(type) \
++static int __init ip_set_##type##_init(void) \
++{ \
++ init_max_page_size(); \
++ return ip_set_register_set_type(&ip_set_##type); \
++} \
++ \
++static void __exit ip_set_##type##_fini(void) \
++{ \
++ /* FIXME: possible race with ip_set_create() */ \
++ ip_set_unregister_set_type(&ip_set_##type); \
++} \
++ \
++module_init(ip_set_##type##_init); \
++module_exit(ip_set_##type##_fini);
++
++/* Common functions */
++
++static inline ip_set_ip_t
++ipaddr(const struct sk_buff *skb, u_int32_t flag)
++{
++ return ntohl(flag & IPSET_SRC ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr);
++}
++
++#define jhash_ip(map, i, ip) jhash_1word(ip, *(map->initval + i))
++
++#define pack_ip_port(map, ip, port) \
++ (port + ((ip - ((map)->first_ip)) << 16))
++
+#endif /* __KERNEL__ */
+
+#endif /*_IP_SET_H*/
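/*
 * Illustrative check (not part of the patch): bitmap_bytes() defined in
 * ip_set.h above sizes the member area of the bitmap set types by rounding
 * the bit range a..b up to whole 32-bit words.  A minimal userspace sketch,
 * assuming only that ip_set_ip_t is the uint32_t declared in that header;
 * the function body is copied verbatim.
 */
#include <assert.h>
#include <stdint.h>

typedef uint32_t ip_set_ip_t;

static inline int bitmap_bytes(ip_set_ip_t a, ip_set_ip_t b)
{
	return 4 * ((((b - a + 8) / 8) + 3) / 4);
}

int main(void)
{
	assert(bitmap_bytes(0, 0) == 4);	/* a single bit still takes one word */
	assert(bitmap_bytes(0, 31) == 4);	/* 32 bits fit exactly in one word */
	assert(bitmap_bytes(0, 32) == 8);	/* 33 bits round up to two words */
	assert(bitmap_bytes(0, 65535) == 8192);	/* a full 16-bit range: 8 KiB */
	return 0;
}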
--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_bitmaps.h
+@@ -0,0 +1,121 @@
++#ifndef __IP_SET_BITMAPS_H
++#define __IP_SET_BITMAPS_H
++
++/* Macros to generate functions */
++
++#ifdef __KERNEL__
++#define BITMAP_CREATE(type) \
++static int \
++type##_create(struct ip_set *set, const void *data, u_int32_t size) \
++{ \
++ int newbytes; \
++ const struct ip_set_req_##type##_create *req = data; \
++ struct ip_set_##type *map; \
++ \
++ if (req->from > req->to) { \
++ DP("bad range"); \
++ return -ENOEXEC; \
++ } \
++ \
++ map = kmalloc(sizeof(struct ip_set_##type), GFP_KERNEL); \
++ if (!map) { \
++ DP("out of memory for %zu bytes", \
++ sizeof(struct ip_set_##type)); \
++ return -ENOMEM; \
++ } \
++ map->first_ip = req->from; \
++ map->last_ip = req->to; \
++ \
++ newbytes = __##type##_create(req, map); \
++ if (newbytes < 0) { \
++ kfree(map); \
++ return newbytes; \
++ } \
++ \
++ map->size = newbytes; \
++ map->members = ip_set_malloc(newbytes); \
++ if (!map->members) { \
++ DP("out of memory for %i bytes", newbytes); \
++ kfree(map); \
++ return -ENOMEM; \
++ } \
++ memset(map->members, 0, newbytes); \
++ \
++ set->data = map; \
++ return 0; \
++}
++
++#define BITMAP_DESTROY(type) \
++static void \
++type##_destroy(struct ip_set *set) \
++{ \
++ struct ip_set_##type *map = set->data; \
++ \
++ ip_set_free(map->members, map->size); \
++ kfree(map); \
++ \
++ set->data = NULL; \
++}
++
++#define BITMAP_FLUSH(type) \
++static void \
++type##_flush(struct ip_set *set) \
++{ \
++ struct ip_set_##type *map = set->data; \
++ memset(map->members, 0, map->size); \
++}
++
++#define BITMAP_LIST_HEADER(type) \
++static void \
++type##_list_header(const struct ip_set *set, void *data) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ struct ip_set_req_##type##_create *header = data; \
++ \
++ header->from = map->first_ip; \
++ header->to = map->last_ip; \
++ __##type##_list_header(map, header); \
++}
++
++#define BITMAP_LIST_MEMBERS_SIZE(type) \
++static int \
++type##_list_members_size(const struct ip_set *set) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ \
++ return map->size; \
++}
++
++#define BITMAP_LIST_MEMBERS(type) \
++static void \
++type##_list_members(const struct ip_set *set, void *data) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ \
++ memcpy(data, map->members, map->size); \
++}
++
++#define IP_SET_TYPE(type, __features) \
++struct ip_set_type ip_set_##type = { \
++ .typename = #type, \
++ .features = __features, \
++ .protocol_version = IP_SET_PROTOCOL_VERSION, \
++ .create = &type##_create, \
++ .destroy = &type##_destroy, \
++ .flush = &type##_flush, \
++ .reqsize = sizeof(struct ip_set_req_##type), \
++ .addip = &type##_uadd, \
++ .addip_kernel = &type##_kadd, \
++ .delip = &type##_udel, \
++ .delip_kernel = &type##_kdel, \
++ .testip = &type##_utest, \
++ .testip_kernel = &type##_ktest, \
++ .header_size = sizeof(struct ip_set_req_##type##_create),\
++ .list_header = &type##_list_header, \
++ .list_members_size = &type##_list_members_size, \
++ .list_members = &type##_list_members, \
++ .me = THIS_MODULE, \
++};
++#endif /* __KERNEL */
++
++#endif /* __IP_SET_BITMAPS_H */
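/*
 * Hedged summary of the contract implied by the macro bodies above (not
 * taken from ipset documentation): a bitmap set type that uses these
 * generators is expected to supply
 *
 *   - struct ip_set_##type              with members, first_ip, last_ip, size;
 *   - struct ip_set_req_##type##_create with at least from and to;
 *   - __##type##_create(req, map)       returning the member-area size in
 *                                       bytes, or a negative errno;
 *   - __##type##_list_header(map, hdr)  filling the type-specific part of
 *                                       the create request for listing.
 *
 * The type##_uadd/_kadd/_udel/_kdel/_utest/_ktest handlers referenced by
 * IP_SET_TYPE() are not generated here; they are typically produced with
 * the UADT()/KADT() macros from ip_set.h or written by hand in the type
 * module.
 */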
+--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_compat.h
+@@ -0,0 +1,71 @@
++#ifndef _IP_SET_COMPAT_H
++#define _IP_SET_COMPAT_H
++
++#ifdef __KERNEL__
++#include <linux/version.h>
++
++/* Arrgh */
++#ifdef MODULE
++#define __MOD_INC(foo) __MOD_INC_USE_COUNT(foo)
++#define __MOD_DEC(foo) __MOD_DEC_USE_COUNT(foo)
++#else
++#define __MOD_INC(foo) 1
++#define __MOD_DEC(foo)
++#endif
++
++/* Backward compatibility */
++#ifndef __nocast
++#define __nocast
++#endif
++#ifndef __bitwise__
++#define __bitwise__
++#endif
++
++/* Compatibility glue code */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++#include <linux/interrupt.h>
++#define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED
++#define try_module_get(x) __MOD_INC(x)
++#define module_put(x) __MOD_DEC(x)
++#define __clear_bit(nr, addr) clear_bit(nr, addr)
++#define __set_bit(nr, addr) set_bit(nr, addr)
++#define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)
++#define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)
++
++typedef unsigned __bitwise__ gfp_t;
++
++static inline void *kzalloc(size_t size, gfp_t flags)
++{
++ void *data = kmalloc(size, flags);
++
++ if (data)
++ memset(data, 0, size);
++
++ return data;
++}
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++#define __KMEM_CACHE_T__ kmem_cache_t
++#else
++#define __KMEM_CACHE_T__ struct kmem_cache
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
++#define ip_hdr(skb) ((skb)->nh.iph)
++#define skb_mac_header(skb) ((skb)->mac.raw)
++#define eth_hdr(skb) ((struct ethhdr *)skb_mac_header(skb))
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
++#include <linux/netfilter.h>
++#define KMEM_CACHE_CREATE(name, size) \
++ kmem_cache_create(name, size, 0, 0, NULL, NULL)
++#else
++#define KMEM_CACHE_CREATE(name, size) \
++ kmem_cache_create(name, size, 0, 0, NULL)
++#endif
++
++
++#endif /* __KERNEL__ */
++#endif /* _IP_SET_COMPAT_H */
+--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_getport.h
+@@ -0,0 +1,48 @@
++#ifndef _IP_SET_GETPORT_H
++#define _IP_SET_GETPORT_H
++
++#ifdef __KERNEL__
++
++#define INVALID_PORT (MAX_RANGE + 1)
++
++/* We must handle non-linear skbs */
++static inline ip_set_ip_t
++get_port(const struct sk_buff *skb, u_int32_t flags)
++{
++ struct iphdr *iph = ip_hdr(skb);
++ u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
++ switch (iph->protocol) {
++ case IPPROTO_TCP: {
++ struct tcphdr tcph;
++
++ /* See comments at tcp_match in ip_tables.c */
++ if (offset)
++ return INVALID_PORT;
++
++ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
++ /* No choice either */
++ return INVALID_PORT;
++
++ return ntohs(flags & IPSET_SRC ?
++ tcph.source : tcph.dest);
++ }
++ case IPPROTO_UDP: {
++ struct udphdr udph;
++
++ if (offset)
++ return INVALID_PORT;
++
++ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
++ /* No choice either */
++ return INVALID_PORT;
++
++ return ntohs(flags & IPSET_SRC ?
++ udph.source : udph.dest);
++ }
++ default:
++ return INVALID_PORT;
++ }
++}
++#endif /* __KERNEL__ */
++
++#endif /*_IP_SET_GETPORT_H*/
+--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_hashes.h
+@@ -0,0 +1,306 @@
++#ifndef __IP_SET_HASHES_H
++#define __IP_SET_HASHES_H
++
++#define initval_t uint32_t
++
++/* Macros to generate functions */
++
++#ifdef __KERNEL__
++#define HASH_RETRY0(type, dtype, cond) \
++static int \
++type##_retry(struct ip_set *set) \
++{ \
++ struct ip_set_##type *map = set->data, *tmp; \
++ dtype *elem; \
++ void *members; \
++ u_int32_t i, hashsize = map->hashsize; \
++ int res; \
++ \
++ if (map->resize == 0) \
++ return -ERANGE; \
++ \
++ again: \
++ res = 0; \
++ \
++ /* Calculate new hash size */ \
++ hashsize += (hashsize * map->resize)/100; \
++ if (hashsize == map->hashsize) \
++ hashsize++; \
++ \
++ ip_set_printk("rehashing of set %s triggered: " \
++ "hashsize grows from %lu to %lu", \
++ set->name, \
++ (long unsigned)map->hashsize, \
++ (long unsigned)hashsize); \
++ \
++ tmp = kmalloc(sizeof(struct ip_set_##type) \
++ + map->probes * sizeof(initval_t), GFP_ATOMIC); \
++ if (!tmp) { \
++ DP("out of memory for %zu bytes", \
++ sizeof(struct ip_set_##type) \
++ + map->probes * sizeof(initval_t)); \
++ return -ENOMEM; \
++ } \
++ tmp->members = harray_malloc(hashsize, sizeof(dtype), GFP_ATOMIC);\
++ if (!tmp->members) { \
++ DP("out of memory for %zu bytes", hashsize * sizeof(dtype));\
++ kfree(tmp); \
++ return -ENOMEM; \
++ } \
++ tmp->hashsize = hashsize; \
++ tmp->elements = 0; \
++ tmp->probes = map->probes; \
++ tmp->resize = map->resize; \
++ memcpy(tmp->initval, map->initval, map->probes * sizeof(initval_t));\
++ __##type##_retry(tmp, map); \
++ \
++ write_lock_bh(&set->lock); \
++ map = set->data; /* Play safe */ \
++ for (i = 0; i < map->hashsize && res == 0; i++) { \
++ elem = HARRAY_ELEM(map->members, dtype *, i); \
++ if (cond) \
++ res = __##type##_add(tmp, elem); \
++ } \
++ if (res) { \
++ /* Failure, try again */ \
++ write_unlock_bh(&set->lock); \
++ harray_free(tmp->members); \
++ kfree(tmp); \
++ goto again; \
++ } \
++ \
++ /* Success at resizing! */ \
++ members = map->members; \
++ \
++ map->hashsize = tmp->hashsize; \
++ map->members = tmp->members; \
++ write_unlock_bh(&set->lock); \
++ \
++ harray_free(members); \
++ kfree(tmp); \
++ \
++ return 0; \
++}
++
++#define HASH_RETRY(type, dtype) \
++ HASH_RETRY0(type, dtype, *elem)
++
++#define HASH_RETRY2(type, dtype) \
++ HASH_RETRY0(type, dtype, elem->ip || elem->ip1)
++
++#define HASH_CREATE(type, dtype) \
++static int \
++type##_create(struct ip_set *set, const void *data, u_int32_t size) \
++{ \
++ const struct ip_set_req_##type##_create *req = data; \
++ struct ip_set_##type *map; \
++ uint16_t i; \
++ \
++ if (req->hashsize < 1) { \
++ ip_set_printk("hashsize too small"); \
++ return -ENOEXEC; \
++ } \
++ \
++ if (req->probes < 1) { \
++ ip_set_printk("probes too small"); \
++ return -ENOEXEC; \
++ } \
++ \
++ map = kmalloc(sizeof(struct ip_set_##type) \
++ + req->probes * sizeof(initval_t), GFP_KERNEL); \
++ if (!map) { \
++ DP("out of memory for %zu bytes", \
++ sizeof(struct ip_set_##type) \
++ + req->probes * sizeof(initval_t)); \
++ return -ENOMEM; \
++ } \
++ for (i = 0; i < req->probes; i++) \
++ get_random_bytes(((initval_t *) map->initval)+i, 4); \
++ map->elements = 0; \
++ map->hashsize = req->hashsize; \
++ map->probes = req->probes; \
++ map->resize = req->resize; \
++ if (__##type##_create(req, map)) { \
++ kfree(map); \
++ return -ENOEXEC; \
++ } \
++ map->members = harray_malloc(map->hashsize, sizeof(dtype), GFP_KERNEL);\
++ if (!map->members) { \
++ DP("out of memory for %zu bytes", map->hashsize * sizeof(dtype));\
++ kfree(map); \
++ return -ENOMEM; \
++ } \
++ \
++ set->data = map; \
++ return 0; \
++}
++
++#define HASH_DESTROY(type) \
++static void \
++type##_destroy(struct ip_set *set) \
++{ \
++ struct ip_set_##type *map = set->data; \
++ \
++ harray_free(map->members); \
++ kfree(map); \
++ \
++ set->data = NULL; \
++}
++
++#define HASH_FLUSH(type, dtype) \
++static void \
++type##_flush(struct ip_set *set) \
++{ \
++ struct ip_set_##type *map = set->data; \
++ harray_flush(map->members, map->hashsize, sizeof(dtype)); \
++ map->elements = 0; \
++}
++
++#define HASH_FLUSH_CIDR(type, dtype) \
++static void \
++type##_flush(struct ip_set *set) \
++{ \
++ struct ip_set_##type *map = set->data; \
++ harray_flush(map->members, map->hashsize, sizeof(dtype)); \
++ memset(map->cidr, 0, sizeof(map->cidr)); \
++ memset(map->nets, 0, sizeof(map->nets)); \
++ map->elements = 0; \
++}
++
++#define HASH_LIST_HEADER(type) \
++static void \
++type##_list_header(const struct ip_set *set, void *data) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ struct ip_set_req_##type##_create *header = data; \
++ \
++ header->hashsize = map->hashsize; \
++ header->probes = map->probes; \
++ header->resize = map->resize; \
++ __##type##_list_header(map, header); \
++}
++
++#define HASH_LIST_MEMBERS_SIZE(type, dtype) \
++static int \
++type##_list_members_size(const struct ip_set *set) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ \
++ return (map->hashsize * sizeof(dtype)); \
++}
++
++#define HASH_LIST_MEMBERS(type, dtype) \
++static void \
++type##_list_members(const struct ip_set *set, void *data) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ dtype *elem; \
++ uint32_t i; \
++ \
++ for (i = 0; i < map->hashsize; i++) { \
++ elem = HARRAY_ELEM(map->members, dtype *, i); \
++ ((dtype *)data)[i] = *elem; \
++ } \
++}
++
++#define HASH_LIST_MEMBERS_MEMCPY(type, dtype) \
++static void \
++type##_list_members(const struct ip_set *set, void *data) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ dtype *elem; \
++ uint32_t i; \
++ \
++ for (i = 0; i < map->hashsize; i++) { \
++ elem = HARRAY_ELEM(map->members, dtype *, i); \
++ memcpy((((dtype *)data)+i), elem, sizeof(dtype)); \
++ } \
++}
++
++#define IP_SET_RTYPE(type, __features) \
++struct ip_set_type ip_set_##type = { \
++ .typename = #type, \
++ .features = __features, \
++ .protocol_version = IP_SET_PROTOCOL_VERSION, \
++ .create = &type##_create, \
++ .retry = &type##_retry, \
++ .destroy = &type##_destroy, \
++ .flush = &type##_flush, \
++ .reqsize = sizeof(struct ip_set_req_##type), \
++ .addip = &type##_uadd, \
++ .addip_kernel = &type##_kadd, \
++ .delip = &type##_udel, \
++ .delip_kernel = &type##_kdel, \
++ .testip = &type##_utest, \
++ .testip_kernel = &type##_ktest, \
++ .header_size = sizeof(struct ip_set_req_##type##_create),\
++ .list_header = &type##_list_header, \
++ .list_members_size = &type##_list_members_size, \
++ .list_members = &type##_list_members, \
++ .me = THIS_MODULE, \
++};
++
++/* Helper functions */
++static inline void
++add_cidr_size(uint8_t *cidr, uint8_t size)
++{
++ uint8_t next;
++ int i;
++
++ for (i = 0; i < 30 && cidr[i]; i++) {
++ if (cidr[i] < size) {
++ next = cidr[i];
++ cidr[i] = size;
++ size = next;
++ }
++ }
++ if (i < 30)
++ cidr[i] = size;
++}
++
++static inline void
++del_cidr_size(uint8_t *cidr, uint8_t size)
++{
++ int i;
++
++ for (i = 0; i < 29 && cidr[i]; i++) {
++ if (cidr[i] == size)
++ cidr[i] = size = cidr[i+1];
++ }
++ cidr[29] = 0;
++}
++#else
++#include <arpa/inet.h>
++#endif /* __KERNEL */
++
++#ifndef UINT16_MAX
++#define UINT16_MAX 65535
++#endif
++
++static unsigned char shifts[] = {255, 253, 249, 241, 225, 193, 129, 1};
++
++static inline ip_set_ip_t
++pack_ip_cidr(ip_set_ip_t ip, unsigned char cidr)
++{
++ ip_set_ip_t addr, *paddr = &addr;
++ unsigned char n, t, *a;
++
++ addr = htonl(ip & (0xFFFFFFFF << (32 - (cidr))));
++#ifdef __KERNEL__
++ DP("ip:%u.%u.%u.%u/%u", NIPQUAD(addr), cidr);
++#endif
++ n = cidr / 8;
++ t = cidr % 8;
++ a = &((unsigned char *)paddr)[n];
++ *a = *a /(1 << (8 - t)) + shifts[t];
++#ifdef __KERNEL__
++ DP("n: %u, t: %u, a: %u", n, t, *a);
++ DP("ip:%u.%u.%u.%u/%u, %u.%u.%u.%u",
++ HIPQUAD(ip), cidr, NIPQUAD(addr));
++#endif
++
++ return ntohl(addr);
++}
++
++
++#endif /* __IP_SET_HASHES_H */
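/*
 * Illustrative check (not part of the patch): add_cidr_size() and
 * del_cidr_size() above keep the per-set cidr[] array of prefix lengths
 * sorted in decreasing order, so the net hash types can try the stored
 * prefix lengths from most to least specific.  The two helpers are copied
 * verbatim so the sketch builds in plain userspace.
 */
#include <assert.h>
#include <stdint.h>

static inline void
add_cidr_size(uint8_t *cidr, uint8_t size)
{
	uint8_t next;
	int i;

	for (i = 0; i < 30 && cidr[i]; i++) {
		if (cidr[i] < size) {
			next = cidr[i];
			cidr[i] = size;
			size = next;
		}
	}
	if (i < 30)
		cidr[i] = size;
}

static inline void
del_cidr_size(uint8_t *cidr, uint8_t size)
{
	int i;

	for (i = 0; i < 29 && cidr[i]; i++) {
		if (cidr[i] == size)
			cidr[i] = size = cidr[i+1];
	}
	cidr[29] = 0;
}

int main(void)
{
	uint8_t cidr[30] = {0};

	add_cidr_size(cidr, 24);
	add_cidr_size(cidr, 16);
	add_cidr_size(cidr, 28);
	assert(cidr[0] == 28 && cidr[1] == 24 && cidr[2] == 16 && cidr[3] == 0);

	del_cidr_size(cidr, 24);
	assert(cidr[0] == 28 && cidr[1] == 16 && cidr[2] == 0);
	return 0;
}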
+--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_iphash.h
@@ -0,0 +1,30 @@
+#ifndef __IP_SET_IPHASH_H
+#define __IP_SET_IPHASH_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_hashes.h>
+
+#define SETTYPE_NAME "iphash"
-+#define MAX_RANGE 0x0000FFFF
+
+struct ip_set_iphash {
+ ip_set_ip_t *members; /* the iphash proper */
@@ -534,7 +1174,7 @@
+ uint16_t probes; /* max number of probes */
+ uint16_t resize; /* resize factor in percent */
+ ip_set_ip_t netmask; /* netmask */
-+ void *initval[0]; /* initvals for jhash_1word */
++ initval_t initval[0]; /* initvals for jhash_1word */
+};
+
+struct ip_set_req_iphash_create {
@@ -551,14 +1191,14 @@
+#endif /* __IP_SET_IPHASH_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_ipmap.h
-@@ -0,0 +1,56 @@
+@@ -0,0 +1,57 @@
+#ifndef __IP_SET_IPMAP_H
+#define __IP_SET_IPMAP_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
+
+#define SETTYPE_NAME "ipmap"
-+#define MAX_RANGE 0x0000FFFF
+
+struct ip_set_ipmap {
+ void *members; /* the ipmap proper */
@@ -567,6 +1207,7 @@
+ ip_set_ip_t netmask; /* subnet netmask */
+ ip_set_ip_t sizeid; /* size of set in IPs */
+ ip_set_ip_t hosts; /* number of hosts in a subnet */
++ u_int32_t size; /* size of the ipmap proper */
+};
+
+struct ip_set_req_ipmap_create {
@@ -579,46 +1220,45 @@
+ ip_set_ip_t ip;
+};
+
-+unsigned int
++static inline unsigned int
+mask_to_bits(ip_set_ip_t mask)
+{
+ unsigned int bits = 32;
+ ip_set_ip_t maskaddr;
-+
++
+ if (mask == 0xFFFFFFFF)
+ return bits;
-+
++
+ maskaddr = 0xFFFFFFFE;
-+ while (--bits >= 0 && maskaddr != mask)
++ while (--bits > 0 && maskaddr != mask)
+ maskaddr <<= 1;
-+
++
+ return bits;
+}
+
-+ip_set_ip_t
++static inline ip_set_ip_t
+range_to_mask(ip_set_ip_t from, ip_set_ip_t to, unsigned int *bits)
+{
+ ip_set_ip_t mask = 0xFFFFFFFE;
-+
++
+ *bits = 32;
-+ while (--(*bits) >= 0 && mask && (to & mask) != from)
++ while (--(*bits) > 0 && mask && (to & mask) != from)
+ mask <<= 1;
-+
++
+ return mask;
+}
-+
++
+#endif /* __IP_SET_IPMAP_H */
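/*
 * Illustrative check (not part of the patch): mask_to_bits() and
 * range_to_mask() above (as rewritten here with "--bits > 0" loop guards
 * and static inline linkage) convert between an IP range and a netmask.
 * Both helpers are copied verbatim into a userspace sketch; ip_set_ip_t is
 * the uint32_t declared in ip_set.h.
 */
#include <assert.h>
#include <stdint.h>

typedef uint32_t ip_set_ip_t;

static inline unsigned int
mask_to_bits(ip_set_ip_t mask)
{
	unsigned int bits = 32;
	ip_set_ip_t maskaddr;

	if (mask == 0xFFFFFFFF)
		return bits;

	maskaddr = 0xFFFFFFFE;
	while (--bits > 0 && maskaddr != mask)
		maskaddr <<= 1;

	return bits;
}

static inline ip_set_ip_t
range_to_mask(ip_set_ip_t from, ip_set_ip_t to, unsigned int *bits)
{
	ip_set_ip_t mask = 0xFFFFFFFE;

	*bits = 32;
	while (--(*bits) > 0 && mask && (to & mask) != from)
		mask <<= 1;

	return mask;
}

int main(void)
{
	unsigned int bits;
	ip_set_ip_t mask;

	/* 192.168.0.0 - 192.168.0.255 collapses to a /24 */
	mask = range_to_mask(0xC0A80000, 0xC0A800FF, &bits);
	assert(mask == 0xFFFFFF00 && bits == 24);

	assert(mask_to_bits(0xFFFFFF00) == 24);
	assert(mask_to_bits(0xFFFFFFFF) == 32);
	return 0;
}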
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_ipporthash.h
-@@ -0,0 +1,34 @@
+@@ -0,0 +1,33 @@
+#ifndef __IP_SET_IPPORTHASH_H
+#define __IP_SET_IPPORTHASH_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_hashes.h>
+
+#define SETTYPE_NAME "ipporthash"
-+#define MAX_RANGE 0x0000FFFF
-+#define INVALID_PORT (MAX_RANGE + 1)
+
+struct ip_set_ipporthash {
+ ip_set_ip_t *members; /* the ipporthash proper */
@@ -628,7 +1268,7 @@
+ uint16_t resize; /* resize factor in percent */
+ ip_set_ip_t first_ip; /* host byte order, included in range */
+ ip_set_ip_t last_ip; /* host byte order, included in range */
-+ void *initval[0]; /* initvals for jhash_1word */
++ initval_t initval[0]; /* initvals for jhash_1word */
+};
+
+struct ip_set_req_ipporthash_create {
@@ -646,15 +1286,101 @@
+
+#endif /* __IP_SET_IPPORTHASH_H */
--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_ipportiphash.h
+@@ -0,0 +1,39 @@
++#ifndef __IP_SET_IPPORTIPHASH_H
++#define __IP_SET_IPPORTIPHASH_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_hashes.h>
++
++#define SETTYPE_NAME "ipportiphash"
++
++struct ipportip {
++ ip_set_ip_t ip;
++ ip_set_ip_t ip1;
++};
++
++struct ip_set_ipportiphash {
++ struct ipportip *members; /* the ipportip proper */
++ uint32_t elements; /* number of elements */
++ uint32_t hashsize; /* hash size */
++ uint16_t probes; /* max number of probes */
++ uint16_t resize; /* resize factor in percent */
++ ip_set_ip_t first_ip; /* host byte order, included in range */
++ ip_set_ip_t last_ip; /* host byte order, included in range */
++ initval_t initval[0]; /* initvals for jhash_1word */
++};
++
++struct ip_set_req_ipportiphash_create {
++ uint32_t hashsize;
++ uint16_t probes;
++ uint16_t resize;
++ ip_set_ip_t from;
++ ip_set_ip_t to;
++};
++
++struct ip_set_req_ipportiphash {
++ ip_set_ip_t ip;
++ ip_set_ip_t port;
++ ip_set_ip_t ip1;
++};
++
++#endif /* __IP_SET_IPPORTIPHASH_H */
+--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_ipportnethash.h
+@@ -0,0 +1,42 @@
++#ifndef __IP_SET_IPPORTNETHASH_H
++#define __IP_SET_IPPORTNETHASH_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_hashes.h>
++
++#define SETTYPE_NAME "ipportnethash"
++
++struct ipportip {
++ ip_set_ip_t ip;
++ ip_set_ip_t ip1;
++};
++
++struct ip_set_ipportnethash {
++ struct ipportip *members; /* the ipportip proper */
++ uint32_t elements; /* number of elements */
++ uint32_t hashsize; /* hash size */
++ uint16_t probes; /* max number of probes */
++ uint16_t resize; /* resize factor in percent */
++ ip_set_ip_t first_ip; /* host byte order, included in range */
++ ip_set_ip_t last_ip; /* host byte order, included in range */
++ uint8_t cidr[30]; /* CIDR sizes */
++ uint16_t nets[30]; /* nr of nets by CIDR sizes */
++ initval_t initval[0]; /* initvals for jhash_1word */
++};
++
++struct ip_set_req_ipportnethash_create {
++ uint32_t hashsize;
++ uint16_t probes;
++ uint16_t resize;
++ ip_set_ip_t from;
++ ip_set_ip_t to;
++};
++
++struct ip_set_req_ipportnethash {
++ ip_set_ip_t ip;
++ ip_set_ip_t port;
++ ip_set_ip_t ip1;
++ uint8_t cidr;
++};
++
++#endif /* __IP_SET_IPPORTNETHASH_H */
+--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_iptree.h
-@@ -0,0 +1,40 @@
+@@ -0,0 +1,39 @@
+#ifndef __IP_SET_IPTREE_H
+#define __IP_SET_IPTREE_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
+
+#define SETTYPE_NAME "iptree"
-+#define MAX_RANGE 0x0000FFFF
+
+struct ip_set_iptreed {
+ unsigned long expires[256]; /* x.x.x.ADDR */
@@ -726,172 +1452,181 @@
+};
+
+struct ip_set_req_iptreemap {
-+ ip_set_ip_t start;
++ ip_set_ip_t ip;
+ ip_set_ip_t end;
+};
+
+#endif /* __IP_SET_IPTREEMAP_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_jhash.h
-@@ -0,0 +1,148 @@
-+#ifndef _LINUX_IPSET_JHASH_H
-+#define _LINUX_IPSET_JHASH_H
-+
-+/* This is a copy of linux/jhash.h but the types u32/u8 are changed
-+ * to __u32/__u8 so that the header file can be included into
-+ * userspace code as well. Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
-+ */
+@@ -0,0 +1,157 @@
++#ifndef _LINUX_JHASH_H
++#define _LINUX_JHASH_H
+
+/* jhash.h: Jenkins hash support.
+ *
-+ * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)
++ * Copyright (C) 2006. Bob Jenkins (bob_jenkins@burtleburtle.net)
+ *
+ * http://burtleburtle.net/bob/hash/
+ *
+ * These are the credits from Bob's sources:
+ *
-+ * lookup2.c, by Bob Jenkins, December 1996, Public Domain.
-+ * hash(), hash2(), hash3, and mix() are externally useful functions.
-+ * Routines to test the hash are included if SELF_TEST is defined.
-+ * You can use this free for any purpose. It has no warranty.
++ * lookup3.c, by Bob Jenkins, May 2006, Public Domain.
+ *
-+ * Copyright (C) 2003 David S. Miller (davem@redhat.com)
++ * These are functions for producing 32-bit hashes for hash table lookup.
++ * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
++ * are externally useful functions. Routines to test the hash are included
++ * if SELF_TEST is defined. You can use this free for any purpose. It's in
++ * the public domain. It has no warranty.
++ *
++ * Copyright (C) 2009 Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
+ *
+ * I've modified Bob's hash to be useful in the Linux kernel, and
-+ * any bugs present are surely my fault. -DaveM
++ * any bugs present are my fault. Jozsef
+ */
+
-+/* NOTE: Arguments are modified. */
-+#define __jhash_mix(a, b, c) \
++#define __rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))
++
++/* __jhash_mix - mix 3 32-bit values reversibly. */
++#define __jhash_mix(a,b,c) \
+{ \
-+ a -= b; a -= c; a ^= (c>>13); \
-+ b -= c; b -= a; b ^= (a<<8); \
-+ c -= a; c -= b; c ^= (b>>13); \
-+ a -= b; a -= c; a ^= (c>>12); \
-+ b -= c; b -= a; b ^= (a<<16); \
-+ c -= a; c -= b; c ^= (b>>5); \
-+ a -= b; a -= c; a ^= (c>>3); \
-+ b -= c; b -= a; b ^= (a<<10); \
-+ c -= a; c -= b; c ^= (b>>15); \
++ a -= c; a ^= __rot(c, 4); c += b; \
++ b -= a; b ^= __rot(a, 6); a += c; \
++ c -= b; c ^= __rot(b, 8); b += a; \
++ a -= c; a ^= __rot(c,16); c += b; \
++ b -= a; b ^= __rot(a,19); a += c; \
++ c -= b; c ^= __rot(b, 4); b += a; \
++}
++
++/* __jhash_final - final mixing of 3 32-bit values (a,b,c) into c */
++#define __jhash_final(a,b,c) \
++{ \
++ c ^= b; c -= __rot(b,14); \
++ a ^= c; a -= __rot(c,11); \
++ b ^= a; b -= __rot(a,25); \
++ c ^= b; c -= __rot(b,16); \
++ a ^= c; a -= __rot(c,4); \
++ b ^= a; b -= __rot(a,14); \
++ c ^= b; c -= __rot(b,24); \
+}
+
+/* The golden ration: an arbitrary value */
-+#define JHASH_GOLDEN_RATIO 0x9e3779b9
++#define JHASH_GOLDEN_RATIO 0xdeadbeef
+
+/* The most generic version, hashes an arbitrary sequence
+ * of bytes. No alignment or length assumptions are made about
-+ * the input key.
++ * the input key. The result depends on endianness.
+ */
-+static inline __u32 jhash(void *key, __u32 length, __u32 initval)
++static inline u32 jhash(const void *key, u32 length, u32 initval)
+{
-+ __u32 a, b, c, len;
-+ __u8 *k = key;
-+
-+ len = length;
-+ a = b = JHASH_GOLDEN_RATIO;
-+ c = initval;
++ u32 a,b,c;
++ const u8 *k = key;
+
-+ while (len >= 12) {
-+ a += (k[0] +((__u32)k[1]<<8) +((__u32)k[2]<<16) +((__u32)k[3]<<24));
-+ b += (k[4] +((__u32)k[5]<<8) +((__u32)k[6]<<16) +((__u32)k[7]<<24));
-+ c += (k[8] +((__u32)k[9]<<8) +((__u32)k[10]<<16)+((__u32)k[11]<<24));
-+
-+ __jhash_mix(a,b,c);
++ /* Set up the internal state */
++ a = b = c = JHASH_GOLDEN_RATIO + length + initval;
+
++ /* all but the last block: affect some 32 bits of (a,b,c) */
++ while (length > 12) {
++ a += (k[0] + ((u32)k[1]<<8) + ((u32)k[2]<<16) + ((u32)k[3]<<24));
++ b += (k[4] + ((u32)k[5]<<8) + ((u32)k[6]<<16) + ((u32)k[7]<<24));
++ c += (k[8] + ((u32)k[9]<<8) + ((u32)k[10]<<16) + ((u32)k[11]<<24));
++ __jhash_mix(a, b, c);
++ length -= 12;
+ k += 12;
-+ len -= 12;
+ }
+
-+ c += length;
-+ switch (len) {
-+ case 11: c += ((__u32)k[10]<<24);
-+ case 10: c += ((__u32)k[9]<<16);
-+ case 9 : c += ((__u32)k[8]<<8);
-+ case 8 : b += ((__u32)k[7]<<24);
-+ case 7 : b += ((__u32)k[6]<<16);
-+ case 6 : b += ((__u32)k[5]<<8);
++ /* last block: affect all 32 bits of (c) */
++ /* all the case statements fall through */
++ switch (length) {
++ case 12: c += (u32)k[11]<<24;
++ case 11: c += (u32)k[10]<<16;
++ case 10: c += (u32)k[9]<<8;
++ case 9 : c += k[8];
++ case 8 : b += (u32)k[7]<<24;
++ case 7 : b += (u32)k[6]<<16;
++ case 6 : b += (u32)k[5]<<8;
+ case 5 : b += k[4];
-+ case 4 : a += ((__u32)k[3]<<24);
-+ case 3 : a += ((__u32)k[2]<<16);
-+ case 2 : a += ((__u32)k[1]<<8);
++ case 4 : a += (u32)k[3]<<24;
++ case 3 : a += (u32)k[2]<<16;
++ case 2 : a += (u32)k[1]<<8;
+ case 1 : a += k[0];
-+ };
-+
-+ __jhash_mix(a,b,c);
++ __jhash_final(a, b, c);
++ case 0 :
++ break;
++ }
+
+ return c;
+}
+
-+/* A special optimized version that handles 1 or more of __u32s.
-+ * The length parameter here is the number of __u32s in the key.
++/* A special optimized version that handles 1 or more of u32s.
++ * The length parameter here is the number of u32s in the key.
+ */
-+static inline __u32 jhash2(__u32 *k, __u32 length, __u32 initval)
++static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
+{
-+ __u32 a, b, c, len;
++ u32 a, b, c;
+
-+ a = b = JHASH_GOLDEN_RATIO;
-+ c = initval;
-+ len = length;
++ /* Set up the internal state */
++ a = b = c = JHASH_GOLDEN_RATIO + (length<<2) + initval;
+
-+ while (len >= 3) {
++ /* handle most of the key */
++ while (length > 3) {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ __jhash_mix(a, b, c);
-+ k += 3; len -= 3;
++ length -= 3;
++ k += 3;
+ }
+
-+ c += length * 4;
-+
-+ switch (len) {
-+ case 2 : b += k[1];
-+ case 1 : a += k[0];
-+ };
-+
-+ __jhash_mix(a,b,c);
++ /* handle the last 3 u32's */
++ /* all the case statements fall through */
++ switch (length) {
++ case 3: c += k[2];
++ case 2: b += k[1];
++ case 1: a += k[0];
++ __jhash_final(a, b, c);
++ case 0: /* case 0: nothing left to add */
++ break;
++ }
+
+ return c;
+}
+
-+
+/* A special ultra-optimized versions that knows they are hashing exactly
+ * 3, 2 or 1 word(s).
-+ *
-+ * NOTE: In partilar the "c += length; __jhash_mix(a,b,c);" normally
-+ * done at the end is not done here.
+ */
-+static inline __u32 jhash_3words(__u32 a, __u32 b, __u32 c, __u32 initval)
++static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
+{
-+ a += JHASH_GOLDEN_RATIO;
-+ b += JHASH_GOLDEN_RATIO;
-+ c += initval;
++ a += JHASH_GOLDEN_RATIO + initval;
++ b += JHASH_GOLDEN_RATIO + initval;
++ c += JHASH_GOLDEN_RATIO + initval;
+
-+ __jhash_mix(a, b, c);
++ __jhash_final(a, b, c);
+
+ return c;
+}
+
-+static inline __u32 jhash_2words(__u32 a, __u32 b, __u32 initval)
++static inline u32 jhash_2words(u32 a, u32 b, u32 initval)
+{
-+ return jhash_3words(a, b, 0, initval);
++ return jhash_3words(0, a, b, initval);
+}
+
-+static inline __u32 jhash_1word(__u32 a, __u32 initval)
++static inline u32 jhash_1word(u32 a, u32 initval)
+{
-+ return jhash_3words(a, 0, 0, initval);
++ return jhash_3words(0, 0, a, initval);
+}
+
-+#endif /* _LINUX_IPSET_JHASH_H */
++#endif /* _LINUX_JHASH_H */
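/*
 * Illustrative sketch (not part of the patch): how the hash set types use
 * the lookup3-based jhash_1word() above to pick candidate slots, in the
 * spirit of the jhash_ip() macro from ip_set.h.  The "% hashsize" reduction,
 * the probe count and the initvals below are assumptions for the example;
 * in the kernel the initvals come from get_random_bytes() at create time.
 * Only the pieces needed by jhash_1word() are copied from the header.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

#define __rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))

#define __jhash_final(a,b,c) \
{ \
	c ^= b; c -= __rot(b,14); \
	a ^= c; a -= __rot(c,11); \
	b ^= a; b -= __rot(a,25); \
	c ^= b; c -= __rot(b,16); \
	a ^= c; a -= __rot(c,4); \
	b ^= a; b -= __rot(a,14); \
	c ^= b; c -= __rot(b,24); \
}

#define JHASH_GOLDEN_RATIO 0xdeadbeef

static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
{
	a += JHASH_GOLDEN_RATIO + initval;
	b += JHASH_GOLDEN_RATIO + initval;
	c += JHASH_GOLDEN_RATIO + initval;
	__jhash_final(a, b, c);
	return c;
}

static inline u32 jhash_1word(u32 a, u32 initval)
{
	return jhash_3words(0, 0, a, initval);
}

int main(void)
{
	u32 initval[2] = { 0x12345678, 0x9abcdef0 };	/* made-up seeds */
	u32 hashsize = 1024;				/* made-up table size */
	u32 ip = 0xC0A80001;				/* 192.168.0.1, host order */
	unsigned int i;

	for (i = 0; i < 2; i++)
		printf("probe %u -> slot %u\n", i,
		       jhash_1word(ip, initval[i]) % hashsize);
	return 0;
}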
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_macipmap.h
-@@ -0,0 +1,38 @@
+@@ -0,0 +1,39 @@
+#ifndef __IP_SET_MACIPMAP_H
+#define __IP_SET_MACIPMAP_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
+
+#define SETTYPE_NAME "macipmap"
-+#define MAX_RANGE 0x0000FFFF
+
+/* general flags */
+#define IPSET_MACIP_MATCHUNSET 1
@@ -904,6 +1639,7 @@
+ ip_set_ip_t first_ip; /* host byte order, included in range */
+ ip_set_ip_t last_ip; /* host byte order, included in range */
+ u_int32_t flags;
++ u_int32_t size; /* size of the ipmap proper */
+};
+
+struct ip_set_req_macipmap_create {
@@ -918,43 +1654,48 @@
+};
+
+struct ip_set_macip {
-+ unsigned short flags;
++ unsigned short match;
+ unsigned char ethernet[ETH_ALEN];
+};
+
+#endif /* __IP_SET_MACIPMAP_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_malloc.h
-@@ -0,0 +1,116 @@
+@@ -0,0 +1,153 @@
+#ifndef _IP_SET_MALLOC_H
+#define _IP_SET_MALLOC_H
+
+#ifdef __KERNEL__
++#include <linux/vmalloc.h>
+
-+/* Memory allocation and deallocation */
-+static size_t max_malloc_size = 0;
++static size_t max_malloc_size = 0, max_page_size = 0;
++static size_t default_max_malloc_size = 131072; /* Guaranteed: slab.c */
+
-+static inline void init_max_malloc_size(void)
++static inline int init_max_page_size(void)
+{
-+#define CACHE(x) max_malloc_size = x;
++/* Compatibility glues to support 2.4.36 */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++#define __GFP_NOWARN 0
++
++ /* Guaranteed: slab.c */
++ max_malloc_size = max_page_size = default_max_malloc_size;
++#else
++ size_t page_size = 0;
++
++#define CACHE(x) if (max_page_size == 0 || x < max_page_size) \
++ page_size = x;
+#include <linux/kmalloc_sizes.h>
+#undef CACHE
-+}
++ if (page_size) {
++ if (max_malloc_size == 0)
++ max_malloc_size = page_size;
+
-+static inline void * ip_set_malloc(size_t bytes)
-+{
-+ if (bytes > max_malloc_size)
-+ return vmalloc(bytes);
-+ else
-+ return kmalloc(bytes, GFP_KERNEL);
-+}
++ max_page_size = page_size;
+
-+static inline void ip_set_free(void * data, size_t bytes)
-+{
-+ if (bytes > max_malloc_size)
-+ vfree(data);
-+ else
-+ kfree(data);
++ return 1;
++ }
++#endif
++ return 0;
+}
+
+struct harray {
@@ -962,37 +1703,36 @@
+ void *arrays[0];
+};
+
-+static inline void *
-+harray_malloc(size_t hashsize, size_t typesize, int flags)
++static inline void *
++__harray_malloc(size_t hashsize, size_t typesize, gfp_t flags)
+{
+ struct harray *harray;
+ size_t max_elements, size, i, j;
+
-+ if (!max_malloc_size)
-+ init_max_malloc_size();
++ BUG_ON(max_page_size == 0);
+
-+ if (typesize > max_malloc_size)
++ if (typesize > max_page_size)
+ return NULL;
+
-+ max_elements = max_malloc_size/typesize;
++ max_elements = max_page_size/typesize;
+ size = hashsize/max_elements;
+ if (hashsize % max_elements)
+ size++;
-+
++
+ /* Last pointer signals end of arrays */
+ harray = kmalloc(sizeof(struct harray) + (size + 1) * sizeof(void *),
+ flags);
+
+ if (!harray)
+ return NULL;
-+
++
+ for (i = 0; i < size - 1; i++) {
+ harray->arrays[i] = kmalloc(max_elements * typesize, flags);
+ if (!harray->arrays[i])
+ goto undo;
+ memset(harray->arrays[i], 0, max_elements * typesize);
+ }
-+ harray->arrays[i] = kmalloc((hashsize - i * max_elements) * typesize,
++ harray->arrays[i] = kmalloc((hashsize - i * max_elements) * typesize,
+ flags);
+ if (!harray->arrays[i])
+ goto undo;
@@ -1000,7 +1740,7 @@
+
+ harray->max_elements = max_elements;
+ harray->arrays[size] = NULL;
-+
++
+ return (void *)harray;
+
+ undo:
@@ -1011,11 +1751,23 @@
+ return NULL;
+}
+
++static inline void *
++harray_malloc(size_t hashsize, size_t typesize, gfp_t flags)
++{
++ void *harray;
++
++ do {
++ harray = __harray_malloc(hashsize, typesize, flags|__GFP_NOWARN);
++ } while (harray == NULL && init_max_page_size());
++
++ return harray;
++}
++
+static inline void harray_free(void *h)
+{
+ struct harray *harray = (struct harray *) h;
+ size_t i;
-+
++
+ for (i = 0; harray->arrays[i] != NULL; i++)
+ kfree(harray->arrays[i]);
+ kfree(harray);
@@ -1025,10 +1777,10 @@
+{
+ struct harray *harray = (struct harray *) h;
+ size_t i;
-+
++
+ for (i = 0; harray->arrays[i+1] != NULL; i++)
+ memset(harray->arrays[i], 0, harray->max_elements * typesize);
-+ memset(harray->arrays[i], 0,
++ memset(harray->arrays[i], 0,
+ (hashsize - i * harray->max_elements) * typesize);
+}
+
@@ -1039,19 +1791,40 @@
+ + (which)%(__h)->max_elements); \
+})
+
++/* General memory allocation and deallocation */
++static inline void * ip_set_malloc(size_t bytes)
++{
++ BUG_ON(max_malloc_size == 0);
++
++ if (bytes > default_max_malloc_size)
++ return vmalloc(bytes);
++ else
++ return kmalloc(bytes, GFP_KERNEL | __GFP_NOWARN);
++}
++
++static inline void ip_set_free(void * data, size_t bytes)
++{
++ BUG_ON(max_malloc_size == 0);
++
++ if (bytes > default_max_malloc_size)
++ vfree(data);
++ else
++ kfree(data);
++}
++
+#endif /* __KERNEL__ */
+
+#endif /*_IP_SET_MALLOC_H*/
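/*
 * Illustrative sketch (not part of the patch): the harray above splits a
 * large hash table into kmalloc-able chunks of at most
 * max_page_size / typesize entries, and HARRAY_ELEM() locates entry "which"
 * as arrays[which / max_elements] + which % max_elements.  This userspace
 * mimic fixes the chunk size to a made-up constant purely to show the
 * indexing arithmetic; the real code sizes the last chunk exactly and keeps
 * max_elements inside struct harray.
 */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define MAX_ELEMENTS 2048	/* stand-in for max_page_size / typesize */
#define HASHSIZE 10000
#define NCHUNKS ((HASHSIZE + MAX_ELEMENTS - 1) / MAX_ELEMENTS)

static uint32_t *arrays[NCHUNKS];

static uint32_t *harray_elem(size_t which)
{
	/* Same arithmetic as HARRAY_ELEM(h, uint32_t *, which) */
	return arrays[which / MAX_ELEMENTS] + which % MAX_ELEMENTS;
}

int main(void)
{
	size_t i;

	for (i = 0; i < NCHUNKS; i++) {
		arrays[i] = calloc(MAX_ELEMENTS, sizeof(uint32_t));
		assert(arrays[i]);
	}

	*harray_elem(5000) = 0xC0A80001;
	assert(arrays[2][904] == 0xC0A80001);	/* 5000 = 2*2048 + 904 */

	for (i = 0; i < NCHUNKS; i++)
		free(arrays[i]);
	return 0;
}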
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_nethash.h
-@@ -0,0 +1,55 @@
+@@ -0,0 +1,31 @@
+#ifndef __IP_SET_NETHASH_H
+#define __IP_SET_NETHASH_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_hashes.h>
+
+#define SETTYPE_NAME "nethash"
-+#define MAX_RANGE 0x0000FFFF
+
+struct ip_set_nethash {
+ ip_set_ip_t *members; /* the nethash proper */
@@ -1059,8 +1832,9 @@
+ uint32_t hashsize; /* hash size */
+ uint16_t probes; /* max number of probes */
+ uint16_t resize; /* resize factor in percent */
-+ unsigned char cidr[30]; /* CIDR sizes */
-+ void *initval[0]; /* initvals for jhash_1word */
++ uint8_t cidr[30]; /* CIDR sizes */
++ uint16_t nets[30]; /* nr of nets by CIDR sizes */
++ initval_t initval[0]; /* initvals for jhash_1word */
+};
+
+struct ip_set_req_nethash_create {
@@ -1071,34 +1845,9 @@
+
+struct ip_set_req_nethash {
+ ip_set_ip_t ip;
-+ unsigned char cidr;
++ uint8_t cidr;
+};
+
-+static unsigned char shifts[] = {255, 253, 249, 241, 225, 193, 129, 1};
-+
-+static inline ip_set_ip_t
-+pack(ip_set_ip_t ip, unsigned char cidr)
-+{
-+ ip_set_ip_t addr, *paddr = &addr;
-+ unsigned char n, t, *a;
-+
-+ addr = htonl(ip & (0xFFFFFFFF << (32 - (cidr))));
-+#ifdef __KERNEL__
-+ DP("ip:%u.%u.%u.%u/%u", NIPQUAD(addr), cidr);
-+#endif
-+ n = cidr / 8;
-+ t = cidr % 8;
-+ a = &((unsigned char *)paddr)[n];
-+ *a = *a /(1 << (8 - t)) + shifts[t];
-+#ifdef __KERNEL__
-+ DP("n: %u, t: %u, a: %u", n, t, *a);
-+ DP("ip:%u.%u.%u.%u/%u, %u.%u.%u.%u",
-+ HIPQUAD(ip), cidr, NIPQUAD(addr));
-+#endif
-+
-+ return ntohl(addr);
-+}
-+
+#endif /* __IP_SET_NETHASH_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_portmap.h
@@ -1107,15 +1856,15 @@
+#define __IP_SET_PORTMAP_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
+
+#define SETTYPE_NAME "portmap"
-+#define MAX_RANGE 0x0000FFFF
-+#define INVALID_PORT (MAX_RANGE + 1)
+
+struct ip_set_portmap {
+ void *members; /* the portmap proper */
-+ ip_set_ip_t first_port; /* host byte order, included in range */
-+ ip_set_ip_t last_port; /* host byte order, included in range */
++ ip_set_ip_t first_ip; /* host byte order, included in range */
++ ip_set_ip_t last_ip; /* host byte order, included in range */
++ u_int32_t size; /* size of the ipmap proper */
+};
+
+struct ip_set_req_portmap_create {
@@ -1124,11 +1873,40 @@
+};
+
+struct ip_set_req_portmap {
-+ ip_set_ip_t port;
++ ip_set_ip_t ip;
+};
+
+#endif /* __IP_SET_PORTMAP_H */
--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_setlist.h
+@@ -0,0 +1,26 @@
++#ifndef __IP_SET_SETLIST_H
++#define __IP_SET_SETLIST_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "setlist"
++
++#define IP_SET_SETLIST_ADD_AFTER 0
++#define IP_SET_SETLIST_ADD_BEFORE 1
++
++struct ip_set_setlist {
++ uint8_t size;
++ ip_set_id_t index[0];
++};
++
++struct ip_set_req_setlist_create {
++ uint8_t size;
++};
++
++struct ip_set_req_setlist {
++ char name[IP_SET_MAXNAMELEN];
++ char ref[IP_SET_MAXNAMELEN];
++ uint8_t before;
++};
++
++#endif /* __IP_SET_SETLIST_H */
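/*
 * Illustrative sketch (not part of the patch): the setlist type stores the
 * kernel-side indexes of up to "size" member sets; userspace addresses the
 * members by name via ip_set_req_setlist, with "before" selecting whether
 * "name" is inserted before or after the reference set "ref".  The struct
 * and constants are copied from the header; the surrounding sockopt framing
 * added by the ipset binary is not shown, and the set names are made up.
 */
#include <stdint.h>
#include <string.h>

#define IP_SET_MAXNAMELEN 32
#define IP_SET_SETLIST_ADD_AFTER  0
#define IP_SET_SETLIST_ADD_BEFORE 1

struct ip_set_req_setlist {
	char name[IP_SET_MAXNAMELEN];
	char ref[IP_SET_MAXNAMELEN];
	uint8_t before;
};

int main(void)
{
	struct ip_set_req_setlist req;

	memset(&req, 0, sizeof(req));
	strncpy(req.name, "blacklist", IP_SET_MAXNAMELEN - 1);
	strncpy(req.ref, "whitelist", IP_SET_MAXNAMELEN - 1);
	req.before = IP_SET_SETLIST_ADD_BEFORE;
	return 0;
}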
+--- /dev/null
+++ b/include/linux/netfilter_ipv4/ipt_set.h
@@ -0,0 +1,21 @@
+#ifndef _IPT_SET_H
@@ -1154,7 +1932,7 @@
+#endif /*_IPT_SET_H*/
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set.c
-@@ -0,0 +1,2003 @@
+@@ -0,0 +1,2076 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
@@ -1176,17 +1954,21 @@
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/random.h>
-+#include <linux/jhash.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
+#include <linux/errno.h>
-+#include <linux/semaphore.h>
++#include <linux/capability.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
++#include <asm/semaphore.h>
++#else
++#include <linux/semaphore.h>
++#endif
+#include <linux/spinlock.h>
-+#include <linux/vmalloc.h>
+
+#define ASSERT_READ_LOCK(x)
+#define ASSERT_WRITE_LOCK(x)
++#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4/ip_set.h>
+
+static struct list_head set_type_list; /* all registered sets */
@@ -1198,6 +1980,8 @@
+static struct list_head *ip_set_hash; /* hash of bindings */
+static unsigned int ip_set_hash_random; /* random seed */
+
++#define SETNAME_EQ(a,b) (strncmp(a,b,IP_SET_MAXNAMELEN) == 0)
++
+/*
+ * Sets are identified either by the index in ip_set_list or by id.
+ * The id never changes and is used to find a key in the hash.
@@ -1236,7 +2020,7 @@
+ list_for_each_entry(set_hash, &ip_set_hash[key], list)
+ if (set_hash->id == id && set_hash->ip == ip)
+ return set_hash;
-+
++
+ return NULL;
+}
+
@@ -1249,10 +2033,10 @@
+
+ ASSERT_READ_LOCK(&ip_set_lock);
+ IP_SET_ASSERT(ip_set_list[id]);
-+ DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
-+
++ DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
++
+ set_hash = __ip_set_find(key, id, ip);
-+
++
+ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
+ HIPQUAD(ip),
+ set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
@@ -1264,7 +2048,7 @@
+__set_hash_del(struct ip_set_hash *set_hash)
+{
+ ASSERT_WRITE_LOCK(&ip_set_lock);
-+ IP_SET_ASSERT(ip_set_list[set_hash->binding]);
++ IP_SET_ASSERT(ip_set_list[set_hash->binding]);
+
+ __ip_set_put(set_hash->binding);
+ list_del(&set_hash->list);
@@ -1277,9 +2061,9 @@
+ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
+ % ip_set_bindings_hash_size;
+ struct ip_set_hash *set_hash;
-+
++
+ IP_SET_ASSERT(ip_set_list[id]);
-+ DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
++ DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
+ write_lock_bh(&ip_set_lock);
+ set_hash = __ip_set_find(key, id, ip);
+ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
@@ -1288,7 +2072,7 @@
+
+ if (set_hash != NULL)
+ __set_hash_del(set_hash);
-+ write_unlock_bh(&ip_set_lock);
++ write_unlock_bh(&ip_set_lock);
+ return 0;
+}
+
@@ -1299,7 +2083,7 @@
+ % ip_set_bindings_hash_size;
+ struct ip_set_hash *set_hash;
+ int ret = 0;
-+
++
+ IP_SET_ASSERT(ip_set_list[id]);
+ IP_SET_ASSERT(ip_set_list[binding]);
+ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
@@ -1317,7 +2101,7 @@
+ set_hash->ip = ip;
+ list_add(&set_hash->list, &ip_set_hash[key]);
+ } else {
-+ IP_SET_ASSERT(ip_set_list[set_hash->binding]);
++ IP_SET_ASSERT(ip_set_list[set_hash->binding]);
+ DP("overwrite binding: %s",
+ ip_set_list[set_hash->binding]->name);
+ __ip_set_put(set_hash->binding);
@@ -1370,7 +2154,7 @@
+ ip_set_ip_t ip;
+ int res;
+ unsigned char i = 0;
-+
++
+ IP_SET_ASSERT(flags[i]);
+ read_lock_bh(&ip_set_lock);
+ do {
@@ -1386,10 +2170,10 @@
+ && follow_bindings(index, set, ip));
+ read_unlock_bh(&ip_set_lock);
+
-+ return res;
++ return (res < 0 ? 0 : res);
+}
+
-+void
++int
+ip_set_addip_kernel(ip_set_id_t index,
+ const struct sk_buff *skb,
+ const u_int32_t *flags)
@@ -1419,9 +2203,11 @@
+ && set->type->retry
+ && (res = set->type->retry(set)) == 0)
+ goto retry;
++
++ return res;
+}
+
-+void
++int
+ip_set_delip_kernel(ip_set_id_t index,
+ const struct sk_buff *skb,
+ const u_int32_t *flags)
@@ -1445,6 +2231,8 @@
+ && flags[i]
+ && follow_bindings(index, set, ip));
+ read_unlock_bh(&ip_set_lock);
++
++ return res;
+}
+
+/* Register and deregister settype */
@@ -1464,7 +2252,7 @@
+ip_set_register_set_type(struct ip_set_type *set_type)
+{
+ int ret = 0;
-+
++
+ if (set_type->protocol_version != IP_SET_PROTOCOL_VERSION) {
+ ip_set_printk("'%s' uses wrong protocol version %u (want %u)",
+ set_type->typename,
@@ -1509,6 +2297,29 @@
+
+}
+
++ip_set_id_t
++__ip_set_get_byname(const char *name, struct ip_set **set)
++{
++ ip_set_id_t i, index = IP_SET_INVALID_ID;
++
++ for (i = 0; i < ip_set_max; i++) {
++ if (ip_set_list[i] != NULL
++ && SETNAME_EQ(ip_set_list[i]->name, name)) {
++ __ip_set_get(i);
++ index = i;
++ *set = ip_set_list[i];
++ break;
++ }
++ }
++ return index;
++}
++
++void __ip_set_put_byindex(ip_set_id_t index)
++{
++ if (ip_set_list[index])
++ __ip_set_put(index);
++}
++
+/*
+ * Userspace routines
+ */
@@ -1522,11 +2333,11 @@
+ip_set_get_byname(const char *name)
+{
+ ip_set_id_t i, index = IP_SET_INVALID_ID;
-+
++
+ down(&ip_set_app_mutex);
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL
-+ && strcmp(ip_set_list[i]->name, name) == 0) {
++ && SETNAME_EQ(ip_set_list[i]->name, name)) {
+ __ip_set_get(i);
+ index = i;
+ break;
@@ -1548,22 +2359,36 @@
+
+ if (index >= ip_set_max)
+ return IP_SET_INVALID_ID;
-+
++
+ if (ip_set_list[index])
+ __ip_set_get(index);
+ else
+ index = IP_SET_INVALID_ID;
-+
++
+ up(&ip_set_app_mutex);
+ return index;
+}
+
+/*
++ * Find the set id belonging to the index.
++ * We are protected by the mutex, so we do not need to use
++ * ip_set_lock. There is no need to reference the sets either.
++ */
++ip_set_id_t
++ip_set_id(ip_set_id_t index)
++{
++ if (index >= ip_set_max || !ip_set_list[index])
++ return IP_SET_INVALID_ID;
++
++ return ip_set_list[index]->id;
++}
++
++/*
+ * If the given set pointer points to a valid set, decrement
+ * reference count by 1. The caller shall not assume the index
+ * to be valid, after calling this function.
+ */
-+void ip_set_put(ip_set_id_t index)
++void ip_set_put_byindex(ip_set_id_t index)
+{
+ down(&ip_set_app_mutex);
+ if (ip_set_list[index])
@@ -1576,10 +2401,10 @@
+ip_set_find_byname(const char *name)
+{
+ ip_set_id_t i, index = IP_SET_INVALID_ID;
-+
++
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL
-+ && strcmp(ip_set_list[i]->name, name) == 0) {
++ && SETNAME_EQ(ip_set_list[i]->name, name)) {
+ index = i;
+ break;
+ }
@@ -1592,7 +2417,7 @@
+{
+ if (index >= ip_set_max || ip_set_list[index] == NULL)
+ index = IP_SET_INVALID_ID;
-+
++
+ return index;
+}
+
@@ -1603,7 +2428,7 @@
+static inline int
+__ip_set_testip(struct ip_set *set,
+ const void *data,
-+ size_t size,
++ u_int32_t size,
+ ip_set_ip_t *ip)
+{
+ int res;
@@ -1618,12 +2443,12 @@
+static int
+__ip_set_addip(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set = ip_set_list[index];
+ ip_set_ip_t ip;
+ int res;
-+
++
+ IP_SET_ASSERT(set);
+ do {
+ write_lock_bh(&set->lock);
@@ -1639,9 +2464,18 @@
+static int
+ip_set_addip(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
++ struct ip_set *set = ip_set_list[index];
++
++ IP_SET_ASSERT(set);
+
++ if (size - sizeof(struct ip_set_req_adt) != set->type->reqsize) {
++ ip_set_printk("data length wrong (want %lu, have %zu)",
++ (long unsigned)set->type->reqsize,
++ size - sizeof(struct ip_set_req_adt));
++ return -EINVAL;
++ }
+ return __ip_set_addip(index,
+ data + sizeof(struct ip_set_req_adt),
+ size - sizeof(struct ip_set_req_adt));
@@ -1650,13 +2484,20 @@
+static int
+ip_set_delip(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set = ip_set_list[index];
+ ip_set_ip_t ip;
+ int res;
-+
++
+ IP_SET_ASSERT(set);
++
++ if (size - sizeof(struct ip_set_req_adt) != set->type->reqsize) {
++ ip_set_printk("data length wrong (want %lu, have %zu)",
++ (long unsigned)set->type->reqsize,
++ size - sizeof(struct ip_set_req_adt));
++ return -EINVAL;
++ }
+ write_lock_bh(&set->lock);
+ res = set->type->delip(set,
+ data + sizeof(struct ip_set_req_adt),
@@ -1670,13 +2511,20 @@
+static int
+ip_set_testip(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set = ip_set_list[index];
+ ip_set_ip_t ip;
+ int res;
+
+ IP_SET_ASSERT(set);
++
++ if (size - sizeof(struct ip_set_req_adt) != set->type->reqsize) {
++ ip_set_printk("data length wrong (want %lu, have %zu)",
++ (long unsigned)set->type->reqsize,
++ size - sizeof(struct ip_set_req_adt));
++ return -EINVAL;
++ }
+ res = __ip_set_testip(set,
+ data + sizeof(struct ip_set_req_adt),
+ size - sizeof(struct ip_set_req_adt),
@@ -1688,10 +2536,10 @@
+static int
+ip_set_bindip(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set = ip_set_list[index];
-+ struct ip_set_req_bind *req_bind;
++ const struct ip_set_req_bind *req_bind;
+ ip_set_id_t binding;
+ ip_set_ip_t ip;
+ int res;
@@ -1699,19 +2547,17 @@
+ IP_SET_ASSERT(set);
+ if (size < sizeof(struct ip_set_req_bind))
+ return -EINVAL;
++
++ req_bind = data;
+
-+ req_bind = (struct ip_set_req_bind *) data;
-+ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
-+
-+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++ if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
+ /* Default binding of a set */
-+ char *binding_name;
-+
++ const char *binding_name;
++
+ if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
+ return -EINVAL;
+
-+ binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
-+ binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
++ binding_name = data + sizeof(struct ip_set_req_bind);
+
+ binding = ip_set_find_byname(binding_name);
+ if (binding == IP_SET_INVALID_ID)
@@ -1737,7 +2583,7 @@
+ &ip);
+ DP("set %s, ip: %u.%u.%u.%u, binding %s",
+ set->name, HIPQUAD(ip), ip_set_list[binding]->name);
-+
++
+ if (res >= 0)
+ res = ip_set_hash_add(set->id, ip, binding);
+
@@ -1776,30 +2622,29 @@
+static int
+ip_set_unbindip(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set;
-+ struct ip_set_req_bind *req_bind;
++ const struct ip_set_req_bind *req_bind;
+ ip_set_ip_t ip;
+ int res;
+
+ DP("");
+ if (size < sizeof(struct ip_set_req_bind))
+ return -EINVAL;
-+
-+ req_bind = (struct ip_set_req_bind *) data;
-+ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
-+
++
++ req_bind = data;
++
+ DP("%u %s", index, req_bind->binding);
+ if (index == IP_SET_INVALID_ID) {
+ /* unbind :all: */
-+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++ if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
+ /* Default binding of sets */
+ write_lock_bh(&ip_set_lock);
+ FOREACH_SET_DO(__unbind_default);
+ write_unlock_bh(&ip_set_lock);
+ return 0;
-+ } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
++ } else if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_ALL)) {
+ /* Flush all bindings of all sets*/
+ write_lock_bh(&ip_set_lock);
+ FOREACH_HASH_RW_DO(__set_hash_del);
@@ -1809,16 +2654,16 @@
+ DP("unreachable reached!");
+ return -EINVAL;
+ }
-+
++
+ set = ip_set_list[index];
+ IP_SET_ASSERT(set);
-+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++ if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
+ /* Default binding of set */
+ ip_set_id_t binding = ip_set_find_byindex(set->binding);
+
+ if (binding == IP_SET_INVALID_ID)
+ return -ENOENT;
-+
++
+ write_lock_bh(&ip_set_lock);
+ /* Sets in hash values are referenced */
+ __ip_set_put(set->binding);
@@ -1826,7 +2671,7 @@
+ write_unlock_bh(&ip_set_lock);
+
+ return 0;
-+ } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
++ } else if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_ALL)) {
+ /* Flush all bindings */
+
+ write_lock_bh(&ip_set_lock);
@@ -1834,7 +2679,7 @@
+ write_unlock_bh(&ip_set_lock);
+ return 0;
+ }
-+
++
+ res = __ip_set_testip(set,
+ data + sizeof(struct ip_set_req_bind),
+ size - sizeof(struct ip_set_req_bind),
@@ -1850,10 +2695,10 @@
+static int
+ip_set_testbind(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set = ip_set_list[index];
-+ struct ip_set_req_bind *req_bind;
++ const struct ip_set_req_bind *req_bind;
+ ip_set_id_t binding;
+ ip_set_ip_t ip;
+ int res;
@@ -1861,24 +2706,22 @@
+ IP_SET_ASSERT(set);
+ if (size < sizeof(struct ip_set_req_bind))
+ return -EINVAL;
++
++ req_bind = data;
+
-+ req_bind = (struct ip_set_req_bind *) data;
-+ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
-+
-+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++ if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
+ /* Default binding of set */
-+ char *binding_name;
-+
++ const char *binding_name;
++
+ if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
+ return -EINVAL;
+
-+ binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
-+ binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
++ binding_name = data + sizeof(struct ip_set_req_bind);
+
+ binding = ip_set_find_byname(binding_name);
+ if (binding == IP_SET_INVALID_ID)
+ return -ENOENT;
-+
++
+ res = (set->binding == binding) ? -EEXIST : 0;
+
+ return res;
@@ -1886,15 +2729,15 @@
+ binding = ip_set_find_byname(req_bind->binding);
+ if (binding == IP_SET_INVALID_ID)
+ return -ENOENT;
-+
-+
++
++
+ res = __ip_set_testip(set,
+ data + sizeof(struct ip_set_req_bind),
+ size - sizeof(struct ip_set_req_bind),
+ &ip);
+ DP("set %s, ip: %u.%u.%u.%u, binding %s",
+ set->name, HIPQUAD(ip), ip_set_list[binding]->name);
-+
++
+ if (res >= 0)
+ res = (ip_set_find_in_hash(set->id, ip) == binding)
+ ? -EEXIST : 0;
@@ -1906,7 +2749,7 @@
+find_set_type_rlock(const char *typename)
+{
+ struct ip_set_type *type;
-+
++
+ read_lock_bh(&ip_set_lock);
+ type = find_set_type(typename);
+ if (type == NULL)
@@ -1927,7 +2770,7 @@
+ if (ip_set_list[i] == NULL) {
+ if (*id == IP_SET_INVALID_ID)
+ *id = *index = i;
-+ } else if (strcmp(name, ip_set_list[i]->name) == 0)
++ } else if (SETNAME_EQ(name, ip_set_list[i]->name))
+ /* Name clash */
+ return -EEXIST;
+ }
@@ -1935,7 +2778,7 @@
+ /* No free slot remained */
+ return -ERANGE;
+ /* Check that index is usable as id (swapping) */
-+ check:
++ check:
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL
+ && ip_set_list[i]->id == *id) {
@@ -1954,13 +2797,14 @@
+ const char *typename,
+ ip_set_id_t restore,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set;
+ ip_set_id_t index = 0, id;
+ int res = 0;
+
+ DP("setname: %s, typename: %s, id: %u", name, typename, restore);
++
+ /*
+ * First, and without any locks, allocate and initialize
+ * a normal base set structure.
@@ -1968,7 +2812,7 @@
+ set = kmalloc(sizeof(struct ip_set), GFP_KERNEL);
+ if (!set)
+ return -ENOMEM;
-+ set->lock = RW_LOCK_UNLOCKED;
++ rwlock_init(&set->lock);
+ strncpy(set->name, name, IP_SET_MAXNAMELEN);
+ set->binding = IP_SET_INVALID_ID;
+ atomic_set(&set->ref, 0);
@@ -2004,6 +2848,14 @@
+ }
+ read_unlock_bh(&ip_set_lock);
+
++ /* Check request size */
++ if (size != set->type->header_size) {
++ ip_set_printk("data length wrong (want %lu, have %lu)",
++ (long unsigned)set->type->header_size,
++ (long unsigned)size);
++ goto put_out;
++ }
++
+ /*
+ * Without holding any locks, create private part.
+ */
@@ -2030,7 +2882,7 @@
+ res = -ERANGE;
+ goto cleanup;
+ }
-+
++
+ /*
+ * Finally! Add our shiny new set to the list, and be done.
+ */
@@ -2039,7 +2891,7 @@
+ ip_set_list[index] = set;
+ write_unlock_bh(&ip_set_lock);
+ return res;
-+
++
+ cleanup:
+ write_unlock_bh(&ip_set_lock);
+ set->type->destroy(set);
@@ -2139,9 +2991,7 @@
+ write_lock_bh(&ip_set_lock);
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL
-+ && strncmp(ip_set_list[i]->name,
-+ name,
-+ IP_SET_MAXNAMELEN - 1) == 0) {
++ && SETNAME_EQ(ip_set_list[i]->name, name)) {
+ res = -EEXIST;
+ goto unlock;
+ }
@@ -2165,11 +3015,13 @@
+ u_int32_t from_ref;
+
+ DP("set: %s to %s", from->name, to->name);
-+ /* Features must not change. Artifical restriction. */
++ /* Features must not change.
++ * Not an artificial restriction anymore, as we must prevent
++ * possible loops created by swapping in setlist type of sets. */
+ if (from->type->features != to->type->features)
+ return -ENOEXEC;
+
-+ /* No magic here: ref munging protected by the mutex */
++ /* No magic here: ref munging protected by the mutex */
+ write_lock_bh(&ip_set_lock);
+ strncpy(from_name, from->name, IP_SET_MAXNAMELEN);
+ from_ref = atomic_read(&from->ref);
@@ -2178,10 +3030,10 @@
+ atomic_set(&from->ref, atomic_read(&to->ref));
+ strncpy(to->name, from_name, IP_SET_MAXNAMELEN);
+ atomic_set(&to->ref, from_ref);
-+
++
+ ip_set_list[from_index] = to;
+ ip_set_list[to_index] = from;
-+
++
+ write_unlock_bh(&ip_set_lock);
+ return 0;
+}
@@ -2192,7 +3044,7 @@
+
+static inline void
+__set_hash_bindings_size_list(struct ip_set_hash *set_hash,
-+ ip_set_id_t id, size_t *size)
++ ip_set_id_t id, u_int32_t *size)
+{
+ if (set_hash->id == id)
+ *size += sizeof(struct ip_set_hash_list);
@@ -2200,7 +3052,7 @@
+
+static inline void
+__set_hash_bindings_size_save(struct ip_set_hash *set_hash,
-+ ip_set_id_t id, size_t *size)
++ ip_set_id_t id, u_int32_t *size)
+{
+ if (set_hash->id == id)
+ *size += sizeof(struct ip_set_hash_save);
@@ -2211,8 +3063,7 @@
+ ip_set_id_t id, void *data, int *used)
+{
+ if (set_hash->id == id) {
-+ struct ip_set_hash_list *hash_list =
-+ (struct ip_set_hash_list *)(data + *used);
++ struct ip_set_hash_list *hash_list = data + *used;
+
+ hash_list->ip = set_hash->ip;
+ hash_list->binding = set_hash->binding;
@@ -2229,7 +3080,7 @@
+ struct ip_set_list *set_list;
+
+ /* Pointer to our header */
-+ set_list = (struct ip_set_list *) (data + *used);
++ set_list = data + *used;
+
+ DP("set: %s, used: %d %p %p", set->name, *used, data, data + *used);
+
@@ -2274,7 +3125,7 @@
+
+ /* Fill in set specific bindings data */
+ FOREACH_HASH_DO(__set_hash_bindings, set->id, data, used);
-+
++
+ return 0;
+
+ unlock_set:
@@ -2296,7 +3147,7 @@
+ struct ip_set_save *set_save;
+
+ /* Pointer to our header */
-+ set_save = (struct ip_set_save *) (data + *used);
++ set_save = data + *used;
+
+ /* Get and ensure header size */
+ if (*used + sizeof(struct ip_set_save) > len)
@@ -2304,7 +3155,7 @@
+ *used += sizeof(struct ip_set_save);
+
+ set = ip_set_list[index];
-+ DP("set: %s, used: %u(%u) %p %p", set->name, *used, len,
++ DP("set: %s, used: %d(%d) %p %p", set->name, *used, len,
+ data, data + *used);
+
+ read_lock_bh(&set->lock);
@@ -2321,8 +3172,8 @@
+ set->type->list_header(set, data + *used);
+ *used += set_save->header_size;
+
-+ DP("set header filled: %s, used: %u(%u) %p %p", set->name, *used,
-+ set_save->header_size, data, data + *used);
++ DP("set header filled: %s, used: %d(%lu) %p %p", set->name, *used,
++ (unsigned long)set_save->header_size, data, data + *used);
+ /* Get and ensure set specific members size */
+ set_save->members_size = set->type->list_members_size(set);
+ if (*used + set_save->members_size > len)
@@ -2332,8 +3183,8 @@
+ set->type->list_members(set, data + *used);
+ *used += set_save->members_size;
+ read_unlock_bh(&set->lock);
-+ DP("set members filled: %s, used: %u(%u) %p %p", set->name, *used,
-+ set_save->members_size, data, data + *used);
++ DP("set members filled: %s, used: %d(%lu) %p %p", set->name, *used,
++ (unsigned long)set_save->members_size, data, data + *used);
+ return 0;
+
+ unlock_set:
@@ -2353,8 +3204,7 @@
+{
+ if (*res == 0
+ && (id == IP_SET_INVALID_ID || set_hash->id == id)) {
-+ struct ip_set_hash_save *hash_save =
-+ (struct ip_set_hash_save *)(data + *used);
++ struct ip_set_hash_save *hash_save = data + *used;
+ /* Ensure bindings size */
+ if (*used + sizeof(struct ip_set_hash_save) > len) {
+ *res = -ENOMEM;
@@ -2381,7 +3231,7 @@
+ return -ENOMEM;
+
+ /* Marker */
-+ set_save = (struct ip_set_save *) (data + *used);
++ set_save = data + *used;
+ set_save->index = IP_SET_INVALID_ID;
+ set_save->header_size = 0;
+ set_save->members_size = 0;
@@ -2394,7 +3244,7 @@
+ index = ip_set_list[index]->id;
+ FOREACH_HASH_DO(__set_hash_save_bindings, index, data, used, len, &res);
+
-+ return res;
++ return res;
+}
+
+/*
@@ -2413,12 +3263,12 @@
+ /* Loop to restore sets */
+ while (1) {
+ line++;
-+
-+ DP("%u %u %u", used, sizeof(struct ip_set_restore), len);
++
++ DP("%d %zu %d", used, sizeof(struct ip_set_restore), len);
+ /* Get and ensure header size */
+ if (used + sizeof(struct ip_set_restore) > len)
+ return line;
-+ set_restore = (struct ip_set_restore *) (data + used);
++ set_restore = data + used;
+ used += sizeof(struct ip_set_restore);
+
+ /* Ensure data size */
@@ -2432,7 +3282,7 @@
+ line--;
+ goto bindings;
+ }
-+
++
+ /* Try to create the set */
+ DP("restore %s %s", set_restore->name, set_restore->typename);
+ res = ip_set_create(set_restore->name,
@@ -2440,7 +3290,7 @@
+ set_restore->index,
+ data + used,
+ set_restore->header_size);
-+
++
+ if (res != 0)
+ return line;
+ used += set_restore->header_size;
@@ -2452,12 +3302,13 @@
+ /* Try to restore members data */
+ set = ip_set_list[index];
+ members_size = 0;
-+ DP("members_size %u reqsize %u",
-+ set_restore->members_size, set->type->reqsize);
++ DP("members_size %lu reqsize %lu",
++ (unsigned long)set_restore->members_size,
++ (unsigned long)set->type->reqsize);
+ while (members_size + set->type->reqsize <=
+ set_restore->members_size) {
+ line++;
-+ DP("members: %u, line %u", members_size, line);
++ DP("members: %d, line %d", members_size, line);
+ res = __ip_set_addip(index,
+ data + used + members_size,
+ set->type->reqsize);
@@ -2466,29 +3317,29 @@
+ members_size += set->type->reqsize;
+ }
+
-+ DP("members_size %u %u",
-+ set_restore->members_size, members_size);
++ DP("members_size %lu %d",
++ (unsigned long)set_restore->members_size, members_size);
+ if (members_size != set_restore->members_size)
+ return line++;
-+ used += set_restore->members_size;
++ used += set_restore->members_size;
+ }
-+
++
+ bindings:
+ /* Loop to restore bindings */
+ while (used < len) {
+ line++;
+
-+ DP("restore binding, line %u", line);
++ DP("restore binding, line %u", line);
+ /* Get and ensure size */
+ if (used + sizeof(struct ip_set_hash_save) > len)
+ return line;
-+ hash_save = (struct ip_set_hash_save *) (data + used);
++ hash_save = data + used;
+ used += sizeof(struct ip_set_hash_save);
-+
++
+ /* hash_save->id is used to store the index */
+ index = ip_set_find_byindex(hash_save->id);
+ DP("restore binding index %u, id %u, %u -> %u",
-+ index, hash_save->id, hash_save->ip, hash_save->binding);
++ index, hash_save->id, hash_save->ip, hash_save->binding);
+ if (index != hash_save->id)
+ return line;
+ if (ip_set_find_byindex(hash_save->binding) == IP_SET_INVALID_ID) {
@@ -2514,8 +3365,8 @@
+ }
+ if (used != len)
+ return line;
-+
-+ return 0;
++
++ return 0;
+}
+
+static int
@@ -2527,10 +3378,10 @@
+ struct ip_set_req_adt *req_adt;
+ ip_set_id_t index = IP_SET_INVALID_ID;
+ int (*adtfn)(ip_set_id_t index,
-+ const void *data, size_t size);
++ const void *data, u_int32_t size);
+ struct fn_table {
+ int (*fn)(ip_set_id_t index,
-+ const void *data, size_t size);
++ const void *data, u_int32_t size);
+ } adtfn_table[] =
+ { { ip_set_addip }, { ip_set_delip }, { ip_set_testip},
+ { ip_set_bindip}, { ip_set_unbindip }, { ip_set_testbind },
@@ -2562,11 +3413,10 @@
+
+ op = (unsigned *)data;
+ DP("op=%x", *op);
-+
++
+ if (*op < IP_SET_OP_VERSION) {
+ /* Check the version at the beginning of operations */
-+ struct ip_set_req_version *req_version =
-+ (struct ip_set_req_version *) data;
++ struct ip_set_req_version *req_version = data;
+ if (req_version->version != IP_SET_PROTOCOL_VERSION) {
+ res = -EPROTO;
+ goto done;
@@ -2575,9 +3425,8 @@
+
+ switch (*op) {
+ case IP_SET_OP_CREATE:{
-+ struct ip_set_req_create *req_create
-+ = (struct ip_set_req_create *) data;
-+
++ struct ip_set_req_create *req_create = data;
++
+ if (len < sizeof(struct ip_set_req_create)) {
+ ip_set_printk("short CREATE data (want >=%zu, got %u)",
+ sizeof(struct ip_set_req_create), len);
@@ -2594,16 +3443,15 @@
+ goto done;
+ }
+ case IP_SET_OP_DESTROY:{
-+ struct ip_set_req_std *req_destroy
-+ = (struct ip_set_req_std *) data;
-+
++ struct ip_set_req_std *req_destroy = data;
++
+ if (len != sizeof(struct ip_set_req_std)) {
+ ip_set_printk("invalid DESTROY data (want %zu, got %u)",
+ sizeof(struct ip_set_req_std), len);
+ res = -EINVAL;
+ goto done;
+ }
-+ if (strcmp(req_destroy->name, IPSET_TOKEN_ALL) == 0) {
++ if (SETNAME_EQ(req_destroy->name, IPSET_TOKEN_ALL)) {
+ /* Destroy all sets */
+ index = IP_SET_INVALID_ID;
+ } else {
@@ -2615,13 +3463,12 @@
+ goto done;
+ }
+ }
-+
++
+ res = ip_set_destroy(index);
+ goto done;
+ }
+ case IP_SET_OP_FLUSH:{
-+ struct ip_set_req_std *req_flush =
-+ (struct ip_set_req_std *) data;
++ struct ip_set_req_std *req_flush = data;
+
+ if (len != sizeof(struct ip_set_req_std)) {
+ ip_set_printk("invalid FLUSH data (want %zu, got %u)",
@@ -2629,7 +3476,7 @@
+ res = -EINVAL;
+ goto done;
+ }
-+ if (strcmp(req_flush->name, IPSET_TOKEN_ALL) == 0) {
++ if (SETNAME_EQ(req_flush->name, IPSET_TOKEN_ALL)) {
+ /* Flush all sets */
+ index = IP_SET_INVALID_ID;
+ } else {
@@ -2645,8 +3492,7 @@
+ goto done;
+ }
+ case IP_SET_OP_RENAME:{
-+ struct ip_set_req_create *req_rename
-+ = (struct ip_set_req_create *) data;
++ struct ip_set_req_create *req_rename = data;
+
+ if (len != sizeof(struct ip_set_req_create)) {
+ ip_set_printk("invalid RENAME data (want %zu, got %u)",
@@ -2657,7 +3503,7 @@
+
+ req_rename->name[IP_SET_MAXNAMELEN - 1] = '\0';
+ req_rename->typename[IP_SET_MAXNAMELEN - 1] = '\0';
-+
++
+ index = ip_set_find_byname(req_rename->name);
+ if (index == IP_SET_INVALID_ID) {
+ res = -ENOENT;
@@ -2667,8 +3513,7 @@
+ goto done;
+ }
+ case IP_SET_OP_SWAP:{
-+ struct ip_set_req_create *req_swap
-+ = (struct ip_set_req_create *) data;
++ struct ip_set_req_create *req_swap = data;
+ ip_set_id_t to_index;
+
+ if (len != sizeof(struct ip_set_req_create)) {
@@ -2697,7 +3542,7 @@
+ default:
+ break; /* Set identified by id */
+ }
-+
++
+ /* There we may have add/del/test/bind/unbind/test_bind operations */
+ if (*op < IP_SET_OP_ADD_IP || *op > IP_SET_OP_TEST_BIND_SET) {
+ res = -EBADMSG;
@@ -2711,7 +3556,7 @@
+ res = -EINVAL;
+ goto done;
+ }
-+ req_adt = (struct ip_set_req_adt *) data;
++ req_adt = data;
+
+ /* -U :all: :all:|:default: uses IP_SET_INVALID_ID */
+ if (!(*op == IP_SET_OP_UNBIND_SET
@@ -2771,8 +3616,7 @@
+
+ if (*op < IP_SET_OP_VERSION) {
+ /* Check the version at the beginning of operations */
-+ struct ip_set_req_version *req_version =
-+ (struct ip_set_req_version *) data;
++ struct ip_set_req_version *req_version = data;
+ if (req_version->version != IP_SET_PROTOCOL_VERSION) {
+ res = -EPROTO;
+ goto done;
@@ -2781,8 +3625,7 @@
+
+ switch (*op) {
+ case IP_SET_OP_VERSION: {
-+ struct ip_set_req_version *req_version =
-+ (struct ip_set_req_version *) data;
++ struct ip_set_req_version *req_version = data;
+
+ if (*len != sizeof(struct ip_set_req_version)) {
+ ip_set_printk("invalid VERSION (want %zu, got %d)",
@@ -2798,8 +3641,7 @@
+ goto done;
+ }
+ case IP_SET_OP_GET_BYNAME: {
-+ struct ip_set_req_get_set *req_get
-+ = (struct ip_set_req_get_set *) data;
++ struct ip_set_req_get_set *req_get = data;
+
+ if (*len != sizeof(struct ip_set_req_get_set)) {
+ ip_set_printk("invalid GET_BYNAME (want %zu, got %d)",
@@ -2813,8 +3655,7 @@
+ goto copy;
+ }
+ case IP_SET_OP_GET_BYINDEX: {
-+ struct ip_set_req_get_set *req_get
-+ = (struct ip_set_req_get_set *) data;
++ struct ip_set_req_get_set *req_get = data;
+
+ if (*len != sizeof(struct ip_set_req_get_set)) {
+ ip_set_printk("invalid GET_BYINDEX (want %zu, got %d)",
@@ -2830,8 +3671,7 @@
+ goto copy;
+ }
+ case IP_SET_OP_ADT_GET: {
-+ struct ip_set_req_adt_get *req_get
-+ = (struct ip_set_req_adt_get *) data;
++ struct ip_set_req_adt_get *req_get = data;
+
+ if (*len != sizeof(struct ip_set_req_adt_get)) {
+ ip_set_printk("invalid ADT_GET (want %zu, got %d)",
@@ -2853,8 +3693,7 @@
+ goto copy;
+ }
+ case IP_SET_OP_MAX_SETS: {
-+ struct ip_set_req_max_sets *req_max_sets
-+ = (struct ip_set_req_max_sets *) data;
++ struct ip_set_req_max_sets *req_max_sets = data;
+ ip_set_id_t i;
+
+ if (*len != sizeof(struct ip_set_req_max_sets)) {
@@ -2864,7 +3703,7 @@
+ goto done;
+ }
+
-+ if (strcmp(req_max_sets->set.name, IPSET_TOKEN_ALL) == 0) {
++ if (SETNAME_EQ(req_max_sets->set.name, IPSET_TOKEN_ALL)) {
+ req_max_sets->set.index = IP_SET_INVALID_ID;
+ } else {
+ req_max_sets->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
@@ -2885,8 +3724,7 @@
+ }
+ case IP_SET_OP_LIST_SIZE:
+ case IP_SET_OP_SAVE_SIZE: {
-+ struct ip_set_req_setnames *req_setnames
-+ = (struct ip_set_req_setnames *) data;
++ struct ip_set_req_setnames *req_setnames = data;
+ struct ip_set_name_list *name_list;
+ struct ip_set *set;
+ ip_set_id_t i;
@@ -2904,8 +3742,7 @@
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] == NULL)
+ continue;
-+ name_list = (struct ip_set_name_list *)
-+ (data + used);
++ name_list = data + used;
+ used += sizeof(struct ip_set_name_list);
+ if (used > copylen) {
+ res = -EAGAIN;
@@ -2957,8 +3794,7 @@
+ goto copy;
+ }
+ case IP_SET_OP_LIST: {
-+ struct ip_set_req_list *req_list
-+ = (struct ip_set_req_list *) data;
++ struct ip_set_req_list *req_list = data;
+ ip_set_id_t i;
+ int used;
+
@@ -2994,8 +3830,7 @@
+ goto copy;
+ }
+ case IP_SET_OP_SAVE: {
-+ struct ip_set_req_list *req_save
-+ = (struct ip_set_req_list *) data;
++ struct ip_set_req_list *req_save = data;
+ ip_set_id_t i;
+ int used;
+
@@ -3011,20 +3846,30 @@
+ res = -ENOENT;
+ goto done;
+ }
++
++#define SETLIST(set) (strcmp(set->type->typename, "setlist") == 0)
++
+ used = 0;
+ if (index == IP_SET_INVALID_ID) {
-+ /* Save all sets */
++ /* Save all sets: ugly setlist type dependency */
++ int setlist = 0;
++ setlists:
+ for (i = 0; i < ip_set_max && res == 0; i++) {
-+ if (ip_set_list[i] != NULL)
++ if (ip_set_list[i] != NULL
++ && !(setlist ^ SETLIST(ip_set_list[i])))
+ res = ip_set_save_set(i, data, &used, *len);
+ }
++ if (!setlist) {
++ setlist = 1;
++ goto setlists;
++ }
+ } else {
+ /* Save an individual set */
+ res = ip_set_save_set(index, data, &used, *len);
+ }
+ if (res == 0)
+ res = ip_set_save_bindings(index, data, &used, *len);
-+
++
+ if (res != 0)
+ goto done;
+ else if (copylen != used) {
@@ -3034,20 +3879,19 @@
+ goto copy;
+ }
+ case IP_SET_OP_RESTORE: {
-+ struct ip_set_req_setnames *req_restore
-+ = (struct ip_set_req_setnames *) data;
++ struct ip_set_req_setnames *req_restore = data;
+ int line;
+
+ if (*len < sizeof(struct ip_set_req_setnames)
+ || *len != req_restore->size) {
-+ ip_set_printk("invalid RESTORE (want =%zu, got %d)",
-+ req_restore->size, *len);
++ ip_set_printk("invalid RESTORE (want =%lu, got %d)",
++ (long unsigned)req_restore->size, *len);
+ res = -EINVAL;
+ goto done;
+ }
+ line = ip_set_restore(data + sizeof(struct ip_set_req_setnames),
+ req_restore->size - sizeof(struct ip_set_req_setnames));
-+ DP("ip_set_restore: %u", line);
++ DP("ip_set_restore: %d", line);
+ if (line != 0) {
+ res = -EAGAIN;
+ req_restore->size = line;
@@ -3062,12 +3906,12 @@
+ } /* end of switch(op) */
+
+ copy:
-+ DP("set %s, copylen %u", index != IP_SET_INVALID_ID
++ DP("set %s, copylen %d", index != IP_SET_INVALID_ID
+ && ip_set_list[index]
+ ? ip_set_list[index]->name
+ : ":all:", copylen);
+ res = copy_to_user(user, data, copylen);
-+
++
+ done:
+ up(&ip_set_app_mutex);
+ vfree(data);
@@ -3085,12 +3929,15 @@
+ .get_optmin = SO_IP_SET,
+ .get_optmax = SO_IP_SET + 1,
+ .get = &ip_set_sockfn_get,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
++ .use = 0,
++#else
+ .owner = THIS_MODULE,
+#endif
+};
+
+static int max_sets, hash_size;
++
+module_param(max_sets, int, 0600);
+MODULE_PARM_DESC(max_sets, "maximal number of sets");
+module_param(hash_size, int, 0600);
@@ -3133,6 +3980,7 @@
+ vfree(ip_set_hash);
+ return res;
+ }
++
+ return 0;
+}
+
@@ -3150,7 +3998,10 @@
+
+EXPORT_SYMBOL(ip_set_get_byname);
+EXPORT_SYMBOL(ip_set_get_byindex);
-+EXPORT_SYMBOL(ip_set_put);
++EXPORT_SYMBOL(ip_set_put_byindex);
++EXPORT_SYMBOL(ip_set_id);
++EXPORT_SYMBOL(__ip_set_get_byname);
++EXPORT_SYMBOL(__ip_set_put_byindex);
+
+EXPORT_SYMBOL(ip_set_addip_kernel);
+EXPORT_SYMBOL(ip_set_delip_kernel);
@@ -3160,8 +4011,8 @@
+module_exit(ip_set_fini);
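The IP_SET_OP_SAVE branch above writes ordinary sets in a first pass and "setlist" sets in a second pass, so that a later restore re-creates every member set before any setlist that refers to it. Below is a minimal user-space sketch of that two-pass ordering, not part of the patch; the toy_set, is_setlist and save_one names are invented for the example.

#include <stdio.h>
#include <string.h>

struct toy_set {
        const char *name;
        const char *typename;
};

static int is_setlist(const struct toy_set *set)
{
        return strcmp(set->typename, "setlist") == 0;
}

static void save_one(const struct toy_set *set)
{
        printf("saving %s (%s)\n", set->name, set->typename);
}

static void save_all(const struct toy_set *sets, int n)
{
        int pass, i;

        /* pass 0: everything that is not a setlist; pass 1: the setlists */
        for (pass = 0; pass < 2; pass++)
                for (i = 0; i < n; i++)
                        if (is_setlist(&sets[i]) == pass)
                                save_one(&sets[i]);
}

int main(void)
{
        const struct toy_set sets[] = {
                { "blocklist", "iphash"  },
                { "all-lists", "setlist" },
                { "lan",       "ipmap"   },
        };

        save_all(sets, 3);
        return 0;
}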
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_iphash.c
-@@ -0,0 +1,429 @@
-+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+@@ -0,0 +1,166 @@
++/* Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
@@ -3171,36 +4022,26 @@
+/* Kernel module implementing an ip hash set */
+
+#include <linux/module.h>
++#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+#include <linux/jhash.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
-+#include <linux/vmalloc.h>
+#include <linux/random.h>
+
+#include <net/ip.h>
+
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
+#include <linux/netfilter_ipv4/ip_set_iphash.h>
+
+static int limit = MAX_RANGE;
+
+static inline __u32
-+jhash_ip(const struct ip_set_iphash *map, uint16_t i, ip_set_ip_t ip)
-+{
-+ return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
-+}
-+
-+static inline __u32
-+hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++iphash_id(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++ struct ip_set_iphash *map = set->data;
+ __u32 id;
+ u_int16_t i;
+ ip_set_ip_t *elem;
@@ -3208,208 +4049,91 @@
+ *hash_ip = ip & map->netmask;
+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u, %u.%u.%u.%u",
+ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip), HIPQUAD(map->netmask));
-+
++
+ for (i = 0; i < map->probes; i++) {
+ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
+ DP("hash key: %u", id);
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
+ if (*elem == *hash_ip)
+ return id;
-+ /* No shortcut at testing - there can be deleted
-+ * entries. */
++ /* No shortcut - there can be deleted entries. */
+ }
+ return UINT_MAX;
+}
+
+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++iphash_test(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
++ return (ip && iphash_id(set, hash_ip, ip) != UINT_MAX);
+}
+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iphash *req =
-+ (struct ip_set_req_iphash *) data;
++#define KADT_CONDITION
+
-+ if (size != sizeof(struct ip_set_req_iphash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iphash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT(iphash, test)
++KADT(iphash, test, ipaddr)
+
+static inline int
-+__addip(struct ip_set_iphash *map, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++__iphash_add(struct ip_set_iphash *map, ip_set_ip_t *ip)
+{
+ __u32 probe;
+ u_int16_t i;
-+ ip_set_ip_t *elem;
-+
-+ if (!ip || map->elements >= limit)
-+ return -ERANGE;
-+
-+ *hash_ip = ip & map->netmask;
-+
++ ip_set_ip_t *elem, *slot = NULL;
++
+ for (i = 0; i < map->probes; i++) {
-+ probe = jhash_ip(map, i, *hash_ip) % map->hashsize;
++ probe = jhash_ip(map, i, *ip) % map->hashsize;
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
-+ if (*elem == *hash_ip)
++ if (*elem == *ip)
+ return -EEXIST;
-+ if (!*elem) {
-+ *elem = *hash_ip;
-+ map->elements++;
-+ return 0;
-+ }
++ if (!(slot || *elem))
++ slot = elem;
++ /* There can be deleted entries, must check all slots */
++ }
++ if (slot) {
++ *slot = *ip;
++ map->elements++;
++ return 0;
+ }
+ /* Trigger rehashing */
+ return -EAGAIN;
+}
+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
++static inline int
++iphash_add(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_req_iphash *req =
-+ (struct ip_set_req_iphash *) data;
++ struct ip_set_iphash *map = set->data;
++
++ if (!ip || map->elements >= limit)
++ return -ERANGE;
+
-+ if (size != sizeof(struct ip_set_req_iphash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iphash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __addip((struct ip_set_iphash *) set->data, req->ip, hash_ip);
++ *hash_ip = ip & map->netmask;
++
++ return __iphash_add(map, hash_ip);
+}
+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __addip((struct ip_set_iphash *) set->data,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT(iphash, add)
++KADT(iphash, add, ipaddr)
+
-+static int retry(struct ip_set *set)
++static inline void
++__iphash_retry(struct ip_set_iphash *tmp, struct ip_set_iphash *map)
+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ ip_set_ip_t hash_ip, *elem;
-+ void *members;
-+ u_int32_t i, hashsize = map->hashsize;
-+ int res;
-+ struct ip_set_iphash *tmp;
-+
-+ if (map->resize == 0)
-+ return -ERANGE;
-+
-+ again:
-+ res = 0;
-+
-+ /* Calculate new hash size */
-+ hashsize += (hashsize * map->resize)/100;
-+ if (hashsize == map->hashsize)
-+ hashsize++;
-+
-+ ip_set_printk("rehashing of set %s triggered: "
-+ "hashsize grows from %u to %u",
-+ set->name, map->hashsize, hashsize);
-+
-+ tmp = kmalloc(sizeof(struct ip_set_iphash)
-+ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
-+ if (!tmp) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_iphash)
-+ + map->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
-+ if (!tmp->members) {
-+ DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
-+ kfree(tmp);
-+ return -ENOMEM;
-+ }
-+ tmp->hashsize = hashsize;
-+ tmp->elements = 0;
-+ tmp->probes = map->probes;
-+ tmp->resize = map->resize;
+ tmp->netmask = map->netmask;
-+ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
-+
-+ write_lock_bh(&set->lock);
-+ map = (struct ip_set_iphash *) set->data; /* Play safe */
-+ for (i = 0; i < map->hashsize && res == 0; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ if (*elem)
-+ res = __addip(tmp, *elem, &hash_ip);
-+ }
-+ if (res) {
-+ /* Failure, try again */
-+ write_unlock_bh(&set->lock);
-+ harray_free(tmp->members);
-+ kfree(tmp);
-+ goto again;
-+ }
-+
-+ /* Success at resizing! */
-+ members = map->members;
-+
-+ map->hashsize = tmp->hashsize;
-+ map->members = tmp->members;
-+ write_unlock_bh(&set->lock);
-+
-+ harray_free(members);
-+ kfree(tmp);
-+
-+ return 0;
+}
+
++HASH_RETRY(iphash, ip_set_ip_t)
++
+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++iphash_del(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++ struct ip_set_iphash *map = set->data;
+ ip_set_ip_t id, *elem;
+
+ if (!ip)
+ return -ERANGE;
+
-+ id = hash_id(set, ip, hash_ip);
++ id = iphash_id(set, hash_ip, ip);
+ if (id == UINT_MAX)
+ return -EEXIST;
-+
++
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
+ *elem = 0;
+ map->elements--;
@@ -3417,159 +4141,35 @@
+ return 0;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iphash *req =
-+ (struct ip_set_req_iphash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iphash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iphash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT(iphash, del)
++KADT(iphash, del, ipaddr)
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static inline int
++__iphash_create(const struct ip_set_req_iphash_create *req,
++ struct ip_set_iphash *map)
+{
-+ struct ip_set_req_iphash_create *req =
-+ (struct ip_set_req_iphash_create *) data;
-+ struct ip_set_iphash *map;
-+ uint16_t i;
-+
-+ if (size != sizeof(struct ip_set_req_iphash_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iphash_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ if (req->hashsize < 1) {
-+ ip_set_printk("hashsize too small");
-+ return -ENOEXEC;
-+ }
-+
-+ if (req->probes < 1) {
-+ ip_set_printk("probes too small");
-+ return -ENOEXEC;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_iphash)
-+ + req->probes * sizeof(uint32_t), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_iphash)
-+ + req->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ for (i = 0; i < req->probes; i++)
-+ get_random_bytes(((uint32_t *) map->initval)+i, 4);
-+ map->elements = 0;
-+ map->hashsize = req->hashsize;
-+ map->probes = req->probes;
-+ map->resize = req->resize;
+ map->netmask = req->netmask;
-+ map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+
-+ set->data = map;
++
+ return 0;
+}
+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+
-+ harray_free(map->members);
-+ kfree(map);
-+
-+ set->data = NULL;
-+}
-+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
-+ map->elements = 0;
-+}
++HASH_CREATE(iphash, ip_set_ip_t)
++HASH_DESTROY(iphash)
+
-+static void list_header(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ struct ip_set_req_iphash_create *header =
-+ (struct ip_set_req_iphash_create *) data;
++HASH_FLUSH(iphash, ip_set_ip_t)
+
-+ header->hashsize = map->hashsize;
-+ header->probes = map->probes;
-+ header->resize = map->resize;
++static inline void
++__iphash_list_header(const struct ip_set_iphash *map,
++ struct ip_set_req_iphash_create *header)
++{
+ header->netmask = map->netmask;
+}
+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+
-+ return (map->hashsize * sizeof(ip_set_ip_t));
-+}
-+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ ip_set_ip_t i, *elem;
-+
-+ for (i = 0; i < map->hashsize; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ ((ip_set_ip_t *)data)[i] = *elem;
-+ }
-+}
++HASH_LIST_HEADER(iphash)
++HASH_LIST_MEMBERS_SIZE(iphash, ip_set_ip_t)
++HASH_LIST_MEMBERS(iphash, ip_set_ip_t)
+
-+static struct ip_set_type ip_set_iphash = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_iphash),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .retry = &retry,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_iphash_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_RTYPE(iphash, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -3577,25 +4177,13 @@
+module_param(limit, int, 0600);
+MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
+
-+static int __init ip_set_iphash_init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_iphash);
-+}
-+
-+static void __exit ip_set_iphash_fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_iphash);
-+}
-+
-+module_init(ip_set_iphash_init);
-+module_exit(ip_set_iphash_fini);
++REGISTER_MODULE(iphash)
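The reworked __iphash_add above probes each element at map->probes independent jhash slots, remembers the first empty slot it sees, but still checks every probe so that a value hiding behind a deleted entry is not inserted twice. A minimal sketch of that probing scheme in plain C, with an invented toy_hash() standing in for the kernel's jhash-based jhash_ip(); it is an illustration, not part of the patch.

#include <stdint.h>
#include <stdio.h>

#define HASHSIZE 8
#define PROBES   4

static uint32_t toy_hash(uint32_t ip, unsigned probe)
{
        /* stand-in for jhash_1word(ip, initval[probe]) */
        return (ip * 2654435761u) ^ (probe * 40503u);
}

/* returns 0 on success, -1 if already present, -2 if all probes are taken */
static int toy_add(uint32_t table[HASHSIZE], uint32_t ip)
{
        uint32_t *slot = NULL;
        unsigned i;

        for (i = 0; i < PROBES; i++) {
                uint32_t *elem = &table[toy_hash(ip, i) % HASHSIZE];

                if (*elem == ip)
                        return -1;      /* -EEXIST */
                if (!slot && !*elem)
                        slot = elem;    /* remember first free slot */
                /* keep probing: there can be deleted entries */
        }
        if (slot) {
                *slot = ip;
                return 0;
        }
        return -2;                      /* would trigger rehashing (-EAGAIN) */
}

int main(void)
{
        uint32_t table[HASHSIZE] = { 0 };

        printf("%d\n", toy_add(table, 0xc0a80001));     /* 0: added      */
        printf("%d\n", toy_add(table, 0xc0a80001));     /* -1: duplicate */
        return 0;
}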
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_ipmap.c
-@@ -0,0 +1,336 @@
+@@ -0,0 +1,142 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
-+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ * Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
@@ -3607,9 +4195,6 @@
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
@@ -3624,10 +4209,10 @@
+}
+
+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++ipmap_test(const struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+
++ const struct ip_set_ipmap *map = set->data;
++
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
+
@@ -3637,46 +4222,15 @@
+ return !!test_bit(ip_to_id(map, *hash_ip), map->members);
+}
+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipmap *req =
-+ (struct ip_set_req_ipmap *) data;
++#define KADT_CONDITION
+
-+ if (size != sizeof(struct ip_set_req_ipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ int res = __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+ return (res < 0 ? 0 : res);
-+}
++UADT(ipmap, test)
++KADT(ipmap, test, ipaddr)
+
+static inline int
-+__addip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++ipmap_add(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++ struct ip_set_ipmap *map = set->data;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
@@ -3689,46 +4243,13 @@
+ return 0;
+}
+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipmap *req =
-+ (struct ip_set_req_ipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_ipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ DP("%u.%u.%u.%u", HIPQUAD(req->ip));
-+ return __addip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __addip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT(ipmap, add)
++KADT(ipmap, add, ipaddr)
+
+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++ipmap_del(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++ struct ip_set_ipmap *map = set->data;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
@@ -3737,75 +4258,17 @@
+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
+ if (!test_and_clear_bit(ip_to_id(map, *hash_ip), map->members))
+ return -EEXIST;
-+
++
+ return 0;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipmap *req =
-+ (struct ip_set_req_ipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_ipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT(ipmap, del)
++KADT(ipmap, del, ipaddr)
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static inline int
++__ipmap_create(const struct ip_set_req_ipmap_create *req,
++ struct ip_set_ipmap *map)
+{
-+ int newbytes;
-+ struct ip_set_req_ipmap_create *req =
-+ (struct ip_set_req_ipmap_create *) data;
-+ struct ip_set_ipmap *map;
-+
-+ if (size != sizeof(struct ip_set_req_ipmap_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipmap_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ DP("from %u.%u.%u.%u to %u.%u.%u.%u",
-+ HIPQUAD(req->from), HIPQUAD(req->to));
-+
-+ if (req->from > req->to) {
-+ DP("bad ip range");
-+ return -ENOEXEC;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_ipmap), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_ipmap));
-+ return -ENOMEM;
-+ }
-+ map->first_ip = req->from;
-+ map->last_ip = req->to;
+ map->netmask = req->netmask;
+
+ if (req->netmask == 0xFFFFFFFF) {
@@ -3830,109 +4293,40 @@
+ map->sizeid = 2 << (netmask_bits - mask_bits - 1);
+ }
+ if (map->sizeid > MAX_RANGE + 1) {
-+ ip_set_printk("range too big (max %d addresses)",
-+ MAX_RANGE+1);
-+ kfree(map);
++ ip_set_printk("range too big, %d elements (max %d)",
++ map->sizeid, MAX_RANGE+1);
+ return -ENOEXEC;
+ }
+ DP("hosts %u, sizeid %u", map->hosts, map->sizeid);
-+ newbytes = bitmap_bytes(0, map->sizeid - 1);
-+ map->members = kmalloc(newbytes, GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", newbytes);
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+ memset(map->members, 0, newbytes);
-+
-+ set->data = map;
-+ return 0;
-+}
-+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+
-+ kfree(map->members);
-+ kfree(map);
-+
-+ set->data = NULL;
++ return bitmap_bytes(0, map->sizeid - 1);
+}
+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+ memset(map->members, 0, bitmap_bytes(0, map->sizeid - 1));
-+}
++BITMAP_CREATE(ipmap)
++BITMAP_DESTROY(ipmap)
++BITMAP_FLUSH(ipmap)
+
-+static void list_header(const struct ip_set *set, void *data)
++static inline void
++__ipmap_list_header(const struct ip_set_ipmap *map,
++ struct ip_set_req_ipmap_create *header)
+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+ struct ip_set_req_ipmap_create *header =
-+ (struct ip_set_req_ipmap_create *) data;
-+
-+ header->from = map->first_ip;
-+ header->to = map->last_ip;
+ header->netmask = map->netmask;
+}
+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+
-+ return bitmap_bytes(0, map->sizeid - 1);
-+}
++BITMAP_LIST_HEADER(ipmap)
++BITMAP_LIST_MEMBERS_SIZE(ipmap)
++BITMAP_LIST_MEMBERS(ipmap)
+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+ int bytes = bitmap_bytes(0, map->sizeid - 1);
-+
-+ memcpy(data, map->members, bytes);
-+}
-+
-+static struct ip_set_type ip_set_ipmap = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_ipmap),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_ipmap_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_TYPE(ipmap, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("ipmap type of IP sets");
+
-+static int __init ip_set_ipmap_init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_ipmap);
-+}
-+
-+static void __exit ip_set_ipmap_fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_ipmap);
-+}
-+
-+module_init(ip_set_ipmap_init);
-+module_exit(ip_set_ipmap_fini);
++REGISTER_MODULE(ipmap)
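The ipmap type above tests and sets bits in a bitmap covering the first_ip..last_ip range, one bit per "hosts" addresses as derived from the netmask. The sketch below shows that mapping in plain C; the (ip - first_ip) / hosts index formula is an assumption based on the fields visible in this patch (ip_to_id itself is defined outside the lines shown), and all toy_* names are invented.

#include <stdint.h>
#include <stdio.h>

struct toy_ipmap {
        uint32_t first_ip;              /* host byte order */
        uint32_t last_ip;
        uint32_t hosts;                 /* addresses represented by one bit */
        unsigned char members[1 << 13]; /* bitmap storage */
};

static unsigned ip_to_id(const struct toy_ipmap *map, uint32_t ip)
{
        return (ip - map->first_ip) / map->hosts;
}

static int toy_test(const struct toy_ipmap *map, uint32_t ip)
{
        unsigned id;

        if (ip < map->first_ip || ip > map->last_ip)
                return -1;              /* -ERANGE */
        id = ip_to_id(map, ip);
        return (map->members[id / 8] >> (id % 8)) & 1;
}

static void toy_add(struct toy_ipmap *map, uint32_t ip)
{
        unsigned id = ip_to_id(map, ip);

        map->members[id / 8] |= 1 << (id % 8);
}

int main(void)
{
        static struct toy_ipmap map = {
                .first_ip = 0xc0a80000, /* 192.168.0.0 */
                .last_ip  = 0xc0a800ff, /* 192.168.0.255 */
                .hosts    = 1,          /* /32 netmask: one bit per host */
        };

        toy_add(&map, 0xc0a80007);
        printf("%d %d\n", toy_test(&map, 0xc0a80007), toy_test(&map, 0xc0a80008));
        return 0;
}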
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_ipporthash.c
-@@ -0,0 +1,581 @@
-+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+@@ -0,0 +1,203 @@
++/* Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
@@ -3942,581 +4336,729 @@
+/* Kernel module implementing an ip+port hash set */
+
+#include <linux/module.h>
++#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+#include <linux/jhash.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
-+#include <linux/vmalloc.h>
+#include <linux/random.h>
+
+#include <net/ip.h>
+
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
+#include <linux/netfilter_ipv4/ip_set_ipporthash.h>
++#include <linux/netfilter_ipv4/ip_set_getport.h>
+
+static int limit = MAX_RANGE;
+
-+/* We must handle non-linear skbs */
-+static inline ip_set_ip_t
-+get_port(const struct sk_buff *skb, u_int32_t flags)
-+{
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ struct iphdr *iph = ip_hdr(skb);
-+#else
-+ struct iphdr *iph = skb->nh.iph;
-+#endif
-+ u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
-+
-+ switch (iph->protocol) {
-+ case IPPROTO_TCP: {
-+ struct tcphdr tcph;
-+
-+ /* See comments at tcp_match in ip_tables.c */
-+ if (offset)
-+ return INVALID_PORT;
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
-+#else
-+ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &tcph, sizeof(tcph)) < 0)
-+#endif
-+ /* No choice either */
-+ return INVALID_PORT;
-+
-+ return ntohs(flags & IPSET_SRC ?
-+ tcph.source : tcph.dest);
-+ }
-+ case IPPROTO_UDP: {
-+ struct udphdr udph;
-+
-+ if (offset)
-+ return INVALID_PORT;
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
-+#else
-+ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &udph, sizeof(udph)) < 0)
-+#endif
-+ /* No choice either */
-+ return INVALID_PORT;
-+
-+ return ntohs(flags & IPSET_SRC ?
-+ udph.source : udph.dest);
-+ }
-+ default:
-+ return INVALID_PORT;
-+ }
-+}
-+
+static inline __u32
-+jhash_ip(const struct ip_set_ipporthash *map, uint16_t i, ip_set_ip_t ip)
++ipporthash_id(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port)
+{
-+ return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
-+}
-+
-+#define HASH_IP(map, ip, port) (port + ((ip - ((map)->first_ip)) << 16))
-+
-+static inline __u32
-+hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_ipporthash *map =
-+ (struct ip_set_ipporthash *) set->data;
++ struct ip_set_ipporthash *map = set->data;
+ __u32 id;
+ u_int16_t i;
+ ip_set_ip_t *elem;
+
-+ *hash_ip = HASH_IP(map, ip, port);
++ *hash_ip = pack_ip_port(map, ip, port);
++
+ DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
+ set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
-+
++ if (!*hash_ip)
++ return UINT_MAX;
++
+ for (i = 0; i < map->probes; i++) {
+ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
+ DP("hash key: %u", id);
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
+ if (*elem == *hash_ip)
+ return id;
-+ /* No shortcut at testing - there can be deleted
-+ * entries. */
++ /* No shortcut - there can be deleted entries. */
+ }
+ return UINT_MAX;
+}
+
+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t *hash_ip)
++ipporthash_test(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+
++ struct ip_set_ipporthash *map = set->data;
++
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
+
-+ return (hash_id(set, ip, port, hash_ip) != UINT_MAX);
++ return (ipporthash_id(set, hash_ip, ip, port) != UINT_MAX);
+}
+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipporthash *req =
-+ (struct ip_set_req_ipporthash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_ipporthash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipporthash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testip(set, req->ip, req->port, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ ip_set_ip_t port;
-+ int res;
-+
-+ if (flags[index+1] == 0)
-+ return 0;
-+
-+ port = get_port(skb, flags[index+1]);
-+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ NIPQUAD(ip_hdr(skb)->saddr),
-+ NIPQUAD(ip_hdr(skb)->daddr));
-+#else
-+ NIPQUAD(skb->nh.iph->saddr),
-+ NIPQUAD(skb->nh.iph->daddr));
-+#endif
-+ DP("flag %s port %u",
-+ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
-+ port);
-+ if (port == INVALID_PORT)
++#define KADT_CONDITION \
++ ip_set_ip_t port; \
++ \
++ if (flags[index+1] == 0) \
++ return 0; \
++ \
++ port = get_port(skb, flags[index+1]); \
++ \
++ if (port == INVALID_PORT) \
+ return 0;
+
-+ res = __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ port,
-+ hash_ip);
-+ return (res < 0 ? 0 : res);
-+
-+}
++UADT(ipporthash, test, req->port)
++KADT(ipporthash, test, ipaddr, port)
+
+static inline int
-+__add_haship(struct ip_set_ipporthash *map, ip_set_ip_t hash_ip)
++__ipporthash_add(struct ip_set_ipporthash *map, ip_set_ip_t *ip)
+{
+ __u32 probe;
+ u_int16_t i;
-+ ip_set_ip_t *elem;
++ ip_set_ip_t *elem, *slot = NULL;
+
+ for (i = 0; i < map->probes; i++) {
-+ probe = jhash_ip(map, i, hash_ip) % map->hashsize;
++ probe = jhash_ip(map, i, *ip) % map->hashsize;
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
-+ if (*elem == hash_ip)
++ if (*elem == *ip)
+ return -EEXIST;
-+ if (!*elem) {
-+ *elem = hash_ip;
-+ map->elements++;
-+ return 0;
-+ }
++ if (!(slot || *elem))
++ slot = elem;
++ /* There can be deleted entries, must check all slots */
++ }
++ if (slot) {
++ *slot = *ip;
++ map->elements++;
++ return 0;
+ }
+ /* Trigger rehashing */
+ return -EAGAIN;
+}
+
+static inline int
-+__addip(struct ip_set_ipporthash *map, ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t *hash_ip)
++ipporthash_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port)
+{
++ struct ip_set_ipporthash *map = set->data;
+ if (map->elements > limit)
+ return -ERANGE;
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
+
-+ *hash_ip = HASH_IP(map, ip, port);
++ *hash_ip = pack_ip_port(map, ip, port);
+
-+ return __add_haship(map, *hash_ip);
++ if (!*hash_ip)
++ return -ERANGE;
++
++ return __ipporthash_add(map, hash_ip);
+}
+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
++UADT(ipporthash, add, req->port)
++KADT(ipporthash, add, ipaddr, port)
++
++static inline void
++__ipporthash_retry(struct ip_set_ipporthash *tmp,
++ struct ip_set_ipporthash *map)
+{
-+ struct ip_set_req_ipporthash *req =
-+ (struct ip_set_req_ipporthash *) data;
++ tmp->first_ip = map->first_ip;
++ tmp->last_ip = map->last_ip;
++}
+
-+ if (size != sizeof(struct ip_set_req_ipporthash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipporthash),
-+ size);
-+ return -EINVAL;
++HASH_RETRY(ipporthash, ip_set_ip_t)
++
++static inline int
++ipporthash_del(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port)
++{
++ struct ip_set_ipporthash *map = set->data;
++ ip_set_ip_t id;
++ ip_set_ip_t *elem;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++
++ id = ipporthash_id(set, hash_ip, ip, port);
++
++ if (id == UINT_MAX)
++ return -EEXIST;
++
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
++ *elem = 0;
++ map->elements--;
++
++ return 0;
++}
++
++UADT(ipporthash, del, req->port)
++KADT(ipporthash, del, ipaddr, port)
++
++static inline int
++__ipporthash_create(const struct ip_set_req_ipporthash_create *req,
++ struct ip_set_ipporthash *map)
++{
++ if (req->to - req->from > MAX_RANGE) {
++ ip_set_printk("range too big, %d elements (max %d)",
++ req->to - req->from + 1, MAX_RANGE+1);
++ return -ENOEXEC;
+ }
-+ return __addip((struct ip_set_ipporthash *) set->data,
-+ req->ip, req->port, hash_ip);
++ map->first_ip = req->from;
++ map->last_ip = req->to;
++ return 0;
+}
+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
++HASH_CREATE(ipporthash, ip_set_ip_t)
++HASH_DESTROY(ipporthash)
++HASH_FLUSH(ipporthash, ip_set_ip_t)
++
++static inline void
++__ipporthash_list_header(const struct ip_set_ipporthash *map,
++ struct ip_set_req_ipporthash_create *header)
+{
-+ ip_set_ip_t port;
++ header->from = map->first_ip;
++ header->to = map->last_ip;
++}
+
-+ if (flags[index+1] == 0)
-+ return -EINVAL;
++HASH_LIST_HEADER(ipporthash)
++HASH_LIST_MEMBERS_SIZE(ipporthash, ip_set_ip_t)
++HASH_LIST_MEMBERS(ipporthash, ip_set_ip_t)
+
-+ port = get_port(skb, flags[index+1]);
++IP_SET_RTYPE(ipporthash, IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_DATA_DOUBLE)
+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ NIPQUAD(ip_hdr(skb)->saddr),
-+ NIPQUAD(ip_hdr(skb)->daddr));
-+#else
-+ NIPQUAD(skb->nh.iph->saddr),
-+ NIPQUAD(skb->nh.iph->daddr));
-+#endif
-+ DP("flag %s port %u",
-+ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
-+ port);
-+ if (port == INVALID_PORT)
-+ return -EINVAL;
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("ipporthash type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
+
-+ return __addip((struct ip_set_ipporthash *) set->data,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ port,
-+ hash_ip);
-+}
++REGISTER_MODULE(ipporthash)
+--- /dev/null
++++ b/net/ipv4/netfilter/ip_set_ipportiphash.c
+@@ -0,0 +1,216 @@
++/* Copyright (C) 2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module implementing an ip+port+ip hash set */
++
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/ip.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++#include <linux/random.h>
+
-+static int retry(struct ip_set *set)
++#include <net/ip.h>
++
++#include <linux/netfilter_ipv4/ip_set_ipportiphash.h>
++#include <linux/netfilter_ipv4/ip_set_getport.h>
++
++static int limit = MAX_RANGE;
++
++#define jhash_ip2(map, i, ipport, ip1) \
++ jhash_2words(ipport, ip1, *(map->initval + i))
++
++static inline __u32
++ipportiphash_id(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ ip_set_ip_t *elem;
-+ void *members;
-+ u_int32_t i, hashsize = map->hashsize;
-+ int res;
-+ struct ip_set_ipporthash *tmp;
++ struct ip_set_ipportiphash *map = set->data;
++ __u32 id;
++ u_int16_t i;
++ struct ipportip *elem;
+
-+ if (map->resize == 0)
++ *hash_ip = pack_ip_port(map, ip, port);
++ DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
++ set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
++ if (!(*hash_ip || ip1))
++ return UINT_MAX;
++
++ for (i = 0; i < map->probes; i++) {
++ id = jhash_ip2(map, i, *hash_ip, ip1) % map->hashsize;
++ DP("hash key: %u", id);
++ elem = HARRAY_ELEM(map->members, struct ipportip *, id);
++ if (elem->ip == *hash_ip && elem->ip1 == ip1)
++ return id;
++ /* No shortcut - there can be deleted entries. */
++ }
++ return UINT_MAX;
++}
++
++static inline int
++ipportiphash_test(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
++{
++ struct ip_set_ipportiphash *map = set->data;
++
++ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
+
-+ again:
-+ res = 0;
++ return (ipportiphash_id(set, hash_ip, ip, port, ip1) != UINT_MAX);
++}
+
-+ /* Calculate new hash size */
-+ hashsize += (hashsize * map->resize)/100;
-+ if (hashsize == map->hashsize)
-+ hashsize++;
++#define KADT_CONDITION \
++ ip_set_ip_t port, ip1; \
++ \
++ if (flags[index+2] == 0) \
++ return 0; \
++ \
++ port = get_port(skb, flags[index+1]); \
++ ip1 = ipaddr(skb, flags[index+2]); \
++ \
++ if (port == INVALID_PORT) \
++ return 0;
+
-+ ip_set_printk("rehashing of set %s triggered: "
-+ "hashsize grows from %u to %u",
-+ set->name, map->hashsize, hashsize);
++UADT(ipportiphash, test, req->port, req->ip1)
++KADT(ipportiphash, test, ipaddr, port, ip1)
+
-+ tmp = kmalloc(sizeof(struct ip_set_ipporthash)
-+ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
-+ if (!tmp) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_ipporthash)
-+ + map->probes * sizeof(uint32_t));
-+ return -ENOMEM;
++static inline int
++__ipportip_add(struct ip_set_ipportiphash *map,
++ ip_set_ip_t hash_ip, ip_set_ip_t ip1)
++{
++ __u32 probe;
++ u_int16_t i;
++ struct ipportip *elem, *slot = NULL;
++
++ for (i = 0; i < map->probes; i++) {
++ probe = jhash_ip2(map, i, hash_ip, ip1) % map->hashsize;
++ elem = HARRAY_ELEM(map->members, struct ipportip *, probe);
++ if (elem->ip == hash_ip && elem->ip1 == ip1)
++ return -EEXIST;
++ if (!(slot || elem->ip || elem->ip1))
++ slot = elem;
++ /* There can be deleted entries, must check all slots */
+ }
-+ tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
-+ if (!tmp->members) {
-+ DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
-+ kfree(tmp);
-+ return -ENOMEM;
++ if (slot) {
++ slot->ip = hash_ip;
++ slot->ip1 = ip1;
++ map->elements++;
++ return 0;
+ }
-+ tmp->hashsize = hashsize;
-+ tmp->elements = 0;
-+ tmp->probes = map->probes;
-+ tmp->resize = map->resize;
-+ tmp->first_ip = map->first_ip;
-+ tmp->last_ip = map->last_ip;
-+ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
++ /* Trigger rehashing */
++ return -EAGAIN;
++}
+
-+ write_lock_bh(&set->lock);
-+ map = (struct ip_set_ipporthash *) set->data; /* Play safe */
-+ for (i = 0; i < map->hashsize && res == 0; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ if (*elem)
-+ res = __add_haship(tmp, *elem);
-+ }
-+ if (res) {
-+ /* Failure, try again */
-+ write_unlock_bh(&set->lock);
-+ harray_free(tmp->members);
-+ kfree(tmp);
-+ goto again;
-+ }
++static inline int
++__ipportiphash_add(struct ip_set_ipportiphash *map,
++ struct ipportip *elem)
++{
++ return __ipportip_add(map, elem->ip, elem->ip1);
++}
+
-+ /* Success at resizing! */
-+ members = map->members;
++static inline int
++ipportiphash_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
++{
++ struct ip_set_ipportiphash *map = set->data;
++
++ if (map->elements > limit)
++ return -ERANGE;
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
+
-+ map->hashsize = tmp->hashsize;
-+ map->members = tmp->members;
-+ write_unlock_bh(&set->lock);
++ *hash_ip = pack_ip_port(map, ip, port);
++ if (!(*hash_ip || ip1))
++ return -ERANGE;
++
++ return __ipportip_add(map, *hash_ip, ip1);
++}
+
-+ harray_free(members);
-+ kfree(tmp);
++UADT(ipportiphash, add, req->port, req->ip1)
++KADT(ipportiphash, add, ipaddr, port, ip1)
+
-+ return 0;
++static inline void
++__ipportiphash_retry(struct ip_set_ipportiphash *tmp,
++ struct ip_set_ipportiphash *map)
++{
++ tmp->first_ip = map->first_ip;
++ tmp->last_ip = map->last_ip;
+}
+
++HASH_RETRY2(ipportiphash, struct ipportip)
++
+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t *hash_ip)
++ipportiphash_del(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++ struct ip_set_ipportiphash *map = set->data;
+ ip_set_ip_t id;
-+ ip_set_ip_t *elem;
++ struct ipportip *elem;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
+
-+ id = hash_id(set, ip, port, hash_ip);
++ id = ipportiphash_id(set, hash_ip, ip, port, ip1);
+
+ if (id == UINT_MAX)
+ return -EEXIST;
-+
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
-+ *elem = 0;
++
++ elem = HARRAY_ELEM(map->members, struct ipportip *, id);
++ elem->ip = elem->ip1 = 0;
+ map->elements--;
+
+ return 0;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipporthash *req =
-+ (struct ip_set_req_ipporthash *) data;
++UADT(ipportiphash, del, req->port, req->ip1)
++KADT(ipportiphash, del, ipaddr, port, ip1)
+
-+ if (size != sizeof(struct ip_set_req_ipporthash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipporthash),
-+ size);
-+ return -EINVAL;
++static inline int
++__ipportiphash_create(const struct ip_set_req_ipportiphash_create *req,
++ struct ip_set_ipportiphash *map)
++{
++ if (req->to - req->from > MAX_RANGE) {
++ ip_set_printk("range too big, %d elements (max %d)",
++ req->to - req->from + 1, MAX_RANGE+1);
++ return -ENOEXEC;
+ }
-+ return __delip(set, req->ip, req->port, hash_ip);
++ map->first_ip = req->from;
++ map->last_ip = req->to;
++ return 0;
+}
+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
++HASH_CREATE(ipportiphash, struct ipportip)
++HASH_DESTROY(ipportiphash)
++HASH_FLUSH(ipportiphash, struct ipportip)
++
++static inline void
++__ipportiphash_list_header(const struct ip_set_ipportiphash *map,
++ struct ip_set_req_ipportiphash_create *header)
+{
-+ ip_set_ip_t port;
++ header->from = map->first_ip;
++ header->to = map->last_ip;
++}
+
-+ if (flags[index+1] == 0)
-+ return -EINVAL;
++HASH_LIST_HEADER(ipportiphash)
++HASH_LIST_MEMBERS_SIZE(ipportiphash, struct ipportip)
++HASH_LIST_MEMBERS_MEMCPY(ipportiphash, struct ipportip)
+
-+ port = get_port(skb, flags[index+1]);
++IP_SET_RTYPE(ipportiphash, IPSET_TYPE_IP | IPSET_TYPE_PORT
++ | IPSET_TYPE_IP1 | IPSET_DATA_TRIPLE)
+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ NIPQUAD(ip_hdr(skb)->saddr),
-+ NIPQUAD(ip_hdr(skb)->daddr));
-+#else
-+ NIPQUAD(skb->nh.iph->saddr),
-+ NIPQUAD(skb->nh.iph->daddr));
-+#endif
-+ DP("flag %s port %u",
-+ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
-+ port);
-+ if (port == INVALID_PORT)
-+ return -EINVAL;
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("ipportiphash type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
+
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ port,
-+ hash_ip);
-+}
++REGISTER_MODULE(ipportiphash)
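The ipportiphash type added above stores each (ip,port,ip1) triple in an open-addressed array probed with several independently seeded jhash values (map->initval); deleted entries leave holes, so every probe position has to be checked before concluding that an element is absent, and the first free slot seen is remembered for insertion. A rough userspace sketch of that add path follows; struct toy_ipportip, jhash2words_sim() and the seed handling are invented stand-ins, not the kernel helpers.

/* Illustrative sketch only -- not the kernel ipset API. */
#include <stdint.h>

struct toy_ipportip {
	uint32_t ip;	/* packed ip+port, 0 == empty */
	uint32_t ip1;
};

static uint32_t jhash2words_sim(uint32_t a, uint32_t b, uint32_t seed)
{
	uint32_t h = seed ^ a;

	h = (h ^ (h >> 16)) * 0x45d9f3b;
	h ^= b;
	h = (h ^ (h >> 16)) * 0x45d9f3b;
	return h ^ (h >> 16);
}

/* Returns 0 on success, 1 if the element already exists,
 * -1 if every probe position is occupied (caller must rehash). */
static int toy_add(struct toy_ipportip *tbl, uint32_t hashsize,
		   const uint32_t *initval, unsigned probes,
		   uint32_t ipport, uint32_t ip1)
{
	struct toy_ipportip *slot = NULL, *e;
	unsigned i;

	for (i = 0; i < probes; i++) {
		e = &tbl[jhash2words_sim(ipport, ip1, initval[i]) % hashsize];
		if (e->ip == ipport && e->ip1 == ip1)
			return 1;	/* already stored */
		if (!slot && !e->ip && !e->ip1)
			slot = e;	/* remember first free slot */
		/* keep probing: deleted entries leave holes */
	}
	if (slot) {
		slot->ip = ipport;
		slot->ip1 = ip1;
		return 0;
	}
	return -1;			/* trigger rehashing */
}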
+--- /dev/null
++++ b/net/ipv4/netfilter/ip_set_ipportnethash.c
+@@ -0,0 +1,304 @@
++/* Copyright (C) 2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module implementing an ip+port+net hash set */
++
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/ip.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++#include <linux/random.h>
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++#include <net/ip.h>
++
++#include <linux/netfilter_ipv4/ip_set_ipportnethash.h>
++#include <linux/netfilter_ipv4/ip_set_getport.h>
++
++static int limit = MAX_RANGE;
++
++#define jhash_ip2(map, i, ipport, ip1) \
++ jhash_2words(ipport, ip1, *(map->initval + i))
++
++static inline __u32
++ipportnethash_id_cidr(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port,
++ ip_set_ip_t ip1, uint8_t cidr)
+{
-+ struct ip_set_req_ipporthash_create *req =
-+ (struct ip_set_req_ipporthash_create *) data;
-+ struct ip_set_ipporthash *map;
-+ uint16_t i;
++ struct ip_set_ipportnethash *map = set->data;
++ __u32 id;
++ u_int16_t i;
++ struct ipportip *elem;
+
-+ if (size != sizeof(struct ip_set_req_ipporthash_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipporthash_create),
-+ size);
-+ return -EINVAL;
++ *hash_ip = pack_ip_port(map, ip, port);
++ DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
++ set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
++ ip1 = pack_ip_cidr(ip1, cidr);
++ if (!(*hash_ip || ip1))
++ return UINT_MAX;
++
++ for (i = 0; i < map->probes; i++) {
++ id = jhash_ip2(map, i, *hash_ip, ip1) % map->hashsize;
++ DP("hash key: %u", id);
++ elem = HARRAY_ELEM(map->members, struct ipportip *, id);
++ if (elem->ip == *hash_ip && elem->ip1 == ip1)
++ return id;
++ /* No shortcut - there can be deleted entries. */
+ }
++ return UINT_MAX;
++}
+
-+ if (req->hashsize < 1) {
-+ ip_set_printk("hashsize too small");
-+ return -ENOEXEC;
-+ }
++static inline __u32
++ipportnethash_id(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
++{
++ struct ip_set_ipportnethash *map = set->data;
++ __u32 id = UINT_MAX;
++ int i;
+
-+ if (req->probes < 1) {
-+ ip_set_printk("probes too small");
-+ return -ENOEXEC;
++ for (i = 0; i < 30 && map->cidr[i]; i++) {
++ id = ipportnethash_id_cidr(set, hash_ip, ip, port, ip1,
++ map->cidr[i]);
++ if (id != UINT_MAX)
++ break;
+ }
++ return id;
++}
+
-+ map = kmalloc(sizeof(struct ip_set_ipporthash)
-+ + req->probes * sizeof(uint32_t), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_ipporthash)
-+ + req->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ for (i = 0; i < req->probes; i++)
-+ get_random_bytes(((uint32_t *) map->initval)+i, 4);
-+ map->elements = 0;
-+ map->hashsize = req->hashsize;
-+ map->probes = req->probes;
-+ map->resize = req->resize;
-+ map->first_ip = req->from;
-+ map->last_ip = req->to;
-+ map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
-+ kfree(map);
-+ return -ENOMEM;
-+ }
++static inline int
++ipportnethash_test_cidr(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port,
++ ip_set_ip_t ip1, uint8_t cidr)
++{
++ struct ip_set_ipportnethash *map = set->data;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
+
-+ set->data = map;
-+ return 0;
++ return (ipportnethash_id_cidr(set, hash_ip, ip, port, ip1,
++ cidr) != UINT_MAX);
+}
+
-+static void destroy(struct ip_set *set)
++static inline int
++ipportnethash_test(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+
-+ harray_free(map->members);
-+ kfree(map);
++ struct ip_set_ipportnethash *map = set->data;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
+
-+ set->data = NULL;
++ return (ipportnethash_id(set, hash_ip, ip, port, ip1) != UINT_MAX);
+}
+
-+static void flush(struct ip_set *set)
++static int
++ipportnethash_utest(struct ip_set *set, const void *data, u_int32_t size,
++ ip_set_ip_t *hash_ip)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
-+ map->elements = 0;
++ const struct ip_set_req_ipportnethash *req = data;
++
++ if (req->cidr <= 0 || req->cidr > 32)
++ return -EINVAL;
++ return (req->cidr == 32
++ ? ipportnethash_test(set, hash_ip, req->ip, req->port,
++ req->ip1)
++ : ipportnethash_test_cidr(set, hash_ip, req->ip, req->port,
++ req->ip1, req->cidr));
+}
+
-+static void list_header(const struct ip_set *set, void *data)
++#define KADT_CONDITION \
++ ip_set_ip_t port, ip1; \
++ \
++ if (flags[index+2] == 0) \
++ return 0; \
++ \
++ port = get_port(skb, flags[index+1]); \
++ ip1 = ipaddr(skb, flags[index+2]); \
++ \
++ if (port == INVALID_PORT) \
++ return 0;
++
++KADT(ipportnethash, test, ipaddr, port, ip1)
++
++static inline int
++__ipportnet_add(struct ip_set_ipportnethash *map,
++ ip_set_ip_t hash_ip, ip_set_ip_t ip1)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ struct ip_set_req_ipporthash_create *header =
-+ (struct ip_set_req_ipporthash_create *) data;
++ __u32 probe;
++ u_int16_t i;
++ struct ipportip *elem, *slot = NULL;
+
-+ header->hashsize = map->hashsize;
-+ header->probes = map->probes;
-+ header->resize = map->resize;
-+ header->from = map->first_ip;
-+ header->to = map->last_ip;
++ for (i = 0; i < map->probes; i++) {
++ probe = jhash_ip2(map, i, hash_ip, ip1) % map->hashsize;
++ elem = HARRAY_ELEM(map->members, struct ipportip *, probe);
++ if (elem->ip == hash_ip && elem->ip1 == ip1)
++ return -EEXIST;
++ if (!(slot || elem->ip || elem->ip1))
++ slot = elem;
++ /* There can be deleted entries, must check all slots */
++ }
++ if (slot) {
++ slot->ip = hash_ip;
++ slot->ip1 = ip1;
++ map->elements++;
++ return 0;
++ }
++ /* Trigger rehashing */
++ return -EAGAIN;
+}
+
-+static int list_members_size(const struct ip_set *set)
++static inline int
++__ipportnethash_add(struct ip_set_ipportnethash *map,
++ struct ipportip *elem)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+
-+ return (map->hashsize * sizeof(ip_set_ip_t));
++ return __ipportnet_add(map, elem->ip, elem->ip1);
+}
+
-+static void list_members(const struct ip_set *set, void *data)
++static inline int
++ipportnethash_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port,
++ ip_set_ip_t ip1, uint8_t cidr)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ ip_set_ip_t i, *elem;
++ struct ip_set_ipportnethash *map = set->data;
++ struct ipportip;
++ int ret;
++
++ if (map->elements > limit)
++ return -ERANGE;
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++ if (cidr <= 0 || cidr >= 32)
++ return -EINVAL;
++ if (map->nets[cidr-1] == UINT16_MAX)
++ return -ERANGE;
+
-+ for (i = 0; i < map->hashsize; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ ((ip_set_ip_t *)data)[i] = *elem;
++ *hash_ip = pack_ip_port(map, ip, port);
++ ip1 = pack_ip_cidr(ip1, cidr);
++ if (!(*hash_ip || ip1))
++ return -ERANGE;
++
++ ret =__ipportnet_add(map, *hash_ip, ip1);
++ if (ret == 0) {
++ if (!map->nets[cidr-1]++)
++ add_cidr_size(map->cidr, cidr);
++ map->elements++;
+ }
++ return ret;
+}
+
-+static struct ip_set_type ip_set_ipporthash = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_DATA_DOUBLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_ipporthash),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .retry = &retry,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_ipporthash_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++#undef KADT_CONDITION
++#define KADT_CONDITION \
++ struct ip_set_ipportnethash *map = set->data; \
++ uint8_t cidr = map->cidr[0] ? map->cidr[0] : 31; \
++ ip_set_ip_t port, ip1; \
++ \
++ if (flags[index+2] == 0) \
++ return 0; \
++ \
++ port = get_port(skb, flags[index+1]); \
++ ip1 = ipaddr(skb, flags[index+2]); \
++ \
++ if (port == INVALID_PORT) \
++ return 0;
+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("ipporthash type of IP sets");
-+module_param(limit, int, 0600);
-+MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
++UADT(ipportnethash, add, req->port, req->ip1, req->cidr)
++KADT(ipportnethash, add, ipaddr, port, ip1, cidr)
+
-+static int __init ip_set_ipporthash_init(void)
++static inline void
++__ipportnethash_retry(struct ip_set_ipportnethash *tmp,
++ struct ip_set_ipportnethash *map)
+{
-+ return ip_set_register_set_type(&ip_set_ipporthash);
++ tmp->first_ip = map->first_ip;
++ tmp->last_ip = map->last_ip;
++ memcpy(tmp->cidr, map->cidr, sizeof(tmp->cidr));
++ memcpy(tmp->nets, map->nets, sizeof(tmp->nets));
+}
+
-+static void __exit ip_set_ipporthash_fini(void)
++HASH_RETRY2(ipportnethash, struct ipportip)
++
++static inline int
++ipportnethash_del(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port,
++ ip_set_ip_t ip1, uint8_t cidr)
+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_ipporthash);
++ struct ip_set_ipportnethash *map = set->data;
++ ip_set_ip_t id;
++ struct ipportip *elem;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++ if (!ip)
++ return -ERANGE;
++ if (cidr <= 0 || cidr >= 32)
++ return -EINVAL;
++
++ id = ipportnethash_id_cidr(set, hash_ip, ip, port, ip1, cidr);
++
++ if (id == UINT_MAX)
++ return -EEXIST;
++
++ elem = HARRAY_ELEM(map->members, struct ipportip *, id);
++ elem->ip = elem->ip1 = 0;
++ map->elements--;
++ if (!map->nets[cidr-1]--)
++ del_cidr_size(map->cidr, cidr);
++
++ return 0;
++}
++
++UADT(ipportnethash, del, req->port, req->ip1, req->cidr)
++KADT(ipportnethash, del, ipaddr, port, ip1, cidr)
++
++static inline int
++__ipportnethash_create(const struct ip_set_req_ipportnethash_create *req,
++ struct ip_set_ipportnethash *map)
++{
++ if (req->to - req->from > MAX_RANGE) {
++ ip_set_printk("range too big, %d elements (max %d)",
++ req->to - req->from + 1, MAX_RANGE+1);
++ return -ENOEXEC;
++ }
++ map->first_ip = req->from;
++ map->last_ip = req->to;
++ memset(map->cidr, 0, sizeof(map->cidr));
++ memset(map->nets, 0, sizeof(map->nets));
++ return 0;
+}
+
-+module_init(ip_set_ipporthash_init);
-+module_exit(ip_set_ipporthash_fini);
++HASH_CREATE(ipportnethash, struct ipportip)
++HASH_DESTROY(ipportnethash)
++HASH_FLUSH_CIDR(ipportnethash, struct ipportip);
++
++static inline void
++__ipportnethash_list_header(const struct ip_set_ipportnethash *map,
++ struct ip_set_req_ipportnethash_create *header)
++{
++ header->from = map->first_ip;
++ header->to = map->last_ip;
++}
++
++HASH_LIST_HEADER(ipportnethash)
++
++HASH_LIST_MEMBERS_SIZE(ipportnethash, struct ipportip)
++HASH_LIST_MEMBERS_MEMCPY(ipportnethash, struct ipportip)
++
++IP_SET_RTYPE(ipportnethash, IPSET_TYPE_IP | IPSET_TYPE_PORT
++ | IPSET_TYPE_IP1 | IPSET_DATA_TRIPLE)
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("ipportnethash type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
++
++REGISTER_MODULE(ipportnethash)
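ipportnethash above additionally matches the second address against stored networks: the set records which prefix lengths are currently in use (map->cidr with per-length use counts in map->nets), and a lookup masks the address with each of those prefixes in turn until one of the resulting network addresses is found in the hash. A small illustrative sketch of that idea follows, with invented toy_* names and a callback standing in for the hash probe; the kernel's actual ip/cidr packing may differ.

/* Illustrative sketch only -- not the kernel ipset API. */
#include <stdint.h>

static uint32_t toy_netaddr(uint32_t ip, uint8_t cidr)
{
	return cidr ? ip & (0xffffffffu << (32 - cidr)) : 0;
}

/* Try each prefix length currently present in the set (cidr[] is a
 * 0-terminated list, like map->cidr above) until a stored network
 * matches; lookup() stands in for the multi-probe hash lookup.
 * A prefix length is added to cidr[] when its use count goes 0 -> 1
 * and dropped when it falls back to 0, so only prefixes actually in
 * use are tried. */
static int toy_match(uint32_t ip, const uint8_t *cidr,
		     int (*lookup)(uint32_t netaddr, uint8_t cidr))
{
	int i;

	for (i = 0; i < 30 && cidr[i]; i++)
		if (lookup(toy_netaddr(ip, cidr[i]), cidr[i]))
			return 1;
	return 0;
}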
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_iptree.c
-@@ -0,0 +1,612 @@
-+/* Copyright (C) 2005 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+@@ -0,0 +1,466 @@
++/* Copyright (C) 2005-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
@@ -4525,24 +5067,20 @@
+
+/* Kernel module implementing an IP set type: the iptree type */
+
-+#include <linux/version.h>
+#include <linux/module.h>
++#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
++#include <linux/timer.h>
+
-+/* Backward compatibility */
-+#ifndef __nocast
-+#define __nocast
-+#endif
-+
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
+#include <linux/netfilter_ipv4/ip_set_iptree.h>
+
+static int limit = MAX_RANGE;
@@ -4553,13 +5091,9 @@
+ * to delete the gc timer at destroying/flushing a set */
+#define IPTREE_DESTROY_SLEEP 100
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+static struct kmem_cache *branch_cachep;
-+static struct kmem_cache *leaf_cachep;
-+#else
-+static kmem_cache_t *branch_cachep;
-+static kmem_cache_t *leaf_cachep;
-+#endif
++static __KMEM_CACHE_T__ *branch_cachep;
++static __KMEM_CACHE_T__ *leaf_cachep;
++
+
+#if defined(__LITTLE_ENDIAN)
+#define ABCD(a,b,c,d,addrp) do { \
@@ -4587,9 +5121,9 @@
+} while (0)
+
+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++iptree_test(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptree *map = set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
@@ -4597,7 +5131,7 @@
+
+ if (!ip)
+ return -ERANGE;
-+
++
+ *hash_ip = ip;
+ ABCD(a, b, c, d, hash_ip);
+ DP("%u %u %u %u timeout %u", a, b, c, d, map->timeout);
@@ -4610,53 +5144,10 @@
+ || time_after(dtree->expires[d], jiffies));
+}
+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iptree *req =
-+ (struct ip_set_req_iptree *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptree)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iptree),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ int res;
-+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ NIPQUAD(ip_hdr(skb)->saddr),
-+ NIPQUAD(ip_hdr(skb)->daddr));
-+#else
-+ NIPQUAD(skb->nh.iph->saddr),
-+ NIPQUAD(skb->nh.iph->daddr));
-+#endif
++#define KADT_CONDITION
+
-+ res = __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+ return (res < 0 ? 0 : res);
-+}
++UADT(iptree, test)
++KADT(iptree, test, ipaddr)
+
+#define ADDIP_WALK(map, elem, branch, type, cachep) do { \
+ if ((map)->tree[elem]) { \
@@ -4671,24 +5162,24 @@
+ (map)->tree[elem] = branch; \
+ DP("alloc %u", elem); \
+ } \
-+} while (0)
++} while (0)
+
+static inline int
-+__addip(struct ip_set *set, ip_set_ip_t ip, unsigned int timeout,
-+ ip_set_ip_t *hash_ip)
++iptree_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, unsigned int timeout)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptree *map = set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
+ unsigned char a,b,c,d;
+ int ret = 0;
-+
++
+ if (!ip || map->elements >= limit)
+ /* We could call the garbage collector
+ * but it's probably overkill */
+ return -ERANGE;
-+
++
+ *hash_ip = ip;
+ ABCD(a, b, c, d, hash_ip);
+ DP("%u %u %u %u timeout %u", a, b, c, d, timeout);
@@ -4698,6 +5189,8 @@
+ if (dtree->expires[d]
+ && (!map->timeout || time_after(dtree->expires[d], jiffies)))
+ ret = -EEXIST;
++ if (map->timeout && timeout == 0)
++ timeout = map->timeout;
+ dtree->expires[d] = map->timeout ? (timeout * HZ + jiffies) : 1;
+ /* Lottery: I won! */
+ if (dtree->expires[d] == 0)
@@ -4708,47 +5201,8 @@
+ return ret;
+}
+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_req_iptree *req =
-+ (struct ip_set_req_iptree *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptree)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iptree),
-+ size);
-+ return -EINVAL;
-+ }
-+ DP("%u.%u.%u.%u %u", HIPQUAD(req->ip), req->timeout);
-+ return __addip(set, req->ip,
-+ req->timeout ? req->timeout : map->timeout,
-+ hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+
-+ return __addip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ map->timeout,
-+ hash_ip);
-+}
++UADT(iptree, add, req->timeout)
++KADT(iptree, add, ipaddr, 0)
+
+#define DELIP_WALK(map, elem, branch) do { \
+ if ((map)->tree[elem]) { \
@@ -4758,17 +5212,17 @@
+} while (0)
+
+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++iptree_del(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptree *map = set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
+ unsigned char a,b,c,d;
-+
++
+ if (!ip)
+ return -ERANGE;
-+
++
+ *hash_ip = ip;
+ ABCD(a, b, c, d, hash_ip);
+ DELIP_WALK(map, a, btree);
@@ -4783,40 +5237,8 @@
+ return -EEXIST;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iptree *req =
-+ (struct ip_set_req_iptree *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptree)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iptree),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT(iptree, del)
++KADT(iptree, del, ipaddr)
+
+#define LOOP_WALK_BEGIN(map, i, branch) \
+ for (i = 0; i < 256; i++) { \
@@ -4826,10 +5248,11 @@
+
+#define LOOP_WALK_END }
+
-+static void ip_tree_gc(unsigned long ul_set)
++static void
++ip_tree_gc(unsigned long ul_set)
+{
-+ struct ip_set *set = (void *) ul_set;
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set *set = (struct ip_set *) ul_set;
++ struct ip_set_iptree *map = set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
@@ -4891,14 +5314,15 @@
+ }
+ LOOP_WALK_END;
+ write_unlock_bh(&set->lock);
-+
++
+ map->gc.expires = jiffies + map->gc_interval * HZ;
+ add_timer(&map->gc);
+}
+
-+static inline void init_gc_timer(struct ip_set *set)
++static inline void
++init_gc_timer(struct ip_set *set)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptree *map = set->data;
+
+ /* Even if there is no timeout for the entries,
+ * we still have to call gc because delete
@@ -4911,22 +5335,22 @@
+ add_timer(&map->gc);
+}
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static int
++iptree_create(struct ip_set *set, const void *data, u_int32_t size)
+{
-+ struct ip_set_req_iptree_create *req =
-+ (struct ip_set_req_iptree_create *) data;
++ const struct ip_set_req_iptree_create *req = data;
+ struct ip_set_iptree *map;
+
+ if (size != sizeof(struct ip_set_req_iptree_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
++ ip_set_printk("data length wrong (want %zu, have %lu)",
+ sizeof(struct ip_set_req_iptree_create),
-+ size);
++ (unsigned long)size);
+ return -EINVAL;
+ }
+
+ map = kmalloc(sizeof(struct ip_set_iptree), GFP_KERNEL);
+ if (!map) {
-+ DP("out of memory for %d bytes",
++ DP("out of memory for %zu bytes",
+ sizeof(struct ip_set_iptree));
+ return -ENOMEM;
+ }
@@ -4940,7 +5364,8 @@
+ return 0;
+}
+
-+static void __flush(struct ip_set_iptree *map)
++static inline void
++__flush(struct ip_set_iptree *map)
+{
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
@@ -4959,9 +5384,10 @@
+ map->elements = 0;
+}
+
-+static void destroy(struct ip_set *set)
++static void
++iptree_destroy(struct ip_set *set)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptree *map = set->data;
+
+ /* gc might be running */
+ while (!del_timer(&map->gc))
@@ -4971,11 +5397,12 @@
+ set->data = NULL;
+}
+
-+static void flush(struct ip_set *set)
++static void
++iptree_flush(struct ip_set *set)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptree *map = set->data;
+ unsigned int timeout = map->timeout;
-+
++
+ /* gc might be running */
+ while (!del_timer(&map->gc))
+ msleep(IPTREE_DESTROY_SLEEP);
@@ -4986,18 +5413,19 @@
+ init_gc_timer(set);
+}
+
-+static void list_header(const struct ip_set *set, void *data)
++static void
++iptree_list_header(const struct ip_set *set, void *data)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_req_iptree_create *header =
-+ (struct ip_set_req_iptree_create *) data;
++ const struct ip_set_iptree *map = set->data;
++ struct ip_set_req_iptree_create *header = data;
+
+ header->timeout = map->timeout;
+}
+
-+static int list_members_size(const struct ip_set *set)
++static int
++iptree_list_members_size(const struct ip_set *set)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ const struct ip_set_iptree *map = set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
@@ -5020,9 +5448,10 @@
+ return (count * sizeof(struct ip_set_req_iptree));
+}
+
-+static void list_members(const struct ip_set *set, void *data)
++static void
++iptree_list_members(const struct ip_set *set, void *data)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ const struct ip_set_iptree *map = set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
@@ -5036,7 +5465,7 @@
+ for (d = 0; d < 256; d++) {
+ if (dtree->expires[d]
+ && (!map->timeout || time_after(dtree->expires[d], jiffies))) {
-+ entry = (struct ip_set_req_iptree *)(data + offset);
++ entry = data + offset;
+ entry->ip = ((a << 24) | (b << 16) | (c << 8) | d);
+ entry->timeout = !map->timeout ? 0
+ : (dtree->expires[d] - jiffies)/HZ;
@@ -5048,26 +5477,7 @@
+ LOOP_WALK_END;
+}
+
-+static struct ip_set_type ip_set_iptree = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_iptree),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_iptree_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_TYPE(iptree, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -5078,30 +5488,16 @@
+static int __init ip_set_iptree_init(void)
+{
+ int ret;
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ branch_cachep = kmem_cache_create("ip_set_iptreeb",
-+ sizeof(struct ip_set_iptreeb),
-+ 0, 0, NULL);
-+#else
-+ branch_cachep = kmem_cache_create("ip_set_iptreeb",
-+ sizeof(struct ip_set_iptreeb),
-+ 0, 0, NULL, NULL);
-+#endif
++
++ branch_cachep = KMEM_CACHE_CREATE("ip_set_iptreeb",
++ sizeof(struct ip_set_iptreeb));
+ if (!branch_cachep) {
+ printk(KERN_ERR "Unable to create ip_set_iptreeb slab cache\n");
+ ret = -ENOMEM;
+ goto out;
+ }
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ leaf_cachep = kmem_cache_create("ip_set_iptreed",
-+ sizeof(struct ip_set_iptreed),
-+ 0, 0, NULL);
-+#else
-+ leaf_cachep = kmem_cache_create("ip_set_iptreed",
-+ sizeof(struct ip_set_iptreed),
-+ 0, 0, NULL, NULL);
-+#endif
++ leaf_cachep = KMEM_CACHE_CREATE("ip_set_iptreed",
++ sizeof(struct ip_set_iptreed));
+ if (!leaf_cachep) {
+ printk(KERN_ERR "Unable to create ip_set_iptreed slab cache\n");
+ ret = -ENOMEM;
@@ -5112,7 +5508,7 @@
+ goto out;
+
+ kmem_cache_destroy(leaf_cachep);
-+ free_branch:
++ free_branch:
+ kmem_cache_destroy(branch_cachep);
+ out:
+ return ret;
@@ -5130,7 +5526,7 @@
+module_exit(ip_set_iptree_fini);
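The iptree type above is a 256-way tree with one level per address octet; the leaf stores an expiry value (in jiffies) per final octet, where 0 means "not in the set" and 1 is used for entries without a timeout. A simplified userspace sketch of the same layout follows, assuming invented toy_* names and wall-clock time() in place of jiffies; the slab caches, locking and garbage collection are omitted.

/* Illustrative sketch only -- not the kernel ipset API. */
#include <stdint.h>
#include <stdlib.h>
#include <time.h>

struct toy_leaf   { time_t expires[256]; };		/* octet d */
struct toy_level3 { struct toy_leaf   *tree[256]; };	/* octet c */
struct toy_level2 { struct toy_level3 *tree[256]; };	/* octet b */
struct toy_iptree { struct toy_level2 *tree[256]; unsigned timeout; };

static int toy_test(const struct toy_iptree *set, uint32_t ip)
{
	unsigned a = ip >> 24 & 0xff, b = ip >> 16 & 0xff;
	unsigned c = ip >>  8 & 0xff, d = ip       & 0xff;
	const struct toy_leaf *leaf;

	if (!set->tree[a] || !set->tree[a]->tree[b]
	    || !set->tree[a]->tree[b]->tree[c])
		return 0;
	leaf = set->tree[a]->tree[b]->tree[c];
	/* present if set and, with timeouts enabled, not yet expired */
	return leaf->expires[d]
	       && (!set->timeout || leaf->expires[d] > time(NULL));
}

static int toy_add(struct toy_iptree *set, uint32_t ip, unsigned timeout)
{
	unsigned a = ip >> 24 & 0xff, b = ip >> 16 & 0xff;
	unsigned c = ip >>  8 & 0xff, d = ip       & 0xff;

	if (!set->tree[a] &&
	    !(set->tree[a] = calloc(1, sizeof(*set->tree[a]))))
		return -1;
	if (!set->tree[a]->tree[b] &&
	    !(set->tree[a]->tree[b] = calloc(1, sizeof(*set->tree[a]->tree[b]))))
		return -1;
	if (!set->tree[a]->tree[b]->tree[c] &&
	    !(set->tree[a]->tree[b]->tree[c] =
	      calloc(1, sizeof(*set->tree[a]->tree[b]->tree[c]))))
		return -1;
	set->tree[a]->tree[b]->tree[c]->expires[d] =
		set->timeout ? time(NULL) + timeout : 1;  /* 1 == no timeout */
	return 0;
}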
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_iptreemap.c
-@@ -0,0 +1,829 @@
+@@ -0,0 +1,708 @@
+/* Copyright (C) 2007 Sven Wegener <sven.wegener@stealer.net>
+ *
+ * This program is free software; you can redistribute it and/or modify it
@@ -5139,38 +5535,33 @@
+ */
+
+/* This modules implements the iptreemap ipset type. It uses bitmaps to
-+ * represent every single IPv4 address as a single bit. The bitmaps are managed
-+ * in a tree structure, where the first three octets of an addresses are used
-+ * as an index to find the bitmap and the last octet is used as the bit number.
++ * represent every single IPv4 address as a bit. The bitmaps are managed in a
++ * tree structure, where the first three octets of an address are used as an
++ * index to find the bitmap and the last octet is used as the bit number.
+ */
+
-+#include <linux/version.h>
++#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
++#include <linux/timer.h>
+
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
+#include <linux/netfilter_ipv4/ip_set_iptreemap.h>
+
+#define IPTREEMAP_DEFAULT_GC_TIME (5 * 60)
+#define IPTREEMAP_DESTROY_SLEEP (100)
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+static struct kmem_cache *cachep_b;
-+static struct kmem_cache *cachep_c;
-+static struct kmem_cache *cachep_d;
-+#else
-+static kmem_cache_t *cachep_b;
-+static kmem_cache_t *cachep_c;
-+static kmem_cache_t *cachep_d;
-+#endif
++static __KMEM_CACHE_T__ *cachep_b;
++static __KMEM_CACHE_T__ *cachep_c;
++static __KMEM_CACHE_T__ *cachep_d;
+
+static struct ip_set_iptreemap_d *fullbitmap_d;
+static struct ip_set_iptreemap_c *fullbitmap_c;
@@ -5319,9 +5710,6 @@
+#define LOOP_WALK_END_COUNT() \
+ }
+
-+#define MIN(a, b) (a < b ? a : b)
-+#define MAX(a, b) (a > b ? a : b)
-+
+#define GETVALUE1(a, a1, b1, r) \
+ (a == a1 ? b1 : r)
+
@@ -5391,9 +5779,9 @@
+}
+
+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++iptreemap_test(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5410,40 +5798,13 @@
+ return !!test_bit(d, (void *) dtree->bitmap);
+}
+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptreemap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
-+ return -EINVAL;
-+ }
-+
-+ return __testip(set, req->start, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
-+{
-+ int res;
++#define KADT_CONDITION
+
-+ res = __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+
-+ return (res < 0 ? 0 : res);
-+}
++UADT(iptreemap, test)
++KADT(iptreemap, test, ipaddr)
+
+static inline int
-+__addip_single(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++__addip_single(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
+ struct ip_set_iptreemap_b *btree;
@@ -5459,18 +5820,19 @@
+ ADDIP_WALK(btree, b, ctree, struct ip_set_iptreemap_c, cachep_c, fullbitmap_c);
+ ADDIP_WALK(ctree, c, dtree, struct ip_set_iptreemap_d, cachep_d, fullbitmap_d);
+
-+ if (test_and_set_bit(d, (void *) dtree->bitmap))
++ if (__test_and_set_bit(d, (void *) dtree->bitmap))
+ return -EEXIST;
+
-+ set_bit(b, (void *) btree->dirty);
++ __set_bit(b, (void *) btree->dirty);
+
+ return 0;
+}
+
+static inline int
-+__addip_range(struct ip_set *set, ip_set_ip_t start, ip_set_ip_t end, ip_set_ip_t *hash_ip)
++iptreemap_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t start, ip_set_ip_t end)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5479,7 +5841,7 @@
+ unsigned char a2, b2, c2, d2;
+
+ if (start == end)
-+ return __addip_single(set, start, hash_ip);
++ return __addip_single(set, hash_ip, start);
+
+ *hash_ip = start;
+
@@ -5491,8 +5853,8 @@
+ ADDIP_RANGE_LOOP(btree, b, GETVALUE1(a, a1, b1, 0), GETVALUE1(a, a2, b2, 255), CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2), ctree, fullbitmap_c, cachep_c, free_c) {
+ ADDIP_RANGE_LOOP(ctree, c, GETVALUE2(a, b, a1, b1, c1, 0), GETVALUE2(a, b, a2, b2, c2, 255), CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2), dtree, fullbitmap_d, cachep_d, free_d) {
+ for (d = GETVALUE3(a, b, c, a1, b1, c1, d1, 0); d <= GETVALUE3(a, b, c, a2, b2, c2, d2, 255); d++)
-+ set_bit(d, (void *) dtree->bitmap);
-+ set_bit(b, (void *) btree->dirty);
++ __set_bit(d, (void *) dtree->bitmap);
++ __set_bit(b, (void *) btree->dirty);
+ } ADDIP_RANGE_LOOP_END();
+ } ADDIP_RANGE_LOOP_END();
+ } ADDIP_RANGE_LOOP_END();
@@ -5500,39 +5862,14 @@
+ return 0;
+}
+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptreemap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
-+ return -EINVAL;
-+ }
-+
-+ return __addip_range(set, MIN(req->start, req->end), MAX(req->start, req->end), hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
-+{
-+
-+ return __addip_single(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT0(iptreemap, add, min(req->ip, req->end), max(req->ip, req->end))
++KADT(iptreemap, add, ipaddr, ip)
+
+static inline int
-+__delip_single(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip, unsigned int __nocast flags)
++__delip_single(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, gfp_t flags)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5546,18 +5883,19 @@
+ DELIP_WALK(btree, b, ctree, cachep_c, fullbitmap_c, flags);
+ DELIP_WALK(ctree, c, dtree, cachep_d, fullbitmap_d, flags);
+
-+ if (!test_and_clear_bit(d, (void *) dtree->bitmap))
++ if (!__test_and_clear_bit(d, (void *) dtree->bitmap))
+ return -EEXIST;
+
-+ set_bit(b, (void *) btree->dirty);
++ __set_bit(b, (void *) btree->dirty);
+
+ return 0;
+}
+
+static inline int
-+__delip_range(struct ip_set *set, ip_set_ip_t start, ip_set_ip_t end, ip_set_ip_t *hash_ip, unsigned int __nocast flags)
++iptreemap_del(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t start, ip_set_ip_t end, gfp_t flags)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5566,7 +5904,7 @@
+ unsigned char a2, b2, c2, d2;
+
+ if (start == end)
-+ return __delip_single(set, start, hash_ip, flags);
++ return __delip_single(set, hash_ip, start, flags);
+
+ *hash_ip = start;
+
@@ -5578,8 +5916,8 @@
+ DELIP_RANGE_LOOP(btree, b, GETVALUE1(a, a1, b1, 0), GETVALUE1(a, a2, b2, 255), CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2), ctree, fullbitmap_c, cachep_c, free_c, flags) {
+ DELIP_RANGE_LOOP(ctree, c, GETVALUE2(a, b, a1, b1, c1, 0), GETVALUE2(a, b, a2, b2, c2, 255), CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2), dtree, fullbitmap_d, cachep_d, free_d, flags) {
+ for (d = GETVALUE3(a, b, c, a1, b1, c1, d1, 0); d <= GETVALUE3(a, b, c, a2, b2, c2, d2, 255); d++)
-+ clear_bit(d, (void *) dtree->bitmap);
-+ set_bit(b, (void *) btree->dirty);
++ __clear_bit(d, (void *) dtree->bitmap);
++ __set_bit(b, (void *) btree->dirty);
+ } DELIP_RANGE_LOOP_END();
+ } DELIP_RANGE_LOOP_END();
+ } DELIP_RANGE_LOOP_END();
@@ -5587,34 +5925,8 @@
+ return 0;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptreemap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
-+ return -EINVAL;
-+ }
-+
-+ return __delip_range(set, MIN(req->start, req->end), MAX(req->start, req->end), hash_ip, GFP_KERNEL);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
-+{
-+ return __delip_single(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip,
-+ GFP_ATOMIC);
-+}
++UADT0(iptreemap, del, min(req->ip, req->end), max(req->ip, req->end), GFP_KERNEL)
++KADT(iptreemap, del, ipaddr, ip, GFP_ATOMIC)
+
+/* Check the status of the bitmap
+ * -1 == all bits cleared
@@ -5638,7 +5950,7 @@
+gc(unsigned long addr)
+{
+ struct ip_set *set = (struct ip_set *) addr;
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5649,7 +5961,7 @@
+
+ LOOP_WALK_BEGIN_GC(map, a, btree, fullbitmap_b, cachep_b, i) {
+ LOOP_WALK_BEGIN_GC(btree, b, ctree, fullbitmap_c, cachep_c, j) {
-+ if (!test_and_clear_bit(b, (void *) btree->dirty))
++ if (!__test_and_clear_bit(b, (void *) btree->dirty))
+ continue;
+ LOOP_WALK_BEGIN_GC(ctree, c, dtree, fullbitmap_d, cachep_d, k) {
+ switch (bitmap_status(dtree)) {
@@ -5677,7 +5989,7 @@
+static inline void
+init_gc_timer(struct ip_set *set)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+
+ init_timer(&map->gc);
+ map->gc.data = (unsigned long) set;
@@ -5686,16 +5998,12 @@
+ add_timer(&map->gc);
+}
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static int
++iptreemap_create(struct ip_set *set, const void *data, u_int32_t size)
+{
-+ struct ip_set_req_iptreemap_create *req = (struct ip_set_req_iptreemap_create *) data;
++ const struct ip_set_req_iptreemap_create *req = data;
+ struct ip_set_iptreemap *map;
+
-+ if (size != sizeof(struct ip_set_req_iptreemap_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap_create), size);
-+ return -EINVAL;
-+ }
-+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
@@ -5708,7 +6016,8 @@
+ return 0;
+}
+
-+static inline void __flush(struct ip_set_iptreemap *map)
++static inline void
++__flush(struct ip_set_iptreemap *map)
+{
+ struct ip_set_iptreemap_b *btree;
+ unsigned int a;
@@ -5719,9 +6028,10 @@
+ LOOP_WALK_END();
+}
+
-+static void destroy(struct ip_set *set)
++static void
++iptreemap_destroy(struct ip_set *set)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+
+ while (!del_timer(&map->gc))
+ msleep(IPTREEMAP_DESTROY_SLEEP);
@@ -5732,9 +6042,10 @@
+ set->data = NULL;
+}
+
-+static void flush(struct ip_set *set)
++static void
++iptreemap_flush(struct ip_set *set)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+
+ while (!del_timer(&map->gc))
+ msleep(IPTREEMAP_DESTROY_SLEEP);
@@ -5746,17 +6057,19 @@
+ init_gc_timer(set);
+}
+
-+static void list_header(const struct ip_set *set, void *data)
++static void
++iptreemap_list_header(const struct ip_set *set, void *data)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
-+ struct ip_set_req_iptreemap_create *header = (struct ip_set_req_iptreemap_create *) data;
++ struct ip_set_iptreemap *map = set->data;
++ struct ip_set_req_iptreemap_create *header = data;
+
+ header->gc_interval = map->gc_interval;
+}
+
-+static int list_members_size(const struct ip_set *set)
++static int
++iptreemap_list_members_size(const struct ip_set *set)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5783,19 +6096,21 @@
+ return (count * sizeof(struct ip_set_req_iptreemap));
+}
+
-+static inline size_t add_member(void *data, size_t offset, ip_set_ip_t start, ip_set_ip_t end)
++static inline u_int32_t
++add_member(void *data, size_t offset, ip_set_ip_t start, ip_set_ip_t end)
+{
-+ struct ip_set_req_iptreemap *entry = (struct ip_set_req_iptreemap *) (data + offset);
++ struct ip_set_req_iptreemap *entry = data + offset;
+
-+ entry->start = start;
++ entry->ip = start;
+ entry->end = end;
+
+ return sizeof(*entry);
+}
+
-+static void list_members(const struct ip_set *set, void *data)
++static void
++iptreemap_list_members(const struct ip_set *set, void *data)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5830,26 +6145,7 @@
+ add_member(data, offset, start, end);
+}
+
-+static struct ip_set_type ip_set_iptreemap = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = create,
-+ .destroy = destroy,
-+ .flush = flush,
-+ .reqsize = sizeof(struct ip_set_req_iptreemap),
-+ .addip = addip,
-+ .addip_kernel = addip_kernel,
-+ .delip = delip,
-+ .delip_kernel = delip_kernel,
-+ .testip = testip,
-+ .testip_kernel = testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_iptreemap_create),
-+ .list_header = list_header,
-+ .list_members_size = list_members_size,
-+ .list_members = list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_TYPE(iptreemap, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sven Wegener <sven.wegener@stealer.net>");
@@ -5860,43 +6156,22 @@
+ int ret = -ENOMEM;
+ int a;
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ cachep_b = kmem_cache_create("ip_set_iptreemap_b",
-+ sizeof(struct ip_set_iptreemap_b),
-+ 0, 0, NULL);
-+#else
-+ cachep_b = kmem_cache_create("ip_set_iptreemap_b",
-+ sizeof(struct ip_set_iptreemap_b),
-+ 0, 0, NULL, NULL);
-+#endif
++ cachep_b = KMEM_CACHE_CREATE("ip_set_iptreemap_b",
++ sizeof(struct ip_set_iptreemap_b));
+ if (!cachep_b) {
+ ip_set_printk("Unable to create ip_set_iptreemap_b slab cache");
+ goto out;
+ }
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ cachep_c = kmem_cache_create("ip_set_iptreemap_c",
-+ sizeof(struct ip_set_iptreemap_c),
-+ 0, 0, NULL);
-+#else
-+ cachep_c = kmem_cache_create("ip_set_iptreemap_c",
-+ sizeof(struct ip_set_iptreemap_c),
-+ 0, 0, NULL, NULL);
-+#endif
++ cachep_c = KMEM_CACHE_CREATE("ip_set_iptreemap_c",
++ sizeof(struct ip_set_iptreemap_c));
+ if (!cachep_c) {
+ ip_set_printk("Unable to create ip_set_iptreemap_c slab cache");
+ goto outb;
+ }
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ cachep_d = kmem_cache_create("ip_set_iptreemap_d",
-+ sizeof(struct ip_set_iptreemap_d),
-+ 0, 0, NULL);
-+#else
-+ cachep_d = kmem_cache_create("ip_set_iptreemap_d",
-+ sizeof(struct ip_set_iptreemap_d),
-+ 0, 0, NULL, NULL);
-+#endif
++ cachep_d = KMEM_CACHE_CREATE("ip_set_iptreemap_d",
++ sizeof(struct ip_set_iptreemap_d));
+ if (!cachep_d) {
+ ip_set_printk("Unable to create ip_set_iptreemap_d slab cache");
+ goto outc;
@@ -5962,11 +6237,11 @@
+module_exit(ip_set_iptreemap_fini);
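iptreemap above replaces the per-address expiry leaf with a 256-bit bitmap indexed by the last octet, so only three pointer levels are needed and whole subtrees can be represented by shared "full" bitmaps when ranges are added. A minimal test-only sketch of that layout follows (toy_* names invented; the full-bitmap sharing, per-branch dirty flags and garbage collection are omitted). Adding an address works like the iptree sketch above, except that the leaf operation is a single set-bit.

/* Illustrative sketch only -- not the kernel ipset API. */
#include <stdint.h>

struct toy_d { unsigned long bitmap[256 / (8 * sizeof(unsigned long))]; };
struct toy_c { struct toy_d *tree[256]; };
struct toy_b { struct toy_c *tree[256]; };
struct toy_iptreemap { struct toy_b *tree[256]; };

#define TOY_BIT_WORD(n)	((n) / (8 * sizeof(unsigned long)))
#define TOY_BIT_MASK(n)	(1UL << ((n) % (8 * sizeof(unsigned long))))

static int toy_test(const struct toy_iptreemap *map, uint32_t ip)
{
	unsigned a = ip >> 24 & 0xff, b = ip >> 16 & 0xff;
	unsigned c = ip >>  8 & 0xff, d = ip       & 0xff;
	const struct toy_d *leaf;

	if (!map->tree[a] || !map->tree[a]->tree[b]
	    || !(leaf = map->tree[a]->tree[b]->tree[c]))
		return 0;
	return !!(leaf->bitmap[TOY_BIT_WORD(d)] & TOY_BIT_MASK(d));
}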
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_macipmap.c
-@@ -0,0 +1,375 @@
+@@ -0,0 +1,164 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Martin Josefsson <gandalf@wlug.westbo.se>
-+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ * Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
@@ -5978,41 +6253,29 @@
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
+#include <linux/if_ether.h>
-+#include <linux/vmalloc.h>
+
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
+#include <linux/netfilter_ipv4/ip_set_macipmap.h>
+
+static int
-+testip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
++macipmap_utest(struct ip_set *set, const void *data, u_int32_t size,
++ ip_set_ip_t *hash_ip)
+{
-+ struct ip_set_macipmap *map = (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table = (struct ip_set_macip *) map->members;
-+ struct ip_set_req_macipmap *req = (struct ip_set_req_macipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_macipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_macipmap),
-+ size);
-+ return -EINVAL;
-+ }
++ const struct ip_set_macipmap *map = set->data;
++ const struct ip_set_macip *table = map->members;
++ const struct ip_set_req_macipmap *req = data;
+
+ if (req->ip < map->first_ip || req->ip > map->last_ip)
+ return -ERANGE;
+
+ *hash_ip = req->ip;
+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
-+ set->name, HIPQUAD(req->ip), HIPQUAD(*hash_ip));
-+ if (test_bit(IPSET_MACIP_ISSET,
-+ (void *) &table[req->ip - map->first_ip].flags)) {
++ set->name, HIPQUAD(req->ip), HIPQUAD(*hash_ip));
++ if (table[req->ip - map->first_ip].match) {
+ return (memcmp(req->ethernet,
+ &table[req->ip - map->first_ip].ethernet,
+ ETH_ALEN) == 0);
@@ -6022,44 +6285,29 @@
+}
+
+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table =
-+ (struct ip_set_macip *) map->members;
++macipmap_ktest(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ const struct ip_set_macipmap *map = set->data;
++ const struct ip_set_macip *table = map->members;
+ ip_set_ip_t ip;
-+
-+ ip = ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr);
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr);
-+#endif
++
++ ip = ipaddr(skb, flags[index]);
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return 0;
+
-+ *hash_ip = ip;
++ *hash_ip = ip;
+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
-+ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
-+ if (test_bit(IPSET_MACIP_ISSET,
-+ (void *) &table[ip - map->first_ip].flags)) {
++ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
++ if (table[ip - map->first_ip].match) {
+ /* Is mac pointer valid?
+ * If so, compare... */
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
+ return (skb_mac_header(skb) >= skb->head
+ && (skb_mac_header(skb) + ETH_HLEN) <= skb->data
-+#else
-+ return (skb->mac.raw >= skb->head
-+ && (skb->mac.raw + ETH_HLEN) <= skb->data
-+#endif
+ && (memcmp(eth_hdr(skb)->h_source,
+ &table[ip - map->first_ip].ethernet,
+ ETH_ALEN) == 0));
@@ -6070,278 +6318,94 @@
+
+/* returns 0 on success */
+static inline int
-+__addip(struct ip_set *set,
-+ ip_set_ip_t ip, unsigned char *ethernet, ip_set_ip_t *hash_ip)
++macipmap_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, const unsigned char *ethernet)
+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table =
-+ (struct ip_set_macip *) map->members;
++ struct ip_set_macipmap *map = set->data;
++ struct ip_set_macip *table = map->members;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
-+ if (test_and_set_bit(IPSET_MACIP_ISSET,
-+ (void *) &table[ip - map->first_ip].flags))
++ if (table[ip - map->first_ip].match)
+ return -EEXIST;
+
+ *hash_ip = ip;
+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
+ memcpy(&table[ip - map->first_ip].ethernet, ethernet, ETH_ALEN);
++ table[ip - map->first_ip].match = IPSET_MACIP_ISSET;
+ return 0;
+}
+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_macipmap *req =
-+ (struct ip_set_req_macipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_macipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_macipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __addip(set, req->ip, req->ethernet, hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ ip_set_ip_t ip;
-+
-+ ip = ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr);
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr);
-+#endif
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ if (!(skb_mac_header(skb) >= skb->head
-+ && (skb_mac_header(skb) + ETH_HLEN) <= skb->data))
-+#else
-+ if (!(skb->mac.raw >= skb->head
-+ && (skb->mac.raw + ETH_HLEN) <= skb->data))
-+#endif
++#define KADT_CONDITION \
++ if (!(skb_mac_header(skb) >= skb->head \
++ && (skb_mac_header(skb) + ETH_HLEN) <= skb->data))\
+ return -EINVAL;
+
-+ return __addip(set, ip, eth_hdr(skb)->h_source, hash_ip);
-+}
++UADT(macipmap, add, req->ethernet)
++KADT(macipmap, add, ipaddr, eth_hdr(skb)->h_source)
+
+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++macipmap_del(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table =
-+ (struct ip_set_macip *) map->members;
++ struct ip_set_macipmap *map = set->data;
++ struct ip_set_macip *table = map->members;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
-+ if (!test_and_clear_bit(IPSET_MACIP_ISSET,
-+ (void *)&table[ip - map->first_ip].flags))
++ if (!table[ip - map->first_ip].match)
+ return -EEXIST;
+
+ *hash_ip = ip;
++ table[ip - map->first_ip].match = 0;
+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
+ return 0;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_macipmap *req =
-+ (struct ip_set_req_macipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_macipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_macipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++#undef KADT_CONDITION
++#define KADT_CONDITION
+
-+static inline size_t members_size(ip_set_id_t from, ip_set_id_t to)
-+{
-+ return (size_t)((to - from + 1) * sizeof(struct ip_set_macip));
-+}
++UADT(macipmap, del)
++KADT(macipmap, del, ipaddr)
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static inline int
++__macipmap_create(const struct ip_set_req_macipmap_create *req,
++ struct ip_set_macipmap *map)
+{
-+ int newbytes;
-+ struct ip_set_req_macipmap_create *req =
-+ (struct ip_set_req_macipmap_create *) data;
-+ struct ip_set_macipmap *map;
-+
-+ if (size != sizeof(struct ip_set_req_macipmap_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_macipmap_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ DP("from %u.%u.%u.%u to %u.%u.%u.%u",
-+ HIPQUAD(req->from), HIPQUAD(req->to));
-+
-+ if (req->from > req->to) {
-+ DP("bad ip range");
-+ return -ENOEXEC;
-+ }
-+
+ if (req->to - req->from > MAX_RANGE) {
-+ ip_set_printk("range too big (max %d addresses)",
-+ MAX_RANGE+1);
++ ip_set_printk("range too big, %d elements (max %d)",
++ req->to - req->from + 1, MAX_RANGE+1);
+ return -ENOEXEC;
+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_macipmap), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_macipmap));
-+ return -ENOMEM;
-+ }
+ map->flags = req->flags;
-+ map->first_ip = req->from;
-+ map->last_ip = req->to;
-+ newbytes = members_size(map->first_ip, map->last_ip);
-+ map->members = ip_set_malloc(newbytes);
-+ DP("members: %u %p", newbytes, map->members);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", newbytes);
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+ memset(map->members, 0, newbytes);
-+
-+ set->data = map;
-+ return 0;
-+}
-+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+
-+ ip_set_free(map->members, members_size(map->first_ip, map->last_ip));
-+ kfree(map);
-+
-+ set->data = NULL;
++ return (req->to - req->from + 1) * sizeof(struct ip_set_macip);
+}
+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ memset(map->members, 0, members_size(map->first_ip, map->last_ip));
-+}
++BITMAP_CREATE(macipmap)
++BITMAP_DESTROY(macipmap)
++BITMAP_FLUSH(macipmap)
+
-+static void list_header(const struct ip_set *set, void *data)
++static inline void
++__macipmap_list_header(const struct ip_set_macipmap *map,
++ struct ip_set_req_macipmap_create *header)
+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ struct ip_set_req_macipmap_create *header =
-+ (struct ip_set_req_macipmap_create *) data;
-+
-+ DP("list_header %x %x %u", map->first_ip, map->last_ip,
-+ map->flags);
-+
-+ header->from = map->first_ip;
-+ header->to = map->last_ip;
+ header->flags = map->flags;
+}
+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+
-+ DP("%u", members_size(map->first_ip, map->last_ip));
-+ return members_size(map->first_ip, map->last_ip);
-+}
-+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+
-+ int bytes = members_size(map->first_ip, map->last_ip);
-+
-+ DP("members: %u %p", bytes, map->members);
-+ memcpy(data, map->members, bytes);
-+}
++BITMAP_LIST_HEADER(macipmap)
++BITMAP_LIST_MEMBERS_SIZE(macipmap)
++BITMAP_LIST_MEMBERS(macipmap)
+
-+static struct ip_set_type ip_set_macipmap = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_macipmap),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_macipmap_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_TYPE(macipmap, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("macipmap type of IP sets");
+
-+static int __init ip_set_macipmap_init(void)
-+{
-+ init_max_malloc_size();
-+ return ip_set_register_set_type(&ip_set_macipmap);
-+}
-+
-+static void __exit ip_set_macipmap_fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_macipmap);
-+}
-+
-+module_init(ip_set_macipmap_init);
-+module_exit(ip_set_macipmap_fini);
++REGISTER_MODULE(macipmap)
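
The rewritten macipmap code above drops the per-entry flags bitfield and the test_and_set_bit(IPSET_MACIP_ISSET, ...) calls in favour of a plain match marker that is read and written directly, while the user/kernel entry points and the create/destroy/flush/list boilerplate are generated by the new UADT/KADT and BITMAP_* macros. A minimal userspace sketch of just the add/delete logic, assuming a simplified struct with ethernet[] and match fields (the names mirror the patch, but this is illustration only, not the kernel module):

    #include <stdint.h>
    #include <string.h>
    #include <errno.h>

    #define ETH_ALEN 6
    #define MACIP_ISSET 1

    struct macip {
        unsigned char ethernet[ETH_ALEN];
        unsigned char match;            /* non-zero when the slot is in use */
    };

    /* Add one IP/MAC pair: check match, copy the MAC, then mark the slot. */
    static int macip_add(struct macip *table, uint32_t first, uint32_t last,
                         uint32_t ip, const unsigned char *mac)
    {
        if (ip < first || ip > last)
            return -ERANGE;
        if (table[ip - first].match)
            return -EEXIST;             /* already stored */
        memcpy(table[ip - first].ethernet, mac, ETH_ALEN);
        table[ip - first].match = MACIP_ISSET;
        return 0;
    }

    /* Delete by clearing the marker; the stale MAC bytes are simply ignored. */
    static int macip_del(struct macip *table, uint32_t first, uint32_t last,
                         uint32_t ip)
    {
        if (ip < first || ip > last)
            return -ERANGE;
        if (!table[ip - first].match)
            return -EEXIST;
        table[ip - first].match = 0;
        return 0;
    }

    int main(void)
    {
        struct macip table[8] = { 0 };
        const unsigned char mac[ETH_ALEN] = { 0, 0x11, 0x22, 0x33, 0x44, 0x55 };

        return macip_add(table, 100, 107, 103, mac) == 0 &&
               macip_del(table, 100, 107, 103) == 0 ? 0 : 1;
    }
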
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_nethash.c
-@@ -0,0 +1,497 @@
-+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+@@ -0,0 +1,225 @@
++/* Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
@@ -6351,63 +6415,56 @@
+/* Kernel module implementing a cidr nethash set */
+
+#include <linux/module.h>
++#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+#include <linux/jhash.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
-+#include <linux/vmalloc.h>
+#include <linux/random.h>
+
+#include <net/ip.h>
+
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
+#include <linux/netfilter_ipv4/ip_set_nethash.h>
+
+static int limit = MAX_RANGE;
+
+static inline __u32
-+jhash_ip(const struct ip_set_nethash *map, uint16_t i, ip_set_ip_t ip)
-+{
-+ return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
-+}
-+
-+static inline __u32
-+hash_id_cidr(struct ip_set_nethash *map,
-+ ip_set_ip_t ip,
-+ unsigned char cidr,
-+ ip_set_ip_t *hash_ip)
++nethash_id_cidr(const struct ip_set_nethash *map,
++ ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip,
++ uint8_t cidr)
+{
+ __u32 id;
+ u_int16_t i;
+ ip_set_ip_t *elem;
+
-+ *hash_ip = pack(ip, cidr);
-+
++ *hash_ip = pack_ip_cidr(ip, cidr);
++ if (!*hash_ip)
++ return MAX_RANGE;
++
+ for (i = 0; i < map->probes; i++) {
+ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
+ DP("hash key: %u", id);
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
+ if (*elem == *hash_ip)
+ return id;
++ /* No shortcut - there can be deleted entries. */
+ }
+ return UINT_MAX;
+}
+
+static inline __u32
-+hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++nethash_id(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++ const struct ip_set_nethash *map = set->data;
+ __u32 id = UINT_MAX;
+ int i;
+
+ for (i = 0; i < 30 && map->cidr[i]; i++) {
-+ id = hash_id_cidr(map, ip, map->cidr[i], hash_ip);
++ id = nethash_id_cidr(map, hash_ip, ip, map->cidr[i]);
+ if (id != UINT_MAX)
+ break;
+ }
@@ -6415,409 +6472,156 @@
+}
+
+static inline int
-+__testip_cidr(struct ip_set *set, ip_set_ip_t ip, unsigned char cidr,
-+ ip_set_ip_t *hash_ip)
++nethash_test_cidr(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, uint8_t cidr)
+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++ const struct ip_set_nethash *map = set->data;
+
-+ return (ip && hash_id_cidr(map, ip, cidr, hash_ip) != UINT_MAX);
++ return (nethash_id_cidr(map, hash_ip, ip, cidr) != UINT_MAX);
+}
+
+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++nethash_test(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
++ return (nethash_id(set, hash_ip, ip) != UINT_MAX);
+}
+
+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
++nethash_utest(struct ip_set *set, const void *data, u_int32_t size,
++ ip_set_ip_t *hash_ip)
+{
-+ struct ip_set_req_nethash *req =
-+ (struct ip_set_req_nethash *) data;
++ const struct ip_set_req_nethash *req = data;
+
-+ if (size != sizeof(struct ip_set_req_nethash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_nethash),
-+ size);
++ if (req->cidr <= 0 || req->cidr > 32)
+ return -EINVAL;
-+ }
-+ return (req->cidr == 32 ? __testip(set, req->ip, hash_ip)
-+ : __testip_cidr(set, req->ip, req->cidr, hash_ip));
++ return (req->cidr == 32 ? nethash_test(set, hash_ip, req->ip)
++ : nethash_test_cidr(set, hash_ip, req->ip, req->cidr));
+}
+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++#define KADT_CONDITION
++
++KADT(nethash, test, ipaddr)
+
+static inline int
-+__addip_base(struct ip_set_nethash *map, ip_set_ip_t ip)
++__nethash_add(struct ip_set_nethash *map, ip_set_ip_t *ip)
+{
+ __u32 probe;
+ u_int16_t i;
-+ ip_set_ip_t *elem;
-+
++ ip_set_ip_t *elem, *slot = NULL;
++
+ for (i = 0; i < map->probes; i++) {
-+ probe = jhash_ip(map, i, ip) % map->hashsize;
++ probe = jhash_ip(map, i, *ip) % map->hashsize;
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
-+ if (*elem == ip)
++ if (*elem == *ip)
+ return -EEXIST;
-+ if (!*elem) {
-+ *elem = ip;
-+ map->elements++;
-+ return 0;
-+ }
++ if (!(slot || *elem))
++ slot = elem;
++ /* There can be deleted entries, must check all slots */
++ }
++ if (slot) {
++ *slot = *ip;
++ map->elements++;
++ return 0;
+ }
+ /* Trigger rehashing */
+ return -EAGAIN;
+}
+
+static inline int
-+__addip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
-+ ip_set_ip_t *hash_ip)
-+{
-+ if (!ip || map->elements >= limit)
-+ return -ERANGE;
-+
-+ *hash_ip = pack(ip, cidr);
-+ DP("%u.%u.%u.%u/%u, %u.%u.%u.%u", HIPQUAD(ip), cidr, HIPQUAD(*hash_ip));
-+
-+ return __addip_base(map, *hash_ip);
-+}
-+
-+static void
-+update_cidr_sizes(struct ip_set_nethash *map, unsigned char cidr)
-+{
-+ unsigned char next;
-+ int i;
-+
-+ for (i = 0; i < 30 && map->cidr[i]; i++) {
-+ if (map->cidr[i] == cidr) {
-+ return;
-+ } else if (map->cidr[i] < cidr) {
-+ next = map->cidr[i];
-+ map->cidr[i] = cidr;
-+ cidr = next;
-+ }
-+ }
-+ if (i < 30)
-+ map->cidr[i] = cidr;
-+}
-+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
++nethash_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, uint8_t cidr)
+{
-+ struct ip_set_req_nethash *req =
-+ (struct ip_set_req_nethash *) data;
++ struct ip_set_nethash *map = set->data;
+ int ret;
-+
-+ if (size != sizeof(struct ip_set_req_nethash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_nethash),
-+ size);
++
++ if (map->elements >= limit || map->nets[cidr-1] == UINT16_MAX)
++ return -ERANGE;
++ if (cidr <= 0 || cidr >= 32)
+ return -EINVAL;
-+ }
-+ ret = __addip((struct ip_set_nethash *) set->data,
-+ req->ip, req->cidr, hash_ip);
-+
-+ if (ret == 0)
-+ update_cidr_sizes((struct ip_set_nethash *) set->data,
-+ req->cidr);
+
++ *hash_ip = pack_ip_cidr(ip, cidr);
++ DP("%u.%u.%u.%u/%u, %u.%u.%u.%u", HIPQUAD(ip), cidr, HIPQUAD(*hash_ip));
++ if (!*hash_ip)
++ return -ERANGE;
++
++ ret = __nethash_add(map, hash_ip);
++ if (ret == 0) {
++ if (!map->nets[cidr-1]++)
++ add_cidr_size(map->cidr, cidr);
++ map->elements++;
++ }
++
+ return ret;
+}
+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ int ret = -ERANGE;
-+ ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr);
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr);
-+#endif
++#undef KADT_CONDITION
++#define KADT_CONDITION \
++ struct ip_set_nethash *map = set->data; \
++ uint8_t cidr = map->cidr[0] ? map->cidr[0] : 31;
+
-+ if (map->cidr[0])
-+ ret = __addip(map, ip, map->cidr[0], hash_ip);
++UADT(nethash, add, req->cidr)
++KADT(nethash, add, ipaddr, cidr)
+
-+ return ret;
-+}
-+
-+static int retry(struct ip_set *set)
++static inline void
++__nethash_retry(struct ip_set_nethash *tmp, struct ip_set_nethash *map)
+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ ip_set_ip_t *elem;
-+ void *members;
-+ u_int32_t i, hashsize = map->hashsize;
-+ int res;
-+ struct ip_set_nethash *tmp;
-+
-+ if (map->resize == 0)
-+ return -ERANGE;
-+
-+ again:
-+ res = 0;
-+
-+ /* Calculate new parameters */
-+ hashsize += (hashsize * map->resize)/100;
-+ if (hashsize == map->hashsize)
-+ hashsize++;
-+
-+ ip_set_printk("rehashing of set %s triggered: "
-+ "hashsize grows from %u to %u",
-+ set->name, map->hashsize, hashsize);
-+
-+ tmp = kmalloc(sizeof(struct ip_set_nethash)
-+ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
-+ if (!tmp) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_nethash)
-+ + map->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
-+ if (!tmp->members) {
-+ DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
-+ kfree(tmp);
-+ return -ENOMEM;
-+ }
-+ tmp->hashsize = hashsize;
-+ tmp->elements = 0;
-+ tmp->probes = map->probes;
-+ tmp->resize = map->resize;
-+ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
-+ memcpy(tmp->cidr, map->cidr, 30 * sizeof(unsigned char));
-+
-+ write_lock_bh(&set->lock);
-+ map = (struct ip_set_nethash *) set->data; /* Play safe */
-+ for (i = 0; i < map->hashsize && res == 0; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ if (*elem)
-+ res = __addip_base(tmp, *elem);
-+ }
-+ if (res) {
-+ /* Failure, try again */
-+ write_unlock_bh(&set->lock);
-+ harray_free(tmp->members);
-+ kfree(tmp);
-+ goto again;
-+ }
-+
-+ /* Success at resizing! */
-+ members = map->members;
-+
-+ map->hashsize = tmp->hashsize;
-+ map->members = tmp->members;
-+ write_unlock_bh(&set->lock);
-+
-+ harray_free(members);
-+ kfree(tmp);
-+
-+ return 0;
++ memcpy(tmp->cidr, map->cidr, sizeof(tmp->cidr));
++ memcpy(tmp->nets, map->nets, sizeof(tmp->nets));
+}
+
++HASH_RETRY(nethash, ip_set_ip_t)
++
+static inline int
-+__delip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
-+ ip_set_ip_t *hash_ip)
++nethash_del(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, uint8_t cidr)
+{
++ struct ip_set_nethash *map = set->data;
+ ip_set_ip_t id, *elem;
+
-+ if (!ip)
-+ return -ERANGE;
-+
-+ id = hash_id_cidr(map, ip, cidr, hash_ip);
++ if (cidr <= 0 || cidr >= 32)
++ return -EINVAL;
++
++ id = nethash_id_cidr(map, hash_ip, ip, cidr);
+ if (id == UINT_MAX)
+ return -EEXIST;
-+
++
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
+ *elem = 0;
+ map->elements--;
++ if (!map->nets[cidr-1]--)
++ del_cidr_size(map->cidr, cidr);
+ return 0;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_nethash *req =
-+ (struct ip_set_req_nethash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_nethash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_nethash),
-+ size);
-+ return -EINVAL;
-+ }
-+ /* TODO: no garbage collection in map->cidr */
-+ return __delip((struct ip_set_nethash *) set->data,
-+ req->ip, req->cidr, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ int ret = -ERANGE;
-+ ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr);
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr);
-+#endif
-+
-+ if (map->cidr[0])
-+ ret = __delip(map, ip, map->cidr[0], hash_ip);
-+
-+ return ret;
-+}
++UADT(nethash, del, req->cidr)
++KADT(nethash, del, ipaddr, cidr)
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static inline int
++__nethash_create(const struct ip_set_req_nethash_create *req,
++ struct ip_set_nethash *map)
+{
-+ struct ip_set_req_nethash_create *req =
-+ (struct ip_set_req_nethash_create *) data;
-+ struct ip_set_nethash *map;
-+ uint16_t i;
-+
-+ if (size != sizeof(struct ip_set_req_nethash_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_nethash_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ if (req->hashsize < 1) {
-+ ip_set_printk("hashsize too small");
-+ return -ENOEXEC;
-+ }
-+ if (req->probes < 1) {
-+ ip_set_printk("probes too small");
-+ return -ENOEXEC;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_nethash)
-+ + req->probes * sizeof(uint32_t), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_nethash)
-+ + req->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ for (i = 0; i < req->probes; i++)
-+ get_random_bytes(((uint32_t *) map->initval)+i, 4);
-+ map->elements = 0;
-+ map->hashsize = req->hashsize;
-+ map->probes = req->probes;
-+ map->resize = req->resize;
-+ memset(map->cidr, 0, 30 * sizeof(unsigned char));
-+ map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+
-+ set->data = map;
++ memset(map->cidr, 0, sizeof(map->cidr));
++ memset(map->nets, 0, sizeof(map->nets));
++
+ return 0;
+}
+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+
-+ harray_free(map->members);
-+ kfree(map);
-+
-+ set->data = NULL;
-+}
-+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
-+ memset(map->cidr, 0, 30 * sizeof(unsigned char));
-+ map->elements = 0;
-+}
-+
-+static void list_header(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ struct ip_set_req_nethash_create *header =
-+ (struct ip_set_req_nethash_create *) data;
-+
-+ header->hashsize = map->hashsize;
-+ header->probes = map->probes;
-+ header->resize = map->resize;
-+}
++HASH_CREATE(nethash, ip_set_ip_t)
++HASH_DESTROY(nethash)
+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++HASH_FLUSH_CIDR(nethash, ip_set_ip_t)
+
-+ return (map->hashsize * sizeof(ip_set_ip_t));
++static inline void
++__nethash_list_header(const struct ip_set_nethash *map,
++ struct ip_set_req_nethash_create *header)
++{
+}
+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ ip_set_ip_t i, *elem;
++HASH_LIST_HEADER(nethash)
++HASH_LIST_MEMBERS_SIZE(nethash, ip_set_ip_t)
++HASH_LIST_MEMBERS(nethash, ip_set_ip_t)
+
-+ for (i = 0; i < map->hashsize; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ ((ip_set_ip_t *)data)[i] = *elem;
-+ }
-+}
-+
-+static struct ip_set_type ip_set_nethash = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_nethash),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .retry = &retry,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_nethash_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_RTYPE(nethash, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -6825,23 +6629,11 @@
+module_param(limit, int, 0600);
+MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
+
-+static int __init ip_set_nethash_init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_nethash);
-+}
-+
-+static void __exit ip_set_nethash_fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_nethash);
-+}
-+
-+module_init(ip_set_nethash_init);
-+module_exit(ip_set_nethash_fini);
++REGISTER_MODULE(nethash)
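
Two behavioural changes are visible in the new nethash code: the probe loop in __nethash_add now remembers the first free slot but keeps probing, because deleted entries can leave an earlier probe empty while a duplicate still sits at a later one, and per-prefix element counts (map->nets[]) are kept so the ordered cidr[] list can be maintained with add_cidr_size()/del_cidr_size(). A small self-contained sketch of that probing pattern, using an arbitrary mixing function in place of the kernel's jhash (assumed names, illustration only; 0 marks an empty slot, as in the patch):

    #include <stdint.h>
    #include <stddef.h>
    #include <errno.h>

    /* Toy multiplicative hash standing in for jhash_1word(); not the kernel's. */
    static uint32_t mix(uint32_t ip, uint32_t seed)
    {
        return (ip ^ seed) * 2654435761u;
    }

    /* Insert into an open-addressed table probed with several independent hashes. */
    static int hash_add(uint32_t *table, uint32_t hashsize,
                        const uint32_t *seeds, unsigned probes, uint32_t ip)
    {
        uint32_t *slot = NULL;

        for (unsigned i = 0; i < probes; i++) {
            uint32_t *elem = &table[mix(ip, seeds[i]) % hashsize];

            if (*elem == ip)
                return -EEXIST;         /* duplicate found at a later probe */
            if (!slot && !*elem)
                slot = elem;            /* remember the first hole, keep scanning */
        }
        if (slot) {
            *slot = ip;
            return 0;
        }
        return -EAGAIN;                 /* every probe occupied: caller rehashes */
    }

    int main(void)
    {
        uint32_t table[16] = { 0 };
        const uint32_t seeds[2] = { 0x9e3779b9u, 0x85ebca6bu };

        hash_add(table, 16, seeds, 2, 0xc0a80001u);   /* 192.168.0.1 */
        return hash_add(table, 16, seeds, 2, 0xc0a80001u) == -EEXIST ? 0 : 1;
    }

The same "check all probes before settling on a slot" rule is why the lookup path above carries the comment about deleted entries: a hit can no longer be ruled out just because an earlier probe is empty.
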
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_portmap.c
-@@ -0,0 +1,346 @@
-+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+@@ -0,0 +1,114 @@
++/* Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
@@ -6855,9 +6647,6 @@
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
@@ -6866,330 +6655,434 @@
+#include <net/ip.h>
+
+#include <linux/netfilter_ipv4/ip_set_portmap.h>
++#include <linux/netfilter_ipv4/ip_set_getport.h>
+
-+/* We must handle non-linear skbs */
-+static inline ip_set_ip_t
-+get_port(const struct sk_buff *skb, u_int32_t flags)
++static inline int
++portmap_test(const struct ip_set *set, ip_set_ip_t *hash_port,
++ ip_set_ip_t port)
+{
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ struct iphdr *iph = ip_hdr(skb);
-+#else
-+ struct iphdr *iph = skb->nh.iph;
-+#endif
-+ u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
-+ switch (iph->protocol) {
-+ case IPPROTO_TCP: {
-+ struct tcphdr tcph;
-+
-+ /* See comments at tcp_match in ip_tables.c */
-+ if (offset)
-+ return INVALID_PORT;
++ const struct ip_set_portmap *map = set->data;
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
-+#else
-+ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &tcph, sizeof(tcph)) < 0)
-+#endif
-+ /* No choice either */
-+ return INVALID_PORT;
++ if (port < map->first_ip || port > map->last_ip)
++ return -ERANGE;
++
++ *hash_port = port;
++ DP("set: %s, port:%u, %u", set->name, port, *hash_port);
++ return !!test_bit(port - map->first_ip, map->members);
++}
+
-+ return ntohs(flags & IPSET_SRC ?
-+ tcph.source : tcph.dest);
-+ }
-+ case IPPROTO_UDP: {
-+ struct udphdr udph;
++#define KADT_CONDITION \
++ if (ip == INVALID_PORT) \
++ return 0;
+
-+ if (offset)
-+ return INVALID_PORT;
++UADT(portmap, test)
++KADT(portmap, test, get_port)
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
-+#else
-+ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &udph, sizeof(udph)) < 0)
-+#endif
-+ /* No choice either */
-+ return INVALID_PORT;
++static inline int
++portmap_add(struct ip_set *set, ip_set_ip_t *hash_port, ip_set_ip_t port)
++{
++ struct ip_set_portmap *map = set->data;
+
-+ return ntohs(flags & IPSET_SRC ?
-+ udph.source : udph.dest);
-+ }
-+ default:
-+ return INVALID_PORT;
-+ }
++ if (port < map->first_ip || port > map->last_ip)
++ return -ERANGE;
++ if (test_and_set_bit(port - map->first_ip, map->members))
++ return -EEXIST;
++
++ *hash_port = port;
++ DP("port %u", port);
++ return 0;
+}
+
++UADT(portmap, add)
++KADT(portmap, add, get_port)
++
+static inline int
-+__testport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
++portmap_del(struct ip_set *set, ip_set_ip_t *hash_port, ip_set_ip_t port)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++ struct ip_set_portmap *map = set->data;
+
-+ if (port < map->first_port || port > map->last_port)
++ if (port < map->first_ip || port > map->last_ip)
+ return -ERANGE;
-+
++ if (!test_and_clear_bit(port - map->first_ip, map->members))
++ return -EEXIST;
++
+ *hash_port = port;
-+ DP("set: %s, port:%u, %u", set->name, port, *hash_port);
-+ return !!test_bit(port - map->first_port, map->members);
++ DP("port %u", port);
++ return 0;
+}
+
-+static int
-+testport(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_port)
-+{
-+ struct ip_set_req_portmap *req =
-+ (struct ip_set_req_portmap *) data;
++UADT(portmap, del)
++KADT(portmap, del, get_port)
+
-+ if (size != sizeof(struct ip_set_req_portmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_portmap),
-+ size);
-+ return -EINVAL;
++static inline int
++__portmap_create(const struct ip_set_req_portmap_create *req,
++ struct ip_set_portmap *map)
++{
++ if (req->to - req->from > MAX_RANGE) {
++ ip_set_printk("range too big, %d elements (max %d)",
++ req->to - req->from + 1, MAX_RANGE+1);
++ return -ENOEXEC;
+ }
-+ return __testport(set, req->port, hash_port);
++ return bitmap_bytes(req->from, req->to);
+}
+
-+static int
-+testport_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_port,
-+ const u_int32_t *flags,
-+ unsigned char index)
++BITMAP_CREATE(portmap)
++BITMAP_DESTROY(portmap)
++BITMAP_FLUSH(portmap)
++
++static inline void
++__portmap_list_header(const struct ip_set_portmap *map,
++ struct ip_set_req_portmap_create *header)
+{
-+ int res;
-+ ip_set_ip_t port = get_port(skb, flags[index]);
++}
+
-+ DP("flag %s port %u", flags[index] & IPSET_SRC ? "SRC" : "DST", port);
-+ if (port == INVALID_PORT)
-+ return 0;
++BITMAP_LIST_HEADER(portmap)
++BITMAP_LIST_MEMBERS_SIZE(portmap)
++BITMAP_LIST_MEMBERS(portmap)
+
-+ res = __testport(set, port, hash_port);
++IP_SET_TYPE(portmap, IPSET_TYPE_PORT | IPSET_DATA_SINGLE)
+
-+ return (res < 0 ? 0 : res);
-+}
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("portmap type of IP sets");
+
-+static inline int
-+__addport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
-+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++REGISTER_MODULE(portmap)
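
The portmap rewrite keeps the same one-bit-per-port semantics but renames first_port/last_port to first_ip/last_ip so the shared BITMAP_* macros can generate create/destroy/flush/list, and the non-linear-skb port extraction that used to live in a local get_port() now comes from the new ip_set_getport.h helper used by the KADT macros. A plain C sketch of the underlying bit arithmetic, assuming a caller-allocated array of unsigned longs (illustration only):

    #include <limits.h>
    #include <errno.h>

    #define LONG_BITS (sizeof(unsigned long) * CHAR_BIT)

    /* Equivalent of test_and_set_bit() on a flat bitmap keyed by port offset. */
    static int port_add(unsigned long *bits, unsigned first, unsigned last,
                        unsigned port)
    {
        unsigned idx;
        unsigned long mask;

        if (port < first || port > last)
            return -ERANGE;
        idx  = (port - first) / LONG_BITS;
        mask = 1UL << ((port - first) % LONG_BITS);
        if (bits[idx] & mask)
            return -EEXIST;
        bits[idx] |= mask;
        return 0;
    }

    static int port_test(const unsigned long *bits, unsigned first, unsigned last,
                         unsigned port)
    {
        if (port < first || port > last)
            return -ERANGE;
        return !!(bits[(port - first) / LONG_BITS] &
                  (1UL << ((port - first) % LONG_BITS)));
    }

    int main(void)
    {
        unsigned long bits[2] = { 0 };

        port_add(bits, 0, 127, 80);
        return port_test(bits, 0, 127, 80) == 1 &&
               port_test(bits, 0, 127, 22) == 0 ? 0 : 1;
    }
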
+--- /dev/null
++++ b/net/ipv4/netfilter/ip_set_setlist.c
+@@ -0,0 +1,330 @@
++/* Copyright (C) 2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
+
-+ if (port < map->first_port || port > map->last_port)
-+ return -ERANGE;
-+ if (test_and_set_bit(port - map->first_port, map->members))
-+ return -EEXIST;
++/* Kernel module implementing an IP set type: the setlist type */
+
-+ *hash_port = port;
-+ DP("port %u", port);
-+ return 0;
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/errno.h>
++
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
++#include <linux/netfilter_ipv4/ip_set_setlist.h>
++
++/*
++ * before ==> index, ref
++ * after ==> ref, index
++ */
++
++static inline int
++next_index_eq(const struct ip_set_setlist *map, int i, ip_set_id_t index)
++{
++ return i < map->size && map->index[i] == index;
+}
+
+static int
-+addport(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_port)
-+{
-+ struct ip_set_req_portmap *req =
-+ (struct ip_set_req_portmap *) data;
++setlist_utest(struct ip_set *set, const void *data, u_int32_t size,
++ ip_set_ip_t *hash_ip)
++{
++ const struct ip_set_setlist *map = set->data;
++ const struct ip_set_req_setlist *req = data;
++ ip_set_id_t index, ref = IP_SET_INVALID_ID;
++ int i, res = 0;
++ struct ip_set *s;
++
++ if (req->before && req->ref[0] == '\0')
++ return 0;
+
-+ if (size != sizeof(struct ip_set_req_portmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_portmap),
-+ size);
-+ return -EINVAL;
++ index = __ip_set_get_byname(req->name, &s);
++ if (index == IP_SET_INVALID_ID)
++ return 0;
++ if (req->ref[0] != '\0') {
++ ref = __ip_set_get_byname(req->ref, &s);
++ if (ref == IP_SET_INVALID_ID)
++ goto finish;
++ }
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID; i++) {
++ if (req->before && map->index[i] == index) {
++ res = next_index_eq(map, i + 1, ref);
++ break;
++ } else if (!req->before) {
++ if ((ref == IP_SET_INVALID_ID
++ && map->index[i] == index)
++ || (map->index[i] == ref
++ && next_index_eq(map, i + 1, index))) {
++ res = 1;
++ break;
++ }
++ }
+ }
-+ return __addport(set, req->port, hash_port);
++ if (ref != IP_SET_INVALID_ID)
++ __ip_set_put_byindex(ref);
++finish:
++ __ip_set_put_byindex(index);
++ return res;
+}
+
+static int
-+addport_kernel(struct ip_set *set,
++setlist_ktest(struct ip_set *set,
+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_port,
++ ip_set_ip_t *hash_ip,
+ const u_int32_t *flags,
+ unsigned char index)
+{
-+ ip_set_ip_t port = get_port(skb, flags[index]);
-+
-+ if (port == INVALID_PORT)
-+ return -EINVAL;
-+
-+ return __addport(set, port, hash_port);
++ struct ip_set_setlist *map = set->data;
++ int i, res = 0;
++
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID
++ && res == 0; i++)
++ res = ip_set_testip_kernel(map->index[i], skb, flags);
++ return res;
+}
+
+static inline int
-+__delport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
++insert_setlist(struct ip_set_setlist *map, int i, ip_set_id_t index)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++ ip_set_id_t tmp;
++ int j;
+
-+ if (port < map->first_port || port > map->last_port)
++ DP("i: %u, last %u\n", i, map->index[map->size - 1]);
++ if (i >= map->size || map->index[map->size - 1] != IP_SET_INVALID_ID)
+ return -ERANGE;
-+ if (!test_and_clear_bit(port - map->first_port, map->members))
-+ return -EEXIST;
-+
-+ *hash_port = port;
-+ DP("port %u", port);
++
++ for (j = i; j < map->size
++ && index != IP_SET_INVALID_ID; j++) {
++ tmp = map->index[j];
++ map->index[j] = index;
++ index = tmp;
++ }
+ return 0;
+}
+
+static int
-+delport(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_port)
++setlist_uadd(struct ip_set *set, const void *data, u_int32_t size,
++ ip_set_ip_t *hash_ip)
+{
-+ struct ip_set_req_portmap *req =
-+ (struct ip_set_req_portmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_portmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_portmap),
-+ size);
++ struct ip_set_setlist *map = set->data;
++ const struct ip_set_req_setlist *req = data;
++ ip_set_id_t index, ref = IP_SET_INVALID_ID;
++ int i, res = -ERANGE;
++ struct ip_set *s;
++
++ if (req->before && req->ref[0] == '\0')
+ return -EINVAL;
++
++ index = __ip_set_get_byname(req->name, &s);
++ if (index == IP_SET_INVALID_ID)
++ return -EEXIST;
++ /* "Loop detection" */
++ if (strcmp(s->type->typename, "setlist") == 0)
++ goto finish;
++
++ if (req->ref[0] != '\0') {
++ ref = __ip_set_get_byname(req->ref, &s);
++ if (ref == IP_SET_INVALID_ID) {
++ res = -EEXIST;
++ goto finish;
++ }
+ }
-+ return __delport(set, req->port, hash_port);
++ for (i = 0; i < map->size; i++) {
++ if (map->index[i] != ref)
++ continue;
++ if (req->before)
++ res = insert_setlist(map, i, index);
++ else
++ res = insert_setlist(map,
++ ref == IP_SET_INVALID_ID ? i : i + 1,
++ index);
++ break;
++ }
++ if (ref != IP_SET_INVALID_ID)
++ __ip_set_put_byindex(ref);
++ /* In case of success, we keep the reference to the set */
++finish:
++ if (res != 0)
++ __ip_set_put_byindex(index);
++ return res;
+}
+
+static int
-+delport_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_port,
-+ const u_int32_t *flags,
-+ unsigned char index)
++setlist_kadd(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
+{
-+ ip_set_ip_t port = get_port(skb, flags[index]);
-+
-+ if (port == INVALID_PORT)
-+ return -EINVAL;
-+
-+ return __delport(set, port, hash_port);
++ struct ip_set_setlist *map = set->data;
++ int i, res = -EINVAL;
++
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID
++ && res != 0; i++)
++ res = ip_set_addip_kernel(map->index[i], skb, flags);
++ return res;
+}
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static inline int
++unshift_setlist(struct ip_set_setlist *map, int i)
+{
-+ int newbytes;
-+ struct ip_set_req_portmap_create *req =
-+ (struct ip_set_req_portmap_create *) data;
-+ struct ip_set_portmap *map;
++ int j;
++
++ for (j = i; j < map->size - 1; j++)
++ map->index[j] = map->index[j+1];
++ map->index[map->size-1] = IP_SET_INVALID_ID;
++ return 0;
++}
+
-+ if (size != sizeof(struct ip_set_req_portmap_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_portmap_create),
-+ size);
++static int
++setlist_udel(struct ip_set *set, const void *data, u_int32_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_setlist *map = set->data;
++ const struct ip_set_req_setlist *req = data;
++ ip_set_id_t index, ref = IP_SET_INVALID_ID;
++ int i, res = -EEXIST;
++ struct ip_set *s;
++
++ if (req->before && req->ref[0] == '\0')
+ return -EINVAL;
-+ }
+
-+ DP("from %u to %u", req->from, req->to);
-+
-+ if (req->from > req->to) {
-+ DP("bad port range");
-+ return -ENOEXEC;
++ index = __ip_set_get_byname(req->name, &s);
++ if (index == IP_SET_INVALID_ID)
++ return -EEXIST;
++ if (req->ref[0] != '\0') {
++ ref = __ip_set_get_byname(req->ref, &s);
++ if (ref == IP_SET_INVALID_ID)
++ goto finish;
++ }
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID; i++) {
++ if (req->before) {
++ if (map->index[i] == index
++ && next_index_eq(map, i + 1, ref)) {
++ res = unshift_setlist(map, i);
++ break;
++ }
++ } else if (ref == IP_SET_INVALID_ID) {
++ if (map->index[i] == index) {
++ res = unshift_setlist(map, i);
++ break;
++ }
++ } else if (map->index[i] == ref
++ && next_index_eq(map, i + 1, index)) {
++ res = unshift_setlist(map, i + 1);
++ break;
++ }
+ }
++ if (ref != IP_SET_INVALID_ID)
++ __ip_set_put_byindex(ref);
++finish:
++ __ip_set_put_byindex(index);
++ /* In case of success, release the reference to the set */
++ if (res == 0)
++ __ip_set_put_byindex(index);
++ return res;
++}
+
-+ if (req->to - req->from > MAX_RANGE) {
-+ ip_set_printk("range too big (max %d ports)",
-+ MAX_RANGE+1);
-+ return -ENOEXEC;
-+ }
++static int
++setlist_kdel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ struct ip_set_setlist *map = set->data;
++ int i, res = -EINVAL;
++
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID
++ && res != 0; i++)
++ res = ip_set_delip_kernel(map->index[i], skb, flags);
++ return res;
++}
+
-+ map = kmalloc(sizeof(struct ip_set_portmap), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_portmap));
-+ return -ENOMEM;
-+ }
-+ map->first_port = req->from;
-+ map->last_port = req->to;
-+ newbytes = bitmap_bytes(req->from, req->to);
-+ map->members = kmalloc(newbytes, GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", newbytes);
-+ kfree(map);
++static int
++setlist_create(struct ip_set *set, const void *data, u_int32_t size)
++{
++ struct ip_set_setlist *map;
++ const struct ip_set_req_setlist_create *req = data;
++ int i;
++
++ map = kmalloc(sizeof(struct ip_set_setlist) +
++ req->size * sizeof(ip_set_id_t), GFP_KERNEL);
++ if (!map)
+ return -ENOMEM;
-+ }
-+ memset(map->members, 0, newbytes);
-+
++ map->size = req->size;
++ for (i = 0; i < map->size; i++)
++ map->index[i] = IP_SET_INVALID_ID;
++
+ set->data = map;
+ return 0;
-+}
++}
+
-+static void destroy(struct ip_set *set)
++static void
++setlist_destroy(struct ip_set *set)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++ struct ip_set_setlist *map = set->data;
++ int i;
++
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID; i++)
++ __ip_set_put_byindex(map->index[i]);
+
-+ kfree(map->members);
+ kfree(map);
-+
+ set->data = NULL;
+}
+
-+static void flush(struct ip_set *set)
++static void
++setlist_flush(struct ip_set *set)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+ memset(map->members, 0, bitmap_bytes(map->first_port, map->last_port));
++ struct ip_set_setlist *map = set->data;
++ int i;
++
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID; i++) {
++ __ip_set_put_byindex(map->index[i]);
++ map->index[i] = IP_SET_INVALID_ID;
++ }
+}
+
-+static void list_header(const struct ip_set *set, void *data)
++static void
++setlist_list_header(const struct ip_set *set, void *data)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+ struct ip_set_req_portmap_create *header =
-+ (struct ip_set_req_portmap_create *) data;
-+
-+ DP("list_header %u %u", map->first_port, map->last_port);
-+
-+ header->from = map->first_port;
-+ header->to = map->last_port;
++ const struct ip_set_setlist *map = set->data;
++ struct ip_set_req_setlist_create *header = data;
++
++ header->size = map->size;
+}
+
-+static int list_members_size(const struct ip_set *set)
++static int
++setlist_list_members_size(const struct ip_set *set)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+
-+ return bitmap_bytes(map->first_port, map->last_port);
++ const struct ip_set_setlist *map = set->data;
++
++ return map->size * sizeof(ip_set_id_t);
+}
+
-+static void list_members(const struct ip_set *set, void *data)
++static void
++setlist_list_members(const struct ip_set *set, void *data)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+ int bytes = bitmap_bytes(map->first_port, map->last_port);
-+
-+ memcpy(data, map->members, bytes);
++ struct ip_set_setlist *map = set->data;
++ int i;
++
++ for (i = 0; i < map->size; i++)
++ *((ip_set_id_t *)data + i) = ip_set_id(map->index[i]);
+}
+
-+static struct ip_set_type ip_set_portmap = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_PORT | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_portmap),
-+ .addip = &addport,
-+ .addip_kernel = &addport_kernel,
-+ .delip = &delport,
-+ .delip_kernel = &delport_kernel,
-+ .testip = &testport,
-+ .testip_kernel = &testport_kernel,
-+ .header_size = sizeof(struct ip_set_req_portmap_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_TYPE(setlist, IPSET_TYPE_SETNAME | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("portmap type of IP sets");
++MODULE_DESCRIPTION("setlist type of IP sets");
+
-+static int __init ip_set_portmap_init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_portmap);
-+}
-+
-+static void __exit ip_set_portmap_fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_portmap);
-+}
-+
-+module_init(ip_set_portmap_init);
-+module_exit(ip_set_portmap_fini);
++REGISTER_MODULE(setlist)
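
The new setlist type stores nothing but an ordered, fixed-size array of other sets' indices: userspace add/delete take an optional reference set plus a before/after flag (and refuse to nest another setlist, the "loop detection" above), while the kernel-side handlers walk the array until one member set matches (test) or accepts the operation (add/delete). The array handling itself is ordinary shift-insert and shift-delete; a standalone sketch with -1 standing in for IP_SET_INVALID_ID (assumed sentinel, illustration only):

    #include <errno.h>

    #define LIST_INVALID (-1)

    /* Insert id at position i, shifting the tail right; fails if the list is full. */
    static int list_insert(int *list, int size, int i, int id)
    {
        if (i >= size || list[size - 1] != LIST_INVALID)
            return -ERANGE;
        for (int j = i; j < size && id != LIST_INVALID; j++) {
            int tmp = list[j];
            list[j] = id;
            id = tmp;           /* carry the displaced entry one slot further */
        }
        return 0;
    }

    /* Remove the entry at position i, shifting the tail left. */
    static void list_remove(int *list, int size, int i)
    {
        for (int j = i; j < size - 1; j++)
            list[j] = list[j + 1];
        list[size - 1] = LIST_INVALID;
    }

    int main(void)
    {
        int list[4] = { 7, LIST_INVALID, LIST_INVALID, LIST_INVALID };

        list_insert(list, 4, 0, 3);     /* list becomes 3, 7, -, - */
        list_remove(list, 4, 1);        /* back to 3, -, -, - */
        return list[0] == 3 && list[1] == LIST_INVALID ? 0 : 1;
    }
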
--- /dev/null
+++ b/net/ipv4/netfilter/ipt_set.c
-@@ -0,0 +1,160 @@
+@@ -0,0 +1,238 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Martin Josefsson <gandalf@wlug.westbo.se>
@@ -7207,7 +7100,14 @@
+#include <linux/skbuff.h>
+#include <linux/version.h>
+
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
+#include <linux/netfilter_ipv4/ip_tables.h>
++#define xt_register_match ipt_register_match
++#define xt_unregister_match ipt_unregister_match
++#define xt_match ipt_match
++#else
++#include <linux/netfilter/x_tables.h>
++#endif
+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/netfilter_ipv4/ipt_set.h>
+
@@ -7215,64 +7115,125 @@
+match_set(const struct ipt_set_info *info,
+ const struct sk_buff *skb,
+ int inv)
-+{
++{
+ if (ip_set_testip_kernel(info->index, skb, info->flags))
+ inv = !inv;
+ return inv;
+}
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+static bool
-+#else
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++static int
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *matchinfo,
++ int offset,
++ const void *hdr,
++ u_int16_t datalen,
++ int *hotdrop)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
++static int
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *matchinfo,
++ int offset,
++ int *hotdrop)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static int
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *matchinfo,
++ int offset,
++ unsigned int protoff,
++ int *hotdrop)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
+static int
-+#endif
+match(const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+ const struct xt_match *match,
-+#endif
+ const void *matchinfo,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ int offset, unsigned int protoff, bool *hotdrop)
-+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-+ int offset, unsigned int protoff, int *hotdrop)
-+#else
-+ int offset, int *hotdrop)
++ int offset,
++ unsigned int protoff,
++ int *hotdrop)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++static bool
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const struct xt_match *match,
++ const void *matchinfo,
++ int offset,
++ unsigned int protoff,
++ bool *hotdrop)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
++static bool
++match(const struct sk_buff *skb,
++ const struct xt_match_param *par)
+#endif
+{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+ const struct ipt_set_info_match *info = matchinfo;
-+
++#else
++ const struct ipt_set_info_match *info = par->matchinfo;
++#endif
++
+ return match_set(&info->match_set,
+ skb,
+ info->match_set.flags[0] & IPSET_MATCH_INV);
+}
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+bool
-+#else
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
+static int
-+#endif
+checkentry(const char *tablename,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-+ const void *inf,
-+#else
+ const struct ipt_ip *ip,
-+#endif
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++ void *matchinfo,
++ unsigned int matchsize,
++ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static int
++checkentry(const char *tablename,
++ const void *inf,
++ void *matchinfo,
++ unsigned int matchsize,
++ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++static int
++checkentry(const char *tablename,
++ const void *inf,
+ const struct xt_match *match,
-+#endif
+ void *matchinfo,
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+ unsigned int matchsize,
-+#endif
+ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
++static int
++checkentry(const char *tablename,
++ const void *inf,
++ const struct xt_match *match,
++ void *matchinfo,
++ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++static bool
++checkentry(const char *tablename,
++ const void *inf,
++ const struct xt_match *match,
++ void *matchinfo,
++ unsigned int hook_mask)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
++static bool
++checkentry(const struct xt_mtchk_param *par)
++#endif
+{
-+ struct ipt_set_info_match *info =
-+ (struct ipt_set_info_match *) matchinfo;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++ struct ipt_set_info_match *info = matchinfo;
++#else
++ struct ipt_set_info_match *info = par->matchinfo;
++#endif
+ ip_set_id_t index;
+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
+ if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
+ ip_set_printk("invalid matchsize %d", matchsize);
+ return 0;
@@ -7280,7 +7241,7 @@
+#endif
+
+ index = ip_set_get_byindex(info->match_set.index);
-+
++
+ if (index == IP_SET_INVALID_ID) {
+ ip_set_printk("Cannot find set indentified by id %u to match",
+ info->match_set.index);
@@ -7294,65 +7255,75 @@
+ return 1;
+}
+
-+static void destroy(
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-+ const struct xt_match *match,
-+#endif
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ void *matchinfo, unsigned int matchsize)
-+#else
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static void destroy(void *matchinfo,
++ unsigned int matchsize)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++static void destroy(const struct xt_match *match,
++ void *matchinfo,
++ unsigned int matchsize)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++static void destroy(const struct xt_match *match,
+ void *matchinfo)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
++static void destroy(const struct xt_mtdtor_param *par)
+#endif
+{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+ struct ipt_set_info_match *info = matchinfo;
++#else
++ struct ipt_set_info_match *info = par->matchinfo;
++#endif
+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
+ if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
+ ip_set_printk("invalid matchsize %d", matchsize);
+ return;
+ }
+#endif
-+ ip_set_put(info->match_set.index);
++ ip_set_put_byindex(info->match_set.index);
+}
+
-+static struct ipt_match set_match = {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static struct xt_match set_match = {
++ .name = "set",
++ .match = &match,
++ .checkentry = &checkentry,
++ .destroy = &destroy,
++ .me = THIS_MODULE
++};
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17) */
++static struct xt_match set_match = {
+ .name = "set",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
+ .family = AF_INET,
-+#endif
+ .match = &match,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+ .matchsize = sizeof(struct ipt_set_info_match),
-+#endif
+ .checkentry = &checkentry,
+ .destroy = &destroy,
+ .me = THIS_MODULE
+};
++#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("iptables IP set match module");
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+#define ipt_register_match xt_register_match
-+#define ipt_unregister_match xt_unregister_match
-+#endif
-+
+static int __init ipt_ipset_init(void)
+{
-+ return ipt_register_match(&set_match);
++ return xt_register_match(&set_match);
+}
+
+static void __exit ipt_ipset_fini(void)
+{
-+ ipt_unregister_match(&set_match);
++ xt_unregister_match(&set_match);
+}
+
+module_init(ipt_ipset_init);
+module_exit(ipt_ipset_fini);
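
Rather than splicing one prototype together from nested #ifs, the updated match module carries a complete match()/checkentry()/destroy() prototype per kernel API generation, up to the 2.6.28 xt_*_param bundles, registers through xt_register_match() (with #defines mapping back to the ipt_* names before 2.6.16), and releases its set reference with ip_set_put_byindex(). The version selection works because KERNEL_VERSION() packs major/minor/patch into one comparable integer; a compile-time sketch of that idiom (standalone, with the macro restated here purely for illustration):

    /* linux/version.h provides LINUX_VERSION_CODE and KERNEL_VERSION(a,b,c). */
    #define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
    #define LINUX_VERSION_CODE KERNEL_VERSION(2, 6, 27)   /* pretend build target */

    #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 16)
    #define MATCH_API "pre-xtables (ipt_register_match)"
    #elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
    #define MATCH_API "xtables, argument-list prototypes"
    #else
    #define MATCH_API "xtables, xt_match_param bundle"
    #endif

    #include <stdio.h>

    int main(void)
    {
        printf("2.6.27 selects: %s\n", MATCH_API);   /* middle branch */
        return 0;
    }

Because the ranges are half-open (< the next API change), adding support for a newer kernel only means appending one more branch, which is exactly how the 2.6.28 variants were slotted in above.
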
--- /dev/null
+++ b/net/ipv4/netfilter/ipt_SET.c
-@@ -0,0 +1,179 @@
+@@ -0,0 +1,242 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Martin Josefsson <gandalf@wlug.westbo.se>
@@ -7365,45 +7336,75 @@
+
+/* ipt_SET.c - netfilter target to manipulate IP sets */
+
-+#include <linux/types.h>
-+#include <linux/ip.h>
-+#include <linux/timer.h>
+#include <linux/module.h>
-+#include <linux/netfilter.h>
-+#include <linux/netdevice.h>
-+#include <linux/if.h>
-+#include <linux/inetdevice.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
+#include <linux/version.h>
-+#include <net/protocol.h>
-+#include <net/checksum.h>
++
+#include <linux/netfilter_ipv4.h>
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
+#include <linux/netfilter_ipv4/ip_tables.h>
++#define xt_register_target ipt_register_target
++#define xt_unregister_target ipt_unregister_target
++#define xt_target ipt_target
++#define XT_CONTINUE IPT_CONTINUE
++#else
++#include <linux/netfilter/x_tables.h>
++#endif
+#include <linux/netfilter_ipv4/ipt_set.h>
+
+static unsigned int
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
-+target(struct sk_buff *skb,
-+#else
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++target(struct sk_buff **pskb,
++ unsigned int hooknum,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *targinfo,
++ void *userinfo)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++target(struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ unsigned int hooknum,
++ const void *targinfo,
++ void *userinfo)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+target(struct sk_buff **pskb,
-+#endif
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int hooknum,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+ const struct xt_target *target,
-+#endif
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+ const void *targinfo,
+ void *userinfo)
-+#else
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
++target(struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ unsigned int hooknum,
++ const struct xt_target *target,
++ const void *targinfo)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++target(struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ unsigned int hooknum,
++ const struct xt_target *target,
+ const void *targinfo)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
++target(struct sk_buff *skb,
++ const struct xt_target_param *par)
+#endif
+{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+ const struct ipt_set_info_target *info = targinfo;
++#else
++ const struct ipt_set_info_target *info = par->targinfo;
++#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+ struct sk_buff *skb = *pskb;
+#endif
+
++
+ if (info->add_set.index != IP_SET_INVALID_ID)
+ ip_set_addip_kernel(info->add_set.index,
+ skb,
@@ -7413,34 +7414,58 @@
+ skb,
+ info->del_set.flags);
+
-+ return IPT_CONTINUE;
++ return XT_CONTINUE;
+}
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+static bool
-+#else
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
+static int
-+#endif
+checkentry(const char *tablename,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-+ const void *e,
-+#else
+ const struct ipt_entry *e,
-+#endif
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++ void *targinfo,
++ unsigned int targinfosize,
++ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static int
++checkentry(const char *tablename,
++ const void *e,
++ void *targinfo,
++ unsigned int targinfosize,
++ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++static int
++checkentry(const char *tablename,
++ const void *e,
+ const struct xt_target *target,
-+#endif
+ void *targinfo,
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+ unsigned int targinfosize,
-+#endif
+ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
++static int
++checkentry(const char *tablename,
++ const void *e,
++ const struct xt_target *target,
++ void *targinfo,
++ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++static bool
++checkentry(const char *tablename,
++ const void *e,
++ const struct xt_target *target,
++ void *targinfo,
++ unsigned int hook_mask)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
++static bool
++checkentry(const struct xt_tgchk_param *par)
++#endif
+{
-+ struct ipt_set_info_target *info =
-+ (struct ipt_set_info_target *) targinfo;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++ const struct ipt_set_info_target *info = targinfo;
++#else
++ const struct ipt_set_info_target *info = par->targinfo;
++#endif
+ ip_set_id_t index;
+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
+ if (targinfosize != IPT_ALIGN(sizeof(*info))) {
+ DP("bad target info size %u", targinfosize);
+ return 0;
@@ -7473,68 +7498,77 @@
+ return 1;
+}
+
-+static void destroy(
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-+ const struct xt_target *target,
-+#endif
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ void *targetinfo, unsigned int targetsize)
-+#else
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static void destroy(void *targetinfo,
++ unsigned int targetsize)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++static void destroy(const struct xt_target *target,
++ void *targetinfo,
++ unsigned int targetsize)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++static void destroy(const struct xt_target *target,
+ void *targetinfo)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
++static void destroy(const struct xt_tgdtor_param *par)
+#endif
+{
-+ struct ipt_set_info_target *info = targetinfo;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++ const struct ipt_set_info_target *info = targetinfo;
++#else
++ const struct ipt_set_info_target *info = par->targinfo;
++#endif
+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
+ if (targetsize != IPT_ALIGN(sizeof(struct ipt_set_info_target))) {
+ ip_set_printk("invalid targetsize %d", targetsize);
+ return;
+ }
+#endif
+ if (info->add_set.index != IP_SET_INVALID_ID)
-+ ip_set_put(info->add_set.index);
++ ip_set_put_byindex(info->add_set.index);
+ if (info->del_set.index != IP_SET_INVALID_ID)
-+ ip_set_put(info->del_set.index);
++ ip_set_put_byindex(info->del_set.index);
+}
+
-+static struct ipt_target SET_target = {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static struct xt_target SET_target = {
++ .name = "SET",
++ .target = target,
++ .checkentry = checkentry,
++ .destroy = destroy,
++ .me = THIS_MODULE
++};
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17) */
++static struct xt_target SET_target = {
+ .name = "SET",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
+ .family = AF_INET,
-+#endif
+ .target = target,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+ .targetsize = sizeof(struct ipt_set_info_target),
-+#endif
+ .checkentry = checkentry,
+ .destroy = destroy,
+ .me = THIS_MODULE
+};
++#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("iptables IP set target module");
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+#define ipt_register_target xt_register_target
-+#define ipt_unregister_target xt_unregister_target
-+#endif
-+
+static int __init ipt_SET_init(void)
+{
-+ return ipt_register_target(&SET_target);
++ return xt_register_target(&SET_target);
+}
+
+static void __exit ipt_SET_fini(void)
+{
-+ ipt_unregister_target(&SET_target);
++ xt_unregister_target(&SET_target);
+}
+
+module_init(ipt_SET_init);
+module_exit(ipt_SET_fini);
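
The SET target follows the same pattern as the match: per-version prototypes, xt_register_target(), XT_CONTINUE in place of IPT_CONTINUE, and ip_set_put_byindex() on teardown. The work itself is unchanged: add the packet to add_set if one is configured, delete it from del_set if one is configured, and let rule traversal continue. A minimal sketch of that control flow with stubbed set operations (all names below are placeholders, not the kernel API):

    #include <stdio.h>

    #define SET_INVALID_ID 0xffffu
    #define CONTINUE 0              /* stands in for XT_CONTINUE */

    struct target_info {
        unsigned add_set;           /* set to add to, or SET_INVALID_ID */
        unsigned del_set;           /* set to delete from, or SET_INVALID_ID */
    };

    static void set_add(unsigned idx, const char *pkt)
    {
        printf("add %s to set %u\n", pkt, idx);
    }

    static void set_del(unsigned idx, const char *pkt)
    {
        printf("del %s from set %u\n", pkt, idx);
    }

    static int set_target(const struct target_info *info, const char *pkt)
    {
        if (info->add_set != SET_INVALID_ID)
            set_add(info->add_set, pkt);
        if (info->del_set != SET_INVALID_ID)
            set_del(info->del_set, pkt);
        return CONTINUE;            /* the target never terminates the rule itself */
    }

    int main(void)
    {
        struct target_info info = { .add_set = 1, .del_set = SET_INVALID_ID };
        return set_target(&info, "192.0.2.1") == CONTINUE ? 0 : 1;
    }
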
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
-@@ -394,5 +394,122 @@ config IP_NF_ARP_MANGLE
+@@ -394,5 +394,146 @@ config IP_NF_ARP_MANGLE
endif # IP_NF_ARPTABLES
@@ -7619,6 +7653,22 @@
+
+ To compile it as a module, choose M here. If unsure, say N.
+
++config IP_NF_SET_IPPORTIPHASH
++ tristate "ipportiphash set support"
++ depends on IP_NF_SET
++ help
++ This option adds the ipportiphash set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config IP_NF_SET_IPPORTNETHASH
++ tristate "ipportnethash set support"
++ depends on IP_NF_SET
++ help
++ This option adds the ipportnethash set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
+config IP_NF_SET_IPTREE
+ tristate "iptree set support"
+ depends on IP_NF_SET
@@ -7635,6 +7685,14 @@
+
+ To compile it as a module, choose M here. If unsure, say N.
+
++config IP_NF_SET_SETLIST
++ tristate "setlist set support"
++ depends on IP_NF_SET
++ help
++ This option adds the setlist set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
+config IP_NF_MATCH_SET
+ tristate "set match support"
+ depends on IP_NF_SET
@@ -7667,7 +7725,7 @@
# targets
obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
-@@ -63,6 +64,18 @@ obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += i
+@@ -63,6 +64,21 @@ obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += i
obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o
obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
@@ -7681,8 +7739,11 @@
+obj-$(CONFIG_IP_NF_SET_IPHASH) += ip_set_iphash.o
+obj-$(CONFIG_IP_NF_SET_NETHASH) += ip_set_nethash.o
+obj-$(CONFIG_IP_NF_SET_IPPORTHASH) += ip_set_ipporthash.o
++obj-$(CONFIG_IP_NF_SET_IPPORTIPHASH) += ip_set_ipportiphash.o
++obj-$(CONFIG_IP_NF_SET_IPPORTNETHASH) += ip_set_ipportnethash.o
+obj-$(CONFIG_IP_NF_SET_IPTREE) += ip_set_iptree.o
+obj-$(CONFIG_IP_NF_SET_IPTREEMAP) += ip_set_iptreemap.o
++obj-$(CONFIG_IP_NF_SET_SETLIST) += ip_set_setlist.o
# generic ARP tables
obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o
diff --git a/target/linux/generic-2.6/patches-2.6.30/130-netfilter_ipset.patch b/target/linux/generic-2.6/patches-2.6.30/130-netfilter_ipset.patch
index efe1041e6d..832f679d7a 100644
--- a/target/linux/generic-2.6/patches-2.6.30/130-netfilter_ipset.patch
+++ b/target/linux/generic-2.6/patches-2.6.30/130-netfilter_ipset.patch
@@ -1,23 +1,29 @@
--- a/include/linux/netfilter_ipv4/Kbuild
+++ b/include/linux/netfilter_ipv4/Kbuild
-@@ -45,3 +45,14 @@ header-y += ipt_ttl.h
+@@ -45,3 +45,20 @@ header-y += ipt_ttl.h
unifdef-y += ip_queue.h
unifdef-y += ip_tables.h
+
+unifdef-y += ip_set.h
+header-y += ip_set_iphash.h
++unifdef-y += ip_set_bitmaps.h
++unifdef-y += ip_set_getport.h
++unifdef-y += ip_set_hashes.h
+header-y += ip_set_ipmap.h
+header-y += ip_set_ipporthash.h
++header-y += ip_set_ipportiphash.h
++header-y += ip_set_ipportnethash.h
+unifdef-y += ip_set_iptree.h
+unifdef-y += ip_set_iptreemap.h
+header-y += ip_set_jhash.h
+header-y += ip_set_macipmap.h
-+unifdef-y += ip_set_nethash.h
++header-y += ip_set_nethash.h
+header-y += ip_set_portmap.h
++header-y += ip_set_setlist.h
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set.h
-@@ -0,0 +1,498 @@
+@@ -0,0 +1,574 @@
+#ifndef _IP_SET_H
+#define _IP_SET_H
+
@@ -28,7 +34,7 @@
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
++ * published by the Free Software Foundation.
+ */
+
+#if 0
@@ -57,10 +63,10 @@
+ * - in order to "deal with" backward compatibility, renamed to ipset
+ */
+
-+/*
-+ * Used so that the kernel module and ipset-binary can match their versions
++/*
++ * Used so that the kernel module and ipset-binary can match their versions
+ */
-+#define IP_SET_PROTOCOL_VERSION 2
++#define IP_SET_PROTOCOL_VERSION 3
+
+#define IP_SET_MAXNAMELEN 32 /* set names and set typenames */
+
@@ -69,7 +75,7 @@
+ *
+ * The representation works in HOST byte order, because most set types
+ * will perform arithmetic operations and compare operations.
-+ *
++ *
+ * For now the type is an uint32_t.
+ *
+ * Make sure to ONLY use the functions when translating and parsing
@@ -107,6 +113,9 @@
+#define IPSET_TYPE_PORT 0x02 /* Port type of set */
+#define IPSET_DATA_SINGLE 0x04 /* Single data storage */
+#define IPSET_DATA_DOUBLE 0x08 /* Double data storage */
++#define IPSET_DATA_TRIPLE 0x10 /* Triple data storage */
++#define IPSET_TYPE_IP1 0x20 /* IP address type of set */
++#define IPSET_TYPE_SETNAME 0x40 /* setname type of set */
+
+/* Reserved keywords */
+#define IPSET_TOKEN_DEFAULT ":default:"
@@ -120,8 +129,8 @@
+ * 200-299: list, save, restore
+ */
+
-+/* Single shot operations:
-+ * version, create, destroy, flush, rename and swap
++/* Single shot operations:
++ * version, create, destroy, flush, rename and swap
+ *
+ * Sets are identified by name.
+ */
@@ -172,7 +181,7 @@
+ unsigned version;
+};
+
-+/* Double shots operations:
++/* Double shots operations:
+ * add, del, test, bind and unbind.
+ *
+ * First we query the kernel to get the index and type of the target set,
@@ -214,7 +223,7 @@
+};
+
+#define IP_SET_OP_UNBIND_SET 0x00000105 /* Unbind an IP from a set */
-+/* Uses ip_set_req_bind, with type speficic addage
++/* Uses ip_set_req_bind, with type specific addage
+ * index = 0 means unbinding for all sets */
+
+#define IP_SET_OP_TEST_BIND_SET 0x00000106 /* Test binding an IP to a set */
@@ -245,7 +254,7 @@
+struct ip_set_req_setnames {
+ unsigned op;
+ ip_set_id_t index; /* set to list/save */
-+ size_t size; /* size to get setdata/bindings */
++ u_int32_t size; /* size to get setdata/bindings */
+ /* followed by sets number of struct ip_set_name_list */
+};
+
@@ -260,16 +269,16 @@
+#define IP_SET_OP_LIST 0x00000203
+struct ip_set_req_list {
+ IP_SET_REQ_BYINDEX;
-+ /* sets number of struct ip_set_list in reply */
++ /* sets number of struct ip_set_list in reply */
+};
+
+struct ip_set_list {
+ ip_set_id_t index;
+ ip_set_id_t binding;
+ u_int32_t ref;
-+ size_t header_size; /* Set header data of header_size */
-+ size_t members_size; /* Set members data of members_size */
-+ size_t bindings_size; /* Set bindings data of bindings_size */
++ u_int32_t header_size; /* Set header data of header_size */
++ u_int32_t members_size; /* Set members data of members_size */
++ u_int32_t bindings_size;/* Set bindings data of bindings_size */
+};
+
+struct ip_set_hash_list {
@@ -286,8 +295,8 @@
+struct ip_set_save {
+ ip_set_id_t index;
+ ip_set_id_t binding;
-+ size_t header_size; /* Set header data of header_size */
-+ size_t members_size; /* Set members data of members_size */
++ u_int32_t header_size; /* Set header data of header_size */
++ u_int32_t members_size; /* Set members data of members_size */
+};
+
+/* At restoring, ip == 0 means default binding for the given set: */
@@ -300,15 +309,15 @@
+/* The restore operation */
+#define IP_SET_OP_RESTORE 0x00000205
+/* Uses ip_set_req_setnames followed by ip_set_restore structures
-+ * plus a marker ip_set_restore, followed by ip_set_hash_save
++ * plus a marker ip_set_restore, followed by ip_set_hash_save
+ * structures.
+ */
+struct ip_set_restore {
+ char name[IP_SET_MAXNAMELEN];
+ char typename[IP_SET_MAXNAMELEN];
+ ip_set_id_t index;
-+ size_t header_size; /* Create data of header_size */
-+ size_t members_size; /* Set members data of members_size */
++ u_int32_t header_size; /* Create data of header_size */
++ u_int32_t members_size; /* Set members data of members_size */
+};
+
+static inline int bitmap_bytes(ip_set_ip_t a, ip_set_ip_t b)
@@ -316,7 +325,12 @@
+ return 4 * ((((b - a + 8) / 8) + 3) / 4);
+}
+
++/* General limit for the elements in a set */
++#define MAX_RANGE 0x0000FFFF
++
+#ifdef __KERNEL__
++#include <linux/netfilter_ipv4/ip_set_compat.h>
++#include <linux/netfilter_ipv4/ip_set_malloc.h>
+
+#define ip_set_printk(format, args...) \
+ do { \
@@ -361,7 +375,7 @@
+ * return 0 if not in set, 1 if in set.
+ */
+ int (*testip_kernel) (struct ip_set *set,
-+ const struct sk_buff * skb,
++ const struct sk_buff * skb,
+ ip_set_ip_t *ip,
+ const u_int32_t *flags,
+ unsigned char index);
@@ -370,22 +384,22 @@
+ * return 0 if not in set, 1 if in set.
+ */
+ int (*testip) (struct ip_set *set,
-+ const void *data, size_t size,
++ const void *data, u_int32_t size,
+ ip_set_ip_t *ip);
+
+ /*
+ * Size of the data structure passed by when
+ * adding/deletin/testing an entry.
+ */
-+ size_t reqsize;
++ u_int32_t reqsize;
+
+ /* Add IP into set (userspace: ipset -A set IP)
+ * Return -EEXIST if the address is already in the set,
+ * and -ERANGE if the address lies outside the set bounds.
+ * If the address was not already in the set, 0 is returned.
+ */
-+ int (*addip) (struct ip_set *set,
-+ const void *data, size_t size,
++ int (*addip) (struct ip_set *set,
++ const void *data, u_int32_t size,
+ ip_set_ip_t *ip);
+
+ /* Add IP into set (kernel: iptables ... -j SET set src|dst)
@@ -394,7 +408,7 @@
+ * If the address was not already in the set, 0 is returned.
+ */
+ int (*addip_kernel) (struct ip_set *set,
-+ const struct sk_buff * skb,
++ const struct sk_buff * skb,
+ ip_set_ip_t *ip,
+ const u_int32_t *flags,
+ unsigned char index);
@@ -404,8 +418,8 @@
+ * and -ERANGE if the address lies outside the set bounds.
+ * If the address really was in the set, 0 is returned.
+ */
-+ int (*delip) (struct ip_set *set,
-+ const void *data, size_t size,
++ int (*delip) (struct ip_set *set,
++ const void *data, u_int32_t size,
+ ip_set_ip_t *ip);
+
+ /* remove IP from set (kernel: iptables ... -j SET --entry x)
@@ -414,7 +428,7 @@
+ * If the address really was in the set, 0 is returned.
+ */
+ int (*delip_kernel) (struct ip_set *set,
-+ const struct sk_buff * skb,
++ const struct sk_buff * skb,
+ ip_set_ip_t *ip,
+ const u_int32_t *flags,
+ unsigned char index);
@@ -422,7 +436,7 @@
+ /* new set creation - allocated type specific items
+ */
+ int (*create) (struct ip_set *set,
-+ const void *data, size_t size);
++ const void *data, u_int32_t size);
+
+ /* retry the operation after successfully tweaking the set
+ */
@@ -441,16 +455,16 @@
+
+ /* Listing: size needed for header
+ */
-+ size_t header_size;
++ u_int32_t header_size;
+
+ /* Listing: Get the header
+ *
+ * Fill in the information in "data".
-+ * This function is always run after list_header_size() under a
-+ * writelock on the set. Therefor is the length of "data" always
-+ * correct.
++ * This function is always run after list_header_size() under a
++ * writelock on the set. Therefore the length of "data" is always
++ * correct.
+ */
-+ void (*list_header) (const struct ip_set *set,
++ void (*list_header) (const struct ip_set *set,
+ void *data);
+
+ /* Listing: Get the size for the set members
@@ -460,9 +474,9 @@
+ /* Listing: Get the set members
+ *
+ * Fill in the information in "data".
-+ * This function is always run after list_member_size() under a
-+ * writelock on the set. Therefor is the length of "data" always
-+ * correct.
++ * This function is always run after list_member_size() under a
++ * writelock on the set. Therefore the length of "data" is always
++ * correct.
+ */
+ void (*list_members) (const struct ip_set *set,
+ void *data);
@@ -499,33 +513,659 @@
+
+/* register and unregister set references */
+extern ip_set_id_t ip_set_get_byname(const char name[IP_SET_MAXNAMELEN]);
-+extern ip_set_id_t ip_set_get_byindex(ip_set_id_t id);
-+extern void ip_set_put(ip_set_id_t id);
++extern ip_set_id_t ip_set_get_byindex(ip_set_id_t index);
++extern void ip_set_put_byindex(ip_set_id_t index);
++extern ip_set_id_t ip_set_id(ip_set_id_t index);
++extern ip_set_id_t __ip_set_get_byname(const char name[IP_SET_MAXNAMELEN],
++ struct ip_set **set);
++extern void __ip_set_put_byindex(ip_set_id_t index);
+
+/* API for iptables set match, and SET target */
-+extern void ip_set_addip_kernel(ip_set_id_t id,
-+ const struct sk_buff *skb,
-+ const u_int32_t *flags);
-+extern void ip_set_delip_kernel(ip_set_id_t id,
-+ const struct sk_buff *skb,
-+ const u_int32_t *flags);
++extern int ip_set_addip_kernel(ip_set_id_t id,
++ const struct sk_buff *skb,
++ const u_int32_t *flags);
++extern int ip_set_delip_kernel(ip_set_id_t id,
++ const struct sk_buff *skb,
++ const u_int32_t *flags);
+extern int ip_set_testip_kernel(ip_set_id_t id,
+ const struct sk_buff *skb,
+ const u_int32_t *flags);
+
++/* Macros to generate functions */
++
++#define STRUCT(pre, type) CONCAT2(pre, type)
++#define CONCAT2(pre, type) struct pre##type
++
++#define FNAME(pre, mid, post) CONCAT3(pre, mid, post)
++#define CONCAT3(pre, mid, post) pre##mid##post
++
++#define UADT0(type, adt, args...) \
++static int \
++FNAME(type,_u,adt)(struct ip_set *set, const void *data, u_int32_t size,\
++ ip_set_ip_t *hash_ip) \
++{ \
++ const STRUCT(ip_set_req_,type) *req = data; \
++ \
++ return FNAME(type,_,adt)(set, hash_ip , ## args); \
++}
++
++#define UADT(type, adt, args...) \
++ UADT0(type, adt, req->ip , ## args)
++
++#define KADT(type, adt, getfn, args...) \
++static int \
++FNAME(type,_k,adt)(struct ip_set *set, \
++ const struct sk_buff *skb, \
++ ip_set_ip_t *hash_ip, \
++ const u_int32_t *flags, \
++ unsigned char index) \
++{ \
++ ip_set_ip_t ip = getfn(skb, flags[index]); \
++ \
++ KADT_CONDITION \
++ return FNAME(type,_,adt)(set, hash_ip, ip , ##args); \
++}
++
++#define REGISTER_MODULE(type) \
++static int __init ip_set_##type##_init(void) \
++{ \
++ init_max_page_size(); \
++ return ip_set_register_set_type(&ip_set_##type); \
++} \
++ \
++static void __exit ip_set_##type##_fini(void) \
++{ \
++ /* FIXME: possible race with ip_set_create() */ \
++ ip_set_unregister_set_type(&ip_set_##type); \
++} \
++ \
++module_init(ip_set_##type##_init); \
++module_exit(ip_set_##type##_fini);
++
++/* Common functions */
++
++static inline ip_set_ip_t
++ipaddr(const struct sk_buff *skb, u_int32_t flag)
++{
++ return ntohl(flag & IPSET_SRC ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr);
++}
++
++#define jhash_ip(map, i, ip) jhash_1word(ip, *(map->initval + i))
++
++#define pack_ip_port(map, ip, port) \
++ (port + ((ip - ((map)->first_ip)) << 16))
++
+#endif /* __KERNEL__ */
+
+#endif /*_IP_SET_H*/
--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_bitmaps.h
+@@ -0,0 +1,121 @@
++#ifndef __IP_SET_BITMAPS_H
++#define __IP_SET_BITMAPS_H
++
++/* Macros to generate functions */
++
++#ifdef __KERNEL__
++#define BITMAP_CREATE(type) \
++static int \
++type##_create(struct ip_set *set, const void *data, u_int32_t size) \
++{ \
++ int newbytes; \
++ const struct ip_set_req_##type##_create *req = data; \
++ struct ip_set_##type *map; \
++ \
++ if (req->from > req->to) { \
++ DP("bad range"); \
++ return -ENOEXEC; \
++ } \
++ \
++ map = kmalloc(sizeof(struct ip_set_##type), GFP_KERNEL); \
++ if (!map) { \
++ DP("out of memory for %zu bytes", \
++ sizeof(struct ip_set_##type)); \
++ return -ENOMEM; \
++ } \
++ map->first_ip = req->from; \
++ map->last_ip = req->to; \
++ \
++ newbytes = __##type##_create(req, map); \
++ if (newbytes < 0) { \
++ kfree(map); \
++ return newbytes; \
++ } \
++ \
++ map->size = newbytes; \
++ map->members = ip_set_malloc(newbytes); \
++ if (!map->members) { \
++ DP("out of memory for %i bytes", newbytes); \
++ kfree(map); \
++ return -ENOMEM; \
++ } \
++ memset(map->members, 0, newbytes); \
++ \
++ set->data = map; \
++ return 0; \
++}
++
++#define BITMAP_DESTROY(type) \
++static void \
++type##_destroy(struct ip_set *set) \
++{ \
++ struct ip_set_##type *map = set->data; \
++ \
++ ip_set_free(map->members, map->size); \
++ kfree(map); \
++ \
++ set->data = NULL; \
++}
++
++#define BITMAP_FLUSH(type) \
++static void \
++type##_flush(struct ip_set *set) \
++{ \
++ struct ip_set_##type *map = set->data; \
++ memset(map->members, 0, map->size); \
++}
++
++#define BITMAP_LIST_HEADER(type) \
++static void \
++type##_list_header(const struct ip_set *set, void *data) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ struct ip_set_req_##type##_create *header = data; \
++ \
++ header->from = map->first_ip; \
++ header->to = map->last_ip; \
++ __##type##_list_header(map, header); \
++}
++
++#define BITMAP_LIST_MEMBERS_SIZE(type) \
++static int \
++type##_list_members_size(const struct ip_set *set) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ \
++ return map->size; \
++}
++
++#define BITMAP_LIST_MEMBERS(type) \
++static void \
++type##_list_members(const struct ip_set *set, void *data) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ \
++ memcpy(data, map->members, map->size); \
++}
++
++#define IP_SET_TYPE(type, __features) \
++struct ip_set_type ip_set_##type = { \
++ .typename = #type, \
++ .features = __features, \
++ .protocol_version = IP_SET_PROTOCOL_VERSION, \
++ .create = &type##_create, \
++ .destroy = &type##_destroy, \
++ .flush = &type##_flush, \
++ .reqsize = sizeof(struct ip_set_req_##type), \
++ .addip = &type##_uadd, \
++ .addip_kernel = &type##_kadd, \
++ .delip = &type##_udel, \
++ .delip_kernel = &type##_kdel, \
++ .testip = &type##_utest, \
++ .testip_kernel = &type##_ktest, \
++ .header_size = sizeof(struct ip_set_req_##type##_create),\
++ .list_header = &type##_list_header, \
++ .list_members_size = &type##_list_members_size, \
++ .list_members = &type##_list_members, \
++ .me = THIS_MODULE, \
++};
++#endif /* __KERNEL */
++
++#endif /* __IP_SET_BITMAPS_H */
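/* Editorial sketch, not part of the patch: a bitmap-backed set type such as
 * ipmap is expected to expand the macros above once per type, after providing
 * the type-specific __<type>_create()/__<type>_list_header() helpers and the
 * <type>_uadd/_kadd/... functions generated by the UADT()/KADT() macros in
 * ip_set.h.  The instantiation below is an assumption based on the macro
 * bodies, using ipmap as the example type and a guessed feature mask. */
BITMAP_CREATE(ipmap)
BITMAP_DESTROY(ipmap)
BITMAP_FLUSH(ipmap)
BITMAP_LIST_HEADER(ipmap)
BITMAP_LIST_MEMBERS_SIZE(ipmap)
BITMAP_LIST_MEMBERS(ipmap)
IP_SET_TYPE(ipmap, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
REGISTER_MODULE(ipmap)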
+--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_compat.h
+@@ -0,0 +1,71 @@
++#ifndef _IP_SET_COMPAT_H
++#define _IP_SET_COMPAT_H
++
++#ifdef __KERNEL__
++#include <linux/version.h>
++
++/* Arrgh */
++#ifdef MODULE
++#define __MOD_INC(foo) __MOD_INC_USE_COUNT(foo)
++#define __MOD_DEC(foo) __MOD_DEC_USE_COUNT(foo)
++#else
++#define __MOD_INC(foo) 1
++#define __MOD_DEC(foo)
++#endif
++
++/* Backward compatibility */
++#ifndef __nocast
++#define __nocast
++#endif
++#ifndef __bitwise__
++#define __bitwise__
++#endif
++
++/* Compatibility glue code */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++#include <linux/interrupt.h>
++#define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED
++#define try_module_get(x) __MOD_INC(x)
++#define module_put(x) __MOD_DEC(x)
++#define __clear_bit(nr, addr) clear_bit(nr, addr)
++#define __set_bit(nr, addr) set_bit(nr, addr)
++#define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)
++#define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)
++
++typedef unsigned __bitwise__ gfp_t;
++
++static inline void *kzalloc(size_t size, gfp_t flags)
++{
++ void *data = kmalloc(size, flags);
++
++ if (data)
++ memset(data, 0, size);
++
++ return data;
++}
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++#define __KMEM_CACHE_T__ kmem_cache_t
++#else
++#define __KMEM_CACHE_T__ struct kmem_cache
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
++#define ip_hdr(skb) ((skb)->nh.iph)
++#define skb_mac_header(skb) ((skb)->mac.raw)
++#define eth_hdr(skb) ((struct ethhdr *)skb_mac_header(skb))
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
++#include <linux/netfilter.h>
++#define KMEM_CACHE_CREATE(name, size) \
++ kmem_cache_create(name, size, 0, 0, NULL, NULL)
++#else
++#define KMEM_CACHE_CREATE(name, size) \
++ kmem_cache_create(name, size, 0, 0, NULL)
++#endif
++
++
++#endif /* __KERNEL__ */
++#endif /* _IP_SET_COMPAT_H */
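/* Editorial sketch, not part of the patch: the point of the glue above is
 * that a single call site compiles on both sides of the 2.6.20/2.6.23
 * interface changes.  The cache name and object size are invented for
 * illustration. */
        __KMEM_CACHE_T__ *cachep;

        cachep = KMEM_CACHE_CREATE("ip_set_example", 64);
        if (!cachep)
                return -ENOMEM;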
+--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_getport.h
+@@ -0,0 +1,48 @@
++#ifndef _IP_SET_GETPORT_H
++#define _IP_SET_GETPORT_H
++
++#ifdef __KERNEL__
++
++#define INVALID_PORT (MAX_RANGE + 1)
++
++/* We must handle non-linear skbs */
++static inline ip_set_ip_t
++get_port(const struct sk_buff *skb, u_int32_t flags)
++{
++ struct iphdr *iph = ip_hdr(skb);
++ u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
++ switch (iph->protocol) {
++ case IPPROTO_TCP: {
++ struct tcphdr tcph;
++
++ /* See comments at tcp_match in ip_tables.c */
++ if (offset)
++ return INVALID_PORT;
++
++ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
++ /* No choice either */
++ return INVALID_PORT;
++
++ return ntohs(flags & IPSET_SRC ?
++ tcph.source : tcph.dest);
++ }
++ case IPPROTO_UDP: {
++ struct udphdr udph;
++
++ if (offset)
++ return INVALID_PORT;
++
++ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
++ /* No choice either */
++ return INVALID_PORT;
++
++ return ntohs(flags & IPSET_SRC ?
++ udph.source : udph.dest);
++ }
++ default:
++ return INVALID_PORT;
++ }
++}
++#endif /* __KERNEL__ */
++
++#endif /*_IP_SET_GETPORT_H*/
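/* Editorial sketch, not part of the patch: how a port-aware set type's
 * kernel-side add/test path is expected to consume get_port().  Only
 * get_port() and INVALID_PORT come from the header above; the variable names
 * and the surrounding function are assumptions.  INVALID_PORT is
 * MAX_RANGE + 1 = 0x10000, one past the largest 16-bit port, so it can never
 * collide with a real TCP/UDP port. */
        ip_set_ip_t port = get_port(skb, flags[index]);

        if (port == INVALID_PORT)       /* fragment, truncated header,
                                         * or neither TCP nor UDP */
                return 0;               /* treat the packet as "not in set" */
        /* ... otherwise hash/test (ip, port) as the set type requires ... */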
+--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_hashes.h
+@@ -0,0 +1,306 @@
++#ifndef __IP_SET_HASHES_H
++#define __IP_SET_HASHES_H
++
++#define initval_t uint32_t
++
++/* Macros to generate functions */
++
++#ifdef __KERNEL__
++#define HASH_RETRY0(type, dtype, cond) \
++static int \
++type##_retry(struct ip_set *set) \
++{ \
++ struct ip_set_##type *map = set->data, *tmp; \
++ dtype *elem; \
++ void *members; \
++ u_int32_t i, hashsize = map->hashsize; \
++ int res; \
++ \
++ if (map->resize == 0) \
++ return -ERANGE; \
++ \
++ again: \
++ res = 0; \
++ \
++ /* Calculate new hash size */ \
++ hashsize += (hashsize * map->resize)/100; \
++ if (hashsize == map->hashsize) \
++ hashsize++; \
++ \
++ ip_set_printk("rehashing of set %s triggered: " \
++ "hashsize grows from %lu to %lu", \
++ set->name, \
++ (long unsigned)map->hashsize, \
++ (long unsigned)hashsize); \
++ \
++ tmp = kmalloc(sizeof(struct ip_set_##type) \
++ + map->probes * sizeof(initval_t), GFP_ATOMIC); \
++ if (!tmp) { \
++ DP("out of memory for %zu bytes", \
++ sizeof(struct ip_set_##type) \
++ + map->probes * sizeof(initval_t)); \
++ return -ENOMEM; \
++ } \
++ tmp->members = harray_malloc(hashsize, sizeof(dtype), GFP_ATOMIC);\
++ if (!tmp->members) { \
++ DP("out of memory for %zu bytes", hashsize * sizeof(dtype));\
++ kfree(tmp); \
++ return -ENOMEM; \
++ } \
++ tmp->hashsize = hashsize; \
++ tmp->elements = 0; \
++ tmp->probes = map->probes; \
++ tmp->resize = map->resize; \
++ memcpy(tmp->initval, map->initval, map->probes * sizeof(initval_t));\
++ __##type##_retry(tmp, map); \
++ \
++ write_lock_bh(&set->lock); \
++ map = set->data; /* Play safe */ \
++ for (i = 0; i < map->hashsize && res == 0; i++) { \
++ elem = HARRAY_ELEM(map->members, dtype *, i); \
++ if (cond) \
++ res = __##type##_add(tmp, elem); \
++ } \
++ if (res) { \
++ /* Failure, try again */ \
++ write_unlock_bh(&set->lock); \
++ harray_free(tmp->members); \
++ kfree(tmp); \
++ goto again; \
++ } \
++ \
++ /* Success at resizing! */ \
++ members = map->members; \
++ \
++ map->hashsize = tmp->hashsize; \
++ map->members = tmp->members; \
++ write_unlock_bh(&set->lock); \
++ \
++ harray_free(members); \
++ kfree(tmp); \
++ \
++ return 0; \
++}
++
++#define HASH_RETRY(type, dtype) \
++ HASH_RETRY0(type, dtype, *elem)
++
++#define HASH_RETRY2(type, dtype) \
++ HASH_RETRY0(type, dtype, elem->ip || elem->ip1)
++
++#define HASH_CREATE(type, dtype) \
++static int \
++type##_create(struct ip_set *set, const void *data, u_int32_t size) \
++{ \
++ const struct ip_set_req_##type##_create *req = data; \
++ struct ip_set_##type *map; \
++ uint16_t i; \
++ \
++ if (req->hashsize < 1) { \
++ ip_set_printk("hashsize too small"); \
++ return -ENOEXEC; \
++ } \
++ \
++ if (req->probes < 1) { \
++ ip_set_printk("probes too small"); \
++ return -ENOEXEC; \
++ } \
++ \
++ map = kmalloc(sizeof(struct ip_set_##type) \
++ + req->probes * sizeof(initval_t), GFP_KERNEL); \
++ if (!map) { \
++ DP("out of memory for %zu bytes", \
++ sizeof(struct ip_set_##type) \
++ + req->probes * sizeof(initval_t)); \
++ return -ENOMEM; \
++ } \
++ for (i = 0; i < req->probes; i++) \
++ get_random_bytes(((initval_t *) map->initval)+i, 4); \
++ map->elements = 0; \
++ map->hashsize = req->hashsize; \
++ map->probes = req->probes; \
++ map->resize = req->resize; \
++ if (__##type##_create(req, map)) { \
++ kfree(map); \
++ return -ENOEXEC; \
++ } \
++ map->members = harray_malloc(map->hashsize, sizeof(dtype), GFP_KERNEL);\
++ if (!map->members) { \
++ DP("out of memory for %zu bytes", map->hashsize * sizeof(dtype));\
++ kfree(map); \
++ return -ENOMEM; \
++ } \
++ \
++ set->data = map; \
++ return 0; \
++}
++
++#define HASH_DESTROY(type) \
++static void \
++type##_destroy(struct ip_set *set) \
++{ \
++ struct ip_set_##type *map = set->data; \
++ \
++ harray_free(map->members); \
++ kfree(map); \
++ \
++ set->data = NULL; \
++}
++
++#define HASH_FLUSH(type, dtype) \
++static void \
++type##_flush(struct ip_set *set) \
++{ \
++ struct ip_set_##type *map = set->data; \
++ harray_flush(map->members, map->hashsize, sizeof(dtype)); \
++ map->elements = 0; \
++}
++
++#define HASH_FLUSH_CIDR(type, dtype) \
++static void \
++type##_flush(struct ip_set *set) \
++{ \
++ struct ip_set_##type *map = set->data; \
++ harray_flush(map->members, map->hashsize, sizeof(dtype)); \
++ memset(map->cidr, 0, sizeof(map->cidr)); \
++ memset(map->nets, 0, sizeof(map->nets)); \
++ map->elements = 0; \
++}
++
++#define HASH_LIST_HEADER(type) \
++static void \
++type##_list_header(const struct ip_set *set, void *data) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ struct ip_set_req_##type##_create *header = data; \
++ \
++ header->hashsize = map->hashsize; \
++ header->probes = map->probes; \
++ header->resize = map->resize; \
++ __##type##_list_header(map, header); \
++}
++
++#define HASH_LIST_MEMBERS_SIZE(type, dtype) \
++static int \
++type##_list_members_size(const struct ip_set *set) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ \
++ return (map->hashsize * sizeof(dtype)); \
++}
++
++#define HASH_LIST_MEMBERS(type, dtype) \
++static void \
++type##_list_members(const struct ip_set *set, void *data) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ dtype *elem; \
++ uint32_t i; \
++ \
++ for (i = 0; i < map->hashsize; i++) { \
++ elem = HARRAY_ELEM(map->members, dtype *, i); \
++ ((dtype *)data)[i] = *elem; \
++ } \
++}
++
++#define HASH_LIST_MEMBERS_MEMCPY(type, dtype) \
++static void \
++type##_list_members(const struct ip_set *set, void *data) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ dtype *elem; \
++ uint32_t i; \
++ \
++ for (i = 0; i < map->hashsize; i++) { \
++ elem = HARRAY_ELEM(map->members, dtype *, i); \
++ memcpy((((dtype *)data)+i), elem, sizeof(dtype)); \
++ } \
++}
++
++#define IP_SET_RTYPE(type, __features) \
++struct ip_set_type ip_set_##type = { \
++ .typename = #type, \
++ .features = __features, \
++ .protocol_version = IP_SET_PROTOCOL_VERSION, \
++ .create = &type##_create, \
++ .retry = &type##_retry, \
++ .destroy = &type##_destroy, \
++ .flush = &type##_flush, \
++ .reqsize = sizeof(struct ip_set_req_##type), \
++ .addip = &type##_uadd, \
++ .addip_kernel = &type##_kadd, \
++ .delip = &type##_udel, \
++ .delip_kernel = &type##_kdel, \
++ .testip = &type##_utest, \
++ .testip_kernel = &type##_ktest, \
++ .header_size = sizeof(struct ip_set_req_##type##_create),\
++ .list_header = &type##_list_header, \
++ .list_members_size = &type##_list_members_size, \
++ .list_members = &type##_list_members, \
++ .me = THIS_MODULE, \
++};
++
++/* Helper functions */
++static inline void
++add_cidr_size(uint8_t *cidr, uint8_t size)
++{
++ uint8_t next;
++ int i;
++
++ for (i = 0; i < 30 && cidr[i]; i++) {
++ if (cidr[i] < size) {
++ next = cidr[i];
++ cidr[i] = size;
++ size = next;
++ }
++ }
++ if (i < 30)
++ cidr[i] = size;
++}
++
++static inline void
++del_cidr_size(uint8_t *cidr, uint8_t size)
++{
++ int i;
++
++ for (i = 0; i < 29 && cidr[i]; i++) {
++ if (cidr[i] == size)
++ cidr[i] = size = cidr[i+1];
++ }
++ cidr[29] = 0;
++}
++#else
++#include <arpa/inet.h>
++#endif /* __KERNEL */
++
++#ifndef UINT16_MAX
++#define UINT16_MAX 65535
++#endif
++
++static unsigned char shifts[] = {255, 253, 249, 241, 225, 193, 129, 1};
++
++static inline ip_set_ip_t
++pack_ip_cidr(ip_set_ip_t ip, unsigned char cidr)
++{
++ ip_set_ip_t addr, *paddr = &addr;
++ unsigned char n, t, *a;
++
++ addr = htonl(ip & (0xFFFFFFFF << (32 - (cidr))));
++#ifdef __KERNEL__
++ DP("ip:%u.%u.%u.%u/%u", NIPQUAD(addr), cidr);
++#endif
++ n = cidr / 8;
++ t = cidr % 8;
++ a = &((unsigned char *)paddr)[n];
++ *a = *a /(1 << (8 - t)) + shifts[t];
++#ifdef __KERNEL__
++ DP("n: %u, t: %u, a: %u", n, t, *a);
++ DP("ip:%u.%u.%u.%u/%u, %u.%u.%u.%u",
++ HIPQUAD(ip), cidr, NIPQUAD(addr));
++#endif
++
++ return ntohl(addr);
++}
++
++
++#endif /* __IP_SET_HASHES_H */
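/* Editorial sketch, not part of the patch: what add_cidr_size() and
 * del_cidr_size() above maintain.  A net-aware hash set keeps the prefix
 * lengths currently in use in cidr[], sorted from most to least specific, so
 * a lookup can probe candidate networks in that order; pack_ip_cidr() then
 * folds the prefix length into the masked address via shifts[], so the same
 * network stored with different prefix lengths hashes differently.  The
 * values below are for illustration only. */
        uint8_t cidr[30] = { 24, 16, 0 };

        add_cidr_size(cidr, 20);        /* cidr[] is now { 24, 20, 16, 0, ... } */
        del_cidr_size(cidr, 20);        /* cidr[] is back to { 24, 16, 0, ... } */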
+--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_iphash.h
@@ -0,0 +1,30 @@
+#ifndef __IP_SET_IPHASH_H
+#define __IP_SET_IPHASH_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_hashes.h>
+
+#define SETTYPE_NAME "iphash"
-+#define MAX_RANGE 0x0000FFFF
+
+struct ip_set_iphash {
+ ip_set_ip_t *members; /* the iphash proper */
@@ -534,7 +1174,7 @@
+ uint16_t probes; /* max number of probes */
+ uint16_t resize; /* resize factor in percent */
+ ip_set_ip_t netmask; /* netmask */
-+ void *initval[0]; /* initvals for jhash_1word */
++ initval_t initval[0]; /* initvals for jhash_1word */
+};
+
+struct ip_set_req_iphash_create {
@@ -551,14 +1191,14 @@
+#endif /* __IP_SET_IPHASH_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_ipmap.h
-@@ -0,0 +1,56 @@
+@@ -0,0 +1,57 @@
+#ifndef __IP_SET_IPMAP_H
+#define __IP_SET_IPMAP_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
+
+#define SETTYPE_NAME "ipmap"
-+#define MAX_RANGE 0x0000FFFF
+
+struct ip_set_ipmap {
+ void *members; /* the ipmap proper */
@@ -567,6 +1207,7 @@
+ ip_set_ip_t netmask; /* subnet netmask */
+ ip_set_ip_t sizeid; /* size of set in IPs */
+ ip_set_ip_t hosts; /* number of hosts in a subnet */
++ u_int32_t size; /* size of the ipmap proper */
+};
+
+struct ip_set_req_ipmap_create {
@@ -579,46 +1220,45 @@
+ ip_set_ip_t ip;
+};
+
-+unsigned int
++static inline unsigned int
+mask_to_bits(ip_set_ip_t mask)
+{
+ unsigned int bits = 32;
+ ip_set_ip_t maskaddr;
-+
++
+ if (mask == 0xFFFFFFFF)
+ return bits;
-+
++
+ maskaddr = 0xFFFFFFFE;
-+ while (--bits >= 0 && maskaddr != mask)
++ while (--bits > 0 && maskaddr != mask)
+ maskaddr <<= 1;
-+
++
+ return bits;
+}
+
-+ip_set_ip_t
++static inline ip_set_ip_t
+range_to_mask(ip_set_ip_t from, ip_set_ip_t to, unsigned int *bits)
+{
+ ip_set_ip_t mask = 0xFFFFFFFE;
-+
++
+ *bits = 32;
-+ while (--(*bits) >= 0 && mask && (to & mask) != from)
++ while (--(*bits) > 0 && mask && (to & mask) != from)
+ mask <<= 1;
-+
++
+ return mask;
+}
-+
++
+#endif /* __IP_SET_IPMAP_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_ipporthash.h
-@@ -0,0 +1,34 @@
+@@ -0,0 +1,33 @@
+#ifndef __IP_SET_IPPORTHASH_H
+#define __IP_SET_IPPORTHASH_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_hashes.h>
+
+#define SETTYPE_NAME "ipporthash"
-+#define MAX_RANGE 0x0000FFFF
-+#define INVALID_PORT (MAX_RANGE + 1)
+
+struct ip_set_ipporthash {
+ ip_set_ip_t *members; /* the ipporthash proper */
@@ -628,7 +1268,7 @@
+ uint16_t resize; /* resize factor in percent */
+ ip_set_ip_t first_ip; /* host byte order, included in range */
+ ip_set_ip_t last_ip; /* host byte order, included in range */
-+ void *initval[0]; /* initvals for jhash_1word */
++ initval_t initval[0]; /* initvals for jhash_1word */
+};
+
+struct ip_set_req_ipporthash_create {
@@ -646,15 +1286,101 @@
+
+#endif /* __IP_SET_IPPORTHASH_H */
--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_ipportiphash.h
+@@ -0,0 +1,39 @@
++#ifndef __IP_SET_IPPORTIPHASH_H
++#define __IP_SET_IPPORTIPHASH_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_hashes.h>
++
++#define SETTYPE_NAME "ipportiphash"
++
++struct ipportip {
++ ip_set_ip_t ip;
++ ip_set_ip_t ip1;
++};
++
++struct ip_set_ipportiphash {
++ struct ipportip *members; /* the ipportip proper */
++ uint32_t elements; /* number of elements */
++ uint32_t hashsize; /* hash size */
++ uint16_t probes; /* max number of probes */
++ uint16_t resize; /* resize factor in percent */
++ ip_set_ip_t first_ip; /* host byte order, included in range */
++ ip_set_ip_t last_ip; /* host byte order, included in range */
++ initval_t initval[0]; /* initvals for jhash_1word */
++};
++
++struct ip_set_req_ipportiphash_create {
++ uint32_t hashsize;
++ uint16_t probes;
++ uint16_t resize;
++ ip_set_ip_t from;
++ ip_set_ip_t to;
++};
++
++struct ip_set_req_ipportiphash {
++ ip_set_ip_t ip;
++ ip_set_ip_t port;
++ ip_set_ip_t ip1;
++};
++
++#endif /* __IP_SET_IPPORTIPHASH_H */
+--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_ipportnethash.h
+@@ -0,0 +1,42 @@
++#ifndef __IP_SET_IPPORTNETHASH_H
++#define __IP_SET_IPPORTNETHASH_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_hashes.h>
++
++#define SETTYPE_NAME "ipportnethash"
++
++struct ipportip {
++ ip_set_ip_t ip;
++ ip_set_ip_t ip1;
++};
++
++struct ip_set_ipportnethash {
++ struct ipportip *members; /* the ipportip proper */
++ uint32_t elements; /* number of elements */
++ uint32_t hashsize; /* hash size */
++ uint16_t probes; /* max number of probes */
++ uint16_t resize; /* resize factor in percent */
++ ip_set_ip_t first_ip; /* host byte order, included in range */
++ ip_set_ip_t last_ip; /* host byte order, included in range */
++ uint8_t cidr[30]; /* CIDR sizes */
++ uint16_t nets[30]; /* nr of nets by CIDR sizes */
++ initval_t initval[0]; /* initvals for jhash_1word */
++};
++
++struct ip_set_req_ipportnethash_create {
++ uint32_t hashsize;
++ uint16_t probes;
++ uint16_t resize;
++ ip_set_ip_t from;
++ ip_set_ip_t to;
++};
++
++struct ip_set_req_ipportnethash {
++ ip_set_ip_t ip;
++ ip_set_ip_t port;
++ ip_set_ip_t ip1;
++ uint8_t cidr;
++};
++
++#endif /* __IP_SET_IPPORTNETHASH_H */
+--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_iptree.h
-@@ -0,0 +1,40 @@
+@@ -0,0 +1,39 @@
+#ifndef __IP_SET_IPTREE_H
+#define __IP_SET_IPTREE_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
+
+#define SETTYPE_NAME "iptree"
-+#define MAX_RANGE 0x0000FFFF
+
+struct ip_set_iptreed {
+ unsigned long expires[256]; /* x.x.x.ADDR */
@@ -726,172 +1452,181 @@
+};
+
+struct ip_set_req_iptreemap {
-+ ip_set_ip_t start;
++ ip_set_ip_t ip;
+ ip_set_ip_t end;
+};
+
+#endif /* __IP_SET_IPTREEMAP_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_jhash.h
-@@ -0,0 +1,148 @@
-+#ifndef _LINUX_IPSET_JHASH_H
-+#define _LINUX_IPSET_JHASH_H
-+
-+/* This is a copy of linux/jhash.h but the types u32/u8 are changed
-+ * to __u32/__u8 so that the header file can be included into
-+ * userspace code as well. Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
-+ */
+@@ -0,0 +1,157 @@
++#ifndef _LINUX_JHASH_H
++#define _LINUX_JHASH_H
+
+/* jhash.h: Jenkins hash support.
+ *
-+ * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)
++ * Copyright (C) 2006. Bob Jenkins (bob_jenkins@burtleburtle.net)
+ *
+ * http://burtleburtle.net/bob/hash/
+ *
+ * These are the credits from Bob's sources:
+ *
-+ * lookup2.c, by Bob Jenkins, December 1996, Public Domain.
-+ * hash(), hash2(), hash3, and mix() are externally useful functions.
-+ * Routines to test the hash are included if SELF_TEST is defined.
-+ * You can use this free for any purpose. It has no warranty.
++ * lookup3.c, by Bob Jenkins, May 2006, Public Domain.
+ *
-+ * Copyright (C) 2003 David S. Miller (davem@redhat.com)
++ * These are functions for producing 32-bit hashes for hash table lookup.
++ * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
++ * are externally useful functions. Routines to test the hash are included
++ * if SELF_TEST is defined. You can use this free for any purpose. It's in
++ * the public domain. It has no warranty.
++ *
++ * Copyright (C) 2009 Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
+ *
+ * I've modified Bob's hash to be useful in the Linux kernel, and
-+ * any bugs present are surely my fault. -DaveM
++ * any bugs present are my fault. Jozsef
+ */
+
-+/* NOTE: Arguments are modified. */
-+#define __jhash_mix(a, b, c) \
++#define __rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))
++
++/* __jhash_mix - mix 3 32-bit values reversibly. */
++#define __jhash_mix(a,b,c) \
+{ \
-+ a -= b; a -= c; a ^= (c>>13); \
-+ b -= c; b -= a; b ^= (a<<8); \
-+ c -= a; c -= b; c ^= (b>>13); \
-+ a -= b; a -= c; a ^= (c>>12); \
-+ b -= c; b -= a; b ^= (a<<16); \
-+ c -= a; c -= b; c ^= (b>>5); \
-+ a -= b; a -= c; a ^= (c>>3); \
-+ b -= c; b -= a; b ^= (a<<10); \
-+ c -= a; c -= b; c ^= (b>>15); \
++ a -= c; a ^= __rot(c, 4); c += b; \
++ b -= a; b ^= __rot(a, 6); a += c; \
++ c -= b; c ^= __rot(b, 8); b += a; \
++ a -= c; a ^= __rot(c,16); c += b; \
++ b -= a; b ^= __rot(a,19); a += c; \
++ c -= b; c ^= __rot(b, 4); b += a; \
++}
++
++/* __jhash_final - final mixing of 3 32-bit values (a,b,c) into c */
++#define __jhash_final(a,b,c) \
++{ \
++ c ^= b; c -= __rot(b,14); \
++ a ^= c; a -= __rot(c,11); \
++ b ^= a; b -= __rot(a,25); \
++ c ^= b; c -= __rot(b,16); \
++ a ^= c; a -= __rot(c,4); \
++ b ^= a; b -= __rot(a,14); \
++ c ^= b; c -= __rot(b,24); \
+}
+
+/* The golden ration: an arbitrary value */
-+#define JHASH_GOLDEN_RATIO 0x9e3779b9
++#define JHASH_GOLDEN_RATIO 0xdeadbeef
+
+/* The most generic version, hashes an arbitrary sequence
+ * of bytes. No alignment or length assumptions are made about
-+ * the input key.
++ * the input key. The result depends on endianness.
+ */
-+static inline __u32 jhash(void *key, __u32 length, __u32 initval)
++static inline u32 jhash(const void *key, u32 length, u32 initval)
+{
-+ __u32 a, b, c, len;
-+ __u8 *k = key;
-+
-+ len = length;
-+ a = b = JHASH_GOLDEN_RATIO;
-+ c = initval;
++ u32 a,b,c;
++ const u8 *k = key;
+
-+ while (len >= 12) {
-+ a += (k[0] +((__u32)k[1]<<8) +((__u32)k[2]<<16) +((__u32)k[3]<<24));
-+ b += (k[4] +((__u32)k[5]<<8) +((__u32)k[6]<<16) +((__u32)k[7]<<24));
-+ c += (k[8] +((__u32)k[9]<<8) +((__u32)k[10]<<16)+((__u32)k[11]<<24));
-+
-+ __jhash_mix(a,b,c);
++ /* Set up the internal state */
++ a = b = c = JHASH_GOLDEN_RATIO + length + initval;
+
++ /* all but the last block: affect some 32 bits of (a,b,c) */
++ while (length > 12) {
++ a += (k[0] + ((u32)k[1]<<8) + ((u32)k[2]<<16) + ((u32)k[3]<<24));
++ b += (k[4] + ((u32)k[5]<<8) + ((u32)k[6]<<16) + ((u32)k[7]<<24));
++ c += (k[8] + ((u32)k[9]<<8) + ((u32)k[10]<<16) + ((u32)k[11]<<24));
++ __jhash_mix(a, b, c);
++ length -= 12;
+ k += 12;
-+ len -= 12;
+ }
+
-+ c += length;
-+ switch (len) {
-+ case 11: c += ((__u32)k[10]<<24);
-+ case 10: c += ((__u32)k[9]<<16);
-+ case 9 : c += ((__u32)k[8]<<8);
-+ case 8 : b += ((__u32)k[7]<<24);
-+ case 7 : b += ((__u32)k[6]<<16);
-+ case 6 : b += ((__u32)k[5]<<8);
++ /* last block: affect all 32 bits of (c) */
++ /* all the case statements fall through */
++ switch (length) {
++ case 12: c += (u32)k[11]<<24;
++ case 11: c += (u32)k[10]<<16;
++ case 10: c += (u32)k[9]<<8;
++ case 9 : c += k[8];
++ case 8 : b += (u32)k[7]<<24;
++ case 7 : b += (u32)k[6]<<16;
++ case 6 : b += (u32)k[5]<<8;
+ case 5 : b += k[4];
-+ case 4 : a += ((__u32)k[3]<<24);
-+ case 3 : a += ((__u32)k[2]<<16);
-+ case 2 : a += ((__u32)k[1]<<8);
++ case 4 : a += (u32)k[3]<<24;
++ case 3 : a += (u32)k[2]<<16;
++ case 2 : a += (u32)k[1]<<8;
+ case 1 : a += k[0];
-+ };
-+
-+ __jhash_mix(a,b,c);
++ __jhash_final(a, b, c);
++ case 0 :
++ break;
++ }
+
+ return c;
+}
+
-+/* A special optimized version that handles 1 or more of __u32s.
-+ * The length parameter here is the number of __u32s in the key.
++/* A special optimized version that handles 1 or more of u32s.
++ * The length parameter here is the number of u32s in the key.
+ */
-+static inline __u32 jhash2(__u32 *k, __u32 length, __u32 initval)
++static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
+{
-+ __u32 a, b, c, len;
++ u32 a, b, c;
+
-+ a = b = JHASH_GOLDEN_RATIO;
-+ c = initval;
-+ len = length;
++ /* Set up the internal state */
++ a = b = c = JHASH_GOLDEN_RATIO + (length<<2) + initval;
+
-+ while (len >= 3) {
++ /* handle most of the key */
++ while (length > 3) {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ __jhash_mix(a, b, c);
-+ k += 3; len -= 3;
++ length -= 3;
++ k += 3;
+ }
+
-+ c += length * 4;
-+
-+ switch (len) {
-+ case 2 : b += k[1];
-+ case 1 : a += k[0];
-+ };
-+
-+ __jhash_mix(a,b,c);
++ /* handle the last 3 u32's */
++ /* all the case statements fall through */
++ switch (length) {
++ case 3: c += k[2];
++ case 2: b += k[1];
++ case 1: a += k[0];
++ __jhash_final(a, b, c);
++ case 0: /* case 0: nothing left to add */
++ break;
++ }
+
+ return c;
+}
+
-+
+/* A special ultra-optimized versions that knows they are hashing exactly
+ * 3, 2 or 1 word(s).
-+ *
-+ * NOTE: In partilar the "c += length; __jhash_mix(a,b,c);" normally
-+ * done at the end is not done here.
+ */
-+static inline __u32 jhash_3words(__u32 a, __u32 b, __u32 c, __u32 initval)
++static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
+{
-+ a += JHASH_GOLDEN_RATIO;
-+ b += JHASH_GOLDEN_RATIO;
-+ c += initval;
++ a += JHASH_GOLDEN_RATIO + initval;
++ b += JHASH_GOLDEN_RATIO + initval;
++ c += JHASH_GOLDEN_RATIO + initval;
+
-+ __jhash_mix(a, b, c);
++ __jhash_final(a, b, c);
+
+ return c;
+}
+
-+static inline __u32 jhash_2words(__u32 a, __u32 b, __u32 initval)
++static inline u32 jhash_2words(u32 a, u32 b, u32 initval)
+{
-+ return jhash_3words(a, b, 0, initval);
++ return jhash_3words(0, a, b, initval);
+}
+
-+static inline __u32 jhash_1word(__u32 a, __u32 initval)
++static inline u32 jhash_1word(u32 a, u32 initval)
+{
-+ return jhash_3words(a, 0, 0, initval);
++ return jhash_3words(0, 0, a, initval);
+}
+
-+#endif /* _LINUX_IPSET_JHASH_H */
++#endif /* _LINUX_JHASH_H */
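/* Editorial sketch, not part of the patch: how the hash set types use
 * jhash_1word() for multi-probe lookups.  'map' stands for one of the
 * ip_set_*hash structures, which keep one random initval per probe; the loop
 * mirrors the jhash_ip()/HARRAY_ELEM() idiom from ip_set.h and
 * ip_set_malloc.h, and the function this fragment sits in is an assumption. */
        uint32_t i, id;
        ip_set_ip_t *elem;

        for (i = 0; i < map->probes; i++) {
                id = jhash_1word(ip, *(map->initval + i)) % map->hashsize;
                elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
                if (*elem == ip)
                        return 1;       /* already in the set */
        }
        return 0;                       /* no probe matched */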
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_macipmap.h
-@@ -0,0 +1,38 @@
+@@ -0,0 +1,39 @@
+#ifndef __IP_SET_MACIPMAP_H
+#define __IP_SET_MACIPMAP_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
+
+#define SETTYPE_NAME "macipmap"
-+#define MAX_RANGE 0x0000FFFF
+
+/* general flags */
+#define IPSET_MACIP_MATCHUNSET 1
@@ -904,6 +1639,7 @@
+ ip_set_ip_t first_ip; /* host byte order, included in range */
+ ip_set_ip_t last_ip; /* host byte order, included in range */
+ u_int32_t flags;
++ u_int32_t size; /* size of the macipmap proper */
+};
+
+struct ip_set_req_macipmap_create {
@@ -918,43 +1654,48 @@
+};
+
+struct ip_set_macip {
-+ unsigned short flags;
++ unsigned short match;
+ unsigned char ethernet[ETH_ALEN];
+};
+
+#endif /* __IP_SET_MACIPMAP_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_malloc.h
-@@ -0,0 +1,116 @@
+@@ -0,0 +1,153 @@
+#ifndef _IP_SET_MALLOC_H
+#define _IP_SET_MALLOC_H
+
+#ifdef __KERNEL__
++#include <linux/vmalloc.h>
+
-+/* Memory allocation and deallocation */
-+static size_t max_malloc_size = 0;
++static size_t max_malloc_size = 0, max_page_size = 0;
++static size_t default_max_malloc_size = 131072; /* Guaranteed: slab.c */
+
-+static inline void init_max_malloc_size(void)
++static inline int init_max_page_size(void)
+{
-+#define CACHE(x) max_malloc_size = x;
++/* Compatibility glues to support 2.4.36 */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++#define __GFP_NOWARN 0
++
++ /* Guaranteed: slab.c */
++ max_malloc_size = max_page_size = default_max_malloc_size;
++#else
++ size_t page_size = 0;
++
++#define CACHE(x) if (max_page_size == 0 || x < max_page_size) \
++ page_size = x;
+#include <linux/kmalloc_sizes.h>
+#undef CACHE
-+}
++ if (page_size) {
++ if (max_malloc_size == 0)
++ max_malloc_size = page_size;
+
-+static inline void * ip_set_malloc(size_t bytes)
-+{
-+ if (bytes > max_malloc_size)
-+ return vmalloc(bytes);
-+ else
-+ return kmalloc(bytes, GFP_KERNEL);
-+}
++ max_page_size = page_size;
+
-+static inline void ip_set_free(void * data, size_t bytes)
-+{
-+ if (bytes > max_malloc_size)
-+ vfree(data);
-+ else
-+ kfree(data);
++ return 1;
++ }
++#endif
++ return 0;
+}
+
+struct harray {
@@ -962,37 +1703,36 @@
+ void *arrays[0];
+};
+
-+static inline void *
-+harray_malloc(size_t hashsize, size_t typesize, int flags)
++static inline void *
++__harray_malloc(size_t hashsize, size_t typesize, gfp_t flags)
+{
+ struct harray *harray;
+ size_t max_elements, size, i, j;
+
-+ if (!max_malloc_size)
-+ init_max_malloc_size();
++ BUG_ON(max_page_size == 0);
+
-+ if (typesize > max_malloc_size)
++ if (typesize > max_page_size)
+ return NULL;
+
-+ max_elements = max_malloc_size/typesize;
++ max_elements = max_page_size/typesize;
+ size = hashsize/max_elements;
+ if (hashsize % max_elements)
+ size++;
-+
++
+ /* Last pointer signals end of arrays */
+ harray = kmalloc(sizeof(struct harray) + (size + 1) * sizeof(void *),
+ flags);
+
+ if (!harray)
+ return NULL;
-+
++
+ for (i = 0; i < size - 1; i++) {
+ harray->arrays[i] = kmalloc(max_elements * typesize, flags);
+ if (!harray->arrays[i])
+ goto undo;
+ memset(harray->arrays[i], 0, max_elements * typesize);
+ }
-+ harray->arrays[i] = kmalloc((hashsize - i * max_elements) * typesize,
++ harray->arrays[i] = kmalloc((hashsize - i * max_elements) * typesize,
+ flags);
+ if (!harray->arrays[i])
+ goto undo;
@@ -1000,7 +1740,7 @@
+
+ harray->max_elements = max_elements;
+ harray->arrays[size] = NULL;
-+
++
+ return (void *)harray;
+
+ undo:
@@ -1011,11 +1751,23 @@
+ return NULL;
+}
+
++static inline void *
++harray_malloc(size_t hashsize, size_t typesize, gfp_t flags)
++{
++ void *harray;
++
++ do {
++ harray = __harray_malloc(hashsize, typesize, flags|__GFP_NOWARN);
++ } while (harray == NULL && init_max_page_size());
++
++ return harray;
++}
++
+static inline void harray_free(void *h)
+{
+ struct harray *harray = (struct harray *) h;
+ size_t i;
-+
++
+ for (i = 0; harray->arrays[i] != NULL; i++)
+ kfree(harray->arrays[i]);
+ kfree(harray);
@@ -1025,10 +1777,10 @@
+{
+ struct harray *harray = (struct harray *) h;
+ size_t i;
-+
++
+ for (i = 0; harray->arrays[i+1] != NULL; i++)
+ memset(harray->arrays[i], 0, harray->max_elements * typesize);
-+ memset(harray->arrays[i], 0,
++ memset(harray->arrays[i], 0,
+ (hashsize - i * harray->max_elements) * typesize);
+}
+
@@ -1039,19 +1791,40 @@
+ + (which)%(__h)->max_elements); \
+})
+
++/* General memory allocation and deallocation */
++static inline void * ip_set_malloc(size_t bytes)
++{
++ BUG_ON(max_malloc_size == 0);
++
++ if (bytes > default_max_malloc_size)
++ return vmalloc(bytes);
++ else
++ return kmalloc(bytes, GFP_KERNEL | __GFP_NOWARN);
++}
++
++static inline void ip_set_free(void * data, size_t bytes)
++{
++ BUG_ON(max_malloc_size == 0);
++
++ if (bytes > default_max_malloc_size)
++ vfree(data);
++ else
++ kfree(data);
++}
++
+#endif /* __KERNEL__ */
+
+#endif /*_IP_SET_MALLOC_H*/
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_nethash.h
-@@ -0,0 +1,55 @@
+@@ -0,0 +1,31 @@
+#ifndef __IP_SET_NETHASH_H
+#define __IP_SET_NETHASH_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_hashes.h>
+
+#define SETTYPE_NAME "nethash"
-+#define MAX_RANGE 0x0000FFFF
+
+struct ip_set_nethash {
+ ip_set_ip_t *members; /* the nethash proper */
@@ -1059,8 +1832,9 @@
+ uint32_t hashsize; /* hash size */
+ uint16_t probes; /* max number of probes */
+ uint16_t resize; /* resize factor in percent */
-+ unsigned char cidr[30]; /* CIDR sizes */
-+ void *initval[0]; /* initvals for jhash_1word */
++ uint8_t cidr[30]; /* CIDR sizes */
++ uint16_t nets[30]; /* nr of nets by CIDR sizes */
++ initval_t initval[0]; /* initvals for jhash_1word */
+};
+
+struct ip_set_req_nethash_create {
@@ -1071,34 +1845,9 @@
+
+struct ip_set_req_nethash {
+ ip_set_ip_t ip;
-+ unsigned char cidr;
++ uint8_t cidr;
+};
+
-+static unsigned char shifts[] = {255, 253, 249, 241, 225, 193, 129, 1};
-+
-+static inline ip_set_ip_t
-+pack(ip_set_ip_t ip, unsigned char cidr)
-+{
-+ ip_set_ip_t addr, *paddr = &addr;
-+ unsigned char n, t, *a;
-+
-+ addr = htonl(ip & (0xFFFFFFFF << (32 - (cidr))));
-+#ifdef __KERNEL__
-+ DP("ip:%u.%u.%u.%u/%u", NIPQUAD(addr), cidr);
-+#endif
-+ n = cidr / 8;
-+ t = cidr % 8;
-+ a = &((unsigned char *)paddr)[n];
-+ *a = *a /(1 << (8 - t)) + shifts[t];
-+#ifdef __KERNEL__
-+ DP("n: %u, t: %u, a: %u", n, t, *a);
-+ DP("ip:%u.%u.%u.%u/%u, %u.%u.%u.%u",
-+ HIPQUAD(ip), cidr, NIPQUAD(addr));
-+#endif
-+
-+ return ntohl(addr);
-+}
-+
+#endif /* __IP_SET_NETHASH_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_portmap.h
@@ -1107,15 +1856,15 @@
+#define __IP_SET_PORTMAP_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
+
+#define SETTYPE_NAME "portmap"
-+#define MAX_RANGE 0x0000FFFF
-+#define INVALID_PORT (MAX_RANGE + 1)
+
+struct ip_set_portmap {
+ void *members; /* the portmap proper */
-+ ip_set_ip_t first_port; /* host byte order, included in range */
-+ ip_set_ip_t last_port; /* host byte order, included in range */
++ ip_set_ip_t first_ip; /* host byte order, included in range */
++ ip_set_ip_t last_ip; /* host byte order, included in range */
++ u_int32_t size; /* size of the portmap proper */
+};
+
+struct ip_set_req_portmap_create {
@@ -1124,11 +1873,40 @@
+};
+
+struct ip_set_req_portmap {
-+ ip_set_ip_t port;
++ ip_set_ip_t ip;
+};
+
+#endif /* __IP_SET_PORTMAP_H */
--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_setlist.h
+@@ -0,0 +1,26 @@
++#ifndef __IP_SET_SETLIST_H
++#define __IP_SET_SETLIST_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "setlist"
++
++#define IP_SET_SETLIST_ADD_AFTER 0
++#define IP_SET_SETLIST_ADD_BEFORE 1
++
++struct ip_set_setlist {
++ uint8_t size;
++ ip_set_id_t index[0];
++};
++
++struct ip_set_req_setlist_create {
++ uint8_t size;
++};
++
++struct ip_set_req_setlist {
++ char name[IP_SET_MAXNAMELEN];
++ char ref[IP_SET_MAXNAMELEN];
++ uint8_t before;
++};
++
++#endif /* __IP_SET_SETLIST_H */
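/* Editorial sketch, not part of the patch: the request a userspace caller is
 * expected to hand to the kernel when inserting one set into a setlist
 * relative to an existing member.  The set names are invented, and the exact
 * interpretation of 'name' vs 'ref' lives in ip_set_setlist.c, which is not
 * shown here. */
        struct ip_set_req_setlist req;

        memset(&req, 0, sizeof(req));
        strncpy(req.name, "blacklist", IP_SET_MAXNAMELEN); /* set to insert */
        strncpy(req.ref, "whitelist", IP_SET_MAXNAMELEN);  /* existing member */
        req.before = IP_SET_SETLIST_ADD_BEFORE;            /* insert before it */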
+--- /dev/null
+++ b/include/linux/netfilter_ipv4/ipt_set.h
@@ -0,0 +1,21 @@
+#ifndef _IPT_SET_H
@@ -1154,7 +1932,7 @@
+#endif /*_IPT_SET_H*/
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set.c
-@@ -0,0 +1,2003 @@
+@@ -0,0 +1,2076 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
@@ -1176,17 +1954,21 @@
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/random.h>
-+#include <linux/jhash.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
+#include <linux/errno.h>
-+#include <linux/semaphore.h>
++#include <linux/capability.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
++#include <asm/semaphore.h>
++#else
++#include <linux/semaphore.h>
++#endif
+#include <linux/spinlock.h>
-+#include <linux/vmalloc.h>
+
+#define ASSERT_READ_LOCK(x)
+#define ASSERT_WRITE_LOCK(x)
++#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4/ip_set.h>
+
+static struct list_head set_type_list; /* all registered sets */
@@ -1198,6 +1980,8 @@
+static struct list_head *ip_set_hash; /* hash of bindings */
+static unsigned int ip_set_hash_random; /* random seed */
+
++#define SETNAME_EQ(a,b) (strncmp(a,b,IP_SET_MAXNAMELEN) == 0)
++
+/*
+ * Sets are identified either by the index in ip_set_list or by id.
+ * The id never changes and is used to find a key in the hash.
@@ -1236,7 +2020,7 @@
+ list_for_each_entry(set_hash, &ip_set_hash[key], list)
+ if (set_hash->id == id && set_hash->ip == ip)
+ return set_hash;
-+
++
+ return NULL;
+}
+
@@ -1249,10 +2033,10 @@
+
+ ASSERT_READ_LOCK(&ip_set_lock);
+ IP_SET_ASSERT(ip_set_list[id]);
-+ DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
-+
++ DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
++
+ set_hash = __ip_set_find(key, id, ip);
-+
++
+ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
+ HIPQUAD(ip),
+ set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
@@ -1264,7 +2048,7 @@
+__set_hash_del(struct ip_set_hash *set_hash)
+{
+ ASSERT_WRITE_LOCK(&ip_set_lock);
-+ IP_SET_ASSERT(ip_set_list[set_hash->binding]);
++ IP_SET_ASSERT(ip_set_list[set_hash->binding]);
+
+ __ip_set_put(set_hash->binding);
+ list_del(&set_hash->list);
@@ -1277,9 +2061,9 @@
+ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
+ % ip_set_bindings_hash_size;
+ struct ip_set_hash *set_hash;
-+
++
+ IP_SET_ASSERT(ip_set_list[id]);
-+ DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
++ DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
+ write_lock_bh(&ip_set_lock);
+ set_hash = __ip_set_find(key, id, ip);
+ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
@@ -1288,7 +2072,7 @@
+
+ if (set_hash != NULL)
+ __set_hash_del(set_hash);
-+ write_unlock_bh(&ip_set_lock);
++ write_unlock_bh(&ip_set_lock);
+ return 0;
+}
+
@@ -1299,7 +2083,7 @@
+ % ip_set_bindings_hash_size;
+ struct ip_set_hash *set_hash;
+ int ret = 0;
-+
++
+ IP_SET_ASSERT(ip_set_list[id]);
+ IP_SET_ASSERT(ip_set_list[binding]);
+ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
@@ -1317,7 +2101,7 @@
+ set_hash->ip = ip;
+ list_add(&set_hash->list, &ip_set_hash[key]);
+ } else {
-+ IP_SET_ASSERT(ip_set_list[set_hash->binding]);
++ IP_SET_ASSERT(ip_set_list[set_hash->binding]);
+ DP("overwrite binding: %s",
+ ip_set_list[set_hash->binding]->name);
+ __ip_set_put(set_hash->binding);
@@ -1370,7 +2154,7 @@
+ ip_set_ip_t ip;
+ int res;
+ unsigned char i = 0;
-+
++
+ IP_SET_ASSERT(flags[i]);
+ read_lock_bh(&ip_set_lock);
+ do {
@@ -1386,10 +2170,10 @@
+ && follow_bindings(index, set, ip));
+ read_unlock_bh(&ip_set_lock);
+
-+ return res;
++ return (res < 0 ? 0 : res);
+}
+
-+void
++int
+ip_set_addip_kernel(ip_set_id_t index,
+ const struct sk_buff *skb,
+ const u_int32_t *flags)
@@ -1419,9 +2203,11 @@
+ && set->type->retry
+ && (res = set->type->retry(set)) == 0)
+ goto retry;
++
++ return res;
+}
+
-+void
++int
+ip_set_delip_kernel(ip_set_id_t index,
+ const struct sk_buff *skb,
+ const u_int32_t *flags)
@@ -1445,6 +2231,8 @@
+ && flags[i]
+ && follow_bindings(index, set, ip));
+ read_unlock_bh(&ip_set_lock);
++
++ return res;
+}
+
+/* Register and deregister settype */
@@ -1464,7 +2252,7 @@
+ip_set_register_set_type(struct ip_set_type *set_type)
+{
+ int ret = 0;
-+
++
+ if (set_type->protocol_version != IP_SET_PROTOCOL_VERSION) {
+ ip_set_printk("'%s' uses wrong protocol version %u (want %u)",
+ set_type->typename,
@@ -1509,6 +2297,29 @@
+
+}
+
++ip_set_id_t
++__ip_set_get_byname(const char *name, struct ip_set **set)
++{
++ ip_set_id_t i, index = IP_SET_INVALID_ID;
++
++ for (i = 0; i < ip_set_max; i++) {
++ if (ip_set_list[i] != NULL
++ && SETNAME_EQ(ip_set_list[i]->name, name)) {
++ __ip_set_get(i);
++ index = i;
++ *set = ip_set_list[i];
++ break;
++ }
++ }
++ return index;
++}
++
++void __ip_set_put_byindex(ip_set_id_t index)
++{
++ if (ip_set_list[index])
++ __ip_set_put(index);
++}
++
+/*
+ * Userspace routines
+ */
@@ -1522,11 +2333,11 @@
+ip_set_get_byname(const char *name)
+{
+ ip_set_id_t i, index = IP_SET_INVALID_ID;
-+
++
+ down(&ip_set_app_mutex);
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL
-+ && strcmp(ip_set_list[i]->name, name) == 0) {
++ && SETNAME_EQ(ip_set_list[i]->name, name)) {
+ __ip_set_get(i);
+ index = i;
+ break;
@@ -1548,22 +2359,36 @@
+
+ if (index >= ip_set_max)
+ return IP_SET_INVALID_ID;
-+
++
+ if (ip_set_list[index])
+ __ip_set_get(index);
+ else
+ index = IP_SET_INVALID_ID;
-+
++
+ up(&ip_set_app_mutex);
+ return index;
+}
+
+/*
++ * Find the set id belonging to the index.
++ * We are protected by the mutex, so we do not need to use
++ * ip_set_lock. There is no need to reference the sets either.
++ */
++ip_set_id_t
++ip_set_id(ip_set_id_t index)
++{
++ if (index >= ip_set_max || !ip_set_list[index])
++ return IP_SET_INVALID_ID;
++
++ return ip_set_list[index]->id;
++}
++
++/*
+ * If the given set pointer points to a valid set, decrement
+ * reference count by 1. The caller shall not assume the index
+ * to be valid, after calling this function.
+ */
-+void ip_set_put(ip_set_id_t index)
++void ip_set_put_byindex(ip_set_id_t index)
+{
+ down(&ip_set_app_mutex);
+ if (ip_set_list[index])
@@ -1576,10 +2401,10 @@
+ip_set_find_byname(const char *name)
+{
+ ip_set_id_t i, index = IP_SET_INVALID_ID;
-+
++
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL
-+ && strcmp(ip_set_list[i]->name, name) == 0) {
++ && SETNAME_EQ(ip_set_list[i]->name, name)) {
+ index = i;
+ break;
+ }
@@ -1592,7 +2417,7 @@
+{
+ if (index >= ip_set_max || ip_set_list[index] == NULL)
+ index = IP_SET_INVALID_ID;
-+
++
+ return index;
+}
+
@@ -1603,7 +2428,7 @@
+static inline int
+__ip_set_testip(struct ip_set *set,
+ const void *data,
-+ size_t size,
++ u_int32_t size,
+ ip_set_ip_t *ip)
+{
+ int res;
@@ -1618,12 +2443,12 @@
+static int
+__ip_set_addip(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set = ip_set_list[index];
+ ip_set_ip_t ip;
+ int res;
-+
++
+ IP_SET_ASSERT(set);
+ do {
+ write_lock_bh(&set->lock);
@@ -1639,9 +2464,18 @@
+static int
+ip_set_addip(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
++ struct ip_set *set = ip_set_list[index];
++
++ IP_SET_ASSERT(set);
+
++ if (size - sizeof(struct ip_set_req_adt) != set->type->reqsize) {
++ ip_set_printk("data length wrong (want %lu, have %zu)",
++ (long unsigned)set->type->reqsize,
++ size - sizeof(struct ip_set_req_adt));
++ return -EINVAL;
++ }
+ return __ip_set_addip(index,
+ data + sizeof(struct ip_set_req_adt),
+ size - sizeof(struct ip_set_req_adt));
@@ -1650,13 +2484,20 @@
+static int
+ip_set_delip(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set = ip_set_list[index];
+ ip_set_ip_t ip;
+ int res;
-+
++
+ IP_SET_ASSERT(set);
++
++ if (size - sizeof(struct ip_set_req_adt) != set->type->reqsize) {
++ ip_set_printk("data length wrong (want %lu, have %zu)",
++ (long unsigned)set->type->reqsize,
++ size - sizeof(struct ip_set_req_adt));
++ return -EINVAL;
++ }
+ write_lock_bh(&set->lock);
+ res = set->type->delip(set,
+ data + sizeof(struct ip_set_req_adt),
@@ -1670,13 +2511,20 @@
+static int
+ip_set_testip(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set = ip_set_list[index];
+ ip_set_ip_t ip;
+ int res;
+
+ IP_SET_ASSERT(set);
++
++ if (size - sizeof(struct ip_set_req_adt) != set->type->reqsize) {
++ ip_set_printk("data length wrong (want %lu, have %zu)",
++ (long unsigned)set->type->reqsize,
++ size - sizeof(struct ip_set_req_adt));
++ return -EINVAL;
++ }
+ res = __ip_set_testip(set,
+ data + sizeof(struct ip_set_req_adt),
+ size - sizeof(struct ip_set_req_adt),
@@ -1688,10 +2536,10 @@
+static int
+ip_set_bindip(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set = ip_set_list[index];
-+ struct ip_set_req_bind *req_bind;
++ const struct ip_set_req_bind *req_bind;
+ ip_set_id_t binding;
+ ip_set_ip_t ip;
+ int res;
@@ -1699,19 +2547,17 @@
+ IP_SET_ASSERT(set);
+ if (size < sizeof(struct ip_set_req_bind))
+ return -EINVAL;
++
++ req_bind = data;
+
-+ req_bind = (struct ip_set_req_bind *) data;
-+ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
-+
-+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++ if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
+ /* Default binding of a set */
-+ char *binding_name;
-+
++ const char *binding_name;
++
+ if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
+ return -EINVAL;
+
-+ binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
-+ binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
++ binding_name = data + sizeof(struct ip_set_req_bind);
+
+ binding = ip_set_find_byname(binding_name);
+ if (binding == IP_SET_INVALID_ID)
@@ -1737,7 +2583,7 @@
+ &ip);
+ DP("set %s, ip: %u.%u.%u.%u, binding %s",
+ set->name, HIPQUAD(ip), ip_set_list[binding]->name);
-+
++
+ if (res >= 0)
+ res = ip_set_hash_add(set->id, ip, binding);
+
@@ -1776,30 +2622,29 @@
+static int
+ip_set_unbindip(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set;
-+ struct ip_set_req_bind *req_bind;
++ const struct ip_set_req_bind *req_bind;
+ ip_set_ip_t ip;
+ int res;
+
+ DP("");
+ if (size < sizeof(struct ip_set_req_bind))
+ return -EINVAL;
-+
-+ req_bind = (struct ip_set_req_bind *) data;
-+ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
-+
++
++ req_bind = data;
++
+ DP("%u %s", index, req_bind->binding);
+ if (index == IP_SET_INVALID_ID) {
+ /* unbind :all: */
-+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++ if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
+ /* Default binding of sets */
+ write_lock_bh(&ip_set_lock);
+ FOREACH_SET_DO(__unbind_default);
+ write_unlock_bh(&ip_set_lock);
+ return 0;
-+ } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
++ } else if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_ALL)) {
+ /* Flush all bindings of all sets*/
+ write_lock_bh(&ip_set_lock);
+ FOREACH_HASH_RW_DO(__set_hash_del);
@@ -1809,16 +2654,16 @@
+ DP("unreachable reached!");
+ return -EINVAL;
+ }
-+
++
+ set = ip_set_list[index];
+ IP_SET_ASSERT(set);
-+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++ if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
+ /* Default binding of set */
+ ip_set_id_t binding = ip_set_find_byindex(set->binding);
+
+ if (binding == IP_SET_INVALID_ID)
+ return -ENOENT;
-+
++
+ write_lock_bh(&ip_set_lock);
+ /* Sets in hash values are referenced */
+ __ip_set_put(set->binding);
@@ -1826,7 +2671,7 @@
+ write_unlock_bh(&ip_set_lock);
+
+ return 0;
-+ } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
++ } else if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_ALL)) {
+ /* Flush all bindings */
+
+ write_lock_bh(&ip_set_lock);
@@ -1834,7 +2679,7 @@
+ write_unlock_bh(&ip_set_lock);
+ return 0;
+ }
-+
++
+ res = __ip_set_testip(set,
+ data + sizeof(struct ip_set_req_bind),
+ size - sizeof(struct ip_set_req_bind),
@@ -1850,10 +2695,10 @@
+static int
+ip_set_testbind(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set = ip_set_list[index];
-+ struct ip_set_req_bind *req_bind;
++ const struct ip_set_req_bind *req_bind;
+ ip_set_id_t binding;
+ ip_set_ip_t ip;
+ int res;
@@ -1861,24 +2706,22 @@
+ IP_SET_ASSERT(set);
+ if (size < sizeof(struct ip_set_req_bind))
+ return -EINVAL;
++
++ req_bind = data;
+
-+ req_bind = (struct ip_set_req_bind *) data;
-+ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
-+
-+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++ if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
+ /* Default binding of set */
-+ char *binding_name;
-+
++ const char *binding_name;
++
+ if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
+ return -EINVAL;
+
-+ binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
-+ binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
++ binding_name = data + sizeof(struct ip_set_req_bind);
+
+ binding = ip_set_find_byname(binding_name);
+ if (binding == IP_SET_INVALID_ID)
+ return -ENOENT;
-+
++
+ res = (set->binding == binding) ? -EEXIST : 0;
+
+ return res;
@@ -1886,15 +2729,15 @@
+ binding = ip_set_find_byname(req_bind->binding);
+ if (binding == IP_SET_INVALID_ID)
+ return -ENOENT;
-+
-+
++
++
+ res = __ip_set_testip(set,
+ data + sizeof(struct ip_set_req_bind),
+ size - sizeof(struct ip_set_req_bind),
+ &ip);
+ DP("set %s, ip: %u.%u.%u.%u, binding %s",
+ set->name, HIPQUAD(ip), ip_set_list[binding]->name);
-+
++
+ if (res >= 0)
+ res = (ip_set_find_in_hash(set->id, ip) == binding)
+ ? -EEXIST : 0;
@@ -1906,7 +2749,7 @@
+find_set_type_rlock(const char *typename)
+{
+ struct ip_set_type *type;
-+
++
+ read_lock_bh(&ip_set_lock);
+ type = find_set_type(typename);
+ if (type == NULL)
@@ -1927,7 +2770,7 @@
+ if (ip_set_list[i] == NULL) {
+ if (*id == IP_SET_INVALID_ID)
+ *id = *index = i;
-+ } else if (strcmp(name, ip_set_list[i]->name) == 0)
++ } else if (SETNAME_EQ(name, ip_set_list[i]->name))
+ /* Name clash */
+ return -EEXIST;
+ }
@@ -1935,7 +2778,7 @@
+ /* No free slot remained */
+ return -ERANGE;
+ /* Check that index is usable as id (swapping) */
-+ check:
++ check:
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL
+ && ip_set_list[i]->id == *id) {
@@ -1954,13 +2797,14 @@
+ const char *typename,
+ ip_set_id_t restore,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set;
+ ip_set_id_t index = 0, id;
+ int res = 0;
+
+ DP("setname: %s, typename: %s, id: %u", name, typename, restore);
++
+ /*
+ * First, and without any locks, allocate and initialize
+ * a normal base set structure.
@@ -1968,7 +2812,7 @@
+ set = kmalloc(sizeof(struct ip_set), GFP_KERNEL);
+ if (!set)
+ return -ENOMEM;
-+ set->lock = RW_LOCK_UNLOCKED;
++ rwlock_init(&set->lock);
+ strncpy(set->name, name, IP_SET_MAXNAMELEN);
+ set->binding = IP_SET_INVALID_ID;
+ atomic_set(&set->ref, 0);
@@ -2004,6 +2848,14 @@
+ }
+ read_unlock_bh(&ip_set_lock);
+
++ /* Check request size */
++ if (size != set->type->header_size) {
++ ip_set_printk("data length wrong (want %lu, have %lu)",
++ (long unsigned)set->type->header_size,
++ (long unsigned)size);
++ goto put_out;
++ }
++
+ /*
+ * Without holding any locks, create private part.
+ */
@@ -2030,7 +2882,7 @@
+ res = -ERANGE;
+ goto cleanup;
+ }
-+
++
+ /*
+ * Finally! Add our shiny new set to the list, and be done.
+ */
@@ -2039,7 +2891,7 @@
+ ip_set_list[index] = set;
+ write_unlock_bh(&ip_set_lock);
+ return res;
-+
++
+ cleanup:
+ write_unlock_bh(&ip_set_lock);
+ set->type->destroy(set);
@@ -2139,9 +2991,7 @@
+ write_lock_bh(&ip_set_lock);
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL
-+ && strncmp(ip_set_list[i]->name,
-+ name,
-+ IP_SET_MAXNAMELEN - 1) == 0) {
++ && SETNAME_EQ(ip_set_list[i]->name, name)) {
+ res = -EEXIST;
+ goto unlock;
+ }
@@ -2165,11 +3015,13 @@
+ u_int32_t from_ref;
+
+ DP("set: %s to %s", from->name, to->name);
-+ /* Features must not change. Artifical restriction. */
++ /* Features must not change.
++ * Not an artificial restriction anymore, as we must prevent
++ * possible loops created by swapping in setlist type of sets. */
+ if (from->type->features != to->type->features)
+ return -ENOEXEC;
+
-+ /* No magic here: ref munging protected by the mutex */
++ /* No magic here: ref munging protected by the mutex */
+ write_lock_bh(&ip_set_lock);
+ strncpy(from_name, from->name, IP_SET_MAXNAMELEN);
+ from_ref = atomic_read(&from->ref);
@@ -2178,10 +3030,10 @@
+ atomic_set(&from->ref, atomic_read(&to->ref));
+ strncpy(to->name, from_name, IP_SET_MAXNAMELEN);
+ atomic_set(&to->ref, from_ref);
-+
++
+ ip_set_list[from_index] = to;
+ ip_set_list[to_index] = from;
-+
++
+ write_unlock_bh(&ip_set_lock);
+ return 0;
+}
@@ -2192,7 +3044,7 @@
+
+static inline void
+__set_hash_bindings_size_list(struct ip_set_hash *set_hash,
-+ ip_set_id_t id, size_t *size)
++ ip_set_id_t id, u_int32_t *size)
+{
+ if (set_hash->id == id)
+ *size += sizeof(struct ip_set_hash_list);
@@ -2200,7 +3052,7 @@
+
+static inline void
+__set_hash_bindings_size_save(struct ip_set_hash *set_hash,
-+ ip_set_id_t id, size_t *size)
++ ip_set_id_t id, u_int32_t *size)
+{
+ if (set_hash->id == id)
+ *size += sizeof(struct ip_set_hash_save);
@@ -2211,8 +3063,7 @@
+ ip_set_id_t id, void *data, int *used)
+{
+ if (set_hash->id == id) {
-+ struct ip_set_hash_list *hash_list =
-+ (struct ip_set_hash_list *)(data + *used);
++ struct ip_set_hash_list *hash_list = data + *used;
+
+ hash_list->ip = set_hash->ip;
+ hash_list->binding = set_hash->binding;
@@ -2229,7 +3080,7 @@
+ struct ip_set_list *set_list;
+
+ /* Pointer to our header */
-+ set_list = (struct ip_set_list *) (data + *used);
++ set_list = data + *used;
+
+ DP("set: %s, used: %d %p %p", set->name, *used, data, data + *used);
+
@@ -2274,7 +3125,7 @@
+
+ /* Fill in set spefific bindings data */
+ FOREACH_HASH_DO(__set_hash_bindings, set->id, data, used);
-+
++
+ return 0;
+
+ unlock_set:
@@ -2296,7 +3147,7 @@
+ struct ip_set_save *set_save;
+
+ /* Pointer to our header */
-+ set_save = (struct ip_set_save *) (data + *used);
++ set_save = data + *used;
+
+ /* Get and ensure header size */
+ if (*used + sizeof(struct ip_set_save) > len)
@@ -2304,7 +3155,7 @@
+ *used += sizeof(struct ip_set_save);
+
+ set = ip_set_list[index];
-+ DP("set: %s, used: %u(%u) %p %p", set->name, *used, len,
++ DP("set: %s, used: %d(%d) %p %p", set->name, *used, len,
+ data, data + *used);
+
+ read_lock_bh(&set->lock);
@@ -2321,8 +3172,8 @@
+ set->type->list_header(set, data + *used);
+ *used += set_save->header_size;
+
-+ DP("set header filled: %s, used: %u(%u) %p %p", set->name, *used,
-+ set_save->header_size, data, data + *used);
++ DP("set header filled: %s, used: %d(%lu) %p %p", set->name, *used,
++ (unsigned long)set_save->header_size, data, data + *used);
+ /* Get and ensure set specific members size */
+ set_save->members_size = set->type->list_members_size(set);
+ if (*used + set_save->members_size > len)
@@ -2332,8 +3183,8 @@
+ set->type->list_members(set, data + *used);
+ *used += set_save->members_size;
+ read_unlock_bh(&set->lock);
-+ DP("set members filled: %s, used: %u(%u) %p %p", set->name, *used,
-+ set_save->members_size, data, data + *used);
++ DP("set members filled: %s, used: %d(%lu) %p %p", set->name, *used,
++ (unsigned long)set_save->members_size, data, data + *used);
+ return 0;
+
+ unlock_set:
@@ -2353,8 +3204,7 @@
+{
+ if (*res == 0
+ && (id == IP_SET_INVALID_ID || set_hash->id == id)) {
-+ struct ip_set_hash_save *hash_save =
-+ (struct ip_set_hash_save *)(data + *used);
++ struct ip_set_hash_save *hash_save = data + *used;
+ /* Ensure bindings size */
+ if (*used + sizeof(struct ip_set_hash_save) > len) {
+ *res = -ENOMEM;
@@ -2381,7 +3231,7 @@
+ return -ENOMEM;
+
+ /* Marker */
-+ set_save = (struct ip_set_save *) (data + *used);
++ set_save = data + *used;
+ set_save->index = IP_SET_INVALID_ID;
+ set_save->header_size = 0;
+ set_save->members_size = 0;
@@ -2394,7 +3244,7 @@
+ index = ip_set_list[index]->id;
+ FOREACH_HASH_DO(__set_hash_save_bindings, index, data, used, len, &res);
+
-+ return res;
++ return res;
+}
+
+/*
@@ -2413,12 +3263,12 @@
+ /* Loop to restore sets */
+ while (1) {
+ line++;
-+
-+ DP("%u %u %u", used, sizeof(struct ip_set_restore), len);
++
++ DP("%d %zu %d", used, sizeof(struct ip_set_restore), len);
+ /* Get and ensure header size */
+ if (used + sizeof(struct ip_set_restore) > len)
+ return line;
-+ set_restore = (struct ip_set_restore *) (data + used);
++ set_restore = data + used;
+ used += sizeof(struct ip_set_restore);
+
+ /* Ensure data size */
@@ -2432,7 +3282,7 @@
+ line--;
+ goto bindings;
+ }
-+
++
+ /* Try to create the set */
+ DP("restore %s %s", set_restore->name, set_restore->typename);
+ res = ip_set_create(set_restore->name,
@@ -2440,7 +3290,7 @@
+ set_restore->index,
+ data + used,
+ set_restore->header_size);
-+
++
+ if (res != 0)
+ return line;
+ used += set_restore->header_size;
@@ -2452,12 +3302,13 @@
+ /* Try to restore members data */
+ set = ip_set_list[index];
+ members_size = 0;
-+ DP("members_size %u reqsize %u",
-+ set_restore->members_size, set->type->reqsize);
++ DP("members_size %lu reqsize %lu",
++ (unsigned long)set_restore->members_size,
++ (unsigned long)set->type->reqsize);
+ while (members_size + set->type->reqsize <=
+ set_restore->members_size) {
+ line++;
-+ DP("members: %u, line %u", members_size, line);
++ DP("members: %d, line %d", members_size, line);
+ res = __ip_set_addip(index,
+ data + used + members_size,
+ set->type->reqsize);
@@ -2466,29 +3317,29 @@
+ members_size += set->type->reqsize;
+ }
+
-+ DP("members_size %u %u",
-+ set_restore->members_size, members_size);
++ DP("members_size %lu %d",
++ (unsigned long)set_restore->members_size, members_size);
+ if (members_size != set_restore->members_size)
+ return line++;
-+ used += set_restore->members_size;
++ used += set_restore->members_size;
+ }
-+
++
+ bindings:
+ /* Loop to restore bindings */
+ while (used < len) {
+ line++;
+
-+ DP("restore binding, line %u", line);
++ DP("restore binding, line %u", line);
+ /* Get and ensure size */
+ if (used + sizeof(struct ip_set_hash_save) > len)
+ return line;
-+ hash_save = (struct ip_set_hash_save *) (data + used);
++ hash_save = data + used;
+ used += sizeof(struct ip_set_hash_save);
-+
++
+ /* hash_save->id is used to store the index */
+ index = ip_set_find_byindex(hash_save->id);
+ DP("restore binding index %u, id %u, %u -> %u",
-+ index, hash_save->id, hash_save->ip, hash_save->binding);
++ index, hash_save->id, hash_save->ip, hash_save->binding);
+ if (index != hash_save->id)
+ return line;
+ if (ip_set_find_byindex(hash_save->binding) == IP_SET_INVALID_ID) {
@@ -2514,8 +3365,8 @@
+ }
+ if (used != len)
+ return line;
-+
-+ return 0;
++
++ return 0;
+}
+
+static int
@@ -2527,10 +3378,10 @@
+ struct ip_set_req_adt *req_adt;
+ ip_set_id_t index = IP_SET_INVALID_ID;
+ int (*adtfn)(ip_set_id_t index,
-+ const void *data, size_t size);
++ const void *data, u_int32_t size);
+ struct fn_table {
+ int (*fn)(ip_set_id_t index,
-+ const void *data, size_t size);
++ const void *data, u_int32_t size);
+ } adtfn_table[] =
+ { { ip_set_addip }, { ip_set_delip }, { ip_set_testip},
+ { ip_set_bindip}, { ip_set_unbindip }, { ip_set_testbind },
@@ -2562,11 +3413,10 @@
+
+ op = (unsigned *)data;
+ DP("op=%x", *op);
-+
++
+ if (*op < IP_SET_OP_VERSION) {
+ /* Check the version at the beginning of operations */
-+ struct ip_set_req_version *req_version =
-+ (struct ip_set_req_version *) data;
++ struct ip_set_req_version *req_version = data;
+ if (req_version->version != IP_SET_PROTOCOL_VERSION) {
+ res = -EPROTO;
+ goto done;
@@ -2575,9 +3425,8 @@
+
+ switch (*op) {
+ case IP_SET_OP_CREATE:{
-+ struct ip_set_req_create *req_create
-+ = (struct ip_set_req_create *) data;
-+
++ struct ip_set_req_create *req_create = data;
++
+ if (len < sizeof(struct ip_set_req_create)) {
+ ip_set_printk("short CREATE data (want >=%zu, got %u)",
+ sizeof(struct ip_set_req_create), len);
@@ -2594,16 +3443,15 @@
+ goto done;
+ }
+ case IP_SET_OP_DESTROY:{
-+ struct ip_set_req_std *req_destroy
-+ = (struct ip_set_req_std *) data;
-+
++ struct ip_set_req_std *req_destroy = data;
++
+ if (len != sizeof(struct ip_set_req_std)) {
+ ip_set_printk("invalid DESTROY data (want %zu, got %u)",
+ sizeof(struct ip_set_req_std), len);
+ res = -EINVAL;
+ goto done;
+ }
-+ if (strcmp(req_destroy->name, IPSET_TOKEN_ALL) == 0) {
++ if (SETNAME_EQ(req_destroy->name, IPSET_TOKEN_ALL)) {
+ /* Destroy all sets */
+ index = IP_SET_INVALID_ID;
+ } else {
@@ -2615,13 +3463,12 @@
+ goto done;
+ }
+ }
-+
++
+ res = ip_set_destroy(index);
+ goto done;
+ }
+ case IP_SET_OP_FLUSH:{
-+ struct ip_set_req_std *req_flush =
-+ (struct ip_set_req_std *) data;
++ struct ip_set_req_std *req_flush = data;
+
+ if (len != sizeof(struct ip_set_req_std)) {
+ ip_set_printk("invalid FLUSH data (want %zu, got %u)",
@@ -2629,7 +3476,7 @@
+ res = -EINVAL;
+ goto done;
+ }
-+ if (strcmp(req_flush->name, IPSET_TOKEN_ALL) == 0) {
++ if (SETNAME_EQ(req_flush->name, IPSET_TOKEN_ALL)) {
+ /* Flush all sets */
+ index = IP_SET_INVALID_ID;
+ } else {
@@ -2645,8 +3492,7 @@
+ goto done;
+ }
+ case IP_SET_OP_RENAME:{
-+ struct ip_set_req_create *req_rename
-+ = (struct ip_set_req_create *) data;
++ struct ip_set_req_create *req_rename = data;
+
+ if (len != sizeof(struct ip_set_req_create)) {
+ ip_set_printk("invalid RENAME data (want %zu, got %u)",
@@ -2657,7 +3503,7 @@
+
+ req_rename->name[IP_SET_MAXNAMELEN - 1] = '\0';
+ req_rename->typename[IP_SET_MAXNAMELEN - 1] = '\0';
-+
++
+ index = ip_set_find_byname(req_rename->name);
+ if (index == IP_SET_INVALID_ID) {
+ res = -ENOENT;
@@ -2667,8 +3513,7 @@
+ goto done;
+ }
+ case IP_SET_OP_SWAP:{
-+ struct ip_set_req_create *req_swap
-+ = (struct ip_set_req_create *) data;
++ struct ip_set_req_create *req_swap = data;
+ ip_set_id_t to_index;
+
+ if (len != sizeof(struct ip_set_req_create)) {
@@ -2697,7 +3542,7 @@
+ default:
+ break; /* Set identified by id */
+ }
-+
++
+ /* There we may have add/del/test/bind/unbind/test_bind operations */
+ if (*op < IP_SET_OP_ADD_IP || *op > IP_SET_OP_TEST_BIND_SET) {
+ res = -EBADMSG;
@@ -2711,7 +3556,7 @@
+ res = -EINVAL;
+ goto done;
+ }
-+ req_adt = (struct ip_set_req_adt *) data;
++ req_adt = data;
+
+ /* -U :all: :all:|:default: uses IP_SET_INVALID_ID */
+ if (!(*op == IP_SET_OP_UNBIND_SET
@@ -2771,8 +3616,7 @@
+
+ if (*op < IP_SET_OP_VERSION) {
+ /* Check the version at the beginning of operations */
-+ struct ip_set_req_version *req_version =
-+ (struct ip_set_req_version *) data;
++ struct ip_set_req_version *req_version = data;
+ if (req_version->version != IP_SET_PROTOCOL_VERSION) {
+ res = -EPROTO;
+ goto done;
@@ -2781,8 +3625,7 @@
+
+ switch (*op) {
+ case IP_SET_OP_VERSION: {
-+ struct ip_set_req_version *req_version =
-+ (struct ip_set_req_version *) data;
++ struct ip_set_req_version *req_version = data;
+
+ if (*len != sizeof(struct ip_set_req_version)) {
+ ip_set_printk("invalid VERSION (want %zu, got %d)",
@@ -2798,8 +3641,7 @@
+ goto done;
+ }
+ case IP_SET_OP_GET_BYNAME: {
-+ struct ip_set_req_get_set *req_get
-+ = (struct ip_set_req_get_set *) data;
++ struct ip_set_req_get_set *req_get = data;
+
+ if (*len != sizeof(struct ip_set_req_get_set)) {
+ ip_set_printk("invalid GET_BYNAME (want %zu, got %d)",
@@ -2813,8 +3655,7 @@
+ goto copy;
+ }
+ case IP_SET_OP_GET_BYINDEX: {
-+ struct ip_set_req_get_set *req_get
-+ = (struct ip_set_req_get_set *) data;
++ struct ip_set_req_get_set *req_get = data;
+
+ if (*len != sizeof(struct ip_set_req_get_set)) {
+ ip_set_printk("invalid GET_BYINDEX (want %zu, got %d)",
@@ -2830,8 +3671,7 @@
+ goto copy;
+ }
+ case IP_SET_OP_ADT_GET: {
-+ struct ip_set_req_adt_get *req_get
-+ = (struct ip_set_req_adt_get *) data;
++ struct ip_set_req_adt_get *req_get = data;
+
+ if (*len != sizeof(struct ip_set_req_adt_get)) {
+ ip_set_printk("invalid ADT_GET (want %zu, got %d)",
@@ -2853,8 +3693,7 @@
+ goto copy;
+ }
+ case IP_SET_OP_MAX_SETS: {
-+ struct ip_set_req_max_sets *req_max_sets
-+ = (struct ip_set_req_max_sets *) data;
++ struct ip_set_req_max_sets *req_max_sets = data;
+ ip_set_id_t i;
+
+ if (*len != sizeof(struct ip_set_req_max_sets)) {
@@ -2864,7 +3703,7 @@
+ goto done;
+ }
+
-+ if (strcmp(req_max_sets->set.name, IPSET_TOKEN_ALL) == 0) {
++ if (SETNAME_EQ(req_max_sets->set.name, IPSET_TOKEN_ALL)) {
+ req_max_sets->set.index = IP_SET_INVALID_ID;
+ } else {
+ req_max_sets->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
@@ -2885,8 +3724,7 @@
+ }
+ case IP_SET_OP_LIST_SIZE:
+ case IP_SET_OP_SAVE_SIZE: {
-+ struct ip_set_req_setnames *req_setnames
-+ = (struct ip_set_req_setnames *) data;
++ struct ip_set_req_setnames *req_setnames = data;
+ struct ip_set_name_list *name_list;
+ struct ip_set *set;
+ ip_set_id_t i;
@@ -2904,8 +3742,7 @@
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] == NULL)
+ continue;
-+ name_list = (struct ip_set_name_list *)
-+ (data + used);
++ name_list = data + used;
+ used += sizeof(struct ip_set_name_list);
+ if (used > copylen) {
+ res = -EAGAIN;
@@ -2957,8 +3794,7 @@
+ goto copy;
+ }
+ case IP_SET_OP_LIST: {
-+ struct ip_set_req_list *req_list
-+ = (struct ip_set_req_list *) data;
++ struct ip_set_req_list *req_list = data;
+ ip_set_id_t i;
+ int used;
+
@@ -2994,8 +3830,7 @@
+ goto copy;
+ }
+ case IP_SET_OP_SAVE: {
-+ struct ip_set_req_list *req_save
-+ = (struct ip_set_req_list *) data;
++ struct ip_set_req_list *req_save = data;
+ ip_set_id_t i;
+ int used;
+
@@ -3011,20 +3846,30 @@
+ res = -ENOENT;
+ goto done;
+ }
++
++#define SETLIST(set) (strcmp(set->type->typename, "setlist") == 0)
++
+ used = 0;
+ if (index == IP_SET_INVALID_ID) {
-+ /* Save all sets */
++ /* Save all sets: ugly setlist type dependency */
++ int setlist = 0;
++ setlists:
+ for (i = 0; i < ip_set_max && res == 0; i++) {
-+ if (ip_set_list[i] != NULL)
++ if (ip_set_list[i] != NULL
++ && !(setlist ^ SETLIST(ip_set_list[i])))
+ res = ip_set_save_set(i, data, &used, *len);
+ }
++ if (!setlist) {
++ setlist = 1;
++ goto setlists;
++ }
+ } else {
+ /* Save an individual set */
+ res = ip_set_save_set(index, data, &used, *len);
+ }
+ if (res == 0)
+ res = ip_set_save_bindings(index, data, &used, *len);
-+
++
+ if (res != 0)
+ goto done;
+ else if (copylen != used) {
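
The two-pass loop in the SAVE branch above dumps ordinary sets on the first pass and setlist type sets on the second, presumably because a setlist refers to other sets, so a later restore only meets setlists whose member sets already exist. The !(setlist ^ SETLIST(...)) condition is simply a pass/type match; a small standalone sketch of the same predicate:

/* Pass/type filter used by the two-pass save above.  Pass 0 selects the
 * ordinary sets, pass 1 the "setlist" sets:
 *   !(0 ^ 0) = 1   plain set,  first pass   -> saved
 *   !(0 ^ 1) = 0   setlist,    first pass   -> skipped
 *   !(1 ^ 0) = 0   plain set,  second pass  -> skipped
 *   !(1 ^ 1) = 1   setlist,    second pass  -> saved */
static inline int save_on_this_pass(int pass, int is_setlist)
{
        return !(pass ^ is_setlist);
}
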
@@ -3034,20 +3879,19 @@
+ goto copy;
+ }
+ case IP_SET_OP_RESTORE: {
-+ struct ip_set_req_setnames *req_restore
-+ = (struct ip_set_req_setnames *) data;
++ struct ip_set_req_setnames *req_restore = data;
+ int line;
+
+ if (*len < sizeof(struct ip_set_req_setnames)
+ || *len != req_restore->size) {
-+ ip_set_printk("invalid RESTORE (want =%zu, got %d)",
-+ req_restore->size, *len);
++ ip_set_printk("invalid RESTORE (want =%lu, got %d)",
++ (long unsigned)req_restore->size, *len);
+ res = -EINVAL;
+ goto done;
+ }
+ line = ip_set_restore(data + sizeof(struct ip_set_req_setnames),
+ req_restore->size - sizeof(struct ip_set_req_setnames));
-+ DP("ip_set_restore: %u", line);
++ DP("ip_set_restore: %d", line);
+ if (line != 0) {
+ res = -EAGAIN;
+ req_restore->size = line;
@@ -3062,12 +3906,12 @@
+ } /* end of switch(op) */
+
+ copy:
-+ DP("set %s, copylen %u", index != IP_SET_INVALID_ID
++ DP("set %s, copylen %d", index != IP_SET_INVALID_ID
+ && ip_set_list[index]
+ ? ip_set_list[index]->name
+ : ":all:", copylen);
+ res = copy_to_user(user, data, copylen);
-+
++
+ done:
+ up(&ip_set_app_mutex);
+ vfree(data);
@@ -3085,12 +3929,15 @@
+ .get_optmin = SO_IP_SET,
+ .get_optmax = SO_IP_SET + 1,
+ .get = &ip_set_sockfn_get,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
++ .use = 0,
++#else
+ .owner = THIS_MODULE,
+#endif
+};
+
+static int max_sets, hash_size;
++
+module_param(max_sets, int, 0600);
+MODULE_PARM_DESC(max_sets, "maximal number of sets");
+module_param(hash_size, int, 0600);
@@ -3133,6 +3980,7 @@
+ vfree(ip_set_hash);
+ return res;
+ }
++
+ return 0;
+}
+
@@ -3150,7 +3998,10 @@
+
+EXPORT_SYMBOL(ip_set_get_byname);
+EXPORT_SYMBOL(ip_set_get_byindex);
-+EXPORT_SYMBOL(ip_set_put);
++EXPORT_SYMBOL(ip_set_put_byindex);
++EXPORT_SYMBOL(ip_set_id);
++EXPORT_SYMBOL(__ip_set_get_byname);
++EXPORT_SYMBOL(__ip_set_put_byindex);
+
+EXPORT_SYMBOL(ip_set_addip_kernel);
+EXPORT_SYMBOL(ip_set_delip_kernel);
@@ -3160,8 +4011,8 @@
+module_exit(ip_set_fini);
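
The exported reference API changes here: ip_set_put() becomes ip_set_put_byindex(), and the lock-free __ip_set_get_byname()/__ip_set_put_byindex() pair plus ip_set_id() are added. Below is a minimal usage sketch built only from the symbols exported above; the wrapper names are illustrative and not taken from any real module. The __ prefixed variants do not take the application mutex, so they are presumably meant for callers that already hold the appropriate lock.

#include <linux/netfilter_ipv4/ip_set.h>

/* Sketch: resolve a set by name when a rule is installed and drop the
 * reference again when the rule goes away.  ip_set_get_byname() takes a
 * reference on success; ip_set_put_byindex() releases it. */
static ip_set_id_t example_bind_set(const char *setname)
{
        ip_set_id_t index = ip_set_get_byname(setname);

        if (index == IP_SET_INVALID_ID)
                return IP_SET_INVALID_ID;       /* no such set */
        /* keep index around and hand it to the *_kernel() helpers */
        return index;
}

static void example_release_set(ip_set_id_t index)
{
        ip_set_put_byindex(index);
}
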
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_iphash.c
-@@ -0,0 +1,429 @@
-+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+@@ -0,0 +1,166 @@
++/* Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
@@ -3171,36 +4022,26 @@
+/* Kernel module implementing an ip hash set */
+
+#include <linux/module.h>
++#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+#include <linux/jhash.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
-+#include <linux/vmalloc.h>
+#include <linux/random.h>
+
+#include <net/ip.h>
+
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
+#include <linux/netfilter_ipv4/ip_set_iphash.h>
+
+static int limit = MAX_RANGE;
+
+static inline __u32
-+jhash_ip(const struct ip_set_iphash *map, uint16_t i, ip_set_ip_t ip)
-+{
-+ return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
-+}
-+
-+static inline __u32
-+hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++iphash_id(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++ struct ip_set_iphash *map = set->data;
+ __u32 id;
+ u_int16_t i;
+ ip_set_ip_t *elem;
@@ -3208,208 +4049,91 @@
+ *hash_ip = ip & map->netmask;
+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u, %u.%u.%u.%u",
+ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip), HIPQUAD(map->netmask));
-+
++
+ for (i = 0; i < map->probes; i++) {
+ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
+ DP("hash key: %u", id);
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
+ if (*elem == *hash_ip)
+ return id;
-+ /* No shortcut at testing - there can be deleted
-+ * entries. */
++ /* No shortcut - there can be deleted entries. */
+ }
+ return UINT_MAX;
+}
+
+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++iphash_test(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
++ return (ip && iphash_id(set, hash_ip, ip) != UINT_MAX);
+}
+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iphash *req =
-+ (struct ip_set_req_iphash *) data;
++#define KADT_CONDITION
+
-+ if (size != sizeof(struct ip_set_req_iphash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iphash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT(iphash, test)
++KADT(iphash, test, ipaddr)
+
+static inline int
-+__addip(struct ip_set_iphash *map, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++__iphash_add(struct ip_set_iphash *map, ip_set_ip_t *ip)
+{
+ __u32 probe;
+ u_int16_t i;
-+ ip_set_ip_t *elem;
-+
-+ if (!ip || map->elements >= limit)
-+ return -ERANGE;
-+
-+ *hash_ip = ip & map->netmask;
-+
++ ip_set_ip_t *elem, *slot = NULL;
++
+ for (i = 0; i < map->probes; i++) {
-+ probe = jhash_ip(map, i, *hash_ip) % map->hashsize;
++ probe = jhash_ip(map, i, *ip) % map->hashsize;
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
-+ if (*elem == *hash_ip)
++ if (*elem == *ip)
+ return -EEXIST;
-+ if (!*elem) {
-+ *elem = *hash_ip;
-+ map->elements++;
-+ return 0;
-+ }
++ if (!(slot || *elem))
++ slot = elem;
++ /* There can be deleted entries, must check all slots */
++ }
++ if (slot) {
++ *slot = *ip;
++ map->elements++;
++ return 0;
+ }
+ /* Trigger rehashing */
+ return -EAGAIN;
+}
+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
++static inline int
++iphash_add(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_req_iphash *req =
-+ (struct ip_set_req_iphash *) data;
++ struct ip_set_iphash *map = set->data;
++
++ if (!ip || map->elements >= limit)
++ return -ERANGE;
+
-+ if (size != sizeof(struct ip_set_req_iphash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iphash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __addip((struct ip_set_iphash *) set->data, req->ip, hash_ip);
++ *hash_ip = ip & map->netmask;
++
++ return __iphash_add(map, hash_ip);
+}
+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __addip((struct ip_set_iphash *) set->data,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT(iphash, add)
++KADT(iphash, add, ipaddr)
+
-+static int retry(struct ip_set *set)
++static inline void
++__iphash_retry(struct ip_set_iphash *tmp, struct ip_set_iphash *map)
+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ ip_set_ip_t hash_ip, *elem;
-+ void *members;
-+ u_int32_t i, hashsize = map->hashsize;
-+ int res;
-+ struct ip_set_iphash *tmp;
-+
-+ if (map->resize == 0)
-+ return -ERANGE;
-+
-+ again:
-+ res = 0;
-+
-+ /* Calculate new hash size */
-+ hashsize += (hashsize * map->resize)/100;
-+ if (hashsize == map->hashsize)
-+ hashsize++;
-+
-+ ip_set_printk("rehashing of set %s triggered: "
-+ "hashsize grows from %u to %u",
-+ set->name, map->hashsize, hashsize);
-+
-+ tmp = kmalloc(sizeof(struct ip_set_iphash)
-+ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
-+ if (!tmp) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_iphash)
-+ + map->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
-+ if (!tmp->members) {
-+ DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
-+ kfree(tmp);
-+ return -ENOMEM;
-+ }
-+ tmp->hashsize = hashsize;
-+ tmp->elements = 0;
-+ tmp->probes = map->probes;
-+ tmp->resize = map->resize;
+ tmp->netmask = map->netmask;
-+ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
-+
-+ write_lock_bh(&set->lock);
-+ map = (struct ip_set_iphash *) set->data; /* Play safe */
-+ for (i = 0; i < map->hashsize && res == 0; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ if (*elem)
-+ res = __addip(tmp, *elem, &hash_ip);
-+ }
-+ if (res) {
-+ /* Failure, try again */
-+ write_unlock_bh(&set->lock);
-+ harray_free(tmp->members);
-+ kfree(tmp);
-+ goto again;
-+ }
-+
-+ /* Success at resizing! */
-+ members = map->members;
-+
-+ map->hashsize = tmp->hashsize;
-+ map->members = tmp->members;
-+ write_unlock_bh(&set->lock);
-+
-+ harray_free(members);
-+ kfree(tmp);
-+
-+ return 0;
+}
+
++HASH_RETRY(iphash, ip_set_ip_t)
++
+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++iphash_del(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++ struct ip_set_iphash *map = set->data;
+ ip_set_ip_t id, *elem;
+
+ if (!ip)
+ return -ERANGE;
+
-+ id = hash_id(set, ip, hash_ip);
++ id = iphash_id(set, hash_ip, ip);
+ if (id == UINT_MAX)
+ return -EEXIST;
-+
++
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
+ *elem = 0;
+ map->elements--;
@@ -3417,159 +4141,35 @@
+ return 0;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iphash *req =
-+ (struct ip_set_req_iphash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iphash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iphash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT(iphash, del)
++KADT(iphash, del, ipaddr)
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static inline int
++__iphash_create(const struct ip_set_req_iphash_create *req,
++ struct ip_set_iphash *map)
+{
-+ struct ip_set_req_iphash_create *req =
-+ (struct ip_set_req_iphash_create *) data;
-+ struct ip_set_iphash *map;
-+ uint16_t i;
-+
-+ if (size != sizeof(struct ip_set_req_iphash_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iphash_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ if (req->hashsize < 1) {
-+ ip_set_printk("hashsize too small");
-+ return -ENOEXEC;
-+ }
-+
-+ if (req->probes < 1) {
-+ ip_set_printk("probes too small");
-+ return -ENOEXEC;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_iphash)
-+ + req->probes * sizeof(uint32_t), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_iphash)
-+ + req->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ for (i = 0; i < req->probes; i++)
-+ get_random_bytes(((uint32_t *) map->initval)+i, 4);
-+ map->elements = 0;
-+ map->hashsize = req->hashsize;
-+ map->probes = req->probes;
-+ map->resize = req->resize;
+ map->netmask = req->netmask;
-+ map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+
-+ set->data = map;
++
+ return 0;
+}
+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+
-+ harray_free(map->members);
-+ kfree(map);
-+
-+ set->data = NULL;
-+}
-+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
-+ map->elements = 0;
-+}
++HASH_CREATE(iphash, ip_set_ip_t)
++HASH_DESTROY(iphash)
+
-+static void list_header(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ struct ip_set_req_iphash_create *header =
-+ (struct ip_set_req_iphash_create *) data;
++HASH_FLUSH(iphash, ip_set_ip_t)
+
-+ header->hashsize = map->hashsize;
-+ header->probes = map->probes;
-+ header->resize = map->resize;
++static inline void
++__iphash_list_header(const struct ip_set_iphash *map,
++ struct ip_set_req_iphash_create *header)
++{
+ header->netmask = map->netmask;
+}
+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+
-+ return (map->hashsize * sizeof(ip_set_ip_t));
-+}
-+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ ip_set_ip_t i, *elem;
-+
-+ for (i = 0; i < map->hashsize; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ ((ip_set_ip_t *)data)[i] = *elem;
-+ }
-+}
++HASH_LIST_HEADER(iphash)
++HASH_LIST_MEMBERS_SIZE(iphash, ip_set_ip_t)
++HASH_LIST_MEMBERS(iphash, ip_set_ip_t)
+
-+static struct ip_set_type ip_set_iphash = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_iphash),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .retry = &retry,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_iphash_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_RTYPE(iphash, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -3577,25 +4177,13 @@
+module_param(limit, int, 0600);
+MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
+
-+static int __init ip_set_iphash_init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_iphash);
-+}
-+
-+static void __exit ip_set_iphash_fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_iphash);
-+}
-+
-+module_init(ip_set_iphash_init);
-+module_exit(ip_set_iphash_fini);
++REGISTER_MODULE(iphash)
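
The rewritten iphash module above drops its open-coded testip/addip/delip, retry() and create() functions in favour of the UADT/KADT, HASH_RETRY, HASH_CREATE, HASH_LIST_* and IP_SET_RTYPE macros from the new shared ip_set headers, which are not visible in this hunk. The sketch below is therefore only an assumption about the rough shape of one expansion, approximately what UADT(iphash, test) would generate; the adapter name and details are guesses. The request size check carried by the old testip() disappears because the core ip_set_testip()/ip_set_addip()/ip_set_delip() wrappers now validate reqsize themselves.

/* Assumed expansion of UADT(iphash, test): unpack the userspace request
 * and call the type-specific worker defined above.  Illustrative only. */
static int
iphash_utest(struct ip_set *set, const void *data, u_int32_t size,
             ip_set_ip_t *hash_ip)
{
        const struct ip_set_req_iphash *req = data;

        return iphash_test(set, hash_ip, req->ip);
}
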
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_ipmap.c
-@@ -0,0 +1,336 @@
+@@ -0,0 +1,142 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
-+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ * Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
@@ -3607,9 +4195,6 @@
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
@@ -3624,10 +4209,10 @@
+}
+
+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++ipmap_test(const struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+
++ const struct ip_set_ipmap *map = set->data;
++
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
+
@@ -3637,46 +4222,15 @@
+ return !!test_bit(ip_to_id(map, *hash_ip), map->members);
+}
+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipmap *req =
-+ (struct ip_set_req_ipmap *) data;
++#define KADT_CONDITION
+
-+ if (size != sizeof(struct ip_set_req_ipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ int res = __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+ return (res < 0 ? 0 : res);
-+}
++UADT(ipmap, test)
++KADT(ipmap, test, ipaddr)
+
+static inline int
-+__addip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++ipmap_add(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++ struct ip_set_ipmap *map = set->data;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
@@ -3689,46 +4243,13 @@
+ return 0;
+}
+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipmap *req =
-+ (struct ip_set_req_ipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_ipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ DP("%u.%u.%u.%u", HIPQUAD(req->ip));
-+ return __addip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __addip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT(ipmap, add)
++KADT(ipmap, add, ipaddr)
+
+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++ipmap_del(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++ struct ip_set_ipmap *map = set->data;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
@@ -3737,75 +4258,17 @@
+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
+ if (!test_and_clear_bit(ip_to_id(map, *hash_ip), map->members))
+ return -EEXIST;
-+
++
+ return 0;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipmap *req =
-+ (struct ip_set_req_ipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_ipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT(ipmap, del)
++KADT(ipmap, del, ipaddr)
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static inline int
++__ipmap_create(const struct ip_set_req_ipmap_create *req,
++ struct ip_set_ipmap *map)
+{
-+ int newbytes;
-+ struct ip_set_req_ipmap_create *req =
-+ (struct ip_set_req_ipmap_create *) data;
-+ struct ip_set_ipmap *map;
-+
-+ if (size != sizeof(struct ip_set_req_ipmap_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipmap_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ DP("from %u.%u.%u.%u to %u.%u.%u.%u",
-+ HIPQUAD(req->from), HIPQUAD(req->to));
-+
-+ if (req->from > req->to) {
-+ DP("bad ip range");
-+ return -ENOEXEC;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_ipmap), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_ipmap));
-+ return -ENOMEM;
-+ }
-+ map->first_ip = req->from;
-+ map->last_ip = req->to;
+ map->netmask = req->netmask;
+
+ if (req->netmask == 0xFFFFFFFF) {
@@ -3830,109 +4293,40 @@
+ map->sizeid = 2 << (netmask_bits - mask_bits - 1);
+ }
+ if (map->sizeid > MAX_RANGE + 1) {
-+ ip_set_printk("range too big (max %d addresses)",
-+ MAX_RANGE+1);
-+ kfree(map);
++ ip_set_printk("range too big, %d elements (max %d)",
++ map->sizeid, MAX_RANGE+1);
+ return -ENOEXEC;
+ }
+ DP("hosts %u, sizeid %u", map->hosts, map->sizeid);
-+ newbytes = bitmap_bytes(0, map->sizeid - 1);
-+ map->members = kmalloc(newbytes, GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", newbytes);
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+ memset(map->members, 0, newbytes);
-+
-+ set->data = map;
-+ return 0;
-+}
-+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+
-+ kfree(map->members);
-+ kfree(map);
-+
-+ set->data = NULL;
++ return bitmap_bytes(0, map->sizeid - 1);
+}
+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+ memset(map->members, 0, bitmap_bytes(0, map->sizeid - 1));
-+}
++BITMAP_CREATE(ipmap)
++BITMAP_DESTROY(ipmap)
++BITMAP_FLUSH(ipmap)
+
-+static void list_header(const struct ip_set *set, void *data)
++static inline void
++__ipmap_list_header(const struct ip_set_ipmap *map,
++ struct ip_set_req_ipmap_create *header)
+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+ struct ip_set_req_ipmap_create *header =
-+ (struct ip_set_req_ipmap_create *) data;
-+
-+ header->from = map->first_ip;
-+ header->to = map->last_ip;
+ header->netmask = map->netmask;
+}
+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+
-+ return bitmap_bytes(0, map->sizeid - 1);
-+}
++BITMAP_LIST_HEADER(ipmap)
++BITMAP_LIST_MEMBERS_SIZE(ipmap)
++BITMAP_LIST_MEMBERS(ipmap)
+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+ int bytes = bitmap_bytes(0, map->sizeid - 1);
-+
-+ memcpy(data, map->members, bytes);
-+}
-+
-+static struct ip_set_type ip_set_ipmap = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_ipmap),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_ipmap_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_TYPE(ipmap, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("ipmap type of IP sets");
+
-+static int __init ip_set_ipmap_init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_ipmap);
-+}
-+
-+static void __exit ip_set_ipmap_fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_ipmap);
-+}
-+
-+module_init(ip_set_ipmap_init);
-+module_exit(ip_set_ipmap_fini);
++REGISTER_MODULE(ipmap)
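
In the ipmap rewrite above, the old create() that parsed the request, validated the range and allocated both the map structure and the bitmap is split up: __ipmap_create() now only fills the type-specific fields and returns the bitmap size in bytes (or a negative error), and the allocation presumably moves into the shared BITMAP_CREATE() macro, which is not shown in this hunk. A rough, assumption-level sketch of the create() such a macro would generate (the request length itself is already checked against header_size by the core):

/* Assumed shape of the create() generated by BITMAP_CREATE(ipmap);
 * names and details are illustrative, the real macro may differ.
 * The from/to ordering checks of the old create() are omitted here. */
static int ipmap_create(struct ip_set *set, const void *data, u_int32_t size)
{
        const struct ip_set_req_ipmap_create *req = data;
        struct ip_set_ipmap *map;
        int newbytes;

        map = kmalloc(sizeof(struct ip_set_ipmap), GFP_KERNEL);
        if (!map)
                return -ENOMEM;
        map->first_ip = req->from;
        map->last_ip = req->to;

        newbytes = __ipmap_create(req, map);    /* fills netmask, sizeid */
        if (newbytes < 0) {
                kfree(map);
                return newbytes;
        }
        map->members = kmalloc(newbytes, GFP_KERNEL);
        if (!map->members) {
                kfree(map);
                return -ENOMEM;
        }
        memset(map->members, 0, newbytes);

        set->data = map;
        return 0;
}
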
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_ipporthash.c
-@@ -0,0 +1,581 @@
-+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+@@ -0,0 +1,203 @@
++/* Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
@@ -3942,581 +4336,729 @@
+/* Kernel module implementing an ip+port hash set */
+
+#include <linux/module.h>
++#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+#include <linux/jhash.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
-+#include <linux/vmalloc.h>
+#include <linux/random.h>
+
+#include <net/ip.h>
+
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
+#include <linux/netfilter_ipv4/ip_set_ipporthash.h>
++#include <linux/netfilter_ipv4/ip_set_getport.h>
+
+static int limit = MAX_RANGE;
+
-+/* We must handle non-linear skbs */
-+static inline ip_set_ip_t
-+get_port(const struct sk_buff *skb, u_int32_t flags)
-+{
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ struct iphdr *iph = ip_hdr(skb);
-+#else
-+ struct iphdr *iph = skb->nh.iph;
-+#endif
-+ u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
-+
-+ switch (iph->protocol) {
-+ case IPPROTO_TCP: {
-+ struct tcphdr tcph;
-+
-+ /* See comments at tcp_match in ip_tables.c */
-+ if (offset)
-+ return INVALID_PORT;
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
-+#else
-+ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &tcph, sizeof(tcph)) < 0)
-+#endif
-+ /* No choice either */
-+ return INVALID_PORT;
-+
-+ return ntohs(flags & IPSET_SRC ?
-+ tcph.source : tcph.dest);
-+ }
-+ case IPPROTO_UDP: {
-+ struct udphdr udph;
-+
-+ if (offset)
-+ return INVALID_PORT;
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
-+#else
-+ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &udph, sizeof(udph)) < 0)
-+#endif
-+ /* No choice either */
-+ return INVALID_PORT;
-+
-+ return ntohs(flags & IPSET_SRC ?
-+ udph.source : udph.dest);
-+ }
-+ default:
-+ return INVALID_PORT;
-+ }
-+}
-+
+static inline __u32
-+jhash_ip(const struct ip_set_ipporthash *map, uint16_t i, ip_set_ip_t ip)
++ipporthash_id(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port)
+{
-+ return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
-+}
-+
-+#define HASH_IP(map, ip, port) (port + ((ip - ((map)->first_ip)) << 16))
-+
-+static inline __u32
-+hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_ipporthash *map =
-+ (struct ip_set_ipporthash *) set->data;
++ struct ip_set_ipporthash *map = set->data;
+ __u32 id;
+ u_int16_t i;
+ ip_set_ip_t *elem;
+
-+ *hash_ip = HASH_IP(map, ip, port);
++ *hash_ip = pack_ip_port(map, ip, port);
++
+ DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
+ set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
-+
++ if (!*hash_ip)
++ return UINT_MAX;
++
+ for (i = 0; i < map->probes; i++) {
+ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
+ DP("hash key: %u", id);
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
+ if (*elem == *hash_ip)
+ return id;
-+ /* No shortcut at testing - there can be deleted
-+ * entries. */
++ /* No shortcut - there can be deleted entries. */
+ }
+ return UINT_MAX;
+}
+
+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t *hash_ip)
++ipporthash_test(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+
++ struct ip_set_ipporthash *map = set->data;
++
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
+
-+ return (hash_id(set, ip, port, hash_ip) != UINT_MAX);
++ return (ipporthash_id(set, hash_ip, ip, port) != UINT_MAX);
+}
+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipporthash *req =
-+ (struct ip_set_req_ipporthash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_ipporthash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipporthash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testip(set, req->ip, req->port, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ ip_set_ip_t port;
-+ int res;
-+
-+ if (flags[index+1] == 0)
-+ return 0;
-+
-+ port = get_port(skb, flags[index+1]);
-+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ NIPQUAD(ip_hdr(skb)->saddr),
-+ NIPQUAD(ip_hdr(skb)->daddr));
-+#else
-+ NIPQUAD(skb->nh.iph->saddr),
-+ NIPQUAD(skb->nh.iph->daddr));
-+#endif
-+ DP("flag %s port %u",
-+ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
-+ port);
-+ if (port == INVALID_PORT)
++#define KADT_CONDITION \
++ ip_set_ip_t port; \
++ \
++ if (flags[index+1] == 0) \
++ return 0; \
++ \
++ port = get_port(skb, flags[index+1]); \
++ \
++ if (port == INVALID_PORT) \
+ return 0;
+
-+ res = __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ port,
-+ hash_ip);
-+ return (res < 0 ? 0 : res);
-+
-+}
++UADT(ipporthash, test, req->port)
++KADT(ipporthash, test, ipaddr, port)
+
+static inline int
-+__add_haship(struct ip_set_ipporthash *map, ip_set_ip_t hash_ip)
++__ipporthash_add(struct ip_set_ipporthash *map, ip_set_ip_t *ip)
+{
+ __u32 probe;
+ u_int16_t i;
-+ ip_set_ip_t *elem;
++ ip_set_ip_t *elem, *slot = NULL;
+
+ for (i = 0; i < map->probes; i++) {
-+ probe = jhash_ip(map, i, hash_ip) % map->hashsize;
++ probe = jhash_ip(map, i, *ip) % map->hashsize;
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
-+ if (*elem == hash_ip)
++ if (*elem == *ip)
+ return -EEXIST;
-+ if (!*elem) {
-+ *elem = hash_ip;
-+ map->elements++;
-+ return 0;
-+ }
++ if (!(slot || *elem))
++ slot = elem;
++ /* There can be deleted entries, must check all slots */
++ }
++ if (slot) {
++ *slot = *ip;
++ map->elements++;
++ return 0;
+ }
+ /* Trigger rehashing */
+ return -EAGAIN;
+}
+
+static inline int
-+__addip(struct ip_set_ipporthash *map, ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t *hash_ip)
++ipporthash_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port)
+{
++ struct ip_set_ipporthash *map = set->data;
+ if (map->elements > limit)
+ return -ERANGE;
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
+
-+ *hash_ip = HASH_IP(map, ip, port);
++ *hash_ip = pack_ip_port(map, ip, port);
+
-+ return __add_haship(map, *hash_ip);
++ if (!*hash_ip)
++ return -ERANGE;
++
++ return __ipporthash_add(map, hash_ip);
+}
+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
++UADT(ipporthash, add, req->port)
++KADT(ipporthash, add, ipaddr, port)
++
++static inline void
++__ipporthash_retry(struct ip_set_ipporthash *tmp,
++ struct ip_set_ipporthash *map)
+{
-+ struct ip_set_req_ipporthash *req =
-+ (struct ip_set_req_ipporthash *) data;
++ tmp->first_ip = map->first_ip;
++ tmp->last_ip = map->last_ip;
++}
+
-+ if (size != sizeof(struct ip_set_req_ipporthash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipporthash),
-+ size);
-+ return -EINVAL;
++HASH_RETRY(ipporthash, ip_set_ip_t)
++
++static inline int
++ipporthash_del(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port)
++{
++ struct ip_set_ipporthash *map = set->data;
++ ip_set_ip_t id;
++ ip_set_ip_t *elem;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++
++ id = ipporthash_id(set, hash_ip, ip, port);
++
++ if (id == UINT_MAX)
++ return -EEXIST;
++
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
++ *elem = 0;
++ map->elements--;
++
++ return 0;
++}
++
++UADT(ipporthash, del, req->port)
++KADT(ipporthash, del, ipaddr, port)
++
++static inline int
++__ipporthash_create(const struct ip_set_req_ipporthash_create *req,
++ struct ip_set_ipporthash *map)
++{
++ if (req->to - req->from > MAX_RANGE) {
++ ip_set_printk("range too big, %d elements (max %d)",
++ req->to - req->from + 1, MAX_RANGE+1);
++ return -ENOEXEC;
+ }
-+ return __addip((struct ip_set_ipporthash *) set->data,
-+ req->ip, req->port, hash_ip);
++ map->first_ip = req->from;
++ map->last_ip = req->to;
++ return 0;
+}
+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
++HASH_CREATE(ipporthash, ip_set_ip_t)
++HASH_DESTROY(ipporthash)
++HASH_FLUSH(ipporthash, ip_set_ip_t)
++
++static inline void
++__ipporthash_list_header(const struct ip_set_ipporthash *map,
++ struct ip_set_req_ipporthash_create *header)
+{
-+ ip_set_ip_t port;
++ header->from = map->first_ip;
++ header->to = map->last_ip;
++}
+
-+ if (flags[index+1] == 0)
-+ return -EINVAL;
++HASH_LIST_HEADER(ipporthash)
++HASH_LIST_MEMBERS_SIZE(ipporthash, ip_set_ip_t)
++HASH_LIST_MEMBERS(ipporthash, ip_set_ip_t)
+
-+ port = get_port(skb, flags[index+1]);
++IP_SET_RTYPE(ipporthash, IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_DATA_DOUBLE)
+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ NIPQUAD(ip_hdr(skb)->saddr),
-+ NIPQUAD(ip_hdr(skb)->daddr));
-+#else
-+ NIPQUAD(skb->nh.iph->saddr),
-+ NIPQUAD(skb->nh.iph->daddr));
-+#endif
-+ DP("flag %s port %u",
-+ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
-+ port);
-+ if (port == INVALID_PORT)
-+ return -EINVAL;
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("ipporthash type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
+
-+ return __addip((struct ip_set_ipporthash *) set->data,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ port,
-+ hash_ip);
-+}
++REGISTER_MODULE(ipporthash)
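A minimal standalone sketch of the collision handling the hash set types in this patch rely on (toy code, not part of the patch; every name below is made up): a key is probed at a fixed number of independently salted positions, the first empty probed slot is reused, and when every probed slot is busy the add path returns -EAGAIN so the caller can grow the table by roughly resize percent and reinsert all members, which is the shape of the add/retry pair visible above for ipporthash.

	#include <errno.h>
	#include <stdint.h>
	#include <stdlib.h>

	#define TOY_PROBES 4

	struct toy_hash {
		uint32_t *slots;                /* 0 means "empty slot" */
		uint32_t hashsize;
		uint32_t resize;                /* growth in percent when full */
		uint32_t initval[TOY_PROBES];   /* one random salt per probe */
	};

	static uint32_t toy_mix(uint32_t key, uint32_t salt)
	{
		/* stand-in for the kernel's jhash_1word()/jhash_2words() */
		key ^= salt;
		key *= 0x9e3779b1u;
		return key ^ (key >> 16);
	}

	/* Probe every salted position; remember the first free one; never stop
	 * early, because a deleted entry may sit in front of a duplicate. */
	static int toy_add(uint32_t *slots, uint32_t hashsize,
			   const uint32_t *initval, uint32_t key)
	{
		uint32_t i, id, *free_slot = NULL;

		for (i = 0; i < TOY_PROBES; i++) {
			id = toy_mix(key, initval[i]) % hashsize;
			if (slots[id] == key)
				return -EEXIST;
			if (!free_slot && slots[id] == 0)
				free_slot = &slots[id];
		}
		if (!free_slot)
			return -EAGAIN;         /* all probed slots busy: rehash */
		*free_slot = key;
		return 0;
	}

	/* Grow and reinsert, growing again on failure, which is what the
	 * module's retry() callback does under the set lock. */
	static int toy_retry(struct toy_hash *h)
	{
		uint32_t newsize = h->hashsize, i, *fresh;

	again:
		newsize += (newsize * h->resize) / 100 + 1;
		fresh = calloc(newsize, sizeof(*fresh));
		if (!fresh)
			return -ENOMEM;
		for (i = 0; i < h->hashsize; i++) {
			if (h->slots[i] == 0)
				continue;
			if (toy_add(fresh, newsize, h->initval, h->slots[i]) != 0) {
				free(fresh);    /* still too crowded, grow more */
				goto again;
			}
		}
		free(h->slots);
		h->slots = fresh;
		h->hashsize = newsize;
		return 0;
	}

Scanning all probes even after a free slot is found matters because a deleted entry could otherwise mask a duplicate; that is what the "No shortcut - there can be deleted entries" comments in the module refer to.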
+--- /dev/null
++++ b/net/ipv4/netfilter/ip_set_ipportiphash.c
+@@ -0,0 +1,216 @@
++/* Copyright (C) 2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module implementing an ip+port+ip hash set */
++
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/ip.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++#include <linux/random.h>
+
-+static int retry(struct ip_set *set)
++#include <net/ip.h>
++
++#include <linux/netfilter_ipv4/ip_set_ipportiphash.h>
++#include <linux/netfilter_ipv4/ip_set_getport.h>
++
++static int limit = MAX_RANGE;
++
++#define jhash_ip2(map, i, ipport, ip1) \
++ jhash_2words(ipport, ip1, *(map->initval + i))
++
++static inline __u32
++ipportiphash_id(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ ip_set_ip_t *elem;
-+ void *members;
-+ u_int32_t i, hashsize = map->hashsize;
-+ int res;
-+ struct ip_set_ipporthash *tmp;
++ struct ip_set_ipportiphash *map = set->data;
++ __u32 id;
++ u_int16_t i;
++ struct ipportip *elem;
+
-+ if (map->resize == 0)
++ *hash_ip = pack_ip_port(map, ip, port);
++ DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
++ set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
++ if (!(*hash_ip || ip1))
++ return UINT_MAX;
++
++ for (i = 0; i < map->probes; i++) {
++ id = jhash_ip2(map, i, *hash_ip, ip1) % map->hashsize;
++ DP("hash key: %u", id);
++ elem = HARRAY_ELEM(map->members, struct ipportip *, id);
++ if (elem->ip == *hash_ip && elem->ip1 == ip1)
++ return id;
++ /* No shortcut - there can be deleted entries. */
++ }
++ return UINT_MAX;
++}
++
++static inline int
++ipportiphash_test(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
++{
++ struct ip_set_ipportiphash *map = set->data;
++
++ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
+
-+ again:
-+ res = 0;
++ return (ipportiphash_id(set, hash_ip, ip, port, ip1) != UINT_MAX);
++}
+
-+ /* Calculate new hash size */
-+ hashsize += (hashsize * map->resize)/100;
-+ if (hashsize == map->hashsize)
-+ hashsize++;
++#define KADT_CONDITION \
++ ip_set_ip_t port, ip1; \
++ \
++ if (flags[index+2] == 0) \
++ return 0; \
++ \
++ port = get_port(skb, flags[index+1]); \
++ ip1 = ipaddr(skb, flags[index+2]); \
++ \
++ if (port == INVALID_PORT) \
++ return 0;
+
-+ ip_set_printk("rehashing of set %s triggered: "
-+ "hashsize grows from %u to %u",
-+ set->name, map->hashsize, hashsize);
++UADT(ipportiphash, test, req->port, req->ip1)
++KADT(ipportiphash, test, ipaddr, port, ip1)
+
-+ tmp = kmalloc(sizeof(struct ip_set_ipporthash)
-+ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
-+ if (!tmp) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_ipporthash)
-+ + map->probes * sizeof(uint32_t));
-+ return -ENOMEM;
++static inline int
++__ipportip_add(struct ip_set_ipportiphash *map,
++ ip_set_ip_t hash_ip, ip_set_ip_t ip1)
++{
++ __u32 probe;
++ u_int16_t i;
++ struct ipportip *elem, *slot = NULL;
++
++ for (i = 0; i < map->probes; i++) {
++ probe = jhash_ip2(map, i, hash_ip, ip1) % map->hashsize;
++ elem = HARRAY_ELEM(map->members, struct ipportip *, probe);
++ if (elem->ip == hash_ip && elem->ip1 == ip1)
++ return -EEXIST;
++ if (!(slot || elem->ip || elem->ip1))
++ slot = elem;
++ /* There can be deleted entries, must check all slots */
+ }
-+ tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
-+ if (!tmp->members) {
-+ DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
-+ kfree(tmp);
-+ return -ENOMEM;
++ if (slot) {
++ slot->ip = hash_ip;
++ slot->ip1 = ip1;
++ map->elements++;
++ return 0;
+ }
-+ tmp->hashsize = hashsize;
-+ tmp->elements = 0;
-+ tmp->probes = map->probes;
-+ tmp->resize = map->resize;
-+ tmp->first_ip = map->first_ip;
-+ tmp->last_ip = map->last_ip;
-+ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
++ /* Trigger rehashing */
++ return -EAGAIN;
++}
+
-+ write_lock_bh(&set->lock);
-+ map = (struct ip_set_ipporthash *) set->data; /* Play safe */
-+ for (i = 0; i < map->hashsize && res == 0; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ if (*elem)
-+ res = __add_haship(tmp, *elem);
-+ }
-+ if (res) {
-+ /* Failure, try again */
-+ write_unlock_bh(&set->lock);
-+ harray_free(tmp->members);
-+ kfree(tmp);
-+ goto again;
-+ }
++static inline int
++__ipportiphash_add(struct ip_set_ipportiphash *map,
++ struct ipportip *elem)
++{
++ return __ipportip_add(map, elem->ip, elem->ip1);
++}
+
-+ /* Success at resizing! */
-+ members = map->members;
++static inline int
++ipportiphash_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
++{
++ struct ip_set_ipportiphash *map = set->data;
++
++ if (map->elements > limit)
++ return -ERANGE;
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
+
-+ map->hashsize = tmp->hashsize;
-+ map->members = tmp->members;
-+ write_unlock_bh(&set->lock);
++ *hash_ip = pack_ip_port(map, ip, port);
++ if (!(*hash_ip || ip1))
++ return -ERANGE;
++
++ return __ipportip_add(map, *hash_ip, ip1);
++}
+
-+ harray_free(members);
-+ kfree(tmp);
++UADT(ipportiphash, add, req->port, req->ip1)
++KADT(ipportiphash, add, ipaddr, port, ip1)
+
-+ return 0;
++static inline void
++__ipportiphash_retry(struct ip_set_ipportiphash *tmp,
++ struct ip_set_ipportiphash *map)
++{
++ tmp->first_ip = map->first_ip;
++ tmp->last_ip = map->last_ip;
+}
+
++HASH_RETRY2(ipportiphash, struct ipportip)
++
+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t *hash_ip)
++ipportiphash_del(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++ struct ip_set_ipportiphash *map = set->data;
+ ip_set_ip_t id;
-+ ip_set_ip_t *elem;
++ struct ipportip *elem;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
+
-+ id = hash_id(set, ip, port, hash_ip);
++ id = ipportiphash_id(set, hash_ip, ip, port, ip1);
+
+ if (id == UINT_MAX)
+ return -EEXIST;
-+
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
-+ *elem = 0;
++
++ elem = HARRAY_ELEM(map->members, struct ipportip *, id);
++ elem->ip = elem->ip1 = 0;
+ map->elements--;
+
+ return 0;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipporthash *req =
-+ (struct ip_set_req_ipporthash *) data;
++UADT(ipportiphash, del, req->port, req->ip1)
++KADT(ipportiphash, del, ipaddr, port, ip1)
+
-+ if (size != sizeof(struct ip_set_req_ipporthash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipporthash),
-+ size);
-+ return -EINVAL;
++static inline int
++__ipportiphash_create(const struct ip_set_req_ipportiphash_create *req,
++ struct ip_set_ipportiphash *map)
++{
++ if (req->to - req->from > MAX_RANGE) {
++ ip_set_printk("range too big, %d elements (max %d)",
++ req->to - req->from + 1, MAX_RANGE+1);
++ return -ENOEXEC;
+ }
-+ return __delip(set, req->ip, req->port, hash_ip);
++ map->first_ip = req->from;
++ map->last_ip = req->to;
++ return 0;
+}
+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
++HASH_CREATE(ipportiphash, struct ipportip)
++HASH_DESTROY(ipportiphash)
++HASH_FLUSH(ipportiphash, struct ipportip)
++
++static inline void
++__ipportiphash_list_header(const struct ip_set_ipportiphash *map,
++ struct ip_set_req_ipportiphash_create *header)
+{
-+ ip_set_ip_t port;
++ header->from = map->first_ip;
++ header->to = map->last_ip;
++}
+
-+ if (flags[index+1] == 0)
-+ return -EINVAL;
++HASH_LIST_HEADER(ipportiphash)
++HASH_LIST_MEMBERS_SIZE(ipportiphash, struct ipportip)
++HASH_LIST_MEMBERS_MEMCPY(ipportiphash, struct ipportip)
+
-+ port = get_port(skb, flags[index+1]);
++IP_SET_RTYPE(ipportiphash, IPSET_TYPE_IP | IPSET_TYPE_PORT
++ | IPSET_TYPE_IP1 | IPSET_DATA_TRIPLE)
+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ NIPQUAD(ip_hdr(skb)->saddr),
-+ NIPQUAD(ip_hdr(skb)->daddr));
-+#else
-+ NIPQUAD(skb->nh.iph->saddr),
-+ NIPQUAD(skb->nh.iph->daddr));
-+#endif
-+ DP("flag %s port %u",
-+ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
-+ port);
-+ if (port == INVALID_PORT)
-+ return -EINVAL;
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("ipportiphash type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
+
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ port,
-+ hash_ip);
-+}
++REGISTER_MODULE(ipportiphash)
+--- /dev/null
++++ b/net/ipv4/netfilter/ip_set_ipportnethash.c
+@@ -0,0 +1,304 @@
++/* Copyright (C) 2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module implementing an ip+port+net hash set */
++
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/ip.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++#include <linux/random.h>
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++#include <net/ip.h>
++
++#include <linux/netfilter_ipv4/ip_set_ipportnethash.h>
++#include <linux/netfilter_ipv4/ip_set_getport.h>
++
++static int limit = MAX_RANGE;
++
++#define jhash_ip2(map, i, ipport, ip1) \
++ jhash_2words(ipport, ip1, *(map->initval + i))
++
++static inline __u32
++ipportnethash_id_cidr(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port,
++ ip_set_ip_t ip1, uint8_t cidr)
+{
-+ struct ip_set_req_ipporthash_create *req =
-+ (struct ip_set_req_ipporthash_create *) data;
-+ struct ip_set_ipporthash *map;
-+ uint16_t i;
++ struct ip_set_ipportnethash *map = set->data;
++ __u32 id;
++ u_int16_t i;
++ struct ipportip *elem;
+
-+ if (size != sizeof(struct ip_set_req_ipporthash_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipporthash_create),
-+ size);
-+ return -EINVAL;
++ *hash_ip = pack_ip_port(map, ip, port);
++ DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
++ set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
++ ip1 = pack_ip_cidr(ip1, cidr);
++ if (!(*hash_ip || ip1))
++ return UINT_MAX;
++
++ for (i = 0; i < map->probes; i++) {
++ id = jhash_ip2(map, i, *hash_ip, ip1) % map->hashsize;
++ DP("hash key: %u", id);
++ elem = HARRAY_ELEM(map->members, struct ipportip *, id);
++ if (elem->ip == *hash_ip && elem->ip1 == ip1)
++ return id;
++ /* No shortcut - there can be deleted entries. */
+ }
++ return UINT_MAX;
++}
+
-+ if (req->hashsize < 1) {
-+ ip_set_printk("hashsize too small");
-+ return -ENOEXEC;
-+ }
++static inline __u32
++ipportnethash_id(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
++{
++ struct ip_set_ipportnethash *map = set->data;
++ __u32 id = UINT_MAX;
++ int i;
+
-+ if (req->probes < 1) {
-+ ip_set_printk("probes too small");
-+ return -ENOEXEC;
++ for (i = 0; i < 30 && map->cidr[i]; i++) {
++ id = ipportnethash_id_cidr(set, hash_ip, ip, port, ip1,
++ map->cidr[i]);
++ if (id != UINT_MAX)
++ break;
+ }
++ return id;
++}
+
-+ map = kmalloc(sizeof(struct ip_set_ipporthash)
-+ + req->probes * sizeof(uint32_t), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_ipporthash)
-+ + req->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ for (i = 0; i < req->probes; i++)
-+ get_random_bytes(((uint32_t *) map->initval)+i, 4);
-+ map->elements = 0;
-+ map->hashsize = req->hashsize;
-+ map->probes = req->probes;
-+ map->resize = req->resize;
-+ map->first_ip = req->from;
-+ map->last_ip = req->to;
-+ map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
-+ kfree(map);
-+ return -ENOMEM;
-+ }
++static inline int
++ipportnethash_test_cidr(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port,
++ ip_set_ip_t ip1, uint8_t cidr)
++{
++ struct ip_set_ipportnethash *map = set->data;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
+
-+ set->data = map;
-+ return 0;
++ return (ipportnethash_id_cidr(set, hash_ip, ip, port, ip1,
++ cidr) != UINT_MAX);
+}
+
-+static void destroy(struct ip_set *set)
++static inline int
++ipportnethash_test(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+
-+ harray_free(map->members);
-+ kfree(map);
++ struct ip_set_ipportnethash *map = set->data;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
+
-+ set->data = NULL;
++ return (ipportnethash_id(set, hash_ip, ip, port, ip1) != UINT_MAX);
+}
+
-+static void flush(struct ip_set *set)
++static int
++ipportnethash_utest(struct ip_set *set, const void *data, u_int32_t size,
++ ip_set_ip_t *hash_ip)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
-+ map->elements = 0;
++ const struct ip_set_req_ipportnethash *req = data;
++
++ if (req->cidr <= 0 || req->cidr > 32)
++ return -EINVAL;
++ return (req->cidr == 32
++ ? ipportnethash_test(set, hash_ip, req->ip, req->port,
++ req->ip1)
++ : ipportnethash_test_cidr(set, hash_ip, req->ip, req->port,
++ req->ip1, req->cidr));
+}
+
-+static void list_header(const struct ip_set *set, void *data)
++#define KADT_CONDITION \
++ ip_set_ip_t port, ip1; \
++ \
++ if (flags[index+2] == 0) \
++ return 0; \
++ \
++ port = get_port(skb, flags[index+1]); \
++ ip1 = ipaddr(skb, flags[index+2]); \
++ \
++ if (port == INVALID_PORT) \
++ return 0;
++
++KADT(ipportnethash, test, ipaddr, port, ip1)
++
++static inline int
++__ipportnet_add(struct ip_set_ipportnethash *map,
++ ip_set_ip_t hash_ip, ip_set_ip_t ip1)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ struct ip_set_req_ipporthash_create *header =
-+ (struct ip_set_req_ipporthash_create *) data;
++ __u32 probe;
++ u_int16_t i;
++ struct ipportip *elem, *slot = NULL;
+
-+ header->hashsize = map->hashsize;
-+ header->probes = map->probes;
-+ header->resize = map->resize;
-+ header->from = map->first_ip;
-+ header->to = map->last_ip;
++ for (i = 0; i < map->probes; i++) {
++ probe = jhash_ip2(map, i, hash_ip, ip1) % map->hashsize;
++ elem = HARRAY_ELEM(map->members, struct ipportip *, probe);
++ if (elem->ip == hash_ip && elem->ip1 == ip1)
++ return -EEXIST;
++ if (!(slot || elem->ip || elem->ip1))
++ slot = elem;
++ /* There can be deleted entries, must check all slots */
++ }
++ if (slot) {
++ slot->ip = hash_ip;
++ slot->ip1 = ip1;
++ map->elements++;
++ return 0;
++ }
++ /* Trigger rehashing */
++ return -EAGAIN;
+}
+
-+static int list_members_size(const struct ip_set *set)
++static inline int
++__ipportnethash_add(struct ip_set_ipportnethash *map,
++ struct ipportip *elem)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+
-+ return (map->hashsize * sizeof(ip_set_ip_t));
++ return __ipportnet_add(map, elem->ip, elem->ip1);
+}
+
-+static void list_members(const struct ip_set *set, void *data)
++static inline int
++ipportnethash_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port,
++ ip_set_ip_t ip1, uint8_t cidr)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ ip_set_ip_t i, *elem;
++ struct ip_set_ipportnethash *map = set->data;
++ struct ipportip;
++ int ret;
++
++ if (map->elements > limit)
++ return -ERANGE;
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++ if (cidr <= 0 || cidr >= 32)
++ return -EINVAL;
++ if (map->nets[cidr-1] == UINT16_MAX)
++ return -ERANGE;
+
-+ for (i = 0; i < map->hashsize; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ ((ip_set_ip_t *)data)[i] = *elem;
++ *hash_ip = pack_ip_port(map, ip, port);
++ ip1 = pack_ip_cidr(ip1, cidr);
++ if (!(*hash_ip || ip1))
++ return -ERANGE;
++
++ ret =__ipportnet_add(map, *hash_ip, ip1);
++ if (ret == 0) {
++ if (!map->nets[cidr-1]++)
++ add_cidr_size(map->cidr, cidr);
++ map->elements++;
+ }
++ return ret;
+}
+
-+static struct ip_set_type ip_set_ipporthash = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_DATA_DOUBLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_ipporthash),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .retry = &retry,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_ipporthash_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++#undef KADT_CONDITION
++#define KADT_CONDITION \
++ struct ip_set_ipportnethash *map = set->data; \
++ uint8_t cidr = map->cidr[0] ? map->cidr[0] : 31; \
++ ip_set_ip_t port, ip1; \
++ \
++ if (flags[index+2] == 0) \
++ return 0; \
++ \
++ port = get_port(skb, flags[index+1]); \
++ ip1 = ipaddr(skb, flags[index+2]); \
++ \
++ if (port == INVALID_PORT) \
++ return 0;
+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("ipporthash type of IP sets");
-+module_param(limit, int, 0600);
-+MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
++UADT(ipportnethash, add, req->port, req->ip1, req->cidr)
++KADT(ipportnethash, add, ipaddr, port, ip1, cidr)
+
-+static int __init ip_set_ipporthash_init(void)
++static inline void
++__ipportnethash_retry(struct ip_set_ipportnethash *tmp,
++ struct ip_set_ipportnethash *map)
+{
-+ return ip_set_register_set_type(&ip_set_ipporthash);
++ tmp->first_ip = map->first_ip;
++ tmp->last_ip = map->last_ip;
++ memcpy(tmp->cidr, map->cidr, sizeof(tmp->cidr));
++ memcpy(tmp->nets, map->nets, sizeof(tmp->nets));
+}
+
-+static void __exit ip_set_ipporthash_fini(void)
++HASH_RETRY2(ipportnethash, struct ipportip)
++
++static inline int
++ipportnethash_del(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port,
++ ip_set_ip_t ip1, uint8_t cidr)
+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_ipporthash);
++ struct ip_set_ipportnethash *map = set->data;
++ ip_set_ip_t id;
++ struct ipportip *elem;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++ if (!ip)
++ return -ERANGE;
++ if (cidr <= 0 || cidr >= 32)
++ return -EINVAL;
++
++ id = ipportnethash_id_cidr(set, hash_ip, ip, port, ip1, cidr);
++
++ if (id == UINT_MAX)
++ return -EEXIST;
++
++ elem = HARRAY_ELEM(map->members, struct ipportip *, id);
++ elem->ip = elem->ip1 = 0;
++ map->elements--;
++ if (!map->nets[cidr-1]--)
++ del_cidr_size(map->cidr, cidr);
++
++ return 0;
++}
++
++UADT(ipportnethash, del, req->port, req->ip1, req->cidr)
++KADT(ipportnethash, del, ipaddr, port, ip1, cidr)
++
++static inline int
++__ipportnethash_create(const struct ip_set_req_ipportnethash_create *req,
++ struct ip_set_ipportnethash *map)
++{
++ if (req->to - req->from > MAX_RANGE) {
++ ip_set_printk("range too big, %d elements (max %d)",
++ req->to - req->from + 1, MAX_RANGE+1);
++ return -ENOEXEC;
++ }
++ map->first_ip = req->from;
++ map->last_ip = req->to;
++ memset(map->cidr, 0, sizeof(map->cidr));
++ memset(map->nets, 0, sizeof(map->nets));
++ return 0;
+}
+
-+module_init(ip_set_ipporthash_init);
-+module_exit(ip_set_ipporthash_fini);
++HASH_CREATE(ipportnethash, struct ipportip)
++HASH_DESTROY(ipportnethash)
++HASH_FLUSH_CIDR(ipportnethash, struct ipportip);
++
++static inline void
++__ipportnethash_list_header(const struct ip_set_ipportnethash *map,
++ struct ip_set_req_ipportnethash_create *header)
++{
++ header->from = map->first_ip;
++ header->to = map->last_ip;
++}
++
++HASH_LIST_HEADER(ipportnethash)
++
++HASH_LIST_MEMBERS_SIZE(ipportnethash, struct ipportip)
++HASH_LIST_MEMBERS_MEMCPY(ipportnethash, struct ipportip)
++
++IP_SET_RTYPE(ipportnethash, IPSET_TYPE_IP | IPSET_TYPE_PORT
++ | IPSET_TYPE_IP1 | IPSET_DATA_TRIPLE)
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("ipportnethash type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
++
++REGISTER_MODULE(ipportnethash)
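A standalone sketch of the prefix-length handling the ipportnethash type adds on top of plain hashing (toy code with made-up names, not the module's pack_ip_cidr/add_cidr_size helpers): the set remembers which prefix lengths currently have members and, on lookup, masks the address with each remembered length in turn until one per-length hash probe hits.

	#include <stdint.h>

	#define TOY_MAXCIDR 30                 /* the module tracks up to 30 lengths */

	struct toy_nethash {
		uint8_t  cidr[TOY_MAXCIDR];    /* lengths in use, 0-terminated */
		uint16_t nets[31];             /* member count per length 1..31 */
	};

	static uint32_t toy_mask(uint32_t ip, uint8_t cidr)
	{
		return cidr ? (ip & (0xFFFFFFFFu << (32 - cidr))) : 0;
	}

	/* Try each remembered prefix length until one masked lookup hits. */
	static int toy_test(const struct toy_nethash *map, uint32_t ip,
			    int (*lookup_one)(uint32_t net, uint8_t cidr))
	{
		int i;

		for (i = 0; i < TOY_MAXCIDR && map->cidr[i]; i++)
			if (lookup_one(toy_mask(ip, map->cidr[i]), map->cidr[i]))
				return 1;
		return 0;
	}

	/* Mirror of the add-path bookkeeping: a length stays listed while at
	 * least one member uses it (the real code keeps the list ordered). */
	static void toy_net_added(struct toy_nethash *map, uint8_t cidr)
	{
		int i;

		if (map->nets[cidr - 1]++ > 0)
			return;
		for (i = 0; i < TOY_MAXCIDR && map->cidr[i]; i++)
			;
		if (i < TOY_MAXCIDR)
			map->cidr[i] = cidr;
	}

This is why ipportnethash_id above loops over map->cidr[] and why the add and del paths keep the map->nets[] counters in step with that list.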
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_iptree.c
-@@ -0,0 +1,612 @@
-+/* Copyright (C) 2005 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+@@ -0,0 +1,466 @@
++/* Copyright (C) 2005-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
@@ -4525,24 +5067,20 @@
+
+/* Kernel module implementing an IP set type: the iptree type */
+
-+#include <linux/version.h>
+#include <linux/module.h>
++#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
++#include <linux/timer.h>
+
-+/* Backward compatibility */
-+#ifndef __nocast
-+#define __nocast
-+#endif
-+
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
+#include <linux/netfilter_ipv4/ip_set_iptree.h>
+
+static int limit = MAX_RANGE;
@@ -4553,13 +5091,9 @@
+ * to delete the gc timer at destroying/flushing a set */
+#define IPTREE_DESTROY_SLEEP 100
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+static struct kmem_cache *branch_cachep;
-+static struct kmem_cache *leaf_cachep;
-+#else
-+static kmem_cache_t *branch_cachep;
-+static kmem_cache_t *leaf_cachep;
-+#endif
++static __KMEM_CACHE_T__ *branch_cachep;
++static __KMEM_CACHE_T__ *leaf_cachep;
++
+
+#if defined(__LITTLE_ENDIAN)
+#define ABCD(a,b,c,d,addrp) do { \
@@ -4587,9 +5121,9 @@
+} while (0)
+
+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++iptree_test(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptree *map = set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
@@ -4597,7 +5131,7 @@
+
+ if (!ip)
+ return -ERANGE;
-+
++
+ *hash_ip = ip;
+ ABCD(a, b, c, d, hash_ip);
+ DP("%u %u %u %u timeout %u", a, b, c, d, map->timeout);
@@ -4610,53 +5144,10 @@
+ || time_after(dtree->expires[d], jiffies));
+}
+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iptree *req =
-+ (struct ip_set_req_iptree *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptree)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iptree),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ int res;
-+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ NIPQUAD(ip_hdr(skb)->saddr),
-+ NIPQUAD(ip_hdr(skb)->daddr));
-+#else
-+ NIPQUAD(skb->nh.iph->saddr),
-+ NIPQUAD(skb->nh.iph->daddr));
-+#endif
++#define KADT_CONDITION
+
-+ res = __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+ return (res < 0 ? 0 : res);
-+}
++UADT(iptree, test)
++KADT(iptree, test, ipaddr)
+
+#define ADDIP_WALK(map, elem, branch, type, cachep) do { \
+ if ((map)->tree[elem]) { \
@@ -4671,24 +5162,24 @@
+ (map)->tree[elem] = branch; \
+ DP("alloc %u", elem); \
+ } \
-+} while (0)
++} while (0)
+
+static inline int
-+__addip(struct ip_set *set, ip_set_ip_t ip, unsigned int timeout,
-+ ip_set_ip_t *hash_ip)
++iptree_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, unsigned int timeout)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptree *map = set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
+ unsigned char a,b,c,d;
+ int ret = 0;
-+
++
+ if (!ip || map->elements >= limit)
+ /* We could call the garbage collector
+ * but it's probably overkill */
+ return -ERANGE;
-+
++
+ *hash_ip = ip;
+ ABCD(a, b, c, d, hash_ip);
+ DP("%u %u %u %u timeout %u", a, b, c, d, timeout);
@@ -4698,6 +5189,8 @@
+ if (dtree->expires[d]
+ && (!map->timeout || time_after(dtree->expires[d], jiffies)))
+ ret = -EEXIST;
++ if (map->timeout && timeout == 0)
++ timeout = map->timeout;
+ dtree->expires[d] = map->timeout ? (timeout * HZ + jiffies) : 1;
+ /* Lottery: I won! */
+ if (dtree->expires[d] == 0)
@@ -4708,47 +5201,8 @@
+ return ret;
+}
+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_req_iptree *req =
-+ (struct ip_set_req_iptree *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptree)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iptree),
-+ size);
-+ return -EINVAL;
-+ }
-+ DP("%u.%u.%u.%u %u", HIPQUAD(req->ip), req->timeout);
-+ return __addip(set, req->ip,
-+ req->timeout ? req->timeout : map->timeout,
-+ hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+
-+ return __addip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ map->timeout,
-+ hash_ip);
-+}
++UADT(iptree, add, req->timeout)
++KADT(iptree, add, ipaddr, 0)
+
+#define DELIP_WALK(map, elem, branch) do { \
+ if ((map)->tree[elem]) { \
@@ -4758,17 +5212,17 @@
+} while (0)
+
+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++iptree_del(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptree *map = set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
+ unsigned char a,b,c,d;
-+
++
+ if (!ip)
+ return -ERANGE;
-+
++
+ *hash_ip = ip;
+ ABCD(a, b, c, d, hash_ip);
+ DELIP_WALK(map, a, btree);
@@ -4783,40 +5237,8 @@
+ return -EEXIST;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iptree *req =
-+ (struct ip_set_req_iptree *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptree)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iptree),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT(iptree, del)
++KADT(iptree, del, ipaddr)
+
+#define LOOP_WALK_BEGIN(map, i, branch) \
+ for (i = 0; i < 256; i++) { \
@@ -4826,10 +5248,11 @@
+
+#define LOOP_WALK_END }
+
-+static void ip_tree_gc(unsigned long ul_set)
++static void
++ip_tree_gc(unsigned long ul_set)
+{
-+ struct ip_set *set = (void *) ul_set;
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set *set = (struct ip_set *) ul_set;
++ struct ip_set_iptree *map = set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
@@ -4891,14 +5314,15 @@
+ }
+ LOOP_WALK_END;
+ write_unlock_bh(&set->lock);
-+
++
+ map->gc.expires = jiffies + map->gc_interval * HZ;
+ add_timer(&map->gc);
+}
+
-+static inline void init_gc_timer(struct ip_set *set)
++static inline void
++init_gc_timer(struct ip_set *set)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptree *map = set->data;
+
+ /* Even if there is no timeout for the entries,
+ * we still have to call gc because delete
@@ -4911,22 +5335,22 @@
+ add_timer(&map->gc);
+}
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static int
++iptree_create(struct ip_set *set, const void *data, u_int32_t size)
+{
-+ struct ip_set_req_iptree_create *req =
-+ (struct ip_set_req_iptree_create *) data;
++ const struct ip_set_req_iptree_create *req = data;
+ struct ip_set_iptree *map;
+
+ if (size != sizeof(struct ip_set_req_iptree_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
++ ip_set_printk("data length wrong (want %zu, have %lu)",
+ sizeof(struct ip_set_req_iptree_create),
-+ size);
++ (unsigned long)size);
+ return -EINVAL;
+ }
+
+ map = kmalloc(sizeof(struct ip_set_iptree), GFP_KERNEL);
+ if (!map) {
-+ DP("out of memory for %d bytes",
++ DP("out of memory for %zu bytes",
+ sizeof(struct ip_set_iptree));
+ return -ENOMEM;
+ }
@@ -4940,7 +5364,8 @@
+ return 0;
+}
+
-+static void __flush(struct ip_set_iptree *map)
++static inline void
++__flush(struct ip_set_iptree *map)
+{
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
@@ -4959,9 +5384,10 @@
+ map->elements = 0;
+}
+
-+static void destroy(struct ip_set *set)
++static void
++iptree_destroy(struct ip_set *set)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptree *map = set->data;
+
+ /* gc might be running */
+ while (!del_timer(&map->gc))
@@ -4971,11 +5397,12 @@
+ set->data = NULL;
+}
+
-+static void flush(struct ip_set *set)
++static void
++iptree_flush(struct ip_set *set)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptree *map = set->data;
+ unsigned int timeout = map->timeout;
-+
++
+ /* gc might be running */
+ while (!del_timer(&map->gc))
+ msleep(IPTREE_DESTROY_SLEEP);
@@ -4986,18 +5413,19 @@
+ init_gc_timer(set);
+}
+
-+static void list_header(const struct ip_set *set, void *data)
++static void
++iptree_list_header(const struct ip_set *set, void *data)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_req_iptree_create *header =
-+ (struct ip_set_req_iptree_create *) data;
++ const struct ip_set_iptree *map = set->data;
++ struct ip_set_req_iptree_create *header = data;
+
+ header->timeout = map->timeout;
+}
+
-+static int list_members_size(const struct ip_set *set)
++static int
++iptree_list_members_size(const struct ip_set *set)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ const struct ip_set_iptree *map = set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
@@ -5020,9 +5448,10 @@
+ return (count * sizeof(struct ip_set_req_iptree));
+}
+
-+static void list_members(const struct ip_set *set, void *data)
++static void
++iptree_list_members(const struct ip_set *set, void *data)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ const struct ip_set_iptree *map = set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
@@ -5036,7 +5465,7 @@
+ for (d = 0; d < 256; d++) {
+ if (dtree->expires[d]
+ && (!map->timeout || time_after(dtree->expires[d], jiffies))) {
-+ entry = (struct ip_set_req_iptree *)(data + offset);
++ entry = data + offset;
+ entry->ip = ((a << 24) | (b << 16) | (c << 8) | d);
+ entry->timeout = !map->timeout ? 0
+ : (dtree->expires[d] - jiffies)/HZ;
@@ -5048,26 +5477,7 @@
+ LOOP_WALK_END;
+}
+
-+static struct ip_set_type ip_set_iptree = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_iptree),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_iptree_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_TYPE(iptree, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -5078,30 +5488,16 @@
+static int __init ip_set_iptree_init(void)
+{
+ int ret;
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ branch_cachep = kmem_cache_create("ip_set_iptreeb",
-+ sizeof(struct ip_set_iptreeb),
-+ 0, 0, NULL);
-+#else
-+ branch_cachep = kmem_cache_create("ip_set_iptreeb",
-+ sizeof(struct ip_set_iptreeb),
-+ 0, 0, NULL, NULL);
-+#endif
++
++ branch_cachep = KMEM_CACHE_CREATE("ip_set_iptreeb",
++ sizeof(struct ip_set_iptreeb));
+ if (!branch_cachep) {
+ printk(KERN_ERR "Unable to create ip_set_iptreeb slab cache\n");
+ ret = -ENOMEM;
+ goto out;
+ }
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ leaf_cachep = kmem_cache_create("ip_set_iptreed",
-+ sizeof(struct ip_set_iptreed),
-+ 0, 0, NULL);
-+#else
-+ leaf_cachep = kmem_cache_create("ip_set_iptreed",
-+ sizeof(struct ip_set_iptreed),
-+ 0, 0, NULL, NULL);
-+#endif
++ leaf_cachep = KMEM_CACHE_CREATE("ip_set_iptreed",
++ sizeof(struct ip_set_iptreed));
+ if (!leaf_cachep) {
+ printk(KERN_ERR "Unable to create ip_set_iptreed slab cache\n");
+ ret = -ENOMEM;
@@ -5112,7 +5508,7 @@
+ goto out;
+
+ kmem_cache_destroy(leaf_cachep);
-+ free_branch:
++ free_branch:
+ kmem_cache_destroy(branch_cachep);
+ out:
+ return ret;
@@ -5130,7 +5526,7 @@
+module_exit(ip_set_iptree_fini);
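A standalone sketch of the iptree layout (toy code using malloc and wall-clock seconds where the module uses slab caches and jiffies; names are made up): the host-order address is split into its four octets, one tree level is allocated per leading octet, and each /32 leaf slot holds an expiry value, with zero meaning "not in the set".

	#include <stdint.h>
	#include <stdlib.h>
	#include <time.h>

	struct toy_d { time_t expires[256]; };          /* leaf: one slot per d */
	struct toy_c { struct toy_d *tree[256]; };
	struct toy_b { struct toy_c *tree[256]; };
	struct toy_tree { struct toy_b *tree[256]; unsigned timeout; };

	#define OCTETS(ip, a, b, c, d) do {		\
		a = ((ip) >> 24) & 0xFF;		\
		b = ((ip) >> 16) & 0xFF;		\
		c = ((ip) >>  8) & 0xFF;		\
		d = (ip) & 0xFF;			\
	} while (0)

	static int toy_add(struct toy_tree *t, uint32_t ip, unsigned timeout)
	{
		unsigned a, b, c, d;

		OCTETS(ip, a, b, c, d);
		if (!t->tree[a] && !(t->tree[a] = calloc(1, sizeof(struct toy_b))))
			return -1;
		if (!t->tree[a]->tree[b] &&
		    !(t->tree[a]->tree[b] = calloc(1, sizeof(struct toy_c))))
			return -1;
		if (!t->tree[a]->tree[b]->tree[c] &&
		    !(t->tree[a]->tree[b]->tree[c] = calloc(1, sizeof(struct toy_d))))
			return -1;
		/* non-zero marks membership; with a timeout it is the expiry */
		t->tree[a]->tree[b]->tree[c]->expires[d] =
			t->timeout ? time(NULL) + (timeout ? timeout : t->timeout) : 1;
		return 0;
	}

	static int toy_test(const struct toy_tree *t, uint32_t ip)
	{
		unsigned a, b, c, d;

		OCTETS(ip, a, b, c, d);
		if (!t->tree[a] || !t->tree[a]->tree[b] ||
		    !t->tree[a]->tree[b]->tree[c])
			return 0;
		return t->tree[a]->tree[b]->tree[c]->expires[d] &&
		       (!t->timeout ||
			t->tree[a]->tree[b]->tree[c]->expires[d] > time(NULL));
	}

The garbage collector then only has to walk allocated branches and clear slots whose expiry lies in the past, which is what ip_tree_gc() above does before freeing emptied subtrees.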
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_iptreemap.c
-@@ -0,0 +1,829 @@
+@@ -0,0 +1,708 @@
+/* Copyright (C) 2007 Sven Wegener <sven.wegener@stealer.net>
+ *
+ * This program is free software; you can redistribute it and/or modify it
@@ -5139,38 +5535,33 @@
+ */
+
+/* This modules implements the iptreemap ipset type. It uses bitmaps to
-+ * represent every single IPv4 address as a single bit. The bitmaps are managed
-+ * in a tree structure, where the first three octets of an addresses are used
-+ * as an index to find the bitmap and the last octet is used as the bit number.
++ * represent every single IPv4 address as a bit. The bitmaps are managed in a
++ * tree structure, where the first three octets of an address are used as an
++ * index to find the bitmap and the last octet is used as the bit number.
+ */
+
-+#include <linux/version.h>
++#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
++#include <linux/timer.h>
+
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
+#include <linux/netfilter_ipv4/ip_set_iptreemap.h>
+
+#define IPTREEMAP_DEFAULT_GC_TIME (5 * 60)
+#define IPTREEMAP_DESTROY_SLEEP (100)
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+static struct kmem_cache *cachep_b;
-+static struct kmem_cache *cachep_c;
-+static struct kmem_cache *cachep_d;
-+#else
-+static kmem_cache_t *cachep_b;
-+static kmem_cache_t *cachep_c;
-+static kmem_cache_t *cachep_d;
-+#endif
++static __KMEM_CACHE_T__ *cachep_b;
++static __KMEM_CACHE_T__ *cachep_c;
++static __KMEM_CACHE_T__ *cachep_d;
+
+static struct ip_set_iptreemap_d *fullbitmap_d;
+static struct ip_set_iptreemap_c *fullbitmap_c;
@@ -5319,9 +5710,6 @@
+#define LOOP_WALK_END_COUNT() \
+ }
+
-+#define MIN(a, b) (a < b ? a : b)
-+#define MAX(a, b) (a > b ? a : b)
-+
+#define GETVALUE1(a, a1, b1, r) \
+ (a == a1 ? b1 : r)
+
@@ -5391,9 +5779,9 @@
+}
+
+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++iptreemap_test(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5410,40 +5798,13 @@
+ return !!test_bit(d, (void *) dtree->bitmap);
+}
+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptreemap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
-+ return -EINVAL;
-+ }
-+
-+ return __testip(set, req->start, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
-+{
-+ int res;
++#define KADT_CONDITION
+
-+ res = __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+
-+ return (res < 0 ? 0 : res);
-+}
++UADT(iptreemap, test)
++KADT(iptreemap, test, ipaddr)
+
+static inline int
-+__addip_single(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++__addip_single(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
+ struct ip_set_iptreemap_b *btree;
@@ -5459,18 +5820,19 @@
+ ADDIP_WALK(btree, b, ctree, struct ip_set_iptreemap_c, cachep_c, fullbitmap_c);
+ ADDIP_WALK(ctree, c, dtree, struct ip_set_iptreemap_d, cachep_d, fullbitmap_d);
+
-+ if (test_and_set_bit(d, (void *) dtree->bitmap))
++ if (__test_and_set_bit(d, (void *) dtree->bitmap))
+ return -EEXIST;
+
-+ set_bit(b, (void *) btree->dirty);
++ __set_bit(b, (void *) btree->dirty);
+
+ return 0;
+}
+
+static inline int
-+__addip_range(struct ip_set *set, ip_set_ip_t start, ip_set_ip_t end, ip_set_ip_t *hash_ip)
++iptreemap_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t start, ip_set_ip_t end)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5479,7 +5841,7 @@
+ unsigned char a2, b2, c2, d2;
+
+ if (start == end)
-+ return __addip_single(set, start, hash_ip);
++ return __addip_single(set, hash_ip, start);
+
+ *hash_ip = start;
+
@@ -5491,8 +5853,8 @@
+ ADDIP_RANGE_LOOP(btree, b, GETVALUE1(a, a1, b1, 0), GETVALUE1(a, a2, b2, 255), CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2), ctree, fullbitmap_c, cachep_c, free_c) {
+ ADDIP_RANGE_LOOP(ctree, c, GETVALUE2(a, b, a1, b1, c1, 0), GETVALUE2(a, b, a2, b2, c2, 255), CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2), dtree, fullbitmap_d, cachep_d, free_d) {
+ for (d = GETVALUE3(a, b, c, a1, b1, c1, d1, 0); d <= GETVALUE3(a, b, c, a2, b2, c2, d2, 255); d++)
-+ set_bit(d, (void *) dtree->bitmap);
-+ set_bit(b, (void *) btree->dirty);
++ __set_bit(d, (void *) dtree->bitmap);
++ __set_bit(b, (void *) btree->dirty);
+ } ADDIP_RANGE_LOOP_END();
+ } ADDIP_RANGE_LOOP_END();
+ } ADDIP_RANGE_LOOP_END();
@@ -5500,39 +5862,14 @@
+ return 0;
+}
+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptreemap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
-+ return -EINVAL;
-+ }
-+
-+ return __addip_range(set, MIN(req->start, req->end), MAX(req->start, req->end), hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
-+{
-+
-+ return __addip_single(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT0(iptreemap, add, min(req->ip, req->end), max(req->ip, req->end))
++KADT(iptreemap, add, ipaddr, ip)
+
+static inline int
-+__delip_single(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip, unsigned int __nocast flags)
++__delip_single(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, gfp_t flags)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5546,18 +5883,19 @@
+ DELIP_WALK(btree, b, ctree, cachep_c, fullbitmap_c, flags);
+ DELIP_WALK(ctree, c, dtree, cachep_d, fullbitmap_d, flags);
+
-+ if (!test_and_clear_bit(d, (void *) dtree->bitmap))
++ if (!__test_and_clear_bit(d, (void *) dtree->bitmap))
+ return -EEXIST;
+
-+ set_bit(b, (void *) btree->dirty);
++ __set_bit(b, (void *) btree->dirty);
+
+ return 0;
+}
+
+static inline int
-+__delip_range(struct ip_set *set, ip_set_ip_t start, ip_set_ip_t end, ip_set_ip_t *hash_ip, unsigned int __nocast flags)
++iptreemap_del(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t start, ip_set_ip_t end, gfp_t flags)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5566,7 +5904,7 @@
+ unsigned char a2, b2, c2, d2;
+
+ if (start == end)
-+ return __delip_single(set, start, hash_ip, flags);
++ return __delip_single(set, hash_ip, start, flags);
+
+ *hash_ip = start;
+
@@ -5578,8 +5916,8 @@
+ DELIP_RANGE_LOOP(btree, b, GETVALUE1(a, a1, b1, 0), GETVALUE1(a, a2, b2, 255), CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2), ctree, fullbitmap_c, cachep_c, free_c, flags) {
+ DELIP_RANGE_LOOP(ctree, c, GETVALUE2(a, b, a1, b1, c1, 0), GETVALUE2(a, b, a2, b2, c2, 255), CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2), dtree, fullbitmap_d, cachep_d, free_d, flags) {
+ for (d = GETVALUE3(a, b, c, a1, b1, c1, d1, 0); d <= GETVALUE3(a, b, c, a2, b2, c2, d2, 255); d++)
-+ clear_bit(d, (void *) dtree->bitmap);
-+ set_bit(b, (void *) btree->dirty);
++ __clear_bit(d, (void *) dtree->bitmap);
++ __set_bit(b, (void *) btree->dirty);
+ } DELIP_RANGE_LOOP_END();
+ } DELIP_RANGE_LOOP_END();
+ } DELIP_RANGE_LOOP_END();
@@ -5587,34 +5925,8 @@
+ return 0;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptreemap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
-+ return -EINVAL;
-+ }
-+
-+ return __delip_range(set, MIN(req->start, req->end), MAX(req->start, req->end), hash_ip, GFP_KERNEL);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
-+{
-+ return __delip_single(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip,
-+ GFP_ATOMIC);
-+}
++UADT0(iptreemap, del, min(req->ip, req->end), max(req->ip, req->end), GFP_KERNEL)
++KADT(iptreemap, del, ipaddr, ip, GFP_ATOMIC)
+
+/* Check the status of the bitmap
+ * -1 == all bits cleared
@@ -5638,7 +5950,7 @@
+gc(unsigned long addr)
+{
+ struct ip_set *set = (struct ip_set *) addr;
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5649,7 +5961,7 @@
+
+ LOOP_WALK_BEGIN_GC(map, a, btree, fullbitmap_b, cachep_b, i) {
+ LOOP_WALK_BEGIN_GC(btree, b, ctree, fullbitmap_c, cachep_c, j) {
-+ if (!test_and_clear_bit(b, (void *) btree->dirty))
++ if (!__test_and_clear_bit(b, (void *) btree->dirty))
+ continue;
+ LOOP_WALK_BEGIN_GC(ctree, c, dtree, fullbitmap_d, cachep_d, k) {
+ switch (bitmap_status(dtree)) {
@@ -5677,7 +5989,7 @@
+static inline void
+init_gc_timer(struct ip_set *set)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+
+ init_timer(&map->gc);
+ map->gc.data = (unsigned long) set;
@@ -5686,16 +5998,12 @@
+ add_timer(&map->gc);
+}
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static int
++iptreemap_create(struct ip_set *set, const void *data, u_int32_t size)
+{
-+ struct ip_set_req_iptreemap_create *req = (struct ip_set_req_iptreemap_create *) data;
++ const struct ip_set_req_iptreemap_create *req = data;
+ struct ip_set_iptreemap *map;
+
-+ if (size != sizeof(struct ip_set_req_iptreemap_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap_create), size);
-+ return -EINVAL;
-+ }
-+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
@@ -5708,7 +6016,8 @@
+ return 0;
+}
+
-+static inline void __flush(struct ip_set_iptreemap *map)
++static inline void
++__flush(struct ip_set_iptreemap *map)
+{
+ struct ip_set_iptreemap_b *btree;
+ unsigned int a;
@@ -5719,9 +6028,10 @@
+ LOOP_WALK_END();
+}
+
-+static void destroy(struct ip_set *set)
++static void
++iptreemap_destroy(struct ip_set *set)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+
+ while (!del_timer(&map->gc))
+ msleep(IPTREEMAP_DESTROY_SLEEP);
@@ -5732,9 +6042,10 @@
+ set->data = NULL;
+}
+
-+static void flush(struct ip_set *set)
++static void
++iptreemap_flush(struct ip_set *set)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+
+ while (!del_timer(&map->gc))
+ msleep(IPTREEMAP_DESTROY_SLEEP);
@@ -5746,17 +6057,19 @@
+ init_gc_timer(set);
+}
+
-+static void list_header(const struct ip_set *set, void *data)
++static void
++iptreemap_list_header(const struct ip_set *set, void *data)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
-+ struct ip_set_req_iptreemap_create *header = (struct ip_set_req_iptreemap_create *) data;
++ struct ip_set_iptreemap *map = set->data;
++ struct ip_set_req_iptreemap_create *header = data;
+
+ header->gc_interval = map->gc_interval;
+}
+
-+static int list_members_size(const struct ip_set *set)
++static int
++iptreemap_list_members_size(const struct ip_set *set)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5783,19 +6096,21 @@
+ return (count * sizeof(struct ip_set_req_iptreemap));
+}
+
-+static inline size_t add_member(void *data, size_t offset, ip_set_ip_t start, ip_set_ip_t end)
++static inline u_int32_t
++add_member(void *data, size_t offset, ip_set_ip_t start, ip_set_ip_t end)
+{
-+ struct ip_set_req_iptreemap *entry = (struct ip_set_req_iptreemap *) (data + offset);
++ struct ip_set_req_iptreemap *entry = data + offset;
+
-+ entry->start = start;
++ entry->ip = start;
+ entry->end = end;
+
+ return sizeof(*entry);
+}
+
-+static void list_members(const struct ip_set *set, void *data)
++static void
++iptreemap_list_members(const struct ip_set *set, void *data)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5830,26 +6145,7 @@
+ add_member(data, offset, start, end);
+}
+
-+static struct ip_set_type ip_set_iptreemap = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = create,
-+ .destroy = destroy,
-+ .flush = flush,
-+ .reqsize = sizeof(struct ip_set_req_iptreemap),
-+ .addip = addip,
-+ .addip_kernel = addip_kernel,
-+ .delip = delip,
-+ .delip_kernel = delip_kernel,
-+ .testip = testip,
-+ .testip_kernel = testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_iptreemap_create),
-+ .list_header = list_header,
-+ .list_members_size = list_members_size,
-+ .list_members = list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_TYPE(iptreemap, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sven Wegener <sven.wegener@stealer.net>");
@@ -5860,43 +6156,22 @@
+ int ret = -ENOMEM;
+ int a;
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ cachep_b = kmem_cache_create("ip_set_iptreemap_b",
-+ sizeof(struct ip_set_iptreemap_b),
-+ 0, 0, NULL);
-+#else
-+ cachep_b = kmem_cache_create("ip_set_iptreemap_b",
-+ sizeof(struct ip_set_iptreemap_b),
-+ 0, 0, NULL, NULL);
-+#endif
++ cachep_b = KMEM_CACHE_CREATE("ip_set_iptreemap_b",
++ sizeof(struct ip_set_iptreemap_b));
+ if (!cachep_b) {
+ ip_set_printk("Unable to create ip_set_iptreemap_b slab cache");
+ goto out;
+ }
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ cachep_c = kmem_cache_create("ip_set_iptreemap_c",
-+ sizeof(struct ip_set_iptreemap_c),
-+ 0, 0, NULL);
-+#else
-+ cachep_c = kmem_cache_create("ip_set_iptreemap_c",
-+ sizeof(struct ip_set_iptreemap_c),
-+ 0, 0, NULL, NULL);
-+#endif
++ cachep_c = KMEM_CACHE_CREATE("ip_set_iptreemap_c",
++ sizeof(struct ip_set_iptreemap_c));
+ if (!cachep_c) {
+ ip_set_printk("Unable to create ip_set_iptreemap_c slab cache");
+ goto outb;
+ }
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ cachep_d = kmem_cache_create("ip_set_iptreemap_d",
-+ sizeof(struct ip_set_iptreemap_d),
-+ 0, 0, NULL);
-+#else
-+ cachep_d = kmem_cache_create("ip_set_iptreemap_d",
-+ sizeof(struct ip_set_iptreemap_d),
-+ 0, 0, NULL, NULL);
-+#endif
++ cachep_d = KMEM_CACHE_CREATE("ip_set_iptreemap_d",
++ sizeof(struct ip_set_iptreemap_d));
+ if (!cachep_d) {
+ ip_set_printk("Unable to create ip_set_iptreemap_d slab cache");
+ goto outc;
@@ -5962,11 +6237,11 @@
+module_exit(ip_set_iptreemap_fini);
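A standalone sketch of the iptreemap idea spelled out in its header comment (toy code, not the module's helpers): the first three octets walk a three-level table down to one 256-bit bitmap per /24 and the last octet selects the bit, so once a /24 leaf exists each of its hosts costs a single bit. The shared full-bitmap pages and dirty-flag driven garbage collection of the real module are skipped here.

	#include <stdint.h>
	#include <stdlib.h>

	struct toy_leaf { uint32_t bitmap[8]; };        /* 256 bits: one per host */
	struct toy_l2   { struct toy_leaf *leaf[256]; };
	struct toy_l1   { struct toy_l2 *down[256]; };
	struct toy_map  { struct toy_l1 *down[256]; };

	static int toy_set(struct toy_map *m, uint32_t ip)
	{
		unsigned a = ip >> 24, b = (ip >> 16) & 0xFF;
		unsigned c = (ip >> 8) & 0xFF, d = ip & 0xFF;
		struct toy_leaf *leaf;

		if (!m->down[a] && !(m->down[a] = calloc(1, sizeof(*m->down[a]))))
			return -1;
		if (!m->down[a]->down[b] &&
		    !(m->down[a]->down[b] = calloc(1, sizeof(*m->down[a]->down[b]))))
			return -1;
		if (!m->down[a]->down[b]->leaf[c] &&
		    !(m->down[a]->down[b]->leaf[c] = calloc(1, sizeof(struct toy_leaf))))
			return -1;
		leaf = m->down[a]->down[b]->leaf[c];
		leaf->bitmap[d >> 5] |= 1u << (d & 31); /* last octet == bit number */
		return 0;
	}

	static int toy_test(const struct toy_map *m, uint32_t ip)
	{
		unsigned a = ip >> 24, b = (ip >> 16) & 0xFF;
		unsigned c = (ip >> 8) & 0xFF, d = ip & 0xFF;

		return m->down[a] && m->down[a]->down[b] &&
		       m->down[a]->down[b]->leaf[c] &&
		       (m->down[a]->down[b]->leaf[c]->bitmap[d >> 5] >> (d & 31)) & 1u;
	}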
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_macipmap.c
-@@ -0,0 +1,375 @@
+@@ -0,0 +1,164 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Martin Josefsson <gandalf@wlug.westbo.se>
-+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ * Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
@@ -5978,41 +6253,29 @@
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
+#include <linux/if_ether.h>
-+#include <linux/vmalloc.h>
+
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
+#include <linux/netfilter_ipv4/ip_set_macipmap.h>
+
+static int
-+testip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
++macipmap_utest(struct ip_set *set, const void *data, u_int32_t size,
++ ip_set_ip_t *hash_ip)
+{
-+ struct ip_set_macipmap *map = (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table = (struct ip_set_macip *) map->members;
-+ struct ip_set_req_macipmap *req = (struct ip_set_req_macipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_macipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_macipmap),
-+ size);
-+ return -EINVAL;
-+ }
++ const struct ip_set_macipmap *map = set->data;
++ const struct ip_set_macip *table = map->members;
++ const struct ip_set_req_macipmap *req = data;
+
+ if (req->ip < map->first_ip || req->ip > map->last_ip)
+ return -ERANGE;
+
+ *hash_ip = req->ip;
+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
-+ set->name, HIPQUAD(req->ip), HIPQUAD(*hash_ip));
-+ if (test_bit(IPSET_MACIP_ISSET,
-+ (void *) &table[req->ip - map->first_ip].flags)) {
++ set->name, HIPQUAD(req->ip), HIPQUAD(*hash_ip));
++ if (table[req->ip - map->first_ip].match) {
+ return (memcmp(req->ethernet,
+ &table[req->ip - map->first_ip].ethernet,
+ ETH_ALEN) == 0);
@@ -6022,44 +6285,29 @@
+}
+
+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table =
-+ (struct ip_set_macip *) map->members;
++macipmap_ktest(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ const struct ip_set_macipmap *map = set->data;
++ const struct ip_set_macip *table = map->members;
+ ip_set_ip_t ip;
-+
-+ ip = ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr);
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr);
-+#endif
++
++ ip = ipaddr(skb, flags[index]);
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return 0;
+
-+ *hash_ip = ip;
++ *hash_ip = ip;
+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
-+ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
-+ if (test_bit(IPSET_MACIP_ISSET,
-+ (void *) &table[ip - map->first_ip].flags)) {
++ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
++ if (table[ip - map->first_ip].match) {
+ /* Is mac pointer valid?
+ * If so, compare... */
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
+ return (skb_mac_header(skb) >= skb->head
+ && (skb_mac_header(skb) + ETH_HLEN) <= skb->data
-+#else
-+ return (skb->mac.raw >= skb->head
-+ && (skb->mac.raw + ETH_HLEN) <= skb->data
-+#endif
+ && (memcmp(eth_hdr(skb)->h_source,
+ &table[ip - map->first_ip].ethernet,
+ ETH_ALEN) == 0));
@@ -6070,278 +6318,94 @@
+
+/* returns 0 on success */
+static inline int
-+__addip(struct ip_set *set,
-+ ip_set_ip_t ip, unsigned char *ethernet, ip_set_ip_t *hash_ip)
++macipmap_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, const unsigned char *ethernet)
+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table =
-+ (struct ip_set_macip *) map->members;
++ struct ip_set_macipmap *map = set->data;
++ struct ip_set_macip *table = map->members;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
-+ if (test_and_set_bit(IPSET_MACIP_ISSET,
-+ (void *) &table[ip - map->first_ip].flags))
++ if (table[ip - map->first_ip].match)
+ return -EEXIST;
+
+ *hash_ip = ip;
+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
+ memcpy(&table[ip - map->first_ip].ethernet, ethernet, ETH_ALEN);
++ table[ip - map->first_ip].match = IPSET_MACIP_ISSET;
+ return 0;
+}
+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_macipmap *req =
-+ (struct ip_set_req_macipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_macipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_macipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __addip(set, req->ip, req->ethernet, hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ ip_set_ip_t ip;
-+
-+ ip = ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr);
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr);
-+#endif
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ if (!(skb_mac_header(skb) >= skb->head
-+ && (skb_mac_header(skb) + ETH_HLEN) <= skb->data))
-+#else
-+ if (!(skb->mac.raw >= skb->head
-+ && (skb->mac.raw + ETH_HLEN) <= skb->data))
-+#endif
++#define KADT_CONDITION \
++ if (!(skb_mac_header(skb) >= skb->head \
++ && (skb_mac_header(skb) + ETH_HLEN) <= skb->data))\
+ return -EINVAL;
+
-+ return __addip(set, ip, eth_hdr(skb)->h_source, hash_ip);
-+}
++UADT(macipmap, add, req->ethernet)
++KADT(macipmap, add, ipaddr, eth_hdr(skb)->h_source)
+
+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++macipmap_del(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table =
-+ (struct ip_set_macip *) map->members;
++ struct ip_set_macipmap *map = set->data;
++ struct ip_set_macip *table = map->members;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
-+ if (!test_and_clear_bit(IPSET_MACIP_ISSET,
-+ (void *)&table[ip - map->first_ip].flags))
++ if (!table[ip - map->first_ip].match)
+ return -EEXIST;
+
+ *hash_ip = ip;
++ table[ip - map->first_ip].match = 0;
+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
+ return 0;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_macipmap *req =
-+ (struct ip_set_req_macipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_macipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_macipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++#undef KADT_CONDITION
++#define KADT_CONDITION
+
-+static inline size_t members_size(ip_set_id_t from, ip_set_id_t to)
-+{
-+ return (size_t)((to - from + 1) * sizeof(struct ip_set_macip));
-+}
++UADT(macipmap, del)
++KADT(macipmap, del, ipaddr)
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static inline int
++__macipmap_create(const struct ip_set_req_macipmap_create *req,
++ struct ip_set_macipmap *map)
+{
-+ int newbytes;
-+ struct ip_set_req_macipmap_create *req =
-+ (struct ip_set_req_macipmap_create *) data;
-+ struct ip_set_macipmap *map;
-+
-+ if (size != sizeof(struct ip_set_req_macipmap_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_macipmap_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ DP("from %u.%u.%u.%u to %u.%u.%u.%u",
-+ HIPQUAD(req->from), HIPQUAD(req->to));
-+
-+ if (req->from > req->to) {
-+ DP("bad ip range");
-+ return -ENOEXEC;
-+ }
-+
+ if (req->to - req->from > MAX_RANGE) {
-+ ip_set_printk("range too big (max %d addresses)",
-+ MAX_RANGE+1);
++ ip_set_printk("range too big, %d elements (max %d)",
++ req->to - req->from + 1, MAX_RANGE+1);
+ return -ENOEXEC;
+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_macipmap), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_macipmap));
-+ return -ENOMEM;
-+ }
+ map->flags = req->flags;
-+ map->first_ip = req->from;
-+ map->last_ip = req->to;
-+ newbytes = members_size(map->first_ip, map->last_ip);
-+ map->members = ip_set_malloc(newbytes);
-+ DP("members: %u %p", newbytes, map->members);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", newbytes);
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+ memset(map->members, 0, newbytes);
-+
-+ set->data = map;
-+ return 0;
-+}
-+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+
-+ ip_set_free(map->members, members_size(map->first_ip, map->last_ip));
-+ kfree(map);
-+
-+ set->data = NULL;
++ return (req->to - req->from + 1) * sizeof(struct ip_set_macip);
+}
+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ memset(map->members, 0, members_size(map->first_ip, map->last_ip));
-+}
++BITMAP_CREATE(macipmap)
++BITMAP_DESTROY(macipmap)
++BITMAP_FLUSH(macipmap)
+
-+static void list_header(const struct ip_set *set, void *data)
++static inline void
++__macipmap_list_header(const struct ip_set_macipmap *map,
++ struct ip_set_req_macipmap_create *header)
+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ struct ip_set_req_macipmap_create *header =
-+ (struct ip_set_req_macipmap_create *) data;
-+
-+ DP("list_header %x %x %u", map->first_ip, map->last_ip,
-+ map->flags);
-+
-+ header->from = map->first_ip;
-+ header->to = map->last_ip;
+ header->flags = map->flags;
+}
+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+
-+ DP("%u", members_size(map->first_ip, map->last_ip));
-+ return members_size(map->first_ip, map->last_ip);
-+}
-+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+
-+ int bytes = members_size(map->first_ip, map->last_ip);
-+
-+ DP("members: %u %p", bytes, map->members);
-+ memcpy(data, map->members, bytes);
-+}
++BITMAP_LIST_HEADER(macipmap)
++BITMAP_LIST_MEMBERS_SIZE(macipmap)
++BITMAP_LIST_MEMBERS(macipmap)
+
-+static struct ip_set_type ip_set_macipmap = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_macipmap),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_macipmap_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_TYPE(macipmap, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("macipmap type of IP sets");
+
-+static int __init ip_set_macipmap_init(void)
-+{
-+ init_max_malloc_size();
-+ return ip_set_register_set_type(&ip_set_macipmap);
-+}
-+
-+static void __exit ip_set_macipmap_fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_macipmap);
-+}
-+
-+module_init(ip_set_macipmap_init);
-+module_exit(ip_set_macipmap_fini);
++REGISTER_MODULE(macipmap)
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_nethash.c
-@@ -0,0 +1,497 @@
-+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+@@ -0,0 +1,225 @@
++/* Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
@@ -6351,63 +6415,56 @@
+/* Kernel module implementing a cidr nethash set */
+
+#include <linux/module.h>
++#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+#include <linux/jhash.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
-+#include <linux/vmalloc.h>
+#include <linux/random.h>
+
+#include <net/ip.h>
+
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
+#include <linux/netfilter_ipv4/ip_set_nethash.h>
+
+static int limit = MAX_RANGE;
+
+static inline __u32
-+jhash_ip(const struct ip_set_nethash *map, uint16_t i, ip_set_ip_t ip)
-+{
-+ return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
-+}
-+
-+static inline __u32
-+hash_id_cidr(struct ip_set_nethash *map,
-+ ip_set_ip_t ip,
-+ unsigned char cidr,
-+ ip_set_ip_t *hash_ip)
++nethash_id_cidr(const struct ip_set_nethash *map,
++ ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip,
++ uint8_t cidr)
+{
+ __u32 id;
+ u_int16_t i;
+ ip_set_ip_t *elem;
+
-+ *hash_ip = pack(ip, cidr);
-+
++ *hash_ip = pack_ip_cidr(ip, cidr);
++ if (!*hash_ip)
++ return MAX_RANGE;
++
+ for (i = 0; i < map->probes; i++) {
+ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
+ DP("hash key: %u", id);
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
+ if (*elem == *hash_ip)
+ return id;
++ /* No shortcut - there can be deleted entries. */
+ }
+ return UINT_MAX;
+}
+
+static inline __u32
-+hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++nethash_id(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++ const struct ip_set_nethash *map = set->data;
+ __u32 id = UINT_MAX;
+ int i;
+
+ for (i = 0; i < 30 && map->cidr[i]; i++) {
-+ id = hash_id_cidr(map, ip, map->cidr[i], hash_ip);
++ id = nethash_id_cidr(map, hash_ip, ip, map->cidr[i]);
+ if (id != UINT_MAX)
+ break;
+ }
@@ -6415,409 +6472,156 @@
+}
+
+static inline int
-+__testip_cidr(struct ip_set *set, ip_set_ip_t ip, unsigned char cidr,
-+ ip_set_ip_t *hash_ip)
++nethash_test_cidr(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, uint8_t cidr)
+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++ const struct ip_set_nethash *map = set->data;
+
-+ return (ip && hash_id_cidr(map, ip, cidr, hash_ip) != UINT_MAX);
++ return (nethash_id_cidr(map, hash_ip, ip, cidr) != UINT_MAX);
+}
+
+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++nethash_test(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
++ return (nethash_id(set, hash_ip, ip) != UINT_MAX);
+}
+
+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
++nethash_utest(struct ip_set *set, const void *data, u_int32_t size,
++ ip_set_ip_t *hash_ip)
+{
-+ struct ip_set_req_nethash *req =
-+ (struct ip_set_req_nethash *) data;
++ const struct ip_set_req_nethash *req = data;
+
-+ if (size != sizeof(struct ip_set_req_nethash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_nethash),
-+ size);
++ if (req->cidr <= 0 || req->cidr > 32)
+ return -EINVAL;
-+ }
-+ return (req->cidr == 32 ? __testip(set, req->ip, hash_ip)
-+ : __testip_cidr(set, req->ip, req->cidr, hash_ip));
++ return (req->cidr == 32 ? nethash_test(set, hash_ip, req->ip)
++ : nethash_test_cidr(set, hash_ip, req->ip, req->cidr));
+}
+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++#define KADT_CONDITION
++
++KADT(nethash, test, ipaddr)
+
+static inline int
-+__addip_base(struct ip_set_nethash *map, ip_set_ip_t ip)
++__nethash_add(struct ip_set_nethash *map, ip_set_ip_t *ip)
+{
+ __u32 probe;
+ u_int16_t i;
-+ ip_set_ip_t *elem;
-+
++ ip_set_ip_t *elem, *slot = NULL;
++
+ for (i = 0; i < map->probes; i++) {
-+ probe = jhash_ip(map, i, ip) % map->hashsize;
++ probe = jhash_ip(map, i, *ip) % map->hashsize;
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
-+ if (*elem == ip)
++ if (*elem == *ip)
+ return -EEXIST;
-+ if (!*elem) {
-+ *elem = ip;
-+ map->elements++;
-+ return 0;
-+ }
++ if (!(slot || *elem))
++ slot = elem;
++ /* There can be deleted entries, must check all slots */
++ }
++ if (slot) {
++ *slot = *ip;
++ map->elements++;
++ return 0;
+ }
+ /* Trigger rehashing */
+ return -EAGAIN;
+}
+
+static inline int
-+__addip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
-+ ip_set_ip_t *hash_ip)
-+{
-+ if (!ip || map->elements >= limit)
-+ return -ERANGE;
-+
-+ *hash_ip = pack(ip, cidr);
-+ DP("%u.%u.%u.%u/%u, %u.%u.%u.%u", HIPQUAD(ip), cidr, HIPQUAD(*hash_ip));
-+
-+ return __addip_base(map, *hash_ip);
-+}
-+
-+static void
-+update_cidr_sizes(struct ip_set_nethash *map, unsigned char cidr)
-+{
-+ unsigned char next;
-+ int i;
-+
-+ for (i = 0; i < 30 && map->cidr[i]; i++) {
-+ if (map->cidr[i] == cidr) {
-+ return;
-+ } else if (map->cidr[i] < cidr) {
-+ next = map->cidr[i];
-+ map->cidr[i] = cidr;
-+ cidr = next;
-+ }
-+ }
-+ if (i < 30)
-+ map->cidr[i] = cidr;
-+}
-+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
++nethash_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, uint8_t cidr)
+{
-+ struct ip_set_req_nethash *req =
-+ (struct ip_set_req_nethash *) data;
++ struct ip_set_nethash *map = set->data;
+ int ret;
-+
-+ if (size != sizeof(struct ip_set_req_nethash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_nethash),
-+ size);
++
++ if (map->elements >= limit || map->nets[cidr-1] == UINT16_MAX)
++ return -ERANGE;
++ if (cidr <= 0 || cidr >= 32)
+ return -EINVAL;
-+ }
-+ ret = __addip((struct ip_set_nethash *) set->data,
-+ req->ip, req->cidr, hash_ip);
-+
-+ if (ret == 0)
-+ update_cidr_sizes((struct ip_set_nethash *) set->data,
-+ req->cidr);
+
++ *hash_ip = pack_ip_cidr(ip, cidr);
++ DP("%u.%u.%u.%u/%u, %u.%u.%u.%u", HIPQUAD(ip), cidr, HIPQUAD(*hash_ip));
++ if (!*hash_ip)
++ return -ERANGE;
++
++ ret = __nethash_add(map, hash_ip);
++ if (ret == 0) {
++ if (!map->nets[cidr-1]++)
++ add_cidr_size(map->cidr, cidr);
++ map->elements++;
++ }
++
+ return ret;
+}
+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ int ret = -ERANGE;
-+ ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr);
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr);
-+#endif
++#undef KADT_CONDITION
++#define KADT_CONDITION \
++ struct ip_set_nethash *map = set->data; \
++ uint8_t cidr = map->cidr[0] ? map->cidr[0] : 31;
+
-+ if (map->cidr[0])
-+ ret = __addip(map, ip, map->cidr[0], hash_ip);
++UADT(nethash, add, req->cidr)
++KADT(nethash, add, ipaddr, cidr)
+
-+ return ret;
-+}
-+
-+static int retry(struct ip_set *set)
++static inline void
++__nethash_retry(struct ip_set_nethash *tmp, struct ip_set_nethash *map)
+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ ip_set_ip_t *elem;
-+ void *members;
-+ u_int32_t i, hashsize = map->hashsize;
-+ int res;
-+ struct ip_set_nethash *tmp;
-+
-+ if (map->resize == 0)
-+ return -ERANGE;
-+
-+ again:
-+ res = 0;
-+
-+ /* Calculate new parameters */
-+ hashsize += (hashsize * map->resize)/100;
-+ if (hashsize == map->hashsize)
-+ hashsize++;
-+
-+ ip_set_printk("rehashing of set %s triggered: "
-+ "hashsize grows from %u to %u",
-+ set->name, map->hashsize, hashsize);
-+
-+ tmp = kmalloc(sizeof(struct ip_set_nethash)
-+ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
-+ if (!tmp) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_nethash)
-+ + map->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
-+ if (!tmp->members) {
-+ DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
-+ kfree(tmp);
-+ return -ENOMEM;
-+ }
-+ tmp->hashsize = hashsize;
-+ tmp->elements = 0;
-+ tmp->probes = map->probes;
-+ tmp->resize = map->resize;
-+ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
-+ memcpy(tmp->cidr, map->cidr, 30 * sizeof(unsigned char));
-+
-+ write_lock_bh(&set->lock);
-+ map = (struct ip_set_nethash *) set->data; /* Play safe */
-+ for (i = 0; i < map->hashsize && res == 0; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ if (*elem)
-+ res = __addip_base(tmp, *elem);
-+ }
-+ if (res) {
-+ /* Failure, try again */
-+ write_unlock_bh(&set->lock);
-+ harray_free(tmp->members);
-+ kfree(tmp);
-+ goto again;
-+ }
-+
-+ /* Success at resizing! */
-+ members = map->members;
-+
-+ map->hashsize = tmp->hashsize;
-+ map->members = tmp->members;
-+ write_unlock_bh(&set->lock);
-+
-+ harray_free(members);
-+ kfree(tmp);
-+
-+ return 0;
++ memcpy(tmp->cidr, map->cidr, sizeof(tmp->cidr));
++ memcpy(tmp->nets, map->nets, sizeof(tmp->nets));
+}
+
++HASH_RETRY(nethash, ip_set_ip_t)
++
+static inline int
-+__delip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
-+ ip_set_ip_t *hash_ip)
++nethash_del(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, uint8_t cidr)
+{
++ struct ip_set_nethash *map = set->data;
+ ip_set_ip_t id, *elem;
+
-+ if (!ip)
-+ return -ERANGE;
-+
-+ id = hash_id_cidr(map, ip, cidr, hash_ip);
++ if (cidr <= 0 || cidr >= 32)
++ return -EINVAL;
++
++ id = nethash_id_cidr(map, hash_ip, ip, cidr);
+ if (id == UINT_MAX)
+ return -EEXIST;
-+
++
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
+ *elem = 0;
+ map->elements--;
++ if (!map->nets[cidr-1]--)
++ del_cidr_size(map->cidr, cidr);
+ return 0;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_nethash *req =
-+ (struct ip_set_req_nethash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_nethash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_nethash),
-+ size);
-+ return -EINVAL;
-+ }
-+ /* TODO: no garbage collection in map->cidr */
-+ return __delip((struct ip_set_nethash *) set->data,
-+ req->ip, req->cidr, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ int ret = -ERANGE;
-+ ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr);
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr);
-+#endif
-+
-+ if (map->cidr[0])
-+ ret = __delip(map, ip, map->cidr[0], hash_ip);
-+
-+ return ret;
-+}
++UADT(nethash, del, req->cidr)
++KADT(nethash, del, ipaddr, cidr)
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static inline int
++__nethash_create(const struct ip_set_req_nethash_create *req,
++ struct ip_set_nethash *map)
+{
-+ struct ip_set_req_nethash_create *req =
-+ (struct ip_set_req_nethash_create *) data;
-+ struct ip_set_nethash *map;
-+ uint16_t i;
-+
-+ if (size != sizeof(struct ip_set_req_nethash_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_nethash_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ if (req->hashsize < 1) {
-+ ip_set_printk("hashsize too small");
-+ return -ENOEXEC;
-+ }
-+ if (req->probes < 1) {
-+ ip_set_printk("probes too small");
-+ return -ENOEXEC;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_nethash)
-+ + req->probes * sizeof(uint32_t), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_nethash)
-+ + req->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ for (i = 0; i < req->probes; i++)
-+ get_random_bytes(((uint32_t *) map->initval)+i, 4);
-+ map->elements = 0;
-+ map->hashsize = req->hashsize;
-+ map->probes = req->probes;
-+ map->resize = req->resize;
-+ memset(map->cidr, 0, 30 * sizeof(unsigned char));
-+ map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+
-+ set->data = map;
++ memset(map->cidr, 0, sizeof(map->cidr));
++ memset(map->nets, 0, sizeof(map->nets));
++
+ return 0;
+}
+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+
-+ harray_free(map->members);
-+ kfree(map);
-+
-+ set->data = NULL;
-+}
-+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
-+ memset(map->cidr, 0, 30 * sizeof(unsigned char));
-+ map->elements = 0;
-+}
-+
-+static void list_header(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ struct ip_set_req_nethash_create *header =
-+ (struct ip_set_req_nethash_create *) data;
-+
-+ header->hashsize = map->hashsize;
-+ header->probes = map->probes;
-+ header->resize = map->resize;
-+}
++HASH_CREATE(nethash, ip_set_ip_t)
++HASH_DESTROY(nethash)
+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++HASH_FLUSH_CIDR(nethash, ip_set_ip_t)
+
-+ return (map->hashsize * sizeof(ip_set_ip_t));
++static inline void
++__nethash_list_header(const struct ip_set_nethash *map,
++ struct ip_set_req_nethash_create *header)
++{
+}
+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ ip_set_ip_t i, *elem;
++HASH_LIST_HEADER(nethash)
++HASH_LIST_MEMBERS_SIZE(nethash, ip_set_ip_t)
++HASH_LIST_MEMBERS(nethash, ip_set_ip_t)
+
-+ for (i = 0; i < map->hashsize; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ ((ip_set_ip_t *)data)[i] = *elem;
-+ }
-+}
-+
-+static struct ip_set_type ip_set_nethash = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_nethash),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .retry = &retry,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_nethash_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_RTYPE(nethash, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -6825,23 +6629,11 @@
+module_param(limit, int, 0600);
+MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
+
-+static int __init ip_set_nethash_init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_nethash);
-+}
-+
-+static void __exit ip_set_nethash_fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_nethash);
-+}
-+
-+module_init(ip_set_nethash_init);
-+module_exit(ip_set_nethash_fini);
++REGISTER_MODULE(nethash)
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_portmap.c
-@@ -0,0 +1,346 @@
-+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+@@ -0,0 +1,114 @@
++/* Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
@@ -6855,9 +6647,6 @@
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
@@ -6866,330 +6655,434 @@
+#include <net/ip.h>
+
+#include <linux/netfilter_ipv4/ip_set_portmap.h>
++#include <linux/netfilter_ipv4/ip_set_getport.h>
+
-+/* We must handle non-linear skbs */
-+static inline ip_set_ip_t
-+get_port(const struct sk_buff *skb, u_int32_t flags)
++static inline int
++portmap_test(const struct ip_set *set, ip_set_ip_t *hash_port,
++ ip_set_ip_t port)
+{
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ struct iphdr *iph = ip_hdr(skb);
-+#else
-+ struct iphdr *iph = skb->nh.iph;
-+#endif
-+ u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
-+ switch (iph->protocol) {
-+ case IPPROTO_TCP: {
-+ struct tcphdr tcph;
-+
-+ /* See comments at tcp_match in ip_tables.c */
-+ if (offset)
-+ return INVALID_PORT;
++ const struct ip_set_portmap *map = set->data;
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
-+#else
-+ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &tcph, sizeof(tcph)) < 0)
-+#endif
-+ /* No choice either */
-+ return INVALID_PORT;
++ if (port < map->first_ip || port > map->last_ip)
++ return -ERANGE;
++
++ *hash_port = port;
++ DP("set: %s, port:%u, %u", set->name, port, *hash_port);
++ return !!test_bit(port - map->first_ip, map->members);
++}
+
-+ return ntohs(flags & IPSET_SRC ?
-+ tcph.source : tcph.dest);
-+ }
-+ case IPPROTO_UDP: {
-+ struct udphdr udph;
++#define KADT_CONDITION \
++ if (ip == INVALID_PORT) \
++ return 0;
+
-+ if (offset)
-+ return INVALID_PORT;
++UADT(portmap, test)
++KADT(portmap, test, get_port)
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
-+#else
-+ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &udph, sizeof(udph)) < 0)
-+#endif
-+ /* No choice either */
-+ return INVALID_PORT;
++static inline int
++portmap_add(struct ip_set *set, ip_set_ip_t *hash_port, ip_set_ip_t port)
++{
++ struct ip_set_portmap *map = set->data;
+
-+ return ntohs(flags & IPSET_SRC ?
-+ udph.source : udph.dest);
-+ }
-+ default:
-+ return INVALID_PORT;
-+ }
++ if (port < map->first_ip || port > map->last_ip)
++ return -ERANGE;
++ if (test_and_set_bit(port - map->first_ip, map->members))
++ return -EEXIST;
++
++ *hash_port = port;
++ DP("port %u", port);
++ return 0;
+}
+
++UADT(portmap, add)
++KADT(portmap, add, get_port)
++
+static inline int
-+__testport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
++portmap_del(struct ip_set *set, ip_set_ip_t *hash_port, ip_set_ip_t port)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++ struct ip_set_portmap *map = set->data;
+
-+ if (port < map->first_port || port > map->last_port)
++ if (port < map->first_ip || port > map->last_ip)
+ return -ERANGE;
-+
++ if (!test_and_clear_bit(port - map->first_ip, map->members))
++ return -EEXIST;
++
+ *hash_port = port;
-+ DP("set: %s, port:%u, %u", set->name, port, *hash_port);
-+ return !!test_bit(port - map->first_port, map->members);
++ DP("port %u", port);
++ return 0;
+}
+
-+static int
-+testport(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_port)
-+{
-+ struct ip_set_req_portmap *req =
-+ (struct ip_set_req_portmap *) data;
++UADT(portmap, del)
++KADT(portmap, del, get_port)
+
-+ if (size != sizeof(struct ip_set_req_portmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_portmap),
-+ size);
-+ return -EINVAL;
++static inline int
++__portmap_create(const struct ip_set_req_portmap_create *req,
++ struct ip_set_portmap *map)
++{
++ if (req->to - req->from > MAX_RANGE) {
++ ip_set_printk("range too big, %d elements (max %d)",
++ req->to - req->from + 1, MAX_RANGE+1);
++ return -ENOEXEC;
+ }
-+ return __testport(set, req->port, hash_port);
++ return bitmap_bytes(req->from, req->to);
+}
+
-+static int
-+testport_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_port,
-+ const u_int32_t *flags,
-+ unsigned char index)
++BITMAP_CREATE(portmap)
++BITMAP_DESTROY(portmap)
++BITMAP_FLUSH(portmap)
++
++static inline void
++__portmap_list_header(const struct ip_set_portmap *map,
++ struct ip_set_req_portmap_create *header)
+{
-+ int res;
-+ ip_set_ip_t port = get_port(skb, flags[index]);
++}
+
-+ DP("flag %s port %u", flags[index] & IPSET_SRC ? "SRC" : "DST", port);
-+ if (port == INVALID_PORT)
-+ return 0;
++BITMAP_LIST_HEADER(portmap)
++BITMAP_LIST_MEMBERS_SIZE(portmap)
++BITMAP_LIST_MEMBERS(portmap)
+
-+ res = __testport(set, port, hash_port);
++IP_SET_TYPE(portmap, IPSET_TYPE_PORT | IPSET_DATA_SINGLE)
+
-+ return (res < 0 ? 0 : res);
-+}
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("portmap type of IP sets");
+
-+static inline int
-+__addport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
-+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++REGISTER_MODULE(portmap)
+--- /dev/null
++++ b/net/ipv4/netfilter/ip_set_setlist.c
+@@ -0,0 +1,330 @@
++/* Copyright (C) 2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
+
-+ if (port < map->first_port || port > map->last_port)
-+ return -ERANGE;
-+ if (test_and_set_bit(port - map->first_port, map->members))
-+ return -EEXIST;
++/* Kernel module implementing an IP set type: the setlist type */
+
-+ *hash_port = port;
-+ DP("port %u", port);
-+ return 0;
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/errno.h>
++
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
++#include <linux/netfilter_ipv4/ip_set_setlist.h>
++
++/*
++ * before ==> the named set comes immediately before the reference set: (index, ref)
++ * after  ==> the reference set comes immediately before the named set: (ref, index)
++ */
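++/*
++ * For example, if the list holds the sets [A, B, C] (names here are purely
++ * illustrative), then testing "A before B" and "B after A" both match,
++ * while "A before C" does not: only directly adjacent entries count.
++ */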
++
++static inline int
++next_index_eq(const struct ip_set_setlist *map, int i, ip_set_id_t index)
++{
++ return i < map->size && map->index[i] == index;
+}
+
+static int
-+addport(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_port)
-+{
-+ struct ip_set_req_portmap *req =
-+ (struct ip_set_req_portmap *) data;
++setlist_utest(struct ip_set *set, const void *data, u_int32_t size,
++ ip_set_ip_t *hash_ip)
++{
++ const struct ip_set_setlist *map = set->data;
++ const struct ip_set_req_setlist *req = data;
++ ip_set_id_t index, ref = IP_SET_INVALID_ID;
++ int i, res = 0;
++ struct ip_set *s;
++
++ if (req->before && req->ref[0] == '\0')
++ return 0;
+
-+ if (size != sizeof(struct ip_set_req_portmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_portmap),
-+ size);
-+ return -EINVAL;
++ index = __ip_set_get_byname(req->name, &s);
++ if (index == IP_SET_INVALID_ID)
++ return 0;
++ if (req->ref[0] != '\0') {
++ ref = __ip_set_get_byname(req->ref, &s);
++ if (ref == IP_SET_INVALID_ID)
++ goto finish;
++ }
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID; i++) {
++ if (req->before && map->index[i] == index) {
++ res = next_index_eq(map, i + 1, ref);
++ break;
++ } else if (!req->before) {
++ if ((ref == IP_SET_INVALID_ID
++ && map->index[i] == index)
++ || (map->index[i] == ref
++ && next_index_eq(map, i + 1, index))) {
++ res = 1;
++ break;
++ }
++ }
+ }
-+ return __addport(set, req->port, hash_port);
++ if (ref != IP_SET_INVALID_ID)
++ __ip_set_put_byindex(ref);
++finish:
++ __ip_set_put_byindex(index);
++ return res;
+}
+
+static int
-+addport_kernel(struct ip_set *set,
++setlist_ktest(struct ip_set *set,
+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_port,
++ ip_set_ip_t *hash_ip,
+ const u_int32_t *flags,
+ unsigned char index)
+{
-+ ip_set_ip_t port = get_port(skb, flags[index]);
-+
-+ if (port == INVALID_PORT)
-+ return -EINVAL;
-+
-+ return __addport(set, port, hash_port);
++ struct ip_set_setlist *map = set->data;
++ int i, res = 0;
++
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID
++ && res == 0; i++)
++ res = ip_set_testip_kernel(map->index[i], skb, flags);
++ return res;
+}
+
+static inline int
-+__delport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
++insert_setlist(struct ip_set_setlist *map, int i, ip_set_id_t index)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++ ip_set_id_t tmp;
++ int j;
+
-+ if (port < map->first_port || port > map->last_port)
++ DP("i: %u, last %u\n", i, map->index[map->size - 1]);
++ if (i >= map->size || map->index[map->size - 1] != IP_SET_INVALID_ID)
+ return -ERANGE;
-+ if (!test_and_clear_bit(port - map->first_port, map->members))
-+ return -EEXIST;
-+
-+ *hash_port = port;
-+ DP("port %u", port);
++
++ for (j = i; j < map->size
++ && index != IP_SET_INVALID_ID; j++) {
++ tmp = map->index[j];
++ map->index[j] = index;
++ index = tmp;
++ }
+ return 0;
+}
+
+static int
-+delport(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_port)
++setlist_uadd(struct ip_set *set, const void *data, u_int32_t size,
++ ip_set_ip_t *hash_ip)
+{
-+ struct ip_set_req_portmap *req =
-+ (struct ip_set_req_portmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_portmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_portmap),
-+ size);
++ struct ip_set_setlist *map = set->data;
++ const struct ip_set_req_setlist *req = data;
++ ip_set_id_t index, ref = IP_SET_INVALID_ID;
++ int i, res = -ERANGE;
++ struct ip_set *s;
++
++ if (req->before && req->ref[0] == '\0')
+ return -EINVAL;
++
++ index = __ip_set_get_byname(req->name, &s);
++ if (index == IP_SET_INVALID_ID)
++ return -EEXIST;
++ /* "Loop detection": a setlist type set must not be added to a setlist */
++ if (strcmp(s->type->typename, "setlist") == 0)
++ goto finish;
++
++ if (req->ref[0] != '\0') {
++ ref = __ip_set_get_byname(req->ref, &s);
++ if (ref == IP_SET_INVALID_ID) {
++ res = -EEXIST;
++ goto finish;
++ }
+ }
-+ return __delport(set, req->port, hash_port);
++ for (i = 0; i < map->size; i++) {
++ if (map->index[i] != ref)
++ continue;
++ if (req->before)
++ res = insert_setlist(map, i, index);
++ else
++ res = insert_setlist(map,
++ ref == IP_SET_INVALID_ID ? i : i + 1,
++ index);
++ break;
++ }
++ if (ref != IP_SET_INVALID_ID)
++ __ip_set_put_byindex(ref);
++ /* In case of success, we keep the reference to the set */
++finish:
++ if (res != 0)
++ __ip_set_put_byindex(index);
++ return res;
+}
+
+static int
-+delport_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_port,
-+ const u_int32_t *flags,
-+ unsigned char index)
++setlist_kadd(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
+{
-+ ip_set_ip_t port = get_port(skb, flags[index]);
-+
-+ if (port == INVALID_PORT)
-+ return -EINVAL;
-+
-+ return __delport(set, port, hash_port);
++ struct ip_set_setlist *map = set->data;
++ int i, res = -EINVAL;
++
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID
++ && res != 0; i++)
++ res = ip_set_addip_kernel(map->index[i], skb, flags);
++ return res;
+}
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static inline int
++unshift_setlist(struct ip_set_setlist *map, int i)
+{
-+ int newbytes;
-+ struct ip_set_req_portmap_create *req =
-+ (struct ip_set_req_portmap_create *) data;
-+ struct ip_set_portmap *map;
++ int j;
++
++ for (j = i; j < map->size - 1; j++)
++ map->index[j] = map->index[j+1];
++ map->index[map->size-1] = IP_SET_INVALID_ID;
++ return 0;
++}
+
-+ if (size != sizeof(struct ip_set_req_portmap_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_portmap_create),
-+ size);
++static int
++setlist_udel(struct ip_set *set, const void *data, u_int32_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_setlist *map = set->data;
++ const struct ip_set_req_setlist *req = data;
++ ip_set_id_t index, ref = IP_SET_INVALID_ID;
++ int i, res = -EEXIST;
++ struct ip_set *s;
++
++ if (req->before && req->ref[0] == '\0')
+ return -EINVAL;
-+ }
+
-+ DP("from %u to %u", req->from, req->to);
-+
-+ if (req->from > req->to) {
-+ DP("bad port range");
-+ return -ENOEXEC;
++ index = __ip_set_get_byname(req->name, &s);
++ if (index == IP_SET_INVALID_ID)
++ return -EEXIST;
++ if (req->ref[0] != '\0') {
++ ref = __ip_set_get_byname(req->ref, &s);
++ if (ref == IP_SET_INVALID_ID)
++ goto finish;
++ }
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID; i++) {
++ if (req->before) {
++ if (map->index[i] == index
++ && next_index_eq(map, i + 1, ref)) {
++ res = unshift_setlist(map, i);
++ break;
++ }
++ } else if (ref == IP_SET_INVALID_ID) {
++ if (map->index[i] == index) {
++ res = unshift_setlist(map, i);
++ break;
++ }
++ } else if (map->index[i] == ref
++ && next_index_eq(map, i + 1, index)) {
++ res = unshift_setlist(map, i + 1);
++ break;
++ }
+ }
++ if (ref != IP_SET_INVALID_ID)
++ __ip_set_put_byindex(ref);
++finish:
++ __ip_set_put_byindex(index);
++ /* In case of success, release the reference to the set */
++ if (res == 0)
++ __ip_set_put_byindex(index);
++ return res;
++}
+
-+ if (req->to - req->from > MAX_RANGE) {
-+ ip_set_printk("range too big (max %d ports)",
-+ MAX_RANGE+1);
-+ return -ENOEXEC;
-+ }
++static int
++setlist_kdel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ struct ip_set_setlist *map = set->data;
++ int i, res = -EINVAL;
++
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID
++ && res != 0; i++)
++ res = ip_set_delip_kernel(map->index[i], skb, flags);
++ return res;
++}
+
-+ map = kmalloc(sizeof(struct ip_set_portmap), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_portmap));
-+ return -ENOMEM;
-+ }
-+ map->first_port = req->from;
-+ map->last_port = req->to;
-+ newbytes = bitmap_bytes(req->from, req->to);
-+ map->members = kmalloc(newbytes, GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", newbytes);
-+ kfree(map);
++static int
++setlist_create(struct ip_set *set, const void *data, u_int32_t size)
++{
++ struct ip_set_setlist *map;
++ const struct ip_set_req_setlist_create *req = data;
++ int i;
++
++ map = kmalloc(sizeof(struct ip_set_setlist) +
++ req->size * sizeof(ip_set_id_t), GFP_KERNEL);
++ if (!map)
+ return -ENOMEM;
-+ }
-+ memset(map->members, 0, newbytes);
-+
++ map->size = req->size;
++ for (i = 0; i < map->size; i++)
++ map->index[i] = IP_SET_INVALID_ID;
++
+ set->data = map;
+ return 0;
-+}
++}
+
-+static void destroy(struct ip_set *set)
++static void
++setlist_destroy(struct ip_set *set)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++ struct ip_set_setlist *map = set->data;
++ int i;
++
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID; i++)
++ __ip_set_put_byindex(map->index[i]);
+
-+ kfree(map->members);
+ kfree(map);
-+
+ set->data = NULL;
+}
+
-+static void flush(struct ip_set *set)
++static void
++setlist_flush(struct ip_set *set)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+ memset(map->members, 0, bitmap_bytes(map->first_port, map->last_port));
++ struct ip_set_setlist *map = set->data;
++ int i;
++
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID; i++) {
++ __ip_set_put_byindex(map->index[i]);
++ map->index[i] = IP_SET_INVALID_ID;
++ }
+}
+
-+static void list_header(const struct ip_set *set, void *data)
++static void
++setlist_list_header(const struct ip_set *set, void *data)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+ struct ip_set_req_portmap_create *header =
-+ (struct ip_set_req_portmap_create *) data;
-+
-+ DP("list_header %u %u", map->first_port, map->last_port);
-+
-+ header->from = map->first_port;
-+ header->to = map->last_port;
++ const struct ip_set_setlist *map = set->data;
++ struct ip_set_req_setlist_create *header = data;
++
++ header->size = map->size;
+}
+
-+static int list_members_size(const struct ip_set *set)
++static int
++setlist_list_members_size(const struct ip_set *set)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+
-+ return bitmap_bytes(map->first_port, map->last_port);
++ const struct ip_set_setlist *map = set->data;
++
++ return map->size * sizeof(ip_set_id_t);
+}
+
-+static void list_members(const struct ip_set *set, void *data)
++static void
++setlist_list_members(const struct ip_set *set, void *data)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+ int bytes = bitmap_bytes(map->first_port, map->last_port);
-+
-+ memcpy(data, map->members, bytes);
++ struct ip_set_setlist *map = set->data;
++ int i;
++
++ for (i = 0; i < map->size; i++)
++ *((ip_set_id_t *)data + i) = ip_set_id(map->index[i]);
+}
+
-+static struct ip_set_type ip_set_portmap = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_PORT | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_portmap),
-+ .addip = &addport,
-+ .addip_kernel = &addport_kernel,
-+ .delip = &delport,
-+ .delip_kernel = &delport_kernel,
-+ .testip = &testport,
-+ .testip_kernel = &testport_kernel,
-+ .header_size = sizeof(struct ip_set_req_portmap_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_TYPE(setlist, IPSET_TYPE_SETNAME | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("portmap type of IP sets");
++MODULE_DESCRIPTION("setlist type of IP sets");
+
-+static int __init ip_set_portmap_init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_portmap);
-+}
-+
-+static void __exit ip_set_portmap_fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_portmap);
-+}
-+
-+module_init(ip_set_portmap_init);
-+module_exit(ip_set_portmap_fini);
++REGISTER_MODULE(setlist)
--- /dev/null
+++ b/net/ipv4/netfilter/ipt_set.c
-@@ -0,0 +1,160 @@
+@@ -0,0 +1,238 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Martin Josefsson <gandalf@wlug.westbo.se>
@@ -7207,7 +7100,14 @@
+#include <linux/skbuff.h>
+#include <linux/version.h>
+
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
+#include <linux/netfilter_ipv4/ip_tables.h>
++#define xt_register_match ipt_register_match
++#define xt_unregister_match ipt_unregister_match
++#define xt_match ipt_match
++#else
++#include <linux/netfilter/x_tables.h>
++#endif
+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/netfilter_ipv4/ipt_set.h>
+
@@ -7215,64 +7115,125 @@
+match_set(const struct ipt_set_info *info,
+ const struct sk_buff *skb,
+ int inv)
-+{
++{
+ if (ip_set_testip_kernel(info->index, skb, info->flags))
+ inv = !inv;
+ return inv;
+}
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+static bool
-+#else
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++static int
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *matchinfo,
++ int offset,
++ const void *hdr,
++ u_int16_t datalen,
++ int *hotdrop)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
++static int
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *matchinfo,
++ int offset,
++ int *hotdrop)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static int
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *matchinfo,
++ int offset,
++ unsigned int protoff,
++ int *hotdrop)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
+static int
-+#endif
+match(const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+ const struct xt_match *match,
-+#endif
+ const void *matchinfo,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ int offset, unsigned int protoff, bool *hotdrop)
-+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-+ int offset, unsigned int protoff, int *hotdrop)
-+#else
-+ int offset, int *hotdrop)
++ int offset,
++ unsigned int protoff,
++ int *hotdrop)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++static bool
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const struct xt_match *match,
++ const void *matchinfo,
++ int offset,
++ unsigned int protoff,
++ bool *hotdrop)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
++static bool
++match(const struct sk_buff *skb,
++ const struct xt_match_param *par)
+#endif
+{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+ const struct ipt_set_info_match *info = matchinfo;
-+
++#else
++ const struct ipt_set_info_match *info = par->matchinfo;
++#endif
++
+ return match_set(&info->match_set,
+ skb,
+ info->match_set.flags[0] & IPSET_MATCH_INV);
+}
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+bool
-+#else
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
+static int
-+#endif
+checkentry(const char *tablename,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-+ const void *inf,
-+#else
+ const struct ipt_ip *ip,
-+#endif
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++ void *matchinfo,
++ unsigned int matchsize,
++ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static int
++checkentry(const char *tablename,
++ const void *inf,
++ void *matchinfo,
++ unsigned int matchsize,
++ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++static int
++checkentry(const char *tablename,
++ const void *inf,
+ const struct xt_match *match,
-+#endif
+ void *matchinfo,
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+ unsigned int matchsize,
-+#endif
+ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
++static int
++checkentry(const char *tablename,
++ const void *inf,
++ const struct xt_match *match,
++ void *matchinfo,
++ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++static bool
++checkentry(const char *tablename,
++ const void *inf,
++ const struct xt_match *match,
++ void *matchinfo,
++ unsigned int hook_mask)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
++static bool
++checkentry(const struct xt_mtchk_param *par)
++#endif
+{
-+ struct ipt_set_info_match *info =
-+ (struct ipt_set_info_match *) matchinfo;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++ struct ipt_set_info_match *info = matchinfo;
++#else
++ struct ipt_set_info_match *info = par->matchinfo;
++#endif
+ ip_set_id_t index;
+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
+ if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
+ ip_set_printk("invalid matchsize %d", matchsize);
+ return 0;
@@ -7280,7 +7241,7 @@
+#endif
+
+ index = ip_set_get_byindex(info->match_set.index);
-+
++
+ if (index == IP_SET_INVALID_ID) {
+ ip_set_printk("Cannot find set identified by id %u to match",
+ info->match_set.index);
@@ -7294,65 +7255,75 @@
+ return 1;
+}
+
-+static void destroy(
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-+ const struct xt_match *match,
-+#endif
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ void *matchinfo, unsigned int matchsize)
-+#else
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static void destroy(void *matchinfo,
++ unsigned int matchsize)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++static void destroy(const struct xt_match *match,
++ void *matchinfo,
++ unsigned int matchsize)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++static void destroy(const struct xt_match *match,
+ void *matchinfo)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
++static void destroy(const struct xt_mtdtor_param *par)
+#endif
+{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+ struct ipt_set_info_match *info = matchinfo;
++#else
++ struct ipt_set_info_match *info = par->matchinfo;
++#endif
+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
+ if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
+ ip_set_printk("invalid matchsize %d", matchsize);
+ return;
+ }
+#endif
-+ ip_set_put(info->match_set.index);
++ ip_set_put_byindex(info->match_set.index);
+}
+
-+static struct ipt_match set_match = {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static struct xt_match set_match = {
++ .name = "set",
++ .match = &match,
++ .checkentry = &checkentry,
++ .destroy = &destroy,
++ .me = THIS_MODULE
++};
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17) */
++static struct xt_match set_match = {
+ .name = "set",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
+ .family = AF_INET,
-+#endif
+ .match = &match,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+ .matchsize = sizeof(struct ipt_set_info_match),
-+#endif
+ .checkentry = &checkentry,
+ .destroy = &destroy,
+ .me = THIS_MODULE
+};
++#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("iptables IP set match module");
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+#define ipt_register_match xt_register_match
-+#define ipt_unregister_match xt_unregister_match
-+#endif
-+
+static int __init ipt_ipset_init(void)
+{
-+ return ipt_register_match(&set_match);
++ return xt_register_match(&set_match);
+}
+
+static void __exit ipt_ipset_fini(void)
+{
-+ ipt_unregister_match(&set_match);
++ xt_unregister_match(&set_match);
+}
+
+module_init(ipt_ipset_init);
+module_exit(ipt_ipset_fini);
--- /dev/null
+++ b/net/ipv4/netfilter/ipt_SET.c
-@@ -0,0 +1,179 @@
+@@ -0,0 +1,242 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Martin Josefsson <gandalf@wlug.westbo.se>
@@ -7365,45 +7336,75 @@
+
+/* ipt_SET.c - netfilter target to manipulate IP sets */
+
-+#include <linux/types.h>
-+#include <linux/ip.h>
-+#include <linux/timer.h>
+#include <linux/module.h>
-+#include <linux/netfilter.h>
-+#include <linux/netdevice.h>
-+#include <linux/if.h>
-+#include <linux/inetdevice.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
+#include <linux/version.h>
-+#include <net/protocol.h>
-+#include <net/checksum.h>
++
+#include <linux/netfilter_ipv4.h>
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
+#include <linux/netfilter_ipv4/ip_tables.h>
++#define xt_register_target ipt_register_target
++#define xt_unregister_target ipt_unregister_target
++#define xt_target ipt_target
++#define XT_CONTINUE IPT_CONTINUE
++#else
++#include <linux/netfilter/x_tables.h>
++#endif
+#include <linux/netfilter_ipv4/ipt_set.h>
+
+static unsigned int
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
-+target(struct sk_buff *skb,
-+#else
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++target(struct sk_buff **pskb,
++ unsigned int hooknum,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *targinfo,
++ void *userinfo)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++target(struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ unsigned int hooknum,
++ const void *targinfo,
++ void *userinfo)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+target(struct sk_buff **pskb,
-+#endif
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int hooknum,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+ const struct xt_target *target,
-+#endif
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+ const void *targinfo,
+ void *userinfo)
-+#else
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
++target(struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ unsigned int hooknum,
++ const struct xt_target *target,
++ const void *targinfo)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++target(struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ unsigned int hooknum,
++ const struct xt_target *target,
+ const void *targinfo)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
++target(struct sk_buff *skb,
++ const struct xt_target_param *par)
+#endif
+{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+ const struct ipt_set_info_target *info = targinfo;
++#else
++ const struct ipt_set_info_target *info = par->targinfo;
++#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+ struct sk_buff *skb = *pskb;
+#endif
+
++
+ if (info->add_set.index != IP_SET_INVALID_ID)
+ ip_set_addip_kernel(info->add_set.index,
+ skb,
@@ -7413,34 +7414,58 @@
+ skb,
+ info->del_set.flags);
+
-+ return IPT_CONTINUE;
++ return XT_CONTINUE;
+}
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+static bool
-+#else
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
+static int
-+#endif
+checkentry(const char *tablename,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-+ const void *e,
-+#else
+ const struct ipt_entry *e,
-+#endif
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++ void *targinfo,
++ unsigned int targinfosize,
++ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static int
++checkentry(const char *tablename,
++ const void *e,
++ void *targinfo,
++ unsigned int targinfosize,
++ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++static int
++checkentry(const char *tablename,
++ const void *e,
+ const struct xt_target *target,
-+#endif
+ void *targinfo,
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+ unsigned int targinfosize,
-+#endif
+ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
++static int
++checkentry(const char *tablename,
++ const void *e,
++ const struct xt_target *target,
++ void *targinfo,
++ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++static bool
++checkentry(const char *tablename,
++ const void *e,
++ const struct xt_target *target,
++ void *targinfo,
++ unsigned int hook_mask)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
++static bool
++checkentry(const struct xt_tgchk_param *par)
++#endif
+{
-+ struct ipt_set_info_target *info =
-+ (struct ipt_set_info_target *) targinfo;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++ const struct ipt_set_info_target *info = targinfo;
++#else
++ const struct ipt_set_info_target *info = par->targinfo;
++#endif
+ ip_set_id_t index;
+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
+ if (targinfosize != IPT_ALIGN(sizeof(*info))) {
+ DP("bad target info size %u", targinfosize);
+ return 0;
@@ -7473,68 +7498,77 @@
+ return 1;
+}
+
-+static void destroy(
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-+ const struct xt_target *target,
-+#endif
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ void *targetinfo, unsigned int targetsize)
-+#else
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static void destroy(void *targetinfo,
++ unsigned int targetsize)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++static void destroy(const struct xt_target *target,
++ void *targetinfo,
++ unsigned int targetsize)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++static void destroy(const struct xt_target *target,
+ void *targetinfo)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
++static void destroy(const struct xt_tgdtor_param *par)
+#endif
+{
-+ struct ipt_set_info_target *info = targetinfo;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++ const struct ipt_set_info_target *info = targetinfo;
++#else
++ const struct ipt_set_info_target *info = par->targinfo;
++#endif
+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
+ if (targetsize != IPT_ALIGN(sizeof(struct ipt_set_info_target))) {
+ ip_set_printk("invalid targetsize %d", targetsize);
+ return;
+ }
+#endif
+ if (info->add_set.index != IP_SET_INVALID_ID)
-+ ip_set_put(info->add_set.index);
++ ip_set_put_byindex(info->add_set.index);
+ if (info->del_set.index != IP_SET_INVALID_ID)
-+ ip_set_put(info->del_set.index);
++ ip_set_put_byindex(info->del_set.index);
+}
+
-+static struct ipt_target SET_target = {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static struct xt_target SET_target = {
++ .name = "SET",
++ .target = target,
++ .checkentry = checkentry,
++ .destroy = destroy,
++ .me = THIS_MODULE
++};
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17) */
++static struct xt_target SET_target = {
+ .name = "SET",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
+ .family = AF_INET,
-+#endif
+ .target = target,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+ .targetsize = sizeof(struct ipt_set_info_target),
-+#endif
+ .checkentry = checkentry,
+ .destroy = destroy,
+ .me = THIS_MODULE
+};
++#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("iptables IP set target module");
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+#define ipt_register_target xt_register_target
-+#define ipt_unregister_target xt_unregister_target
-+#endif
-+
+static int __init ipt_SET_init(void)
+{
-+ return ipt_register_target(&SET_target);
++ return xt_register_target(&SET_target);
+}
+
+static void __exit ipt_SET_fini(void)
+{
-+ ipt_unregister_target(&SET_target);
++ xt_unregister_target(&SET_target);
+}
+
+module_init(ipt_SET_init);
+module_exit(ipt_SET_fini);
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
-@@ -388,5 +388,122 @@ config IP_NF_ARP_MANGLE
+@@ -388,5 +388,146 @@ config IP_NF_ARP_MANGLE
endif # IP_NF_ARPTABLES
@@ -7619,6 +7653,22 @@
+
+ To compile it as a module, choose M here. If unsure, say N.
+
++config IP_NF_SET_IPPORTIPHASH
++ tristate "ipportiphash set support"
++ depends on IP_NF_SET
++ help
++ This option adds the ipportiphash set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config IP_NF_SET_IPPORTNETHASH
++ tristate "ipportnethash set support"
++ depends on IP_NF_SET
++ help
++ This option adds the ipportnethash set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
+config IP_NF_SET_IPTREE
+ tristate "iptree set support"
+ depends on IP_NF_SET
@@ -7635,6 +7685,14 @@
+
+ To compile it as a module, choose M here. If unsure, say N.
+
++config IP_NF_SET_SETLIST
++ tristate "setlist set support"
++ depends on IP_NF_SET
++ help
++ This option adds the setlist set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
+config IP_NF_MATCH_SET
+ tristate "set match support"
+ depends on IP_NF_SET
@@ -7667,7 +7725,7 @@
# targets
obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
-@@ -61,6 +62,18 @@ obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt
+@@ -61,6 +62,21 @@ obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt
obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o
obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
@@ -7681,8 +7739,11 @@
+obj-$(CONFIG_IP_NF_SET_IPHASH) += ip_set_iphash.o
+obj-$(CONFIG_IP_NF_SET_NETHASH) += ip_set_nethash.o
+obj-$(CONFIG_IP_NF_SET_IPPORTHASH) += ip_set_ipporthash.o
++obj-$(CONFIG_IP_NF_SET_IPPORTIPHASH) += ip_set_ipportiphash.o
++obj-$(CONFIG_IP_NF_SET_IPPORTNETHASH) += ip_set_ipportnethash.o
+obj-$(CONFIG_IP_NF_SET_IPTREE) += ip_set_iptree.o
+obj-$(CONFIG_IP_NF_SET_IPTREEMAP) += ip_set_iptreemap.o
++obj-$(CONFIG_IP_NF_SET_SETLIST) += ip_set_setlist.o
# generic ARP tables
obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o
diff --git a/target/linux/generic-2.6/patches-2.6.31/130-netfilter_ipset.patch b/target/linux/generic-2.6/patches-2.6.31/130-netfilter_ipset.patch
index efe1041e6d..832f679d7a 100644
--- a/target/linux/generic-2.6/patches-2.6.31/130-netfilter_ipset.patch
+++ b/target/linux/generic-2.6/patches-2.6.31/130-netfilter_ipset.patch
@@ -1,23 +1,29 @@
--- a/include/linux/netfilter_ipv4/Kbuild
+++ b/include/linux/netfilter_ipv4/Kbuild
-@@ -45,3 +45,14 @@ header-y += ipt_ttl.h
+@@ -45,3 +45,20 @@ header-y += ipt_ttl.h
unifdef-y += ip_queue.h
unifdef-y += ip_tables.h
+
+unifdef-y += ip_set.h
+header-y += ip_set_iphash.h
++unifdef-y += ip_set_bitmaps.h
++unifdef-y += ip_set_getport.h
++unifdef-y += ip_set_hashes.h
+header-y += ip_set_ipmap.h
+header-y += ip_set_ipporthash.h
++header-y += ip_set_ipportiphash.h
++header-y += ip_set_ipportnethash.h
+unifdef-y += ip_set_iptree.h
+unifdef-y += ip_set_iptreemap.h
+header-y += ip_set_jhash.h
+header-y += ip_set_macipmap.h
-+unifdef-y += ip_set_nethash.h
++header-y += ip_set_nethash.h
+header-y += ip_set_portmap.h
++header-y += ip_set_setlist.h
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set.h
-@@ -0,0 +1,498 @@
+@@ -0,0 +1,574 @@
+#ifndef _IP_SET_H
+#define _IP_SET_H
+
@@ -28,7 +34,7 @@
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
++ * published by the Free Software Foundation.
+ */
+
+#if 0
@@ -57,10 +63,10 @@
+ * - in order to "deal with" backward compatibility, renamed to ipset
+ */
+
-+/*
-+ * Used so that the kernel module and ipset-binary can match their versions
++/*
++ * Used so that the kernel module and ipset-binary can match their versions
+ */
-+#define IP_SET_PROTOCOL_VERSION 2
++#define IP_SET_PROTOCOL_VERSION 3
+
+#define IP_SET_MAXNAMELEN 32 /* set names and set typenames */
+
@@ -69,7 +75,7 @@
+ *
+ * The representation works in HOST byte order, because most set types
+ * will perform arithmetic operations and compare operations.
-+ *
++ *
+ * For now the type is an uint32_t.
+ *
+ * Make sure to ONLY use the functions when translating and parsing
@@ -107,6 +113,9 @@
+#define IPSET_TYPE_PORT 0x02 /* Port type of set */
+#define IPSET_DATA_SINGLE 0x04 /* Single data storage */
+#define IPSET_DATA_DOUBLE 0x08 /* Double data storage */
++#define IPSET_DATA_TRIPLE 0x10 /* Triple data storage */
++#define IPSET_TYPE_IP1 0x20 /* IP address type of set */
++#define IPSET_TYPE_SETNAME 0x40 /* setname type of set */
+
+/* Reserved keywords */
+#define IPSET_TOKEN_DEFAULT ":default:"
@@ -120,8 +129,8 @@
+ * 200-299: list, save, restore
+ */
+
-+/* Single shot operations:
-+ * version, create, destroy, flush, rename and swap
++/* Single shot operations:
++ * version, create, destroy, flush, rename and swap
+ *
+ * Sets are identified by name.
+ */
@@ -172,7 +181,7 @@
+ unsigned version;
+};
+
-+/* Double shots operations:
++/* Double shots operations:
+ * add, del, test, bind and unbind.
+ *
+ * First we query the kernel to get the index and type of the target set,
@@ -214,7 +223,7 @@
+};
+
+#define IP_SET_OP_UNBIND_SET 0x00000105 /* Unbind an IP from a set */
-+/* Uses ip_set_req_bind, with type speficic addage
++/* Uses ip_set_req_bind, with type specific addage
+ * index = 0 means unbinding for all sets */
+
+#define IP_SET_OP_TEST_BIND_SET 0x00000106 /* Test binding an IP to a set */
@@ -245,7 +254,7 @@
+struct ip_set_req_setnames {
+ unsigned op;
+ ip_set_id_t index; /* set to list/save */
-+ size_t size; /* size to get setdata/bindings */
++ u_int32_t size; /* size to get setdata/bindings */
+ /* followed by sets number of struct ip_set_name_list */
+};
+
@@ -260,16 +269,16 @@
+#define IP_SET_OP_LIST 0x00000203
+struct ip_set_req_list {
+ IP_SET_REQ_BYINDEX;
-+ /* sets number of struct ip_set_list in reply */
++ /* sets number of struct ip_set_list in reply */
+};
+
+struct ip_set_list {
+ ip_set_id_t index;
+ ip_set_id_t binding;
+ u_int32_t ref;
-+ size_t header_size; /* Set header data of header_size */
-+ size_t members_size; /* Set members data of members_size */
-+ size_t bindings_size; /* Set bindings data of bindings_size */
++ u_int32_t header_size; /* Set header data of header_size */
++ u_int32_t members_size; /* Set members data of members_size */
++ u_int32_t bindings_size;/* Set bindings data of bindings_size */
+};
+
+struct ip_set_hash_list {
@@ -286,8 +295,8 @@
+struct ip_set_save {
+ ip_set_id_t index;
+ ip_set_id_t binding;
-+ size_t header_size; /* Set header data of header_size */
-+ size_t members_size; /* Set members data of members_size */
++ u_int32_t header_size; /* Set header data of header_size */
++ u_int32_t members_size; /* Set members data of members_size */
+};
+
+/* At restoring, ip == 0 means default binding for the given set: */
@@ -300,15 +309,15 @@
+/* The restore operation */
+#define IP_SET_OP_RESTORE 0x00000205
+/* Uses ip_set_req_setnames followed by ip_set_restore structures
-+ * plus a marker ip_set_restore, followed by ip_set_hash_save
++ * plus a marker ip_set_restore, followed by ip_set_hash_save
+ * structures.
+ */
+struct ip_set_restore {
+ char name[IP_SET_MAXNAMELEN];
+ char typename[IP_SET_MAXNAMELEN];
+ ip_set_id_t index;
-+ size_t header_size; /* Create data of header_size */
-+ size_t members_size; /* Set members data of members_size */
++ u_int32_t header_size; /* Create data of header_size */
++ u_int32_t members_size; /* Set members data of members_size */
+};
+
+static inline int bitmap_bytes(ip_set_ip_t a, ip_set_ip_t b)
@@ -316,7 +325,12 @@
+ return 4 * ((((b - a + 8) / 8) + 3) / 4);
+}
+
++/* General limit for the elements in a set */
++#define MAX_RANGE 0x0000FFFF
++
+#ifdef __KERNEL__
++#include <linux/netfilter_ipv4/ip_set_compat.h>
++#include <linux/netfilter_ipv4/ip_set_malloc.h>
+
+#define ip_set_printk(format, args...) \
+ do { \
@@ -361,7 +375,7 @@
+ * return 0 if not in set, 1 if in set.
+ */
+ int (*testip_kernel) (struct ip_set *set,
-+ const struct sk_buff * skb,
++ const struct sk_buff * skb,
+ ip_set_ip_t *ip,
+ const u_int32_t *flags,
+ unsigned char index);
@@ -370,22 +384,22 @@
+ * return 0 if not in set, 1 if in set.
+ */
+ int (*testip) (struct ip_set *set,
-+ const void *data, size_t size,
++ const void *data, u_int32_t size,
+ ip_set_ip_t *ip);
+
+ /*
+ * Size of the data structure passed by when
+ * adding/deletin/testing an entry.
+ */
-+ size_t reqsize;
++ u_int32_t reqsize;
+
+ /* Add IP into set (userspace: ipset -A set IP)
+ * Return -EEXIST if the address is already in the set,
+ * and -ERANGE if the address lies outside the set bounds.
+ * If the address was not already in the set, 0 is returned.
+ */
-+ int (*addip) (struct ip_set *set,
-+ const void *data, size_t size,
++ int (*addip) (struct ip_set *set,
++ const void *data, u_int32_t size,
+ ip_set_ip_t *ip);
+
+ /* Add IP into set (kernel: iptables ... -j SET set src|dst)
@@ -394,7 +408,7 @@
+ * If the address was not already in the set, 0 is returned.
+ */
+ int (*addip_kernel) (struct ip_set *set,
-+ const struct sk_buff * skb,
++ const struct sk_buff * skb,
+ ip_set_ip_t *ip,
+ const u_int32_t *flags,
+ unsigned char index);
@@ -404,8 +418,8 @@
+ * and -ERANGE if the address lies outside the set bounds.
+ * If the address really was in the set, 0 is returned.
+ */
-+ int (*delip) (struct ip_set *set,
-+ const void *data, size_t size,
++ int (*delip) (struct ip_set *set,
++ const void *data, u_int32_t size,
+ ip_set_ip_t *ip);
+
+ /* remove IP from set (kernel: iptables ... -j SET --entry x)
@@ -414,7 +428,7 @@
+ * If the address really was in the set, 0 is returned.
+ */
+ int (*delip_kernel) (struct ip_set *set,
-+ const struct sk_buff * skb,
++ const struct sk_buff * skb,
+ ip_set_ip_t *ip,
+ const u_int32_t *flags,
+ unsigned char index);
@@ -422,7 +436,7 @@
+ /* new set creation - allocated type specific items
+ */
+ int (*create) (struct ip_set *set,
-+ const void *data, size_t size);
++ const void *data, u_int32_t size);
+
+ /* retry the operation after successfully tweaking the set
+ */
@@ -441,16 +455,16 @@
+
+ /* Listing: size needed for header
+ */
-+ size_t header_size;
++ u_int32_t header_size;
+
+ /* Listing: Get the header
+ *
+ * Fill in the information in "data".
-+ * This function is always run after list_header_size() under a
-+ * writelock on the set. Therefor is the length of "data" always
-+ * correct.
++ * This function is always run after list_header_size() under a
++ * writelock on the set. Therefore the length of "data" is always
++ * correct.
+ */
-+ void (*list_header) (const struct ip_set *set,
++ void (*list_header) (const struct ip_set *set,
+ void *data);
+
+ /* Listing: Get the size for the set members
@@ -460,9 +474,9 @@
+ /* Listing: Get the set members
+ *
+ * Fill in the information in "data".
-+ * This function is always run after list_member_size() under a
-+ * writelock on the set. Therefor is the length of "data" always
-+ * correct.
++ * This function is always run after list_member_size() under a
++ * writelock on the set. Therefore the length of "data" is always
++ * correct.
+ */
+ void (*list_members) (const struct ip_set *set,
+ void *data);
@@ -499,33 +513,659 @@
+
+/* register and unregister set references */
+extern ip_set_id_t ip_set_get_byname(const char name[IP_SET_MAXNAMELEN]);
-+extern ip_set_id_t ip_set_get_byindex(ip_set_id_t id);
-+extern void ip_set_put(ip_set_id_t id);
++extern ip_set_id_t ip_set_get_byindex(ip_set_id_t index);
++extern void ip_set_put_byindex(ip_set_id_t index);
++extern ip_set_id_t ip_set_id(ip_set_id_t index);
++extern ip_set_id_t __ip_set_get_byname(const char name[IP_SET_MAXNAMELEN],
++ struct ip_set **set);
++extern void __ip_set_put_byindex(ip_set_id_t index);
+
+/* API for iptables set match, and SET target */
-+extern void ip_set_addip_kernel(ip_set_id_t id,
-+ const struct sk_buff *skb,
-+ const u_int32_t *flags);
-+extern void ip_set_delip_kernel(ip_set_id_t id,
-+ const struct sk_buff *skb,
-+ const u_int32_t *flags);
++extern int ip_set_addip_kernel(ip_set_id_t id,
++ const struct sk_buff *skb,
++ const u_int32_t *flags);
++extern int ip_set_delip_kernel(ip_set_id_t id,
++ const struct sk_buff *skb,
++ const u_int32_t *flags);
+extern int ip_set_testip_kernel(ip_set_id_t id,
+ const struct sk_buff *skb,
+ const u_int32_t *flags);
+
++/* Macros to generate functions */
++
++#define STRUCT(pre, type) CONCAT2(pre, type)
++#define CONCAT2(pre, type) struct pre##type
++
++#define FNAME(pre, mid, post) CONCAT3(pre, mid, post)
++#define CONCAT3(pre, mid, post) pre##mid##post
++
++#define UADT0(type, adt, args...) \
++static int \
++FNAME(type,_u,adt)(struct ip_set *set, const void *data, u_int32_t size,\
++ ip_set_ip_t *hash_ip) \
++{ \
++ const STRUCT(ip_set_req_,type) *req = data; \
++ \
++ return FNAME(type,_,adt)(set, hash_ip , ## args); \
++}
++
++#define UADT(type, adt, args...) \
++ UADT0(type, adt, req->ip , ## args)
++
++#define KADT(type, adt, getfn, args...) \
++static int \
++FNAME(type,_k,adt)(struct ip_set *set, \
++ const struct sk_buff *skb, \
++ ip_set_ip_t *hash_ip, \
++ const u_int32_t *flags, \
++ unsigned char index) \
++{ \
++ ip_set_ip_t ip = getfn(skb, flags[index]); \
++ \
++ KADT_CONDITION \
++ return FNAME(type,_,adt)(set, hash_ip, ip , ##args); \
++}
++
++#define REGISTER_MODULE(type) \
++static int __init ip_set_##type##_init(void) \
++{ \
++ init_max_page_size(); \
++ return ip_set_register_set_type(&ip_set_##type); \
++} \
++ \
++static void __exit ip_set_##type##_fini(void) \
++{ \
++ /* FIXME: possible race with ip_set_create() */ \
++ ip_set_unregister_set_type(&ip_set_##type); \
++} \
++ \
++module_init(ip_set_##type##_init); \
++module_exit(ip_set_##type##_fini);
++
++/* Common functions */
++
++static inline ip_set_ip_t
++ipaddr(const struct sk_buff *skb, u_int32_t flag)
++{
++ return ntohl(flag & IPSET_SRC ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr);
++}
++
++#define jhash_ip(map, i, ip) jhash_1word(ip, *(map->initval + i))
++
++#define pack_ip_port(map, ip, port) \
++ (port + ((ip - ((map)->first_ip)) << 16))
++
+#endif /* __KERNEL__ */
+
+#endif /*_IP_SET_H*/
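The two arithmetic helpers defined above, bitmap_bytes() and pack_ip_port(), are easy to sanity-check outside the kernel. The standalone userspace sketch below re-types them with uint32_t and is illustrative only, not part of the patch; the sample range, address and port are made up.

#include <stdio.h>
#include <stdint.h>

typedef uint32_t ip_set_ip_t;

/* Same formula as bitmap_bytes(): bits rounded up to whole bytes,
 * then up to a multiple of 4 bytes. */
static int bitmap_bytes(ip_set_ip_t a, ip_set_ip_t b)
{
	return 4 * ((((b - a + 8) / 8) + 3) / 4);
}

/* Same packing as pack_ip_port(): port in the low 16 bits, the offset of
 * ip from the range start above it (first_ip passed directly here instead
 * of via the map structure). */
static ip_set_ip_t pack_ip_port(ip_set_ip_t first_ip, ip_set_ip_t ip, uint16_t port)
{
	return port + ((ip - first_ip) << 16);
}

int main(void)
{
	/* MAX_RANGE (0xFFFF) spans 65536 addresses -> 8192 bytes of bitmap. */
	printf("bitmap_bytes(0, 0xFFFF) = %d\n", bitmap_bytes(0, 0xFFFF));
	/* 192.168.0.3 relative to 192.168.0.0, port 80 -> 0x00030050 */
	printf("pack_ip_port = 0x%08x\n",
	       (unsigned)pack_ip_port(0xc0a80000, 0xc0a80003, 80));
	return 0;
}

So a full /16 range costs 8 KB of bitmap, which lines up with MAX_RANGE being documented above as the general element limit for a set.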
--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_bitmaps.h
+@@ -0,0 +1,121 @@
++#ifndef __IP_SET_BITMAPS_H
++#define __IP_SET_BITMAPS_H
++
++/* Macros to generate functions */
++
++#ifdef __KERNEL__
++#define BITMAP_CREATE(type) \
++static int \
++type##_create(struct ip_set *set, const void *data, u_int32_t size) \
++{ \
++ int newbytes; \
++ const struct ip_set_req_##type##_create *req = data; \
++ struct ip_set_##type *map; \
++ \
++ if (req->from > req->to) { \
++ DP("bad range"); \
++ return -ENOEXEC; \
++ } \
++ \
++ map = kmalloc(sizeof(struct ip_set_##type), GFP_KERNEL); \
++ if (!map) { \
++ DP("out of memory for %zu bytes", \
++ sizeof(struct ip_set_##type)); \
++ return -ENOMEM; \
++ } \
++ map->first_ip = req->from; \
++ map->last_ip = req->to; \
++ \
++ newbytes = __##type##_create(req, map); \
++ if (newbytes < 0) { \
++ kfree(map); \
++ return newbytes; \
++ } \
++ \
++ map->size = newbytes; \
++ map->members = ip_set_malloc(newbytes); \
++ if (!map->members) { \
++ DP("out of memory for %i bytes", newbytes); \
++ kfree(map); \
++ return -ENOMEM; \
++ } \
++ memset(map->members, 0, newbytes); \
++ \
++ set->data = map; \
++ return 0; \
++}
++
++#define BITMAP_DESTROY(type) \
++static void \
++type##_destroy(struct ip_set *set) \
++{ \
++ struct ip_set_##type *map = set->data; \
++ \
++ ip_set_free(map->members, map->size); \
++ kfree(map); \
++ \
++ set->data = NULL; \
++}
++
++#define BITMAP_FLUSH(type) \
++static void \
++type##_flush(struct ip_set *set) \
++{ \
++ struct ip_set_##type *map = set->data; \
++ memset(map->members, 0, map->size); \
++}
++
++#define BITMAP_LIST_HEADER(type) \
++static void \
++type##_list_header(const struct ip_set *set, void *data) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ struct ip_set_req_##type##_create *header = data; \
++ \
++ header->from = map->first_ip; \
++ header->to = map->last_ip; \
++ __##type##_list_header(map, header); \
++}
++
++#define BITMAP_LIST_MEMBERS_SIZE(type) \
++static int \
++type##_list_members_size(const struct ip_set *set) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ \
++ return map->size; \
++}
++
++#define BITMAP_LIST_MEMBERS(type) \
++static void \
++type##_list_members(const struct ip_set *set, void *data) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ \
++ memcpy(data, map->members, map->size); \
++}
++
++#define IP_SET_TYPE(type, __features) \
++struct ip_set_type ip_set_##type = { \
++ .typename = #type, \
++ .features = __features, \
++ .protocol_version = IP_SET_PROTOCOL_VERSION, \
++ .create = &type##_create, \
++ .destroy = &type##_destroy, \
++ .flush = &type##_flush, \
++ .reqsize = sizeof(struct ip_set_req_##type), \
++ .addip = &type##_uadd, \
++ .addip_kernel = &type##_kadd, \
++ .delip = &type##_udel, \
++ .delip_kernel = &type##_kdel, \
++ .testip = &type##_utest, \
++ .testip_kernel = &type##_ktest, \
++ .header_size = sizeof(struct ip_set_req_##type##_create),\
++ .list_header = &type##_list_header, \
++ .list_members_size = &type##_list_members_size, \
++ .list_members = &type##_list_members, \
++ .me = THIS_MODULE, \
++};
++#endif /* __KERNEL */
++
++#endif /* __IP_SET_BITMAPS_H */
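BITMAP_CREATE(), BITMAP_DESTROY() and the related macros above generate a complete set-type implementation per bitmap type through token pasting, and IP_SET_TYPE() then wires the generated functions into a struct ip_set_type. A minimal userspace toy of the same pattern follows; the type name, struct layout and sizes are invented for the demo and are not the kernel definitions.

#include <stdio.h>
#include <string.h>

struct toy_set { unsigned char bits[32]; };   /* 256-bit bitmap */

#define BITMAP_FLUSH(type)                                     \
static void type##_flush(struct toy_set *set)                  \
{                                                              \
	memset(set->bits, 0, sizeof(set->bits));               \
}

#define BITMAP_TEST(type)                                      \
static int type##_test(const struct toy_set *set, unsigned n)  \
{                                                              \
	return (set->bits[n / 8] >> (n % 8)) & 1;              \
}

BITMAP_FLUSH(portmap)   /* expands to portmap_flush() */
BITMAP_TEST(portmap)    /* expands to portmap_test()  */

int main(void)
{
	struct toy_set s;

	portmap_flush(&s);
	s.bits[10] |= 1;        /* mark element 80 by hand: 80/8 = 10, bit 0 */
	printf("80 in set: %d\n", portmap_test(&s, 80));
	printf("81 in set: %d\n", portmap_test(&s, 81));
	return 0;
}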
+--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_compat.h
+@@ -0,0 +1,71 @@
++#ifndef _IP_SET_COMPAT_H
++#define _IP_SET_COMPAT_H
++
++#ifdef __KERNEL__
++#include <linux/version.h>
++
++/* Arrgh */
++#ifdef MODULE
++#define __MOD_INC(foo) __MOD_INC_USE_COUNT(foo)
++#define __MOD_DEC(foo) __MOD_DEC_USE_COUNT(foo)
++#else
++#define __MOD_INC(foo) 1
++#define __MOD_DEC(foo)
++#endif
++
++/* Backward compatibility */
++#ifndef __nocast
++#define __nocast
++#endif
++#ifndef __bitwise__
++#define __bitwise__
++#endif
++
++/* Compatibility glue code */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++#include <linux/interrupt.h>
++#define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED
++#define try_module_get(x) __MOD_INC(x)
++#define module_put(x) __MOD_DEC(x)
++#define __clear_bit(nr, addr) clear_bit(nr, addr)
++#define __set_bit(nr, addr) set_bit(nr, addr)
++#define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)
++#define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)
++
++typedef unsigned __bitwise__ gfp_t;
++
++static inline void *kzalloc(size_t size, gfp_t flags)
++{
++ void *data = kmalloc(size, flags);
++
++ if (data)
++ memset(data, 0, size);
++
++ return data;
++}
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++#define __KMEM_CACHE_T__ kmem_cache_t
++#else
++#define __KMEM_CACHE_T__ struct kmem_cache
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
++#define ip_hdr(skb) ((skb)->nh.iph)
++#define skb_mac_header(skb) ((skb)->mac.raw)
++#define eth_hdr(skb) ((struct ethhdr *)skb_mac_header(skb))
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
++#include <linux/netfilter.h>
++#define KMEM_CACHE_CREATE(name, size) \
++ kmem_cache_create(name, size, 0, 0, NULL, NULL)
++#else
++#define KMEM_CACHE_CREATE(name, size) \
++ kmem_cache_create(name, size, 0, 0, NULL)
++#endif
++
++
++#endif /* __KERNEL__ */
++#endif /* _IP_SET_COMPAT_H */
+--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_getport.h
+@@ -0,0 +1,48 @@
++#ifndef _IP_SET_GETPORT_H
++#define _IP_SET_GETPORT_H
++
++#ifdef __KERNEL__
++
++#define INVALID_PORT (MAX_RANGE + 1)
++
++/* We must handle non-linear skbs */
++static inline ip_set_ip_t
++get_port(const struct sk_buff *skb, u_int32_t flags)
++{
++ struct iphdr *iph = ip_hdr(skb);
++ u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
++ switch (iph->protocol) {
++ case IPPROTO_TCP: {
++ struct tcphdr tcph;
++
++ /* See comments at tcp_match in ip_tables.c */
++ if (offset)
++ return INVALID_PORT;
++
++ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
++ /* No choice either */
++ return INVALID_PORT;
++
++ return ntohs(flags & IPSET_SRC ?
++ tcph.source : tcph.dest);
++ }
++ case IPPROTO_UDP: {
++ struct udphdr udph;
++
++ if (offset)
++ return INVALID_PORT;
++
++ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
++ /* No choice either */
++ return INVALID_PORT;
++
++ return ntohs(flags & IPSET_SRC ?
++ udph.source : udph.dest);
++ }
++ default:
++ return INVALID_PORT;
++ }
++}
++#endif /* __KERNEL__ */
++
++#endif /*_IP_SET_GETPORT_H*/
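get_port() returns INVALID_PORT, one above the largest 16-bit port, whenever it cannot extract a port (non-initial fragments, truncated headers, protocols other than TCP/UDP), so a caller needs only a single out-of-band check. A small userspace illustration of that contract follows; fake_get_port() is a stand-in for the demo, not the kernel helper.

#include <stdio.h>
#include <stdint.h>

#define MAX_RANGE    0x0000FFFF
#define INVALID_PORT (MAX_RANGE + 1)   /* 65536: outside the 16-bit port space */

/* Stand-in for get_port(): pretend the transport header was unreadable. */
static uint32_t fake_get_port(int have_header, uint16_t port)
{
	return have_header ? port : INVALID_PORT;
}

int main(void)
{
	uint32_t p;

	p = fake_get_port(1, 80);
	if (p != INVALID_PORT)
		printf("usable port %u\n", (unsigned)p);

	p = fake_get_port(0, 0);       /* e.g. a non-first fragment */
	if (p == INVALID_PORT)
		printf("no port available, treat as not matching\n");
	return 0;
}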
+--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_hashes.h
+@@ -0,0 +1,306 @@
++#ifndef __IP_SET_HASHES_H
++#define __IP_SET_HASHES_H
++
++#define initval_t uint32_t
++
++/* Macros to generate functions */
++
++#ifdef __KERNEL__
++#define HASH_RETRY0(type, dtype, cond) \
++static int \
++type##_retry(struct ip_set *set) \
++{ \
++ struct ip_set_##type *map = set->data, *tmp; \
++ dtype *elem; \
++ void *members; \
++ u_int32_t i, hashsize = map->hashsize; \
++ int res; \
++ \
++ if (map->resize == 0) \
++ return -ERANGE; \
++ \
++ again: \
++ res = 0; \
++ \
++ /* Calculate new hash size */ \
++ hashsize += (hashsize * map->resize)/100; \
++ if (hashsize == map->hashsize) \
++ hashsize++; \
++ \
++ ip_set_printk("rehashing of set %s triggered: " \
++ "hashsize grows from %lu to %lu", \
++ set->name, \
++ (long unsigned)map->hashsize, \
++ (long unsigned)hashsize); \
++ \
++ tmp = kmalloc(sizeof(struct ip_set_##type) \
++ + map->probes * sizeof(initval_t), GFP_ATOMIC); \
++ if (!tmp) { \
++ DP("out of memory for %zu bytes", \
++ sizeof(struct ip_set_##type) \
++ + map->probes * sizeof(initval_t)); \
++ return -ENOMEM; \
++ } \
++ tmp->members = harray_malloc(hashsize, sizeof(dtype), GFP_ATOMIC);\
++ if (!tmp->members) { \
++ DP("out of memory for %zu bytes", hashsize * sizeof(dtype));\
++ kfree(tmp); \
++ return -ENOMEM; \
++ } \
++ tmp->hashsize = hashsize; \
++ tmp->elements = 0; \
++ tmp->probes = map->probes; \
++ tmp->resize = map->resize; \
++ memcpy(tmp->initval, map->initval, map->probes * sizeof(initval_t));\
++ __##type##_retry(tmp, map); \
++ \
++ write_lock_bh(&set->lock); \
++ map = set->data; /* Play safe */ \
++ for (i = 0; i < map->hashsize && res == 0; i++) { \
++ elem = HARRAY_ELEM(map->members, dtype *, i); \
++ if (cond) \
++ res = __##type##_add(tmp, elem); \
++ } \
++ if (res) { \
++ /* Failure, try again */ \
++ write_unlock_bh(&set->lock); \
++ harray_free(tmp->members); \
++ kfree(tmp); \
++ goto again; \
++ } \
++ \
++ /* Success at resizing! */ \
++ members = map->members; \
++ \
++ map->hashsize = tmp->hashsize; \
++ map->members = tmp->members; \
++ write_unlock_bh(&set->lock); \
++ \
++ harray_free(members); \
++ kfree(tmp); \
++ \
++ return 0; \
++}
++
++#define HASH_RETRY(type, dtype) \
++ HASH_RETRY0(type, dtype, *elem)
++
++#define HASH_RETRY2(type, dtype) \
++ HASH_RETRY0(type, dtype, elem->ip || elem->ip1)
++
++#define HASH_CREATE(type, dtype) \
++static int \
++type##_create(struct ip_set *set, const void *data, u_int32_t size) \
++{ \
++ const struct ip_set_req_##type##_create *req = data; \
++ struct ip_set_##type *map; \
++ uint16_t i; \
++ \
++ if (req->hashsize < 1) { \
++ ip_set_printk("hashsize too small"); \
++ return -ENOEXEC; \
++ } \
++ \
++ if (req->probes < 1) { \
++ ip_set_printk("probes too small"); \
++ return -ENOEXEC; \
++ } \
++ \
++ map = kmalloc(sizeof(struct ip_set_##type) \
++ + req->probes * sizeof(initval_t), GFP_KERNEL); \
++ if (!map) { \
++ DP("out of memory for %zu bytes", \
++ sizeof(struct ip_set_##type) \
++ + req->probes * sizeof(initval_t)); \
++ return -ENOMEM; \
++ } \
++ for (i = 0; i < req->probes; i++) \
++ get_random_bytes(((initval_t *) map->initval)+i, 4); \
++ map->elements = 0; \
++ map->hashsize = req->hashsize; \
++ map->probes = req->probes; \
++ map->resize = req->resize; \
++ if (__##type##_create(req, map)) { \
++ kfree(map); \
++ return -ENOEXEC; \
++ } \
++ map->members = harray_malloc(map->hashsize, sizeof(dtype), GFP_KERNEL);\
++ if (!map->members) { \
++ DP("out of memory for %zu bytes", map->hashsize * sizeof(dtype));\
++ kfree(map); \
++ return -ENOMEM; \
++ } \
++ \
++ set->data = map; \
++ return 0; \
++}
++
++#define HASH_DESTROY(type) \
++static void \
++type##_destroy(struct ip_set *set) \
++{ \
++ struct ip_set_##type *map = set->data; \
++ \
++ harray_free(map->members); \
++ kfree(map); \
++ \
++ set->data = NULL; \
++}
++
++#define HASH_FLUSH(type, dtype) \
++static void \
++type##_flush(struct ip_set *set) \
++{ \
++ struct ip_set_##type *map = set->data; \
++ harray_flush(map->members, map->hashsize, sizeof(dtype)); \
++ map->elements = 0; \
++}
++
++#define HASH_FLUSH_CIDR(type, dtype) \
++static void \
++type##_flush(struct ip_set *set) \
++{ \
++ struct ip_set_##type *map = set->data; \
++ harray_flush(map->members, map->hashsize, sizeof(dtype)); \
++ memset(map->cidr, 0, sizeof(map->cidr)); \
++ memset(map->nets, 0, sizeof(map->nets)); \
++ map->elements = 0; \
++}
++
++#define HASH_LIST_HEADER(type) \
++static void \
++type##_list_header(const struct ip_set *set, void *data) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ struct ip_set_req_##type##_create *header = data; \
++ \
++ header->hashsize = map->hashsize; \
++ header->probes = map->probes; \
++ header->resize = map->resize; \
++ __##type##_list_header(map, header); \
++}
++
++#define HASH_LIST_MEMBERS_SIZE(type, dtype) \
++static int \
++type##_list_members_size(const struct ip_set *set) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ \
++ return (map->hashsize * sizeof(dtype)); \
++}
++
++#define HASH_LIST_MEMBERS(type, dtype) \
++static void \
++type##_list_members(const struct ip_set *set, void *data) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ dtype *elem; \
++ uint32_t i; \
++ \
++ for (i = 0; i < map->hashsize; i++) { \
++ elem = HARRAY_ELEM(map->members, dtype *, i); \
++ ((dtype *)data)[i] = *elem; \
++ } \
++}
++
++#define HASH_LIST_MEMBERS_MEMCPY(type, dtype) \
++static void \
++type##_list_members(const struct ip_set *set, void *data) \
++{ \
++ const struct ip_set_##type *map = set->data; \
++ dtype *elem; \
++ uint32_t i; \
++ \
++ for (i = 0; i < map->hashsize; i++) { \
++ elem = HARRAY_ELEM(map->members, dtype *, i); \
++ memcpy((((dtype *)data)+i), elem, sizeof(dtype)); \
++ } \
++}
++
++#define IP_SET_RTYPE(type, __features) \
++struct ip_set_type ip_set_##type = { \
++ .typename = #type, \
++ .features = __features, \
++ .protocol_version = IP_SET_PROTOCOL_VERSION, \
++ .create = &type##_create, \
++ .retry = &type##_retry, \
++ .destroy = &type##_destroy, \
++ .flush = &type##_flush, \
++ .reqsize = sizeof(struct ip_set_req_##type), \
++ .addip = &type##_uadd, \
++ .addip_kernel = &type##_kadd, \
++ .delip = &type##_udel, \
++ .delip_kernel = &type##_kdel, \
++ .testip = &type##_utest, \
++ .testip_kernel = &type##_ktest, \
++ .header_size = sizeof(struct ip_set_req_##type##_create),\
++ .list_header = &type##_list_header, \
++ .list_members_size = &type##_list_members_size, \
++ .list_members = &type##_list_members, \
++ .me = THIS_MODULE, \
++};
++
++/* Helper functions */
++static inline void
++add_cidr_size(uint8_t *cidr, uint8_t size)
++{
++ uint8_t next;
++ int i;
++
++ for (i = 0; i < 30 && cidr[i]; i++) {
++ if (cidr[i] < size) {
++ next = cidr[i];
++ cidr[i] = size;
++ size = next;
++ }
++ }
++ if (i < 30)
++ cidr[i] = size;
++}
++
++static inline void
++del_cidr_size(uint8_t *cidr, uint8_t size)
++{
++ int i;
++
++ for (i = 0; i < 29 && cidr[i]; i++) {
++ if (cidr[i] == size)
++ cidr[i] = size = cidr[i+1];
++ }
++ cidr[29] = 0;
++}
++#else
++#include <arpa/inet.h>
++#endif /* __KERNEL */
++
++#ifndef UINT16_MAX
++#define UINT16_MAX 65535
++#endif
++
++static unsigned char shifts[] = {255, 253, 249, 241, 225, 193, 129, 1};
++
++static inline ip_set_ip_t
++pack_ip_cidr(ip_set_ip_t ip, unsigned char cidr)
++{
++ ip_set_ip_t addr, *paddr = &addr;
++ unsigned char n, t, *a;
++
++ addr = htonl(ip & (0xFFFFFFFF << (32 - (cidr))));
++#ifdef __KERNEL__
++ DP("ip:%u.%u.%u.%u/%u", NIPQUAD(addr), cidr);
++#endif
++ n = cidr / 8;
++ t = cidr % 8;
++ a = &((unsigned char *)paddr)[n];
++ *a = *a /(1 << (8 - t)) + shifts[t];
++#ifdef __KERNEL__
++ DP("n: %u, t: %u, a: %u", n, t, *a);
++ DP("ip:%u.%u.%u.%u/%u, %u.%u.%u.%u",
++ HIPQUAD(ip), cidr, NIPQUAD(addr));
++#endif
++
++ return ntohl(addr);
++}
++
++
++#endif /* __IP_SET_HASHES_H */
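The *_retry() functions generated by HASH_RETRY0() grow the hash by the set's resize percentage on each failed insertion, bump the size by one when the percentage rounds down to nothing, and refuse to resize at all (-ERANGE) when resize is 0. The growth rule itself is easy to check in userspace; the sizes below are arbitrary.

#include <stdio.h>
#include <stdint.h>

/* Same growth rule as the retry path above. */
static uint32_t grow(uint32_t hashsize, uint16_t resize)
{
	uint32_t next = hashsize + (hashsize * resize) / 100;

	if (next == hashsize)   /* percentage too small to matter, still move on */
		next++;
	return next;
}

int main(void)
{
	printf("1024 -> %u at resize 50%%\n", (unsigned)grow(1024, 50)); /* 1536 */
	printf("1024 -> %u at resize 1%%\n", (unsigned)grow(1024, 1));   /* 1034 */
	printf("7 -> %u at resize 10%%\n", (unsigned)grow(7, 10));       /* 8 */
	return 0;
}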
+--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_iphash.h
@@ -0,0 +1,30 @@
+#ifndef __IP_SET_IPHASH_H
+#define __IP_SET_IPHASH_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_hashes.h>
+
+#define SETTYPE_NAME "iphash"
-+#define MAX_RANGE 0x0000FFFF
+
+struct ip_set_iphash {
+ ip_set_ip_t *members; /* the iphash proper */
@@ -534,7 +1174,7 @@
+ uint16_t probes; /* max number of probes */
+ uint16_t resize; /* resize factor in percent */
+ ip_set_ip_t netmask; /* netmask */
-+ void *initval[0]; /* initvals for jhash_1word */
++ initval_t initval[0]; /* initvals for jhash_1word */
+};
+
+struct ip_set_req_iphash_create {
@@ -551,14 +1191,14 @@
+#endif /* __IP_SET_IPHASH_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_ipmap.h
-@@ -0,0 +1,56 @@
+@@ -0,0 +1,57 @@
+#ifndef __IP_SET_IPMAP_H
+#define __IP_SET_IPMAP_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
+
+#define SETTYPE_NAME "ipmap"
-+#define MAX_RANGE 0x0000FFFF
+
+struct ip_set_ipmap {
+ void *members; /* the ipmap proper */
@@ -567,6 +1207,7 @@
+ ip_set_ip_t netmask; /* subnet netmask */
+ ip_set_ip_t sizeid; /* size of set in IPs */
+ ip_set_ip_t hosts; /* number of hosts in a subnet */
++ u_int32_t size; /* size of the ipmap proper */
+};
+
+struct ip_set_req_ipmap_create {
@@ -579,46 +1220,45 @@
+ ip_set_ip_t ip;
+};
+
-+unsigned int
++static inline unsigned int
+mask_to_bits(ip_set_ip_t mask)
+{
+ unsigned int bits = 32;
+ ip_set_ip_t maskaddr;
-+
++
+ if (mask == 0xFFFFFFFF)
+ return bits;
-+
++
+ maskaddr = 0xFFFFFFFE;
-+ while (--bits >= 0 && maskaddr != mask)
++ while (--bits > 0 && maskaddr != mask)
+ maskaddr <<= 1;
-+
++
+ return bits;
+}
+
-+ip_set_ip_t
++static inline ip_set_ip_t
+range_to_mask(ip_set_ip_t from, ip_set_ip_t to, unsigned int *bits)
+{
+ ip_set_ip_t mask = 0xFFFFFFFE;
-+
++
+ *bits = 32;
-+ while (--(*bits) >= 0 && mask && (to & mask) != from)
++ while (--(*bits) > 0 && mask && (to & mask) != from)
+ mask <<= 1;
-+
++
+ return mask;
+}
-+
++
+#endif /* __IP_SET_IPMAP_H */
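The loop conditions in mask_to_bits() and range_to_mask() were changed from --bits >= 0 to --bits > 0, presumably because bits is unsigned and the old comparison could never become false. The conversions themselves are plain bit arithmetic and can be checked with a userspace copy; the helpers are re-typed with uint32_t and the addresses are examples.

#include <stdio.h>
#include <stdint.h>

typedef uint32_t ip_set_ip_t;

/* Userspace copies of the two inline helpers above. */
static unsigned int mask_to_bits(ip_set_ip_t mask)
{
	unsigned int bits = 32;
	ip_set_ip_t maskaddr;

	if (mask == 0xFFFFFFFF)
		return bits;

	maskaddr = 0xFFFFFFFE;
	while (--bits > 0 && maskaddr != mask)
		maskaddr <<= 1;

	return bits;
}

static ip_set_ip_t range_to_mask(ip_set_ip_t from, ip_set_ip_t to, unsigned int *bits)
{
	ip_set_ip_t mask = 0xFFFFFFFE;

	*bits = 32;
	while (--(*bits) > 0 && mask && (to & mask) != from)
		mask <<= 1;

	return mask;
}

int main(void)
{
	unsigned int bits;
	/* 192.168.1.0 - 192.168.1.255 collapses to 255.255.255.0, i.e. /24 */
	ip_set_ip_t mask = range_to_mask(0xc0a80100, 0xc0a801ff, &bits);

	printf("mask 0x%08x, /%u\n", (unsigned)mask, bits);
	printf("mask_to_bits(0xffffff00) = %u\n", mask_to_bits(0xffffff00));
	return 0;
}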
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_ipporthash.h
-@@ -0,0 +1,34 @@
+@@ -0,0 +1,33 @@
+#ifndef __IP_SET_IPPORTHASH_H
+#define __IP_SET_IPPORTHASH_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_hashes.h>
+
+#define SETTYPE_NAME "ipporthash"
-+#define MAX_RANGE 0x0000FFFF
-+#define INVALID_PORT (MAX_RANGE + 1)
+
+struct ip_set_ipporthash {
+ ip_set_ip_t *members; /* the ipporthash proper */
@@ -628,7 +1268,7 @@
+ uint16_t resize; /* resize factor in percent */
+ ip_set_ip_t first_ip; /* host byte order, included in range */
+ ip_set_ip_t last_ip; /* host byte order, included in range */
-+ void *initval[0]; /* initvals for jhash_1word */
++ initval_t initval[0]; /* initvals for jhash_1word */
+};
+
+struct ip_set_req_ipporthash_create {
@@ -646,15 +1286,101 @@
+
+#endif /* __IP_SET_IPPORTHASH_H */
--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_ipportiphash.h
+@@ -0,0 +1,39 @@
++#ifndef __IP_SET_IPPORTIPHASH_H
++#define __IP_SET_IPPORTIPHASH_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_hashes.h>
++
++#define SETTYPE_NAME "ipportiphash"
++
++struct ipportip {
++ ip_set_ip_t ip;
++ ip_set_ip_t ip1;
++};
++
++struct ip_set_ipportiphash {
++ struct ipportip *members; /* the ipportip proper */
++ uint32_t elements; /* number of elements */
++ uint32_t hashsize; /* hash size */
++ uint16_t probes; /* max number of probes */
++ uint16_t resize; /* resize factor in percent */
++ ip_set_ip_t first_ip; /* host byte order, included in range */
++ ip_set_ip_t last_ip; /* host byte order, included in range */
++ initval_t initval[0]; /* initvals for jhash_1word */
++};
++
++struct ip_set_req_ipportiphash_create {
++ uint32_t hashsize;
++ uint16_t probes;
++ uint16_t resize;
++ ip_set_ip_t from;
++ ip_set_ip_t to;
++};
++
++struct ip_set_req_ipportiphash {
++ ip_set_ip_t ip;
++ ip_set_ip_t port;
++ ip_set_ip_t ip1;
++};
++
++#endif /* __IP_SET_IPPORTIPHASH_H */
+--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_ipportnethash.h
+@@ -0,0 +1,42 @@
++#ifndef __IP_SET_IPPORTNETHASH_H
++#define __IP_SET_IPPORTNETHASH_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_hashes.h>
++
++#define SETTYPE_NAME "ipportnethash"
++
++struct ipportip {
++ ip_set_ip_t ip;
++ ip_set_ip_t ip1;
++};
++
++struct ip_set_ipportnethash {
++ struct ipportip *members; /* the ipportip proper */
++ uint32_t elements; /* number of elements */
++ uint32_t hashsize; /* hash size */
++ uint16_t probes; /* max number of probes */
++ uint16_t resize; /* resize factor in percent */
++ ip_set_ip_t first_ip; /* host byte order, included in range */
++ ip_set_ip_t last_ip; /* host byte order, included in range */
++ uint8_t cidr[30]; /* CIDR sizes */
++ uint16_t nets[30]; /* nr of nets by CIDR sizes */
++ initval_t initval[0]; /* initvals for jhash_1word */
++};
++
++struct ip_set_req_ipportnethash_create {
++ uint32_t hashsize;
++ uint16_t probes;
++ uint16_t resize;
++ ip_set_ip_t from;
++ ip_set_ip_t to;
++};
++
++struct ip_set_req_ipportnethash {
++ ip_set_ip_t ip;
++ ip_set_ip_t port;
++ ip_set_ip_t ip1;
++ uint8_t cidr;
++};
++
++#endif /* __IP_SET_IPPORTNETHASH_H */
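The cidr[30] and nets[30] fields above are maintained with add_cidr_size()/del_cidr_size() from ip_set_hashes.h, which keep the recorded prefix lengths sorted from most specific to least specific. A userspace copy of the insertion helper shows the ordering; the prefix lengths are arbitrary examples.

#include <stdio.h>
#include <stdint.h>

/* Same insertion logic as add_cidr_size() in ip_set_hashes.h. */
static void add_cidr_size(uint8_t *cidr, uint8_t size)
{
	uint8_t next;
	int i;

	for (i = 0; i < 30 && cidr[i]; i++) {
		if (cidr[i] < size) {
			next = cidr[i];
			cidr[i] = size;
			size = next;
		}
	}
	if (i < 30)
		cidr[i] = size;
}

int main(void)
{
	uint8_t cidr[30] = { 0 };
	int i;

	add_cidr_size(cidr, 24);
	add_cidr_size(cidr, 30);
	add_cidr_size(cidr, 16);
	/* prints 30 24 16: the most specific prefix comes first */
	for (i = 0; i < 30 && cidr[i]; i++)
		printf("%u ", (unsigned)cidr[i]);
	printf("\n");
	return 0;
}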
+--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_iptree.h
-@@ -0,0 +1,40 @@
+@@ -0,0 +1,39 @@
+#ifndef __IP_SET_IPTREE_H
+#define __IP_SET_IPTREE_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
+
+#define SETTYPE_NAME "iptree"
-+#define MAX_RANGE 0x0000FFFF
+
+struct ip_set_iptreed {
+ unsigned long expires[256]; /* x.x.x.ADDR */
@@ -726,172 +1452,181 @@
+};
+
+struct ip_set_req_iptreemap {
-+ ip_set_ip_t start;
++ ip_set_ip_t ip;
+ ip_set_ip_t end;
+};
+
+#endif /* __IP_SET_IPTREEMAP_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_jhash.h
-@@ -0,0 +1,148 @@
-+#ifndef _LINUX_IPSET_JHASH_H
-+#define _LINUX_IPSET_JHASH_H
-+
-+/* This is a copy of linux/jhash.h but the types u32/u8 are changed
-+ * to __u32/__u8 so that the header file can be included into
-+ * userspace code as well. Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
-+ */
+@@ -0,0 +1,157 @@
++#ifndef _LINUX_JHASH_H
++#define _LINUX_JHASH_H
+
+/* jhash.h: Jenkins hash support.
+ *
-+ * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)
++ * Copyright (C) 2006. Bob Jenkins (bob_jenkins@burtleburtle.net)
+ *
+ * http://burtleburtle.net/bob/hash/
+ *
+ * These are the credits from Bob's sources:
+ *
-+ * lookup2.c, by Bob Jenkins, December 1996, Public Domain.
-+ * hash(), hash2(), hash3, and mix() are externally useful functions.
-+ * Routines to test the hash are included if SELF_TEST is defined.
-+ * You can use this free for any purpose. It has no warranty.
++ * lookup3.c, by Bob Jenkins, May 2006, Public Domain.
+ *
-+ * Copyright (C) 2003 David S. Miller (davem@redhat.com)
++ * These are functions for producing 32-bit hashes for hash table lookup.
++ * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
++ * are externally useful functions. Routines to test the hash are included
++ * if SELF_TEST is defined. You can use this free for any purpose. It's in
++ * the public domain. It has no warranty.
++ *
++ * Copyright (C) 2009 Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
+ *
+ * I've modified Bob's hash to be useful in the Linux kernel, and
-+ * any bugs present are surely my fault. -DaveM
++ * any bugs present are my fault. Jozsef
+ */
+
-+/* NOTE: Arguments are modified. */
-+#define __jhash_mix(a, b, c) \
++#define __rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))
++
++/* __jhash_mix - mix 3 32-bit values reversibly. */
++#define __jhash_mix(a,b,c) \
+{ \
-+ a -= b; a -= c; a ^= (c>>13); \
-+ b -= c; b -= a; b ^= (a<<8); \
-+ c -= a; c -= b; c ^= (b>>13); \
-+ a -= b; a -= c; a ^= (c>>12); \
-+ b -= c; b -= a; b ^= (a<<16); \
-+ c -= a; c -= b; c ^= (b>>5); \
-+ a -= b; a -= c; a ^= (c>>3); \
-+ b -= c; b -= a; b ^= (a<<10); \
-+ c -= a; c -= b; c ^= (b>>15); \
++ a -= c; a ^= __rot(c, 4); c += b; \
++ b -= a; b ^= __rot(a, 6); a += c; \
++ c -= b; c ^= __rot(b, 8); b += a; \
++ a -= c; a ^= __rot(c,16); c += b; \
++ b -= a; b ^= __rot(a,19); a += c; \
++ c -= b; c ^= __rot(b, 4); b += a; \
++}
++
++/* __jhash_final - final mixing of 3 32-bit values (a,b,c) into c */
++#define __jhash_final(a,b,c) \
++{ \
++ c ^= b; c -= __rot(b,14); \
++ a ^= c; a -= __rot(c,11); \
++ b ^= a; b -= __rot(a,25); \
++ c ^= b; c -= __rot(b,16); \
++ a ^= c; a -= __rot(c,4); \
++ b ^= a; b -= __rot(a,14); \
++ c ^= b; c -= __rot(b,24); \
+}
+
+/* The golden ration: an arbitrary value */
-+#define JHASH_GOLDEN_RATIO 0x9e3779b9
++#define JHASH_GOLDEN_RATIO 0xdeadbeef
+
+/* The most generic version, hashes an arbitrary sequence
+ * of bytes. No alignment or length assumptions are made about
-+ * the input key.
++ * the input key. The result depends on endianness.
+ */
-+static inline __u32 jhash(void *key, __u32 length, __u32 initval)
++static inline u32 jhash(const void *key, u32 length, u32 initval)
+{
-+ __u32 a, b, c, len;
-+ __u8 *k = key;
-+
-+ len = length;
-+ a = b = JHASH_GOLDEN_RATIO;
-+ c = initval;
++ u32 a,b,c;
++ const u8 *k = key;
+
-+ while (len >= 12) {
-+ a += (k[0] +((__u32)k[1]<<8) +((__u32)k[2]<<16) +((__u32)k[3]<<24));
-+ b += (k[4] +((__u32)k[5]<<8) +((__u32)k[6]<<16) +((__u32)k[7]<<24));
-+ c += (k[8] +((__u32)k[9]<<8) +((__u32)k[10]<<16)+((__u32)k[11]<<24));
-+
-+ __jhash_mix(a,b,c);
++ /* Set up the internal state */
++ a = b = c = JHASH_GOLDEN_RATIO + length + initval;
+
++ /* all but the last block: affect some 32 bits of (a,b,c) */
++ while (length > 12) {
++ a += (k[0] + ((u32)k[1]<<8) + ((u32)k[2]<<16) + ((u32)k[3]<<24));
++ b += (k[4] + ((u32)k[5]<<8) + ((u32)k[6]<<16) + ((u32)k[7]<<24));
++ c += (k[8] + ((u32)k[9]<<8) + ((u32)k[10]<<16) + ((u32)k[11]<<24));
++ __jhash_mix(a, b, c);
++ length -= 12;
+ k += 12;
-+ len -= 12;
+ }
+
-+ c += length;
-+ switch (len) {
-+ case 11: c += ((__u32)k[10]<<24);
-+ case 10: c += ((__u32)k[9]<<16);
-+ case 9 : c += ((__u32)k[8]<<8);
-+ case 8 : b += ((__u32)k[7]<<24);
-+ case 7 : b += ((__u32)k[6]<<16);
-+ case 6 : b += ((__u32)k[5]<<8);
++ /* last block: affect all 32 bits of (c) */
++ /* all the case statements fall through */
++ switch (length) {
++ case 12: c += (u32)k[11]<<24;
++ case 11: c += (u32)k[10]<<16;
++ case 10: c += (u32)k[9]<<8;
++ case 9 : c += k[8];
++ case 8 : b += (u32)k[7]<<24;
++ case 7 : b += (u32)k[6]<<16;
++ case 6 : b += (u32)k[5]<<8;
+ case 5 : b += k[4];
-+ case 4 : a += ((__u32)k[3]<<24);
-+ case 3 : a += ((__u32)k[2]<<16);
-+ case 2 : a += ((__u32)k[1]<<8);
++ case 4 : a += (u32)k[3]<<24;
++ case 3 : a += (u32)k[2]<<16;
++ case 2 : a += (u32)k[1]<<8;
+ case 1 : a += k[0];
-+ };
-+
-+ __jhash_mix(a,b,c);
++ __jhash_final(a, b, c);
++ case 0 :
++ break;
++ }
+
+ return c;
+}
+
-+/* A special optimized version that handles 1 or more of __u32s.
-+ * The length parameter here is the number of __u32s in the key.
++/* A special optimized version that handles 1 or more of u32s.
++ * The length parameter here is the number of u32s in the key.
+ */
-+static inline __u32 jhash2(__u32 *k, __u32 length, __u32 initval)
++static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
+{
-+ __u32 a, b, c, len;
++ u32 a, b, c;
+
-+ a = b = JHASH_GOLDEN_RATIO;
-+ c = initval;
-+ len = length;
++ /* Set up the internal state */
++ a = b = c = JHASH_GOLDEN_RATIO + (length<<2) + initval;
+
-+ while (len >= 3) {
++ /* handle most of the key */
++ while (length > 3) {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ __jhash_mix(a, b, c);
-+ k += 3; len -= 3;
++ length -= 3;
++ k += 3;
+ }
+
-+ c += length * 4;
-+
-+ switch (len) {
-+ case 2 : b += k[1];
-+ case 1 : a += k[0];
-+ };
-+
-+ __jhash_mix(a,b,c);
++ /* handle the last 3 u32's */
++ /* all the case statements fall through */
++ switch (length) {
++ case 3: c += k[2];
++ case 2: b += k[1];
++ case 1: a += k[0];
++ __jhash_final(a, b, c);
++ case 0: /* case 0: nothing left to add */
++ break;
++ }
+
+ return c;
+}
+
-+
+/* A special ultra-optimized versions that knows they are hashing exactly
+ * 3, 2 or 1 word(s).
-+ *
-+ * NOTE: In partilar the "c += length; __jhash_mix(a,b,c);" normally
-+ * done at the end is not done here.
+ */
-+static inline __u32 jhash_3words(__u32 a, __u32 b, __u32 c, __u32 initval)
++static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
+{
-+ a += JHASH_GOLDEN_RATIO;
-+ b += JHASH_GOLDEN_RATIO;
-+ c += initval;
++ a += JHASH_GOLDEN_RATIO + initval;
++ b += JHASH_GOLDEN_RATIO + initval;
++ c += JHASH_GOLDEN_RATIO + initval;
+
-+ __jhash_mix(a, b, c);
++ __jhash_final(a, b, c);
+
+ return c;
+}
+
-+static inline __u32 jhash_2words(__u32 a, __u32 b, __u32 initval)
++static inline u32 jhash_2words(u32 a, u32 b, u32 initval)
+{
-+ return jhash_3words(a, b, 0, initval);
++ return jhash_3words(0, a, b, initval);
+}
+
-+static inline __u32 jhash_1word(__u32 a, __u32 initval)
++static inline u32 jhash_1word(u32 a, u32 initval)
+{
-+ return jhash_3words(a, 0, 0, initval);
++ return jhash_3words(0, 0, a, initval);
+}
+
-+#endif /* _LINUX_IPSET_JHASH_H */
++#endif /* _LINUX_JHASH_H */
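The rewritten header is Bob Jenkins' lookup3 variant, and the hash set types reach it through jhash_ip(), that is jhash_1word(ip, *(map->initval + i)) with one random initval per probe. Below is a standalone userspace check of that path, mirroring the macros above; the seeds, address and table size are made up.

#include <stdio.h>
#include <stdint.h>

#define __rot(x, k) (((x) << (k)) | ((x) >> (32 - (k))))

#define __jhash_final(a, b, c)            \
{                                         \
	c ^= b; c -= __rot(b, 14);        \
	a ^= c; a -= __rot(c, 11);        \
	b ^= a; b -= __rot(a, 25);        \
	c ^= b; c -= __rot(b, 16);        \
	a ^= c; a -= __rot(c, 4);         \
	b ^= a; b -= __rot(a, 14);        \
	c ^= b; c -= __rot(b, 24);        \
}

#define JHASH_GOLDEN_RATIO 0xdeadbeef

static uint32_t jhash_3words(uint32_t a, uint32_t b, uint32_t c, uint32_t initval)
{
	a += JHASH_GOLDEN_RATIO + initval;
	b += JHASH_GOLDEN_RATIO + initval;
	c += JHASH_GOLDEN_RATIO + initval;
	__jhash_final(a, b, c);
	return c;
}

static uint32_t jhash_1word(uint32_t a, uint32_t initval)
{
	return jhash_3words(0, 0, a, initval);
}

int main(void)
{
	uint32_t ip = 0xc0a80101;                          /* 192.168.1.1, host order */
	uint32_t initval[2] = { 0x12345678, 0x9abcdef0 };  /* per-probe seeds */
	unsigned hashsize = 1024;
	unsigned i;

	/* Each probe uses its own random initval, giving an independent slot. */
	for (i = 0; i < 2; i++)
		printf("probe %u -> slot %u\n", i,
		       (unsigned)(jhash_1word(ip, initval[i]) % hashsize));
	return 0;
}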
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_macipmap.h
-@@ -0,0 +1,38 @@
+@@ -0,0 +1,39 @@
+#ifndef __IP_SET_MACIPMAP_H
+#define __IP_SET_MACIPMAP_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
+
+#define SETTYPE_NAME "macipmap"
-+#define MAX_RANGE 0x0000FFFF
+
+/* general flags */
+#define IPSET_MACIP_MATCHUNSET 1
@@ -904,6 +1639,7 @@
+ ip_set_ip_t first_ip; /* host byte order, included in range */
+ ip_set_ip_t last_ip; /* host byte order, included in range */
+ u_int32_t flags;
++ u_int32_t size; /* size of the ipmap proper */
+};
+
+struct ip_set_req_macipmap_create {
@@ -918,43 +1654,48 @@
+};
+
+struct ip_set_macip {
-+ unsigned short flags;
++ unsigned short match;
+ unsigned char ethernet[ETH_ALEN];
+};
+
+#endif /* __IP_SET_MACIPMAP_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_malloc.h
-@@ -0,0 +1,116 @@
+@@ -0,0 +1,153 @@
+#ifndef _IP_SET_MALLOC_H
+#define _IP_SET_MALLOC_H
+
+#ifdef __KERNEL__
++#include <linux/vmalloc.h>
+
-+/* Memory allocation and deallocation */
-+static size_t max_malloc_size = 0;
++static size_t max_malloc_size = 0, max_page_size = 0;
++static size_t default_max_malloc_size = 131072; /* Guaranteed: slab.c */
+
-+static inline void init_max_malloc_size(void)
++static inline int init_max_page_size(void)
+{
-+#define CACHE(x) max_malloc_size = x;
++/* Compatibility glues to support 2.4.36 */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++#define __GFP_NOWARN 0
++
++ /* Guaranteed: slab.c */
++ max_malloc_size = max_page_size = default_max_malloc_size;
++#else
++ size_t page_size = 0;
++
++#define CACHE(x) if (max_page_size == 0 || x < max_page_size) \
++ page_size = x;
+#include <linux/kmalloc_sizes.h>
+#undef CACHE
-+}
++ if (page_size) {
++ if (max_malloc_size == 0)
++ max_malloc_size = page_size;
+
-+static inline void * ip_set_malloc(size_t bytes)
-+{
-+ if (bytes > max_malloc_size)
-+ return vmalloc(bytes);
-+ else
-+ return kmalloc(bytes, GFP_KERNEL);
-+}
++ max_page_size = page_size;
+
-+static inline void ip_set_free(void * data, size_t bytes)
-+{
-+ if (bytes > max_malloc_size)
-+ vfree(data);
-+ else
-+ kfree(data);
++ return 1;
++ }
++#endif
++ return 0;
+}
+
+struct harray {
@@ -962,37 +1703,36 @@
+ void *arrays[0];
+};
+
-+static inline void *
-+harray_malloc(size_t hashsize, size_t typesize, int flags)
++static inline void *
++__harray_malloc(size_t hashsize, size_t typesize, gfp_t flags)
+{
+ struct harray *harray;
+ size_t max_elements, size, i, j;
+
-+ if (!max_malloc_size)
-+ init_max_malloc_size();
++ BUG_ON(max_page_size == 0);
+
-+ if (typesize > max_malloc_size)
++ if (typesize > max_page_size)
+ return NULL;
+
-+ max_elements = max_malloc_size/typesize;
++ max_elements = max_page_size/typesize;
+ size = hashsize/max_elements;
+ if (hashsize % max_elements)
+ size++;
-+
++
+ /* Last pointer signals end of arrays */
+ harray = kmalloc(sizeof(struct harray) + (size + 1) * sizeof(void *),
+ flags);
+
+ if (!harray)
+ return NULL;
-+
++
+ for (i = 0; i < size - 1; i++) {
+ harray->arrays[i] = kmalloc(max_elements * typesize, flags);
+ if (!harray->arrays[i])
+ goto undo;
+ memset(harray->arrays[i], 0, max_elements * typesize);
+ }
-+ harray->arrays[i] = kmalloc((hashsize - i * max_elements) * typesize,
++ harray->arrays[i] = kmalloc((hashsize - i * max_elements) * typesize,
+ flags);
+ if (!harray->arrays[i])
+ goto undo;
@@ -1000,7 +1740,7 @@
+
+ harray->max_elements = max_elements;
+ harray->arrays[size] = NULL;
-+
++
+ return (void *)harray;
+
+ undo:
@@ -1011,11 +1751,23 @@
+ return NULL;
+}
+
++static inline void *
++harray_malloc(size_t hashsize, size_t typesize, gfp_t flags)
++{
++ void *harray;
++
++ do {
++ harray = __harray_malloc(hashsize, typesize, flags|__GFP_NOWARN);
++ } while (harray == NULL && init_max_page_size());
++
++ return harray;
++}
++
+static inline void harray_free(void *h)
+{
+ struct harray *harray = (struct harray *) h;
+ size_t i;
-+
++
+ for (i = 0; harray->arrays[i] != NULL; i++)
+ kfree(harray->arrays[i]);
+ kfree(harray);
@@ -1025,10 +1777,10 @@
+{
+ struct harray *harray = (struct harray *) h;
+ size_t i;
-+
++
+ for (i = 0; harray->arrays[i+1] != NULL; i++)
+ memset(harray->arrays[i], 0, harray->max_elements * typesize);
-+ memset(harray->arrays[i], 0,
++ memset(harray->arrays[i], 0,
+ (hashsize - i * harray->max_elements) * typesize);
+}
+
@@ -1039,19 +1791,40 @@
+ + (which)%(__h)->max_elements); \
+})
+
++/* General memory allocation and deallocation */
++static inline void * ip_set_malloc(size_t bytes)
++{
++ BUG_ON(max_malloc_size == 0);
++
++ if (bytes > default_max_malloc_size)
++ return vmalloc(bytes);
++ else
++ return kmalloc(bytes, GFP_KERNEL | __GFP_NOWARN);
++}
++
++static inline void ip_set_free(void * data, size_t bytes)
++{
++ BUG_ON(max_malloc_size == 0);
++
++ if (bytes > default_max_malloc_size)
++ vfree(data);
++ else
++ kfree(data);
++}
++
+#endif /* __KERNEL__ */
+
+#endif /*_IP_SET_MALLOC_H*/
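harray_malloc() splits one logical hash array into chunks no larger than max_page_size (derived from kmalloc_sizes.h), and HARRAY_ELEM() then finds element i in chunk i / max_elements at offset i % max_elements. A userspace toy of the same indexing follows, with an invented chunk size.

#include <stdio.h>
#include <stdlib.h>

#define MAX_ELEMENTS 4   /* stand-in for max_page_size / typesize */
#define HASHSIZE     10
#define NCHUNKS      ((HASHSIZE + MAX_ELEMENTS - 1) / MAX_ELEMENTS)

int main(void)
{
	int *chunks[NCHUNKS];
	int i;

	/* harray_malloc(): one small allocation per chunk, not one huge block */
	for (i = 0; i < NCHUNKS; i++) {
		chunks[i] = calloc(MAX_ELEMENTS, sizeof(int));
		if (!chunks[i])
			return 1;
	}

	/* HARRAY_ELEM(h, int *, which): chunk = which / max_elements,
	 * offset = which % max_elements */
	for (i = 0; i < HASHSIZE; i++)
		chunks[i / MAX_ELEMENTS][i % MAX_ELEMENTS] = i;

	printf("element 9 lives in chunk %d, slot %d, value %d\n",
	       9 / MAX_ELEMENTS, 9 % MAX_ELEMENTS,
	       chunks[9 / MAX_ELEMENTS][9 % MAX_ELEMENTS]);

	for (i = 0; i < NCHUNKS; i++)
		free(chunks[i]);
	return 0;
}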
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_nethash.h
-@@ -0,0 +1,55 @@
+@@ -0,0 +1,31 @@
+#ifndef __IP_SET_NETHASH_H
+#define __IP_SET_NETHASH_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_hashes.h>
+
+#define SETTYPE_NAME "nethash"
-+#define MAX_RANGE 0x0000FFFF
+
+struct ip_set_nethash {
+ ip_set_ip_t *members; /* the nethash proper */
@@ -1059,8 +1832,9 @@
+ uint32_t hashsize; /* hash size */
+ uint16_t probes; /* max number of probes */
+ uint16_t resize; /* resize factor in percent */
-+ unsigned char cidr[30]; /* CIDR sizes */
-+ void *initval[0]; /* initvals for jhash_1word */
++ uint8_t cidr[30]; /* CIDR sizes */
++ uint16_t nets[30]; /* nr of nets by CIDR sizes */
++ initval_t initval[0]; /* initvals for jhash_1word */
+};
+
+struct ip_set_req_nethash_create {
@@ -1071,34 +1845,9 @@
+
+struct ip_set_req_nethash {
+ ip_set_ip_t ip;
-+ unsigned char cidr;
++ uint8_t cidr;
+};
+
-+static unsigned char shifts[] = {255, 253, 249, 241, 225, 193, 129, 1};
-+
-+static inline ip_set_ip_t
-+pack(ip_set_ip_t ip, unsigned char cidr)
-+{
-+ ip_set_ip_t addr, *paddr = &addr;
-+ unsigned char n, t, *a;
-+
-+ addr = htonl(ip & (0xFFFFFFFF << (32 - (cidr))));
-+#ifdef __KERNEL__
-+ DP("ip:%u.%u.%u.%u/%u", NIPQUAD(addr), cidr);
-+#endif
-+ n = cidr / 8;
-+ t = cidr % 8;
-+ a = &((unsigned char *)paddr)[n];
-+ *a = *a /(1 << (8 - t)) + shifts[t];
-+#ifdef __KERNEL__
-+ DP("n: %u, t: %u, a: %u", n, t, *a);
-+ DP("ip:%u.%u.%u.%u/%u, %u.%u.%u.%u",
-+ HIPQUAD(ip), cidr, NIPQUAD(addr));
-+#endif
-+
-+ return ntohl(addr);
-+}
-+
+#endif /* __IP_SET_NETHASH_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_portmap.h
@@ -1107,15 +1856,15 @@
+#define __IP_SET_PORTMAP_H
+
+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
+
+#define SETTYPE_NAME "portmap"
-+#define MAX_RANGE 0x0000FFFF
-+#define INVALID_PORT (MAX_RANGE + 1)
+
+struct ip_set_portmap {
+ void *members; /* the portmap proper */
-+ ip_set_ip_t first_port; /* host byte order, included in range */
-+ ip_set_ip_t last_port; /* host byte order, included in range */
++ ip_set_ip_t first_ip; /* host byte order, included in range */
++ ip_set_ip_t last_ip; /* host byte order, included in range */
++ u_int32_t size; /* size of the ipmap proper */
+};
+
+struct ip_set_req_portmap_create {
@@ -1124,11 +1873,40 @@
+};
+
+struct ip_set_req_portmap {
-+ ip_set_ip_t port;
++ ip_set_ip_t ip;
+};
+
+#endif /* __IP_SET_PORTMAP_H */
--- /dev/null
++++ b/include/linux/netfilter_ipv4/ip_set_setlist.h
+@@ -0,0 +1,26 @@
++#ifndef __IP_SET_SETLIST_H
++#define __IP_SET_SETLIST_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "setlist"
++
++#define IP_SET_SETLIST_ADD_AFTER 0
++#define IP_SET_SETLIST_ADD_BEFORE 1
++
++struct ip_set_setlist {
++ uint8_t size;
++ ip_set_id_t index[0];
++};
++
++struct ip_set_req_setlist_create {
++ uint8_t size;
++};
++
++struct ip_set_req_setlist {
++ char name[IP_SET_MAXNAMELEN];
++ char ref[IP_SET_MAXNAMELEN];
++ uint8_t before;
++};
++
++#endif /* __IP_SET_SETLIST_H */
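The setlist request only carries two set names and a before/after flag; how userspace fills it is not shown by this patch, so the sketch below is an assumption based purely on the field names and the ADD_BEFORE/ADD_AFTER constants.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define IP_SET_MAXNAMELEN 32
#define IP_SET_SETLIST_ADD_AFTER  0
#define IP_SET_SETLIST_ADD_BEFORE 1

struct ip_set_req_setlist {
	char name[IP_SET_MAXNAMELEN];   /* presumably the set to add (assumption) */
	char ref[IP_SET_MAXNAMELEN];    /* presumably the existing member used as anchor (assumption) */
	uint8_t before;                 /* ADD_BEFORE or ADD_AFTER */
};

int main(void)
{
	struct ip_set_req_setlist req;

	memset(&req, 0, sizeof(req));
	strncpy(req.name, "blacklist_v4", IP_SET_MAXNAMELEN - 1);  /* made-up names */
	strncpy(req.ref, "whitelist_v4", IP_SET_MAXNAMELEN - 1);
	req.before = IP_SET_SETLIST_ADD_BEFORE;

	printf("add %s before %s\n", req.name, req.ref);
	return 0;
}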
+--- /dev/null
+++ b/include/linux/netfilter_ipv4/ipt_set.h
@@ -0,0 +1,21 @@
+#ifndef _IPT_SET_H
@@ -1154,7 +1932,7 @@
+#endif /*_IPT_SET_H*/
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set.c
-@@ -0,0 +1,2003 @@
+@@ -0,0 +1,2076 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
@@ -1176,17 +1954,21 @@
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/random.h>
-+#include <linux/jhash.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
+#include <linux/errno.h>
-+#include <linux/semaphore.h>
++#include <linux/capability.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
++#include <asm/semaphore.h>
++#else
++#include <linux/semaphore.h>
++#endif
+#include <linux/spinlock.h>
-+#include <linux/vmalloc.h>
+
+#define ASSERT_READ_LOCK(x)
+#define ASSERT_WRITE_LOCK(x)
++#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4/ip_set.h>
+
+static struct list_head set_type_list; /* all registered sets */
@@ -1198,6 +1980,8 @@
+static struct list_head *ip_set_hash; /* hash of bindings */
+static unsigned int ip_set_hash_random; /* random seed */
+
++#define SETNAME_EQ(a,b) (strncmp(a,b,IP_SET_MAXNAMELEN) == 0)
++
+/*
+ * Sets are identified either by the index in ip_set_list or by id.
+ * The id never changes and is used to find a key in the hash.
@@ -1236,7 +2020,7 @@
+ list_for_each_entry(set_hash, &ip_set_hash[key], list)
+ if (set_hash->id == id && set_hash->ip == ip)
+ return set_hash;
-+
++
+ return NULL;
+}
+
@@ -1249,10 +2033,10 @@
+
+ ASSERT_READ_LOCK(&ip_set_lock);
+ IP_SET_ASSERT(ip_set_list[id]);
-+ DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
-+
++ DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
++
+ set_hash = __ip_set_find(key, id, ip);
-+
++
+ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
+ HIPQUAD(ip),
+ set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
@@ -1264,7 +2048,7 @@
+__set_hash_del(struct ip_set_hash *set_hash)
+{
+ ASSERT_WRITE_LOCK(&ip_set_lock);
-+ IP_SET_ASSERT(ip_set_list[set_hash->binding]);
++ IP_SET_ASSERT(ip_set_list[set_hash->binding]);
+
+ __ip_set_put(set_hash->binding);
+ list_del(&set_hash->list);
@@ -1277,9 +2061,9 @@
+ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
+ % ip_set_bindings_hash_size;
+ struct ip_set_hash *set_hash;
-+
++
+ IP_SET_ASSERT(ip_set_list[id]);
-+ DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
++ DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
+ write_lock_bh(&ip_set_lock);
+ set_hash = __ip_set_find(key, id, ip);
+ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
@@ -1288,7 +2072,7 @@
+
+ if (set_hash != NULL)
+ __set_hash_del(set_hash);
-+ write_unlock_bh(&ip_set_lock);
++ write_unlock_bh(&ip_set_lock);
+ return 0;
+}
+
@@ -1299,7 +2083,7 @@
+ % ip_set_bindings_hash_size;
+ struct ip_set_hash *set_hash;
+ int ret = 0;
-+
++
+ IP_SET_ASSERT(ip_set_list[id]);
+ IP_SET_ASSERT(ip_set_list[binding]);
+ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
@@ -1317,7 +2101,7 @@
+ set_hash->ip = ip;
+ list_add(&set_hash->list, &ip_set_hash[key]);
+ } else {
-+ IP_SET_ASSERT(ip_set_list[set_hash->binding]);
++ IP_SET_ASSERT(ip_set_list[set_hash->binding]);
+ DP("overwrite binding: %s",
+ ip_set_list[set_hash->binding]->name);
+ __ip_set_put(set_hash->binding);
@@ -1370,7 +2154,7 @@
+ ip_set_ip_t ip;
+ int res;
+ unsigned char i = 0;
-+
++
+ IP_SET_ASSERT(flags[i]);
+ read_lock_bh(&ip_set_lock);
+ do {
@@ -1386,10 +2170,10 @@
+ && follow_bindings(index, set, ip));
+ read_unlock_bh(&ip_set_lock);
+
-+ return res;
++ return (res < 0 ? 0 : res);
+}
+
-+void
++int
+ip_set_addip_kernel(ip_set_id_t index,
+ const struct sk_buff *skb,
+ const u_int32_t *flags)
@@ -1419,9 +2203,11 @@
+ && set->type->retry
+ && (res = set->type->retry(set)) == 0)
+ goto retry;
++
++ return res;
+}
+
-+void
++int
+ip_set_delip_kernel(ip_set_id_t index,
+ const struct sk_buff *skb,
+ const u_int32_t *flags)
@@ -1445,6 +2231,8 @@
+ && flags[i]
+ && follow_bindings(index, set, ip));
+ read_unlock_bh(&ip_set_lock);
++
++ return res;
+}
+
+/* Register and deregister settype */
@@ -1464,7 +2252,7 @@
+ip_set_register_set_type(struct ip_set_type *set_type)
+{
+ int ret = 0;
-+
++
+ if (set_type->protocol_version != IP_SET_PROTOCOL_VERSION) {
+ ip_set_printk("'%s' uses wrong protocol version %u (want %u)",
+ set_type->typename,
@@ -1509,6 +2297,29 @@
+
+}
+
++ip_set_id_t
++__ip_set_get_byname(const char *name, struct ip_set **set)
++{
++ ip_set_id_t i, index = IP_SET_INVALID_ID;
++
++ for (i = 0; i < ip_set_max; i++) {
++ if (ip_set_list[i] != NULL
++ && SETNAME_EQ(ip_set_list[i]->name, name)) {
++ __ip_set_get(i);
++ index = i;
++ *set = ip_set_list[i];
++ break;
++ }
++ }
++ return index;
++}
++
++void __ip_set_put_byindex(ip_set_id_t index)
++{
++ if (ip_set_list[index])
++ __ip_set_put(index);
++}
++
+/*
+ * Userspace routines
+ */
@@ -1522,11 +2333,11 @@
+ip_set_get_byname(const char *name)
+{
+ ip_set_id_t i, index = IP_SET_INVALID_ID;
-+
++
+ down(&ip_set_app_mutex);
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL
-+ && strcmp(ip_set_list[i]->name, name) == 0) {
++ && SETNAME_EQ(ip_set_list[i]->name, name)) {
+ __ip_set_get(i);
+ index = i;
+ break;
@@ -1548,22 +2359,36 @@
+
+ if (index >= ip_set_max)
+ return IP_SET_INVALID_ID;
-+
++
+ if (ip_set_list[index])
+ __ip_set_get(index);
+ else
+ index = IP_SET_INVALID_ID;
-+
++
+ up(&ip_set_app_mutex);
+ return index;
+}
+
+/*
++ * Find the set id belonging to the index.
++ * We are protected by the mutex, so we do not need to use
++ * ip_set_lock. There is no need to reference the sets either.
++ */
++ip_set_id_t
++ip_set_id(ip_set_id_t index)
++{
++ if (index >= ip_set_max || !ip_set_list[index])
++ return IP_SET_INVALID_ID;
++
++ return ip_set_list[index]->id;
++}
++
++/*
+ * If the given set pointer points to a valid set, decrement
+ * reference count by 1. The caller shall not assume the index
+ * to be valid, after calling this function.
+ */
-+void ip_set_put(ip_set_id_t index)
++void ip_set_put_byindex(ip_set_id_t index)
+{
+ down(&ip_set_app_mutex);
+ if (ip_set_list[index])
@@ -1576,10 +2401,10 @@
+ip_set_find_byname(const char *name)
+{
+ ip_set_id_t i, index = IP_SET_INVALID_ID;
-+
++
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL
-+ && strcmp(ip_set_list[i]->name, name) == 0) {
++ && SETNAME_EQ(ip_set_list[i]->name, name)) {
+ index = i;
+ break;
+ }
@@ -1592,7 +2417,7 @@
+{
+ if (index >= ip_set_max || ip_set_list[index] == NULL)
+ index = IP_SET_INVALID_ID;
-+
++
+ return index;
+}
+
@@ -1603,7 +2428,7 @@
+static inline int
+__ip_set_testip(struct ip_set *set,
+ const void *data,
-+ size_t size,
++ u_int32_t size,
+ ip_set_ip_t *ip)
+{
+ int res;
@@ -1618,12 +2443,12 @@
+static int
+__ip_set_addip(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set = ip_set_list[index];
+ ip_set_ip_t ip;
+ int res;
-+
++
+ IP_SET_ASSERT(set);
+ do {
+ write_lock_bh(&set->lock);
@@ -1639,9 +2464,18 @@
+static int
+ip_set_addip(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
++ struct ip_set *set = ip_set_list[index];
++
++ IP_SET_ASSERT(set);
+
++ if (size - sizeof(struct ip_set_req_adt) != set->type->reqsize) {
++ ip_set_printk("data length wrong (want %lu, have %zu)",
++ (long unsigned)set->type->reqsize,
++ size - sizeof(struct ip_set_req_adt));
++ return -EINVAL;
++ }
+ return __ip_set_addip(index,
+ data + sizeof(struct ip_set_req_adt),
+ size - sizeof(struct ip_set_req_adt));
@@ -1650,13 +2484,20 @@
+static int
+ip_set_delip(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set = ip_set_list[index];
+ ip_set_ip_t ip;
+ int res;
-+
++
+ IP_SET_ASSERT(set);
++
++ if (size - sizeof(struct ip_set_req_adt) != set->type->reqsize) {
++ ip_set_printk("data length wrong (want %lu, have %zu)",
++ (long unsigned)set->type->reqsize,
++ size - sizeof(struct ip_set_req_adt));
++ return -EINVAL;
++ }
+ write_lock_bh(&set->lock);
+ res = set->type->delip(set,
+ data + sizeof(struct ip_set_req_adt),
@@ -1670,13 +2511,20 @@
+static int
+ip_set_testip(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set = ip_set_list[index];
+ ip_set_ip_t ip;
+ int res;
+
+ IP_SET_ASSERT(set);
++
++ if (size - sizeof(struct ip_set_req_adt) != set->type->reqsize) {
++ ip_set_printk("data length wrong (want %lu, have %zu)",
++ (long unsigned)set->type->reqsize,
++ size - sizeof(struct ip_set_req_adt));
++ return -EINVAL;
++ }
+ res = __ip_set_testip(set,
+ data + sizeof(struct ip_set_req_adt),
+ size - sizeof(struct ip_set_req_adt),
@@ -1688,10 +2536,10 @@
+static int
+ip_set_bindip(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set = ip_set_list[index];
-+ struct ip_set_req_bind *req_bind;
++ const struct ip_set_req_bind *req_bind;
+ ip_set_id_t binding;
+ ip_set_ip_t ip;
+ int res;
@@ -1699,19 +2547,17 @@
+ IP_SET_ASSERT(set);
+ if (size < sizeof(struct ip_set_req_bind))
+ return -EINVAL;
++
++ req_bind = data;
+
-+ req_bind = (struct ip_set_req_bind *) data;
-+ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
-+
-+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++ if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
+ /* Default binding of a set */
-+ char *binding_name;
-+
++ const char *binding_name;
++
+ if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
+ return -EINVAL;
+
-+ binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
-+ binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
++ binding_name = data + sizeof(struct ip_set_req_bind);
+
+ binding = ip_set_find_byname(binding_name);
+ if (binding == IP_SET_INVALID_ID)
@@ -1737,7 +2583,7 @@
+ &ip);
+ DP("set %s, ip: %u.%u.%u.%u, binding %s",
+ set->name, HIPQUAD(ip), ip_set_list[binding]->name);
-+
++
+ if (res >= 0)
+ res = ip_set_hash_add(set->id, ip, binding);
+
@@ -1776,30 +2622,29 @@
+static int
+ip_set_unbindip(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set;
-+ struct ip_set_req_bind *req_bind;
++ const struct ip_set_req_bind *req_bind;
+ ip_set_ip_t ip;
+ int res;
+
+ DP("");
+ if (size < sizeof(struct ip_set_req_bind))
+ return -EINVAL;
-+
-+ req_bind = (struct ip_set_req_bind *) data;
-+ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
-+
++
++ req_bind = data;
++
+ DP("%u %s", index, req_bind->binding);
+ if (index == IP_SET_INVALID_ID) {
+ /* unbind :all: */
-+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++ if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
+ /* Default binding of sets */
+ write_lock_bh(&ip_set_lock);
+ FOREACH_SET_DO(__unbind_default);
+ write_unlock_bh(&ip_set_lock);
+ return 0;
-+ } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
++ } else if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_ALL)) {
+ /* Flush all bindings of all sets*/
+ write_lock_bh(&ip_set_lock);
+ FOREACH_HASH_RW_DO(__set_hash_del);
@@ -1809,16 +2654,16 @@
+ DP("unreachable reached!");
+ return -EINVAL;
+ }
-+
++
+ set = ip_set_list[index];
+ IP_SET_ASSERT(set);
-+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++ if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
+ /* Default binding of set */
+ ip_set_id_t binding = ip_set_find_byindex(set->binding);
+
+ if (binding == IP_SET_INVALID_ID)
+ return -ENOENT;
-+
++
+ write_lock_bh(&ip_set_lock);
+ /* Sets in hash values are referenced */
+ __ip_set_put(set->binding);
@@ -1826,7 +2671,7 @@
+ write_unlock_bh(&ip_set_lock);
+
+ return 0;
-+ } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
++ } else if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_ALL)) {
+ /* Flush all bindings */
+
+ write_lock_bh(&ip_set_lock);
@@ -1834,7 +2679,7 @@
+ write_unlock_bh(&ip_set_lock);
+ return 0;
+ }
-+
++
+ res = __ip_set_testip(set,
+ data + sizeof(struct ip_set_req_bind),
+ size - sizeof(struct ip_set_req_bind),
@@ -1850,10 +2695,10 @@
+static int
+ip_set_testbind(ip_set_id_t index,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set = ip_set_list[index];
-+ struct ip_set_req_bind *req_bind;
++ const struct ip_set_req_bind *req_bind;
+ ip_set_id_t binding;
+ ip_set_ip_t ip;
+ int res;
@@ -1861,24 +2706,22 @@
+ IP_SET_ASSERT(set);
+ if (size < sizeof(struct ip_set_req_bind))
+ return -EINVAL;
++
++ req_bind = data;
+
-+ req_bind = (struct ip_set_req_bind *) data;
-+ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
-+
-+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++ if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
+ /* Default binding of set */
-+ char *binding_name;
-+
++ const char *binding_name;
++
+ if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
+ return -EINVAL;
+
-+ binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
-+ binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
++ binding_name = data + sizeof(struct ip_set_req_bind);
+
+ binding = ip_set_find_byname(binding_name);
+ if (binding == IP_SET_INVALID_ID)
+ return -ENOENT;
-+
++
+ res = (set->binding == binding) ? -EEXIST : 0;
+
+ return res;
@@ -1886,15 +2729,15 @@
+ binding = ip_set_find_byname(req_bind->binding);
+ if (binding == IP_SET_INVALID_ID)
+ return -ENOENT;
-+
-+
++
++
+ res = __ip_set_testip(set,
+ data + sizeof(struct ip_set_req_bind),
+ size - sizeof(struct ip_set_req_bind),
+ &ip);
+ DP("set %s, ip: %u.%u.%u.%u, binding %s",
+ set->name, HIPQUAD(ip), ip_set_list[binding]->name);
-+
++
+ if (res >= 0)
+ res = (ip_set_find_in_hash(set->id, ip) == binding)
+ ? -EEXIST : 0;
@@ -1906,7 +2749,7 @@
+find_set_type_rlock(const char *typename)
+{
+ struct ip_set_type *type;
-+
++
+ read_lock_bh(&ip_set_lock);
+ type = find_set_type(typename);
+ if (type == NULL)
@@ -1927,7 +2770,7 @@
+ if (ip_set_list[i] == NULL) {
+ if (*id == IP_SET_INVALID_ID)
+ *id = *index = i;
-+ } else if (strcmp(name, ip_set_list[i]->name) == 0)
++ } else if (SETNAME_EQ(name, ip_set_list[i]->name))
+ /* Name clash */
+ return -EEXIST;
+ }
@@ -1935,7 +2778,7 @@
+ /* No free slot remained */
+ return -ERANGE;
+ /* Check that index is usable as id (swapping) */
-+ check:
++ check:
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL
+ && ip_set_list[i]->id == *id) {
@@ -1954,13 +2797,14 @@
+ const char *typename,
+ ip_set_id_t restore,
+ const void *data,
-+ size_t size)
++ u_int32_t size)
+{
+ struct ip_set *set;
+ ip_set_id_t index = 0, id;
+ int res = 0;
+
+ DP("setname: %s, typename: %s, id: %u", name, typename, restore);
++
+ /*
+ * First, and without any locks, allocate and initialize
+ * a normal base set structure.
@@ -1968,7 +2812,7 @@
+ set = kmalloc(sizeof(struct ip_set), GFP_KERNEL);
+ if (!set)
+ return -ENOMEM;
-+ set->lock = RW_LOCK_UNLOCKED;
++ rwlock_init(&set->lock);
+ strncpy(set->name, name, IP_SET_MAXNAMELEN);
+ set->binding = IP_SET_INVALID_ID;
+ atomic_set(&set->ref, 0);
@@ -2004,6 +2848,14 @@
+ }
+ read_unlock_bh(&ip_set_lock);
+
++ /* Check request size */
++ if (size != set->type->header_size) {
++ ip_set_printk("data length wrong (want %lu, have %lu)",
++ (long unsigned)set->type->header_size,
++ (long unsigned)size);
++ goto put_out;
++ }
++
+ /*
+ * Without holding any locks, create private part.
+ */
@@ -2030,7 +2882,7 @@
+ res = -ERANGE;
+ goto cleanup;
+ }
-+
++
+ /*
+ * Finally! Add our shiny new set to the list, and be done.
+ */
@@ -2039,7 +2891,7 @@
+ ip_set_list[index] = set;
+ write_unlock_bh(&ip_set_lock);
+ return res;
-+
++
+ cleanup:
+ write_unlock_bh(&ip_set_lock);
+ set->type->destroy(set);
@@ -2139,9 +2991,7 @@
+ write_lock_bh(&ip_set_lock);
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL
-+ && strncmp(ip_set_list[i]->name,
-+ name,
-+ IP_SET_MAXNAMELEN - 1) == 0) {
++ && SETNAME_EQ(ip_set_list[i]->name, name)) {
+ res = -EEXIST;
+ goto unlock;
+ }
@@ -2165,11 +3015,13 @@
+ u_int32_t from_ref;
+
+ DP("set: %s to %s", from->name, to->name);
-+ /* Features must not change. Artifical restriction. */
++ /* Features must not change.
++ * Not an artificial restriction anymore, as we must prevent
++ * possible loops created by swapping in setlist type of sets. */
+ if (from->type->features != to->type->features)
+ return -ENOEXEC;
+
-+ /* No magic here: ref munging protected by the mutex */
++ /* No magic here: ref munging protected by the mutex */
+ write_lock_bh(&ip_set_lock);
+ strncpy(from_name, from->name, IP_SET_MAXNAMELEN);
+ from_ref = atomic_read(&from->ref);
@@ -2178,10 +3030,10 @@
+ atomic_set(&from->ref, atomic_read(&to->ref));
+ strncpy(to->name, from_name, IP_SET_MAXNAMELEN);
+ atomic_set(&to->ref, from_ref);
-+
++
+ ip_set_list[from_index] = to;
+ ip_set_list[to_index] = from;
-+
++
+ write_unlock_bh(&ip_set_lock);
+ return 0;
+}
@@ -2192,7 +3044,7 @@
+
+static inline void
+__set_hash_bindings_size_list(struct ip_set_hash *set_hash,
-+ ip_set_id_t id, size_t *size)
++ ip_set_id_t id, u_int32_t *size)
+{
+ if (set_hash->id == id)
+ *size += sizeof(struct ip_set_hash_list);
@@ -2200,7 +3052,7 @@
+
+static inline void
+__set_hash_bindings_size_save(struct ip_set_hash *set_hash,
-+ ip_set_id_t id, size_t *size)
++ ip_set_id_t id, u_int32_t *size)
+{
+ if (set_hash->id == id)
+ *size += sizeof(struct ip_set_hash_save);
@@ -2211,8 +3063,7 @@
+ ip_set_id_t id, void *data, int *used)
+{
+ if (set_hash->id == id) {
-+ struct ip_set_hash_list *hash_list =
-+ (struct ip_set_hash_list *)(data + *used);
++ struct ip_set_hash_list *hash_list = data + *used;
+
+ hash_list->ip = set_hash->ip;
+ hash_list->binding = set_hash->binding;
@@ -2229,7 +3080,7 @@
+ struct ip_set_list *set_list;
+
+ /* Pointer to our header */
-+ set_list = (struct ip_set_list *) (data + *used);
++ set_list = data + *used;
+
+ DP("set: %s, used: %d %p %p", set->name, *used, data, data + *used);
+
@@ -2274,7 +3125,7 @@
+
+	/* Fill in set specific bindings data */
+ FOREACH_HASH_DO(__set_hash_bindings, set->id, data, used);
-+
++
+ return 0;
+
+ unlock_set:
@@ -2296,7 +3147,7 @@
+ struct ip_set_save *set_save;
+
+ /* Pointer to our header */
-+ set_save = (struct ip_set_save *) (data + *used);
++ set_save = data + *used;
+
+ /* Get and ensure header size */
+ if (*used + sizeof(struct ip_set_save) > len)
@@ -2304,7 +3155,7 @@
+ *used += sizeof(struct ip_set_save);
+
+ set = ip_set_list[index];
-+ DP("set: %s, used: %u(%u) %p %p", set->name, *used, len,
++ DP("set: %s, used: %d(%d) %p %p", set->name, *used, len,
+ data, data + *used);
+
+ read_lock_bh(&set->lock);
@@ -2321,8 +3172,8 @@
+ set->type->list_header(set, data + *used);
+ *used += set_save->header_size;
+
-+ DP("set header filled: %s, used: %u(%u) %p %p", set->name, *used,
-+ set_save->header_size, data, data + *used);
++ DP("set header filled: %s, used: %d(%lu) %p %p", set->name, *used,
++ (unsigned long)set_save->header_size, data, data + *used);
+ /* Get and ensure set specific members size */
+ set_save->members_size = set->type->list_members_size(set);
+ if (*used + set_save->members_size > len)
@@ -2332,8 +3183,8 @@
+ set->type->list_members(set, data + *used);
+ *used += set_save->members_size;
+ read_unlock_bh(&set->lock);
-+ DP("set members filled: %s, used: %u(%u) %p %p", set->name, *used,
-+ set_save->members_size, data, data + *used);
++ DP("set members filled: %s, used: %d(%lu) %p %p", set->name, *used,
++ (unsigned long)set_save->members_size, data, data + *used);
+ return 0;
+
+ unlock_set:
@@ -2353,8 +3204,7 @@
+{
+ if (*res == 0
+ && (id == IP_SET_INVALID_ID || set_hash->id == id)) {
-+ struct ip_set_hash_save *hash_save =
-+ (struct ip_set_hash_save *)(data + *used);
++ struct ip_set_hash_save *hash_save = data + *used;
+ /* Ensure bindings size */
+ if (*used + sizeof(struct ip_set_hash_save) > len) {
+ *res = -ENOMEM;
@@ -2381,7 +3231,7 @@
+ return -ENOMEM;
+
+ /* Marker */
-+ set_save = (struct ip_set_save *) (data + *used);
++ set_save = data + *used;
+ set_save->index = IP_SET_INVALID_ID;
+ set_save->header_size = 0;
+ set_save->members_size = 0;
@@ -2394,7 +3244,7 @@
+ index = ip_set_list[index]->id;
+ FOREACH_HASH_DO(__set_hash_save_bindings, index, data, used, len, &res);
+
-+ return res;
++ return res;
+}
+
+/*
@@ -2413,12 +3263,12 @@
+ /* Loop to restore sets */
+ while (1) {
+ line++;
-+
-+ DP("%u %u %u", used, sizeof(struct ip_set_restore), len);
++
++ DP("%d %zu %d", used, sizeof(struct ip_set_restore), len);
+ /* Get and ensure header size */
+ if (used + sizeof(struct ip_set_restore) > len)
+ return line;
-+ set_restore = (struct ip_set_restore *) (data + used);
++ set_restore = data + used;
+ used += sizeof(struct ip_set_restore);
+
+ /* Ensure data size */
@@ -2432,7 +3282,7 @@
+ line--;
+ goto bindings;
+ }
-+
++
+ /* Try to create the set */
+ DP("restore %s %s", set_restore->name, set_restore->typename);
+ res = ip_set_create(set_restore->name,
@@ -2440,7 +3290,7 @@
+ set_restore->index,
+ data + used,
+ set_restore->header_size);
-+
++
+ if (res != 0)
+ return line;
+ used += set_restore->header_size;
@@ -2452,12 +3302,13 @@
+ /* Try to restore members data */
+ set = ip_set_list[index];
+ members_size = 0;
-+ DP("members_size %u reqsize %u",
-+ set_restore->members_size, set->type->reqsize);
++ DP("members_size %lu reqsize %lu",
++ (unsigned long)set_restore->members_size,
++ (unsigned long)set->type->reqsize);
+ while (members_size + set->type->reqsize <=
+ set_restore->members_size) {
+ line++;
-+ DP("members: %u, line %u", members_size, line);
++ DP("members: %d, line %d", members_size, line);
+ res = __ip_set_addip(index,
+ data + used + members_size,
+ set->type->reqsize);
@@ -2466,29 +3317,29 @@
+ members_size += set->type->reqsize;
+ }
+
-+ DP("members_size %u %u",
-+ set_restore->members_size, members_size);
++ DP("members_size %lu %d",
++ (unsigned long)set_restore->members_size, members_size);
+ if (members_size != set_restore->members_size)
+ return line++;
-+ used += set_restore->members_size;
++ used += set_restore->members_size;
+ }
-+
++
+ bindings:
+ /* Loop to restore bindings */
+ while (used < len) {
+ line++;
+
-+ DP("restore binding, line %u", line);
++ DP("restore binding, line %u", line);
+ /* Get and ensure size */
+ if (used + sizeof(struct ip_set_hash_save) > len)
+ return line;
-+ hash_save = (struct ip_set_hash_save *) (data + used);
++ hash_save = data + used;
+ used += sizeof(struct ip_set_hash_save);
-+
++
+ /* hash_save->id is used to store the index */
+ index = ip_set_find_byindex(hash_save->id);
+ DP("restore binding index %u, id %u, %u -> %u",
-+ index, hash_save->id, hash_save->ip, hash_save->binding);
++ index, hash_save->id, hash_save->ip, hash_save->binding);
+ if (index != hash_save->id)
+ return line;
+ if (ip_set_find_byindex(hash_save->binding) == IP_SET_INVALID_ID) {
@@ -2514,8 +3365,8 @@
+ }
+ if (used != len)
+ return line;
-+
-+ return 0;
++
++ return 0;
+}
+
+static int
@@ -2527,10 +3378,10 @@
+ struct ip_set_req_adt *req_adt;
+ ip_set_id_t index = IP_SET_INVALID_ID;
+ int (*adtfn)(ip_set_id_t index,
-+ const void *data, size_t size);
++ const void *data, u_int32_t size);
+ struct fn_table {
+ int (*fn)(ip_set_id_t index,
-+ const void *data, size_t size);
++ const void *data, u_int32_t size);
+ } adtfn_table[] =
+ { { ip_set_addip }, { ip_set_delip }, { ip_set_testip},
+ { ip_set_bindip}, { ip_set_unbindip }, { ip_set_testbind },
@@ -2562,11 +3413,10 @@
+
+ op = (unsigned *)data;
+ DP("op=%x", *op);
-+
++
+ if (*op < IP_SET_OP_VERSION) {
+ /* Check the version at the beginning of operations */
-+ struct ip_set_req_version *req_version =
-+ (struct ip_set_req_version *) data;
++ struct ip_set_req_version *req_version = data;
+ if (req_version->version != IP_SET_PROTOCOL_VERSION) {
+ res = -EPROTO;
+ goto done;
@@ -2575,9 +3425,8 @@
+
+ switch (*op) {
+ case IP_SET_OP_CREATE:{
-+ struct ip_set_req_create *req_create
-+ = (struct ip_set_req_create *) data;
-+
++ struct ip_set_req_create *req_create = data;
++
+ if (len < sizeof(struct ip_set_req_create)) {
+ ip_set_printk("short CREATE data (want >=%zu, got %u)",
+ sizeof(struct ip_set_req_create), len);
@@ -2594,16 +3443,15 @@
+ goto done;
+ }
+ case IP_SET_OP_DESTROY:{
-+ struct ip_set_req_std *req_destroy
-+ = (struct ip_set_req_std *) data;
-+
++ struct ip_set_req_std *req_destroy = data;
++
+ if (len != sizeof(struct ip_set_req_std)) {
+ ip_set_printk("invalid DESTROY data (want %zu, got %u)",
+ sizeof(struct ip_set_req_std), len);
+ res = -EINVAL;
+ goto done;
+ }
-+ if (strcmp(req_destroy->name, IPSET_TOKEN_ALL) == 0) {
++ if (SETNAME_EQ(req_destroy->name, IPSET_TOKEN_ALL)) {
+ /* Destroy all sets */
+ index = IP_SET_INVALID_ID;
+ } else {
@@ -2615,13 +3463,12 @@
+ goto done;
+ }
+ }
-+
++
+ res = ip_set_destroy(index);
+ goto done;
+ }
+ case IP_SET_OP_FLUSH:{
-+ struct ip_set_req_std *req_flush =
-+ (struct ip_set_req_std *) data;
++ struct ip_set_req_std *req_flush = data;
+
+ if (len != sizeof(struct ip_set_req_std)) {
+ ip_set_printk("invalid FLUSH data (want %zu, got %u)",
@@ -2629,7 +3476,7 @@
+ res = -EINVAL;
+ goto done;
+ }
-+ if (strcmp(req_flush->name, IPSET_TOKEN_ALL) == 0) {
++ if (SETNAME_EQ(req_flush->name, IPSET_TOKEN_ALL)) {
+ /* Flush all sets */
+ index = IP_SET_INVALID_ID;
+ } else {
@@ -2645,8 +3492,7 @@
+ goto done;
+ }
+ case IP_SET_OP_RENAME:{
-+ struct ip_set_req_create *req_rename
-+ = (struct ip_set_req_create *) data;
++ struct ip_set_req_create *req_rename = data;
+
+ if (len != sizeof(struct ip_set_req_create)) {
+ ip_set_printk("invalid RENAME data (want %zu, got %u)",
@@ -2657,7 +3503,7 @@
+
+ req_rename->name[IP_SET_MAXNAMELEN - 1] = '\0';
+ req_rename->typename[IP_SET_MAXNAMELEN - 1] = '\0';
-+
++
+ index = ip_set_find_byname(req_rename->name);
+ if (index == IP_SET_INVALID_ID) {
+ res = -ENOENT;
@@ -2667,8 +3513,7 @@
+ goto done;
+ }
+ case IP_SET_OP_SWAP:{
-+ struct ip_set_req_create *req_swap
-+ = (struct ip_set_req_create *) data;
++ struct ip_set_req_create *req_swap = data;
+ ip_set_id_t to_index;
+
+ if (len != sizeof(struct ip_set_req_create)) {
@@ -2697,7 +3542,7 @@
+ default:
+ break; /* Set identified by id */
+ }
-+
++
+ /* There we may have add/del/test/bind/unbind/test_bind operations */
+ if (*op < IP_SET_OP_ADD_IP || *op > IP_SET_OP_TEST_BIND_SET) {
+ res = -EBADMSG;
@@ -2711,7 +3556,7 @@
+ res = -EINVAL;
+ goto done;
+ }
-+ req_adt = (struct ip_set_req_adt *) data;
++ req_adt = data;
+
+ /* -U :all: :all:|:default: uses IP_SET_INVALID_ID */
+ if (!(*op == IP_SET_OP_UNBIND_SET
@@ -2771,8 +3616,7 @@
+
+ if (*op < IP_SET_OP_VERSION) {
+ /* Check the version at the beginning of operations */
-+ struct ip_set_req_version *req_version =
-+ (struct ip_set_req_version *) data;
++ struct ip_set_req_version *req_version = data;
+ if (req_version->version != IP_SET_PROTOCOL_VERSION) {
+ res = -EPROTO;
+ goto done;
@@ -2781,8 +3625,7 @@
+
+ switch (*op) {
+ case IP_SET_OP_VERSION: {
-+ struct ip_set_req_version *req_version =
-+ (struct ip_set_req_version *) data;
++ struct ip_set_req_version *req_version = data;
+
+ if (*len != sizeof(struct ip_set_req_version)) {
+ ip_set_printk("invalid VERSION (want %zu, got %d)",
@@ -2798,8 +3641,7 @@
+ goto done;
+ }
+ case IP_SET_OP_GET_BYNAME: {
-+ struct ip_set_req_get_set *req_get
-+ = (struct ip_set_req_get_set *) data;
++ struct ip_set_req_get_set *req_get = data;
+
+ if (*len != sizeof(struct ip_set_req_get_set)) {
+ ip_set_printk("invalid GET_BYNAME (want %zu, got %d)",
@@ -2813,8 +3655,7 @@
+ goto copy;
+ }
+ case IP_SET_OP_GET_BYINDEX: {
-+ struct ip_set_req_get_set *req_get
-+ = (struct ip_set_req_get_set *) data;
++ struct ip_set_req_get_set *req_get = data;
+
+ if (*len != sizeof(struct ip_set_req_get_set)) {
+ ip_set_printk("invalid GET_BYINDEX (want %zu, got %d)",
@@ -2830,8 +3671,7 @@
+ goto copy;
+ }
+ case IP_SET_OP_ADT_GET: {
-+ struct ip_set_req_adt_get *req_get
-+ = (struct ip_set_req_adt_get *) data;
++ struct ip_set_req_adt_get *req_get = data;
+
+ if (*len != sizeof(struct ip_set_req_adt_get)) {
+ ip_set_printk("invalid ADT_GET (want %zu, got %d)",
@@ -2853,8 +3693,7 @@
+ goto copy;
+ }
+ case IP_SET_OP_MAX_SETS: {
-+ struct ip_set_req_max_sets *req_max_sets
-+ = (struct ip_set_req_max_sets *) data;
++ struct ip_set_req_max_sets *req_max_sets = data;
+ ip_set_id_t i;
+
+ if (*len != sizeof(struct ip_set_req_max_sets)) {
@@ -2864,7 +3703,7 @@
+ goto done;
+ }
+
-+ if (strcmp(req_max_sets->set.name, IPSET_TOKEN_ALL) == 0) {
++ if (SETNAME_EQ(req_max_sets->set.name, IPSET_TOKEN_ALL)) {
+ req_max_sets->set.index = IP_SET_INVALID_ID;
+ } else {
+ req_max_sets->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
@@ -2885,8 +3724,7 @@
+ }
+ case IP_SET_OP_LIST_SIZE:
+ case IP_SET_OP_SAVE_SIZE: {
-+ struct ip_set_req_setnames *req_setnames
-+ = (struct ip_set_req_setnames *) data;
++ struct ip_set_req_setnames *req_setnames = data;
+ struct ip_set_name_list *name_list;
+ struct ip_set *set;
+ ip_set_id_t i;
@@ -2904,8 +3742,7 @@
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] == NULL)
+ continue;
-+ name_list = (struct ip_set_name_list *)
-+ (data + used);
++ name_list = data + used;
+ used += sizeof(struct ip_set_name_list);
+ if (used > copylen) {
+ res = -EAGAIN;
@@ -2957,8 +3794,7 @@
+ goto copy;
+ }
+ case IP_SET_OP_LIST: {
-+ struct ip_set_req_list *req_list
-+ = (struct ip_set_req_list *) data;
++ struct ip_set_req_list *req_list = data;
+ ip_set_id_t i;
+ int used;
+
@@ -2994,8 +3830,7 @@
+ goto copy;
+ }
+ case IP_SET_OP_SAVE: {
-+ struct ip_set_req_list *req_save
-+ = (struct ip_set_req_list *) data;
++ struct ip_set_req_list *req_save = data;
+ ip_set_id_t i;
+ int used;
+
@@ -3011,20 +3846,30 @@
+ res = -ENOENT;
+ goto done;
+ }
++
++#define SETLIST(set) (strcmp(set->type->typename, "setlist") == 0)
++
+ used = 0;
+ if (index == IP_SET_INVALID_ID) {
-+ /* Save all sets */
++ /* Save all sets: ugly setlist type dependency */
++ int setlist = 0;
++ setlists:
+ for (i = 0; i < ip_set_max && res == 0; i++) {
-+ if (ip_set_list[i] != NULL)
++ if (ip_set_list[i] != NULL
++ && !(setlist ^ SETLIST(ip_set_list[i])))
+ res = ip_set_save_set(i, data, &used, *len);
+ }
++ if (!setlist) {
++ setlist = 1;
++ goto setlists;
++ }
+ } else {
+ /* Save an individual set */
+ res = ip_set_save_set(index, data, &used, *len);
+ }
+ if (res == 0)
+ res = ip_set_save_bindings(index, data, &used, *len);
-+
++
+ if (res != 0)
+ goto done;
+ else if (copylen != used) {
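The "setlist ^ SETLIST(set)" filter in this hunk saves all non-setlist sets in a first pass and setlist sets in a second one, presumably so that the ordinary sets a setlist refers to already exist when the dump is restored. A small standalone sketch of the same two-pass ordering over a plain array; struct toy_set and set_is_setlist() are illustrative stand-ins for the SETLIST() strcmp test.

/* Two-pass ordering sketch: plain sets first, "setlist" sets second,
 * mirroring the setlist ^ SETLIST(set) filter above. */
#include <stdio.h>
#include <string.h>

struct toy_set { const char *name; const char *typename; };

static int set_is_setlist(const struct toy_set *s)
{
	return strcmp(s->typename, "setlist") == 0;
}

int main(void)
{
	struct toy_set sets[] = {
		{ "clients", "iphash"  },
		{ "groups",  "setlist" },
		{ "ports",   "portmap" },
	};
	int pass, i, n = sizeof(sets) / sizeof(sets[0]);

	for (pass = 0; pass <= 1; pass++)      /* pass 0: plain, pass 1: setlist */
		for (i = 0; i < n; i++)
			if (!(pass ^ set_is_setlist(&sets[i])))
				printf("save %s (%s)\n",
				       sets[i].name, sets[i].typename);
	return 0;
}

With the data above this prints clients, ports, then groups, which matches the order the kernel loop produces.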
@@ -3034,20 +3879,19 @@
+ goto copy;
+ }
+ case IP_SET_OP_RESTORE: {
-+ struct ip_set_req_setnames *req_restore
-+ = (struct ip_set_req_setnames *) data;
++ struct ip_set_req_setnames *req_restore = data;
+ int line;
+
+ if (*len < sizeof(struct ip_set_req_setnames)
+ || *len != req_restore->size) {
-+ ip_set_printk("invalid RESTORE (want =%zu, got %d)",
-+ req_restore->size, *len);
++ ip_set_printk("invalid RESTORE (want =%lu, got %d)",
++ (long unsigned)req_restore->size, *len);
+ res = -EINVAL;
+ goto done;
+ }
+ line = ip_set_restore(data + sizeof(struct ip_set_req_setnames),
+ req_restore->size - sizeof(struct ip_set_req_setnames));
-+ DP("ip_set_restore: %u", line);
++ DP("ip_set_restore: %d", line);
+ if (line != 0) {
+ res = -EAGAIN;
+ req_restore->size = line;
@@ -3062,12 +3906,12 @@
+ } /* end of switch(op) */
+
+ copy:
-+ DP("set %s, copylen %u", index != IP_SET_INVALID_ID
++ DP("set %s, copylen %d", index != IP_SET_INVALID_ID
+ && ip_set_list[index]
+ ? ip_set_list[index]->name
+ : ":all:", copylen);
+ res = copy_to_user(user, data, copylen);
-+
++
+ done:
+ up(&ip_set_app_mutex);
+ vfree(data);
@@ -3085,12 +3929,15 @@
+ .get_optmin = SO_IP_SET,
+ .get_optmax = SO_IP_SET + 1,
+ .get = &ip_set_sockfn_get,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
++ .use = 0,
++#else
+ .owner = THIS_MODULE,
+#endif
+};
+
+static int max_sets, hash_size;
++
+module_param(max_sets, int, 0600);
+MODULE_PARM_DESC(max_sets, "maximal number of sets");
+module_param(hash_size, int, 0600);
@@ -3133,6 +3980,7 @@
+ vfree(ip_set_hash);
+ return res;
+ }
++
+ return 0;
+}
+
@@ -3150,7 +3998,10 @@
+
+EXPORT_SYMBOL(ip_set_get_byname);
+EXPORT_SYMBOL(ip_set_get_byindex);
-+EXPORT_SYMBOL(ip_set_put);
++EXPORT_SYMBOL(ip_set_put_byindex);
++EXPORT_SYMBOL(ip_set_id);
++EXPORT_SYMBOL(__ip_set_get_byname);
++EXPORT_SYMBOL(__ip_set_put_byindex);
+
+EXPORT_SYMBOL(ip_set_addip_kernel);
+EXPORT_SYMBOL(ip_set_delip_kernel);
@@ -3160,8 +4011,8 @@
+module_exit(ip_set_fini);
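The SAVE path in this file emits one record per set (its header size and members size, followed by the raw header and member data), then a marker record whose index is IP_SET_INVALID_ID, then the binding records, and the RESTORE loop consumes the buffer in the same order. A hedged userspace sketch of walking such a buffer; struct save_rec, INVALID_ID and walk_saved() are assumptions made for illustration and do not reproduce the exact on-wire ip_set_save layout.

/* Sketch: walking a saved-sets buffer terminated by an invalid-index
 * marker record. The record layout here is illustrative only. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define INVALID_ID 0xffff

struct save_rec {               /* assumed layout, for illustration */
	uint16_t index;
	uint32_t header_size;
	uint32_t members_size;
};

static size_t walk_saved(const unsigned char *data, size_t len)
{
	size_t used = 0;

	while (used + sizeof(struct save_rec) <= len) {
		struct save_rec rec;

		memcpy(&rec, data + used, sizeof(rec));
		used += sizeof(rec);
		if (rec.index == INVALID_ID)   /* marker: bindings follow */
			return used;
		printf("set %u: header %u bytes, members %u bytes\n",
		       rec.index, rec.header_size, rec.members_size);
		used += rec.header_size + rec.members_size;
	}
	return used;
}

int main(void)
{
	unsigned char buf[64] = { 0 };
	struct save_rec rec = { 3, 4, 8 };            /* one saved set */
	struct save_rec marker = { INVALID_ID, 0, 0 };

	memcpy(buf, &rec, sizeof(rec));
	memcpy(buf + sizeof(rec) + rec.header_size + rec.members_size,
	       &marker, sizeof(marker));
	walk_saved(buf, sizeof(buf));
	return 0;
}

The kernel side additionally bounds every step against the user-supplied length and returns -EAGAIN when the buffer turns out to be too small, as seen in ip_set_save_set() and ip_set_save_bindings() above.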
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_iphash.c
-@@ -0,0 +1,429 @@
-+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+@@ -0,0 +1,166 @@
++/* Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
@@ -3171,36 +4022,26 @@
+/* Kernel module implementing an ip hash set */
+
+#include <linux/module.h>
++#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+#include <linux/jhash.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
-+#include <linux/vmalloc.h>
+#include <linux/random.h>
+
+#include <net/ip.h>
+
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
+#include <linux/netfilter_ipv4/ip_set_iphash.h>
+
+static int limit = MAX_RANGE;
+
+static inline __u32
-+jhash_ip(const struct ip_set_iphash *map, uint16_t i, ip_set_ip_t ip)
-+{
-+ return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
-+}
-+
-+static inline __u32
-+hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++iphash_id(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++ struct ip_set_iphash *map = set->data;
+ __u32 id;
+ u_int16_t i;
+ ip_set_ip_t *elem;
@@ -3208,208 +4049,91 @@
+ *hash_ip = ip & map->netmask;
+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u, %u.%u.%u.%u",
+ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip), HIPQUAD(map->netmask));
-+
++
+ for (i = 0; i < map->probes; i++) {
+ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
+ DP("hash key: %u", id);
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
+ if (*elem == *hash_ip)
+ return id;
-+ /* No shortcut at testing - there can be deleted
-+ * entries. */
++ /* No shortcut - there can be deleted entries. */
+ }
+ return UINT_MAX;
+}
+
+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++iphash_test(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
++ return (ip && iphash_id(set, hash_ip, ip) != UINT_MAX);
+}
+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iphash *req =
-+ (struct ip_set_req_iphash *) data;
++#define KADT_CONDITION
+
-+ if (size != sizeof(struct ip_set_req_iphash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iphash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT(iphash, test)
++KADT(iphash, test, ipaddr)
+
+static inline int
-+__addip(struct ip_set_iphash *map, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++__iphash_add(struct ip_set_iphash *map, ip_set_ip_t *ip)
+{
+ __u32 probe;
+ u_int16_t i;
-+ ip_set_ip_t *elem;
-+
-+ if (!ip || map->elements >= limit)
-+ return -ERANGE;
-+
-+ *hash_ip = ip & map->netmask;
-+
++ ip_set_ip_t *elem, *slot = NULL;
++
+ for (i = 0; i < map->probes; i++) {
-+ probe = jhash_ip(map, i, *hash_ip) % map->hashsize;
++ probe = jhash_ip(map, i, *ip) % map->hashsize;
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
-+ if (*elem == *hash_ip)
++ if (*elem == *ip)
+ return -EEXIST;
-+ if (!*elem) {
-+ *elem = *hash_ip;
-+ map->elements++;
-+ return 0;
-+ }
++ if (!(slot || *elem))
++ slot = elem;
++ /* There can be deleted entries, must check all slots */
++ }
++ if (slot) {
++ *slot = *ip;
++ map->elements++;
++ return 0;
+ }
+ /* Trigger rehashing */
+ return -EAGAIN;
+}
+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
++static inline int
++iphash_add(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_req_iphash *req =
-+ (struct ip_set_req_iphash *) data;
++ struct ip_set_iphash *map = set->data;
++
++ if (!ip || map->elements >= limit)
++ return -ERANGE;
+
-+ if (size != sizeof(struct ip_set_req_iphash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iphash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __addip((struct ip_set_iphash *) set->data, req->ip, hash_ip);
++ *hash_ip = ip & map->netmask;
++
++ return __iphash_add(map, hash_ip);
+}
+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __addip((struct ip_set_iphash *) set->data,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT(iphash, add)
++KADT(iphash, add, ipaddr)
+
-+static int retry(struct ip_set *set)
++static inline void
++__iphash_retry(struct ip_set_iphash *tmp, struct ip_set_iphash *map)
+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ ip_set_ip_t hash_ip, *elem;
-+ void *members;
-+ u_int32_t i, hashsize = map->hashsize;
-+ int res;
-+ struct ip_set_iphash *tmp;
-+
-+ if (map->resize == 0)
-+ return -ERANGE;
-+
-+ again:
-+ res = 0;
-+
-+ /* Calculate new hash size */
-+ hashsize += (hashsize * map->resize)/100;
-+ if (hashsize == map->hashsize)
-+ hashsize++;
-+
-+ ip_set_printk("rehashing of set %s triggered: "
-+ "hashsize grows from %u to %u",
-+ set->name, map->hashsize, hashsize);
-+
-+ tmp = kmalloc(sizeof(struct ip_set_iphash)
-+ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
-+ if (!tmp) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_iphash)
-+ + map->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
-+ if (!tmp->members) {
-+ DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
-+ kfree(tmp);
-+ return -ENOMEM;
-+ }
-+ tmp->hashsize = hashsize;
-+ tmp->elements = 0;
-+ tmp->probes = map->probes;
-+ tmp->resize = map->resize;
+ tmp->netmask = map->netmask;
-+ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
-+
-+ write_lock_bh(&set->lock);
-+ map = (struct ip_set_iphash *) set->data; /* Play safe */
-+ for (i = 0; i < map->hashsize && res == 0; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ if (*elem)
-+ res = __addip(tmp, *elem, &hash_ip);
-+ }
-+ if (res) {
-+ /* Failure, try again */
-+ write_unlock_bh(&set->lock);
-+ harray_free(tmp->members);
-+ kfree(tmp);
-+ goto again;
-+ }
-+
-+ /* Success at resizing! */
-+ members = map->members;
-+
-+ map->hashsize = tmp->hashsize;
-+ map->members = tmp->members;
-+ write_unlock_bh(&set->lock);
-+
-+ harray_free(members);
-+ kfree(tmp);
-+
-+ return 0;
+}
+
++HASH_RETRY(iphash, ip_set_ip_t)
++
+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++iphash_del(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++ struct ip_set_iphash *map = set->data;
+ ip_set_ip_t id, *elem;
+
+ if (!ip)
+ return -ERANGE;
+
-+ id = hash_id(set, ip, hash_ip);
++ id = iphash_id(set, hash_ip, ip);
+ if (id == UINT_MAX)
+ return -EEXIST;
-+
++
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
+ *elem = 0;
+ map->elements--;
@@ -3417,159 +4141,35 @@
+ return 0;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iphash *req =
-+ (struct ip_set_req_iphash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iphash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iphash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT(iphash, del)
++KADT(iphash, del, ipaddr)
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static inline int
++__iphash_create(const struct ip_set_req_iphash_create *req,
++ struct ip_set_iphash *map)
+{
-+ struct ip_set_req_iphash_create *req =
-+ (struct ip_set_req_iphash_create *) data;
-+ struct ip_set_iphash *map;
-+ uint16_t i;
-+
-+ if (size != sizeof(struct ip_set_req_iphash_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iphash_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ if (req->hashsize < 1) {
-+ ip_set_printk("hashsize too small");
-+ return -ENOEXEC;
-+ }
-+
-+ if (req->probes < 1) {
-+ ip_set_printk("probes too small");
-+ return -ENOEXEC;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_iphash)
-+ + req->probes * sizeof(uint32_t), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_iphash)
-+ + req->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ for (i = 0; i < req->probes; i++)
-+ get_random_bytes(((uint32_t *) map->initval)+i, 4);
-+ map->elements = 0;
-+ map->hashsize = req->hashsize;
-+ map->probes = req->probes;
-+ map->resize = req->resize;
+ map->netmask = req->netmask;
-+ map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+
-+ set->data = map;
++
+ return 0;
+}
+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+
-+ harray_free(map->members);
-+ kfree(map);
-+
-+ set->data = NULL;
-+}
-+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
-+ map->elements = 0;
-+}
++HASH_CREATE(iphash, ip_set_ip_t)
++HASH_DESTROY(iphash)
+
-+static void list_header(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ struct ip_set_req_iphash_create *header =
-+ (struct ip_set_req_iphash_create *) data;
++HASH_FLUSH(iphash, ip_set_ip_t)
+
-+ header->hashsize = map->hashsize;
-+ header->probes = map->probes;
-+ header->resize = map->resize;
++static inline void
++__iphash_list_header(const struct ip_set_iphash *map,
++ struct ip_set_req_iphash_create *header)
++{
+ header->netmask = map->netmask;
+}
+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+
-+ return (map->hashsize * sizeof(ip_set_ip_t));
-+}
-+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ ip_set_ip_t i, *elem;
-+
-+ for (i = 0; i < map->hashsize; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ ((ip_set_ip_t *)data)[i] = *elem;
-+ }
-+}
++HASH_LIST_HEADER(iphash)
++HASH_LIST_MEMBERS_SIZE(iphash, ip_set_ip_t)
++HASH_LIST_MEMBERS(iphash, ip_set_ip_t)
+
-+static struct ip_set_type ip_set_iphash = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_iphash),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .retry = &retry,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_iphash_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_RTYPE(iphash, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -3577,25 +4177,13 @@
+module_param(limit, int, 0600);
+MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
+
-+static int __init ip_set_iphash_init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_iphash);
-+}
-+
-+static void __exit ip_set_iphash_fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_iphash);
-+}
-+
-+module_init(ip_set_iphash_init);
-+module_exit(ip_set_iphash_fini);
++REGISTER_MODULE(iphash)
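The __iphash_add() helper above probes map->probes different jhash variants of the value, returns -EEXIST on a match, remembers the first empty slot so holes left by deletions get reused, and falls back to -EAGAIN so the caller can trigger a rehash. A compact userspace sketch of that insert logic; probe_hash(), HASHSIZE and PROBES are toy stand-ins for the per-probe jhash_ip() values and the runtime-configured sizes.

/* Sketch of multi-probe insertion with first-free-slot reuse and an
 * explicit "please rehash" result, as in __iphash_add() above. */
#include <stdint.h>
#include <stdio.h>
#include <errno.h>

#define HASHSIZE 8
#define PROBES   4

static uint32_t table[HASHSIZE];   /* 0 means "empty slot" */

static uint32_t probe_hash(unsigned probe, uint32_t ip)
{
	/* toy per-probe mixer, NOT the kernel's jhash_ip() */
	return (ip ^ (probe * 0x9e3779b9u)) * 0x85ebca6bu;
}

static int hash_add(uint32_t ip)
{
	uint32_t *slot = NULL;
	unsigned i;

	for (i = 0; i < PROBES; i++) {
		uint32_t *elem = &table[probe_hash(i, ip) % HASHSIZE];

		if (*elem == ip)
			return -EEXIST;    /* already a member */
		if (!slot && !*elem)
			slot = elem;       /* remember first free slot */
		/* keep probing: deleted entries leave holes */
	}
	if (slot) {
		*slot = ip;
		return 0;
	}
	return -EAGAIN;                    /* all probes taken: rehash */
}

int main(void)
{
	printf("add: %d, re-add: %d\n",
	       hash_add(0x0a000001u), hash_add(0x0a000001u));
	return 0;
}

Checking all probes before filling a slot is what makes deletion cheap in the real code: a deleted element simply becomes a zero slot, and lookups still succeed because every probe position is always examined.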
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_ipmap.c
-@@ -0,0 +1,336 @@
+@@ -0,0 +1,142 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
-+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ * Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
@@ -3607,9 +4195,6 @@
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
@@ -3624,10 +4209,10 @@
+}
+
+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++ipmap_test(const struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+
++ const struct ip_set_ipmap *map = set->data;
++
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
+
@@ -3637,46 +4222,15 @@
+ return !!test_bit(ip_to_id(map, *hash_ip), map->members);
+}
+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipmap *req =
-+ (struct ip_set_req_ipmap *) data;
++#define KADT_CONDITION
+
-+ if (size != sizeof(struct ip_set_req_ipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ int res = __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+ return (res < 0 ? 0 : res);
-+}
++UADT(ipmap, test)
++KADT(ipmap, test, ipaddr)
+
+static inline int
-+__addip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++ipmap_add(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++ struct ip_set_ipmap *map = set->data;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
@@ -3689,46 +4243,13 @@
+ return 0;
+}
+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipmap *req =
-+ (struct ip_set_req_ipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_ipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ DP("%u.%u.%u.%u", HIPQUAD(req->ip));
-+ return __addip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __addip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT(ipmap, add)
++KADT(ipmap, add, ipaddr)
+
+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++ipmap_del(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++ struct ip_set_ipmap *map = set->data;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
@@ -3737,75 +4258,17 @@
+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
+ if (!test_and_clear_bit(ip_to_id(map, *hash_ip), map->members))
+ return -EEXIST;
-+
++
+ return 0;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipmap *req =
-+ (struct ip_set_req_ipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_ipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT(ipmap, del)
++KADT(ipmap, del, ipaddr)
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static inline int
++__ipmap_create(const struct ip_set_req_ipmap_create *req,
++ struct ip_set_ipmap *map)
+{
-+ int newbytes;
-+ struct ip_set_req_ipmap_create *req =
-+ (struct ip_set_req_ipmap_create *) data;
-+ struct ip_set_ipmap *map;
-+
-+ if (size != sizeof(struct ip_set_req_ipmap_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipmap_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ DP("from %u.%u.%u.%u to %u.%u.%u.%u",
-+ HIPQUAD(req->from), HIPQUAD(req->to));
-+
-+ if (req->from > req->to) {
-+ DP("bad ip range");
-+ return -ENOEXEC;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_ipmap), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_ipmap));
-+ return -ENOMEM;
-+ }
-+ map->first_ip = req->from;
-+ map->last_ip = req->to;
+ map->netmask = req->netmask;
+
+ if (req->netmask == 0xFFFFFFFF) {
@@ -3830,109 +4293,40 @@
+ map->sizeid = 2 << (netmask_bits - mask_bits - 1);
+ }
+ if (map->sizeid > MAX_RANGE + 1) {
-+ ip_set_printk("range too big (max %d addresses)",
-+ MAX_RANGE+1);
-+ kfree(map);
++ ip_set_printk("range too big, %d elements (max %d)",
++ map->sizeid, MAX_RANGE+1);
+ return -ENOEXEC;
+ }
+ DP("hosts %u, sizeid %u", map->hosts, map->sizeid);
-+ newbytes = bitmap_bytes(0, map->sizeid - 1);
-+ map->members = kmalloc(newbytes, GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", newbytes);
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+ memset(map->members, 0, newbytes);
-+
-+ set->data = map;
-+ return 0;
-+}
-+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+
-+ kfree(map->members);
-+ kfree(map);
-+
-+ set->data = NULL;
++ return bitmap_bytes(0, map->sizeid - 1);
+}
+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+ memset(map->members, 0, bitmap_bytes(0, map->sizeid - 1));
-+}
++BITMAP_CREATE(ipmap)
++BITMAP_DESTROY(ipmap)
++BITMAP_FLUSH(ipmap)
+
-+static void list_header(const struct ip_set *set, void *data)
++static inline void
++__ipmap_list_header(const struct ip_set_ipmap *map,
++ struct ip_set_req_ipmap_create *header)
+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+ struct ip_set_req_ipmap_create *header =
-+ (struct ip_set_req_ipmap_create *) data;
-+
-+ header->from = map->first_ip;
-+ header->to = map->last_ip;
+ header->netmask = map->netmask;
+}
+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+
-+ return bitmap_bytes(0, map->sizeid - 1);
-+}
++BITMAP_LIST_HEADER(ipmap)
++BITMAP_LIST_MEMBERS_SIZE(ipmap)
++BITMAP_LIST_MEMBERS(ipmap)
+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+ int bytes = bitmap_bytes(0, map->sizeid - 1);
-+
-+ memcpy(data, map->members, bytes);
-+}
-+
-+static struct ip_set_type ip_set_ipmap = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_ipmap),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_ipmap_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_TYPE(ipmap, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("ipmap type of IP sets");
+
-+static int __init ip_set_ipmap_init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_ipmap);
-+}
-+
-+static void __exit ip_set_ipmap_fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_ipmap);
-+}
-+
-+module_init(ip_set_ipmap_init);
-+module_exit(ip_set_ipmap_fini);
++REGISTER_MODULE(ipmap)
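An ipmap set stores one bit per address (or per netmask-sized block): the address is masked with map->netmask, offset by first_ip and scaled by map->hosts to get a bit index, and membership is a plain test_bit(). The sketch below illustrates that addressing; struct toy_ipmap and the ip_to_id() formula (masked_ip - first_ip) / hosts are assumptions suggested by the hosts/sizeid bookkeeping in __ipmap_create(), not the exact kernel helper.

/* Sketch of bitmap-set addressing: mask, offset, divide, test bit.
 * The ip_to_id() formula here is an assumption for illustration. */
#include <stdint.h>
#include <stdio.h>

struct toy_ipmap {
	uint32_t first_ip, last_ip;     /* host-order address range */
	uint32_t netmask;               /* per-entry netmask */
	uint32_t hosts;                 /* addresses covered by one bit */
	unsigned char members[1 << 13]; /* bitmap for a /16 range */
};

static uint32_t ip_to_id(const struct toy_ipmap *m, uint32_t masked_ip)
{
	return (masked_ip - m->first_ip) / m->hosts;
}

static int ipmap_test(const struct toy_ipmap *m, uint32_t ip)
{
	uint32_t id;

	if (ip < m->first_ip || ip > m->last_ip)
		return 0;
	id = ip_to_id(m, ip & m->netmask);
	return (m->members[id / 8] >> (id % 8)) & 1;
}

static void ipmap_add(struct toy_ipmap *m, uint32_t ip)
{
	uint32_t id = ip_to_id(m, ip & m->netmask);

	m->members[id / 8] |= 1u << (id % 8);
}

int main(void)
{
	struct toy_ipmap m = {
		.first_ip = 0x0a000000u,   /* 10.0.0.0 */
		.last_ip  = 0x0a00ffffu,   /* 10.0.255.255 */
		.netmask  = 0xffffffffu,   /* one bit per address */
		.hosts    = 1,
	};

	ipmap_add(&m, 0x0a000007u);
	printf("member: %d\n", ipmap_test(&m, 0x0a000007u));
	return 0;
}

The MAX_RANGE check in __ipmap_create() exists precisely because this layout needs one bit per covered block, so the bitmap size grows linearly with the range.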
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_ipporthash.c
-@@ -0,0 +1,581 @@
-+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+@@ -0,0 +1,203 @@
++/* Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
@@ -3942,581 +4336,729 @@
+/* Kernel module implementing an ip+port hash set */
+
+#include <linux/module.h>
++#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+#include <linux/jhash.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
-+#include <linux/vmalloc.h>
+#include <linux/random.h>
+
+#include <net/ip.h>
+
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
+#include <linux/netfilter_ipv4/ip_set_ipporthash.h>
++#include <linux/netfilter_ipv4/ip_set_getport.h>
+
+static int limit = MAX_RANGE;
+
-+/* We must handle non-linear skbs */
-+static inline ip_set_ip_t
-+get_port(const struct sk_buff *skb, u_int32_t flags)
-+{
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ struct iphdr *iph = ip_hdr(skb);
-+#else
-+ struct iphdr *iph = skb->nh.iph;
-+#endif
-+ u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
-+
-+ switch (iph->protocol) {
-+ case IPPROTO_TCP: {
-+ struct tcphdr tcph;
-+
-+ /* See comments at tcp_match in ip_tables.c */
-+ if (offset)
-+ return INVALID_PORT;
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
-+#else
-+ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &tcph, sizeof(tcph)) < 0)
-+#endif
-+ /* No choice either */
-+ return INVALID_PORT;
-+
-+ return ntohs(flags & IPSET_SRC ?
-+ tcph.source : tcph.dest);
-+ }
-+ case IPPROTO_UDP: {
-+ struct udphdr udph;
-+
-+ if (offset)
-+ return INVALID_PORT;
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
-+#else
-+ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &udph, sizeof(udph)) < 0)
-+#endif
-+ /* No choice either */
-+ return INVALID_PORT;
-+
-+ return ntohs(flags & IPSET_SRC ?
-+ udph.source : udph.dest);
-+ }
-+ default:
-+ return INVALID_PORT;
-+ }
-+}
-+
+static inline __u32
-+jhash_ip(const struct ip_set_ipporthash *map, uint16_t i, ip_set_ip_t ip)
++ipporthash_id(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port)
+{
-+ return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
-+}
-+
-+#define HASH_IP(map, ip, port) (port + ((ip - ((map)->first_ip)) << 16))
-+
-+static inline __u32
-+hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_ipporthash *map =
-+ (struct ip_set_ipporthash *) set->data;
++ struct ip_set_ipporthash *map = set->data;
+ __u32 id;
+ u_int16_t i;
+ ip_set_ip_t *elem;
+
-+ *hash_ip = HASH_IP(map, ip, port);
++ *hash_ip = pack_ip_port(map, ip, port);
++
+ DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
+ set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
-+
++ if (!*hash_ip)
++ return UINT_MAX;
++
+ for (i = 0; i < map->probes; i++) {
+ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
+ DP("hash key: %u", id);
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
+ if (*elem == *hash_ip)
+ return id;
-+ /* No shortcut at testing - there can be deleted
-+ * entries. */
++ /* No shortcut - there can be deleted entries. */
+ }
+ return UINT_MAX;
+}
+
+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t *hash_ip)
++ipporthash_test(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+
++ struct ip_set_ipporthash *map = set->data;
++
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
+
-+ return (hash_id(set, ip, port, hash_ip) != UINT_MAX);
++ return (ipporthash_id(set, hash_ip, ip, port) != UINT_MAX);
+}
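Before hashing, the ip,port pair is packed into a single 32-bit value. The older HASH_IP() macro removed in this hunk put (ip - first_ip) in the upper 16 bits and the port in the lower 16, and pack_ip_port() from ip_set_getport.h appears to do the same while yielding 0 for unusable input (hence the !*hash_ip checks), which fits the MAX_RANGE limit enforced in __ipporthash_create(). A small sketch of that packing; the pack_ip_port() signature below is simplified for illustration.

/* Sketch of packing (ip, port) into one 32-bit hash key, following the
 * old HASH_IP() macro; the 0-means-invalid convention mirrors the
 * !*hash_ip checks above. */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_ip_port(uint32_t first_ip, uint32_t last_ip,
                             uint32_t ip, uint16_t port)
{
	if (ip < first_ip || ip > last_ip)
		return 0;                  /* out of range: invalid */
	return ((ip - first_ip) << 16) + port;
}

int main(void)
{
	uint32_t key = pack_ip_port(0x0a000000u, 0x0a00ffffu,
	                            0x0a000102u, 8080);
	printf("packed key: 0x%08x\n", key);
	return 0;
}

Because only 16 bits are left for the address offset, the set can cover at most a /16-sized range, which is exactly what the creation-time range check guards against.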
+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipporthash *req =
-+ (struct ip_set_req_ipporthash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_ipporthash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipporthash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testip(set, req->ip, req->port, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ ip_set_ip_t port;
-+ int res;
-+
-+ if (flags[index+1] == 0)
-+ return 0;
-+
-+ port = get_port(skb, flags[index+1]);
-+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ NIPQUAD(ip_hdr(skb)->saddr),
-+ NIPQUAD(ip_hdr(skb)->daddr));
-+#else
-+ NIPQUAD(skb->nh.iph->saddr),
-+ NIPQUAD(skb->nh.iph->daddr));
-+#endif
-+ DP("flag %s port %u",
-+ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
-+ port);
-+ if (port == INVALID_PORT)
++#define KADT_CONDITION \
++ ip_set_ip_t port; \
++ \
++ if (flags[index+1] == 0) \
++ return 0; \
++ \
++ port = get_port(skb, flags[index+1]); \
++ \
++ if (port == INVALID_PORT) \
+ return 0;
+
-+ res = __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ port,
-+ hash_ip);
-+ return (res < 0 ? 0 : res);
-+
-+}
++UADT(ipporthash, test, req->port)
++KADT(ipporthash, test, ipaddr, port)
+
+static inline int
-+__add_haship(struct ip_set_ipporthash *map, ip_set_ip_t hash_ip)
++__ipporthash_add(struct ip_set_ipporthash *map, ip_set_ip_t *ip)
+{
+ __u32 probe;
+ u_int16_t i;
-+ ip_set_ip_t *elem;
++ ip_set_ip_t *elem, *slot = NULL;
+
+ for (i = 0; i < map->probes; i++) {
-+ probe = jhash_ip(map, i, hash_ip) % map->hashsize;
++ probe = jhash_ip(map, i, *ip) % map->hashsize;
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
-+ if (*elem == hash_ip)
++ if (*elem == *ip)
+ return -EEXIST;
-+ if (!*elem) {
-+ *elem = hash_ip;
-+ map->elements++;
-+ return 0;
-+ }
++ if (!(slot || *elem))
++ slot = elem;
++ /* There can be deleted entries, must check all slots */
++ }
++ if (slot) {
++ *slot = *ip;
++ map->elements++;
++ return 0;
+ }
+ /* Trigger rehashing */
+ return -EAGAIN;
+}
+
+static inline int
-+__addip(struct ip_set_ipporthash *map, ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t *hash_ip)
++ipporthash_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port)
+{
++ struct ip_set_ipporthash *map = set->data;
+ if (map->elements > limit)
+ return -ERANGE;
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
+
-+ *hash_ip = HASH_IP(map, ip, port);
++ *hash_ip = pack_ip_port(map, ip, port);
+
-+ return __add_haship(map, *hash_ip);
++ if (!*hash_ip)
++ return -ERANGE;
++
++ return __ipporthash_add(map, hash_ip);
+}
+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
++UADT(ipporthash, add, req->port)
++KADT(ipporthash, add, ipaddr, port)
++
++static inline void
++__ipporthash_retry(struct ip_set_ipporthash *tmp,
++ struct ip_set_ipporthash *map)
+{
-+ struct ip_set_req_ipporthash *req =
-+ (struct ip_set_req_ipporthash *) data;
++ tmp->first_ip = map->first_ip;
++ tmp->last_ip = map->last_ip;
++}
+
-+ if (size != sizeof(struct ip_set_req_ipporthash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipporthash),
-+ size);
-+ return -EINVAL;
++HASH_RETRY(ipporthash, ip_set_ip_t)
++
++static inline int
++ipporthash_del(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port)
++{
++ struct ip_set_ipporthash *map = set->data;
++ ip_set_ip_t id;
++ ip_set_ip_t *elem;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++
++ id = ipporthash_id(set, hash_ip, ip, port);
++
++ if (id == UINT_MAX)
++ return -EEXIST;
++
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
++ *elem = 0;
++ map->elements--;
++
++ return 0;
++}
++
++UADT(ipporthash, del, req->port)
++KADT(ipporthash, del, ipaddr, port)
++
++static inline int
++__ipporthash_create(const struct ip_set_req_ipporthash_create *req,
++ struct ip_set_ipporthash *map)
++{
++ if (req->to - req->from > MAX_RANGE) {
++ ip_set_printk("range too big, %d elements (max %d)",
++ req->to - req->from + 1, MAX_RANGE+1);
++ return -ENOEXEC;
+ }
-+ return __addip((struct ip_set_ipporthash *) set->data,
-+ req->ip, req->port, hash_ip);
++ map->first_ip = req->from;
++ map->last_ip = req->to;
++ return 0;
+}
+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
++HASH_CREATE(ipporthash, ip_set_ip_t)
++HASH_DESTROY(ipporthash)
++HASH_FLUSH(ipporthash, ip_set_ip_t)
++
++static inline void
++__ipporthash_list_header(const struct ip_set_ipporthash *map,
++ struct ip_set_req_ipporthash_create *header)
+{
-+ ip_set_ip_t port;
++ header->from = map->first_ip;
++ header->to = map->last_ip;
++}
+
-+ if (flags[index+1] == 0)
-+ return -EINVAL;
++HASH_LIST_HEADER(ipporthash)
++HASH_LIST_MEMBERS_SIZE(ipporthash, ip_set_ip_t)
++HASH_LIST_MEMBERS(ipporthash, ip_set_ip_t)
+
-+ port = get_port(skb, flags[index+1]);
++IP_SET_RTYPE(ipporthash, IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_DATA_DOUBLE)
+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ NIPQUAD(ip_hdr(skb)->saddr),
-+ NIPQUAD(ip_hdr(skb)->daddr));
-+#else
-+ NIPQUAD(skb->nh.iph->saddr),
-+ NIPQUAD(skb->nh.iph->daddr));
-+#endif
-+ DP("flag %s port %u",
-+ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
-+ port);
-+ if (port == INVALID_PORT)
-+ return -EINVAL;
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("ipporthash type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
+
-+ return __addip((struct ip_set_ipporthash *) set->data,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ port,
-+ hash_ip);
-+}
++REGISTER_MODULE(ipporthash)
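
Note: the UADT()/KADT() invocations above replace the open-coded testip()/addip()/delip() and *_kernel() wrappers that this revision deletes. The macros themselves are defined elsewhere in this patch (ip_set_hashes.h); as a rough, hedged sketch only — inferred from the removed wrappers, with the _uadd name being an assumption — a userspace-side expansion of UADT(ipporthash, add, req->port) would look roughly like:

/* Hedged sketch, not the real macro body: userspace request wrapper */
static int
ipporthash_uadd(struct ip_set *set, const void *data, u_int32_t size,
		ip_set_ip_t *hash_ip)
{
	const struct ip_set_req_ipporthash *req = data;

	if (size != sizeof(struct ip_set_req_ipporthash)) {
		ip_set_printk("data length wrong (want %zu, have %lu)",
			      sizeof(struct ip_set_req_ipporthash),
			      (unsigned long)size);
		return -EINVAL;
	}
	return ipporthash_add(set, hash_ip, req->ip, req->port);
}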
+--- /dev/null
++++ b/net/ipv4/netfilter/ip_set_ipportiphash.c
+@@ -0,0 +1,216 @@
++/* Copyright (C) 2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module implementing an ip+port+ip hash set */
++
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/ip.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++#include <linux/random.h>
+
-+static int retry(struct ip_set *set)
++#include <net/ip.h>
++
++#include <linux/netfilter_ipv4/ip_set_ipportiphash.h>
++#include <linux/netfilter_ipv4/ip_set_getport.h>
++
++static int limit = MAX_RANGE;
++
++#define jhash_ip2(map, i, ipport, ip1) \
++ jhash_2words(ipport, ip1, *(map->initval + i))
++
++static inline __u32
++ipportiphash_id(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ ip_set_ip_t *elem;
-+ void *members;
-+ u_int32_t i, hashsize = map->hashsize;
-+ int res;
-+ struct ip_set_ipporthash *tmp;
++ struct ip_set_ipportiphash *map = set->data;
++ __u32 id;
++ u_int16_t i;
++ struct ipportip *elem;
+
-+ if (map->resize == 0)
++ *hash_ip = pack_ip_port(map, ip, port);
++ DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
++ set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
++ if (!(*hash_ip || ip1))
++ return UINT_MAX;
++
++ for (i = 0; i < map->probes; i++) {
++ id = jhash_ip2(map, i, *hash_ip, ip1) % map->hashsize;
++ DP("hash key: %u", id);
++ elem = HARRAY_ELEM(map->members, struct ipportip *, id);
++ if (elem->ip == *hash_ip && elem->ip1 == ip1)
++ return id;
++ /* No shortcut - there can be deleted entries. */
++ }
++ return UINT_MAX;
++}
++
++static inline int
++ipportiphash_test(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
++{
++ struct ip_set_ipportiphash *map = set->data;
++
++ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
+
-+ again:
-+ res = 0;
++ return (ipportiphash_id(set, hash_ip, ip, port, ip1) != UINT_MAX);
++}
+
-+ /* Calculate new hash size */
-+ hashsize += (hashsize * map->resize)/100;
-+ if (hashsize == map->hashsize)
-+ hashsize++;
++#define KADT_CONDITION \
++ ip_set_ip_t port, ip1; \
++ \
++ if (flags[index+2] == 0) \
++ return 0; \
++ \
++ port = get_port(skb, flags[index+1]); \
++ ip1 = ipaddr(skb, flags[index+2]); \
++ \
++ if (port == INVALID_PORT) \
++ return 0;
+
-+ ip_set_printk("rehashing of set %s triggered: "
-+ "hashsize grows from %u to %u",
-+ set->name, map->hashsize, hashsize);
++UADT(ipportiphash, test, req->port, req->ip1)
++KADT(ipportiphash, test, ipaddr, port, ip1)
+
-+ tmp = kmalloc(sizeof(struct ip_set_ipporthash)
-+ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
-+ if (!tmp) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_ipporthash)
-+ + map->probes * sizeof(uint32_t));
-+ return -ENOMEM;
++static inline int
++__ipportip_add(struct ip_set_ipportiphash *map,
++ ip_set_ip_t hash_ip, ip_set_ip_t ip1)
++{
++ __u32 probe;
++ u_int16_t i;
++ struct ipportip *elem, *slot = NULL;
++
++ for (i = 0; i < map->probes; i++) {
++ probe = jhash_ip2(map, i, hash_ip, ip1) % map->hashsize;
++ elem = HARRAY_ELEM(map->members, struct ipportip *, probe);
++ if (elem->ip == hash_ip && elem->ip1 == ip1)
++ return -EEXIST;
++ if (!(slot || elem->ip || elem->ip1))
++ slot = elem;
++ /* There can be deleted entries, must check all slots */
+ }
-+ tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
-+ if (!tmp->members) {
-+ DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
-+ kfree(tmp);
-+ return -ENOMEM;
++ if (slot) {
++ slot->ip = hash_ip;
++ slot->ip1 = ip1;
++ map->elements++;
++ return 0;
+ }
-+ tmp->hashsize = hashsize;
-+ tmp->elements = 0;
-+ tmp->probes = map->probes;
-+ tmp->resize = map->resize;
-+ tmp->first_ip = map->first_ip;
-+ tmp->last_ip = map->last_ip;
-+ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
++ /* Trigger rehashing */
++ return -EAGAIN;
++}
+
-+ write_lock_bh(&set->lock);
-+ map = (struct ip_set_ipporthash *) set->data; /* Play safe */
-+ for (i = 0; i < map->hashsize && res == 0; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ if (*elem)
-+ res = __add_haship(tmp, *elem);
-+ }
-+ if (res) {
-+ /* Failure, try again */
-+ write_unlock_bh(&set->lock);
-+ harray_free(tmp->members);
-+ kfree(tmp);
-+ goto again;
-+ }
++static inline int
++__ipportiphash_add(struct ip_set_ipportiphash *map,
++ struct ipportip *elem)
++{
++ return __ipportip_add(map, elem->ip, elem->ip1);
++}
+
-+ /* Success at resizing! */
-+ members = map->members;
++static inline int
++ipportiphash_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
++{
++ struct ip_set_ipportiphash *map = set->data;
++
++ if (map->elements > limit)
++ return -ERANGE;
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
+
-+ map->hashsize = tmp->hashsize;
-+ map->members = tmp->members;
-+ write_unlock_bh(&set->lock);
++ *hash_ip = pack_ip_port(map, ip, port);
++ if (!(*hash_ip || ip1))
++ return -ERANGE;
++
++ return __ipportip_add(map, *hash_ip, ip1);
++}
+
-+ harray_free(members);
-+ kfree(tmp);
++UADT(ipportiphash, add, req->port, req->ip1)
++KADT(ipportiphash, add, ipaddr, port, ip1)
+
-+ return 0;
++static inline void
++__ipportiphash_retry(struct ip_set_ipportiphash *tmp,
++ struct ip_set_ipportiphash *map)
++{
++ tmp->first_ip = map->first_ip;
++ tmp->last_ip = map->last_ip;
+}
+
++HASH_RETRY2(ipportiphash, struct ipportip)
++
+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t *hash_ip)
++ipportiphash_del(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++ struct ip_set_ipportiphash *map = set->data;
+ ip_set_ip_t id;
-+ ip_set_ip_t *elem;
++ struct ipportip *elem;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
+
-+ id = hash_id(set, ip, port, hash_ip);
++ id = ipportiphash_id(set, hash_ip, ip, port, ip1);
+
+ if (id == UINT_MAX)
+ return -EEXIST;
-+
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
-+ *elem = 0;
++
++ elem = HARRAY_ELEM(map->members, struct ipportip *, id);
++ elem->ip = elem->ip1 = 0;
+ map->elements--;
+
+ return 0;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipporthash *req =
-+ (struct ip_set_req_ipporthash *) data;
++UADT(ipportiphash, del, req->port, req->ip1)
++KADT(ipportiphash, del, ipaddr, port, ip1)
+
-+ if (size != sizeof(struct ip_set_req_ipporthash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipporthash),
-+ size);
-+ return -EINVAL;
++static inline int
++__ipportiphash_create(const struct ip_set_req_ipportiphash_create *req,
++ struct ip_set_ipportiphash *map)
++{
++ if (req->to - req->from > MAX_RANGE) {
++ ip_set_printk("range too big, %d elements (max %d)",
++ req->to - req->from + 1, MAX_RANGE+1);
++ return -ENOEXEC;
+ }
-+ return __delip(set, req->ip, req->port, hash_ip);
++ map->first_ip = req->from;
++ map->last_ip = req->to;
++ return 0;
+}
+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
++HASH_CREATE(ipportiphash, struct ipportip)
++HASH_DESTROY(ipportiphash)
++HASH_FLUSH(ipportiphash, struct ipportip)
++
++static inline void
++__ipportiphash_list_header(const struct ip_set_ipportiphash *map,
++ struct ip_set_req_ipportiphash_create *header)
+{
-+ ip_set_ip_t port;
++ header->from = map->first_ip;
++ header->to = map->last_ip;
++}
+
-+ if (flags[index+1] == 0)
-+ return -EINVAL;
++HASH_LIST_HEADER(ipportiphash)
++HASH_LIST_MEMBERS_SIZE(ipportiphash, struct ipportip)
++HASH_LIST_MEMBERS_MEMCPY(ipportiphash, struct ipportip)
+
-+ port = get_port(skb, flags[index+1]);
++IP_SET_RTYPE(ipportiphash, IPSET_TYPE_IP | IPSET_TYPE_PORT
++ | IPSET_TYPE_IP1 | IPSET_DATA_TRIPLE)
+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ NIPQUAD(ip_hdr(skb)->saddr),
-+ NIPQUAD(ip_hdr(skb)->daddr));
-+#else
-+ NIPQUAD(skb->nh.iph->saddr),
-+ NIPQUAD(skb->nh.iph->daddr));
-+#endif
-+ DP("flag %s port %u",
-+ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
-+ port);
-+ if (port == INVALID_PORT)
-+ return -EINVAL;
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("ipportiphash type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
+
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ port,
-+ hash_ip);
-+}
++REGISTER_MODULE(ipportiphash)
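
Note: KADT_CONDITION supplies the locals (here port and ip1) that the KADT()-generated kernel hook needs before calling the type's test/add/del function, and may bail out early for packets without a usable port. Judging from the removed *_kernel() functions and the macipmap_ktest() naming used later in this patch, a KADT(ipportiphash, test, ipaddr, port, ip1) expansion is roughly the following sketch (the real macro lives in ip_set_hashes.h; shape and names are assumptions):

/* Hedged sketch of the kernel-side wrapper generated by KADT() */
static int
ipportiphash_ktest(struct ip_set *set, const struct sk_buff *skb,
		   ip_set_ip_t *hash_ip, const u_int32_t *flags,
		   unsigned char index)
{
	int res;

	KADT_CONDITION	/* declares and fills port, ip1; may return 0 early */
	res = ipportiphash_test(set, hash_ip,
				ipaddr(skb, flags[index]),	/* SRC or DST */
				port, ip1);
	return res < 0 ? 0 : res;
}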
+--- /dev/null
++++ b/net/ipv4/netfilter/ip_set_ipportnethash.c
+@@ -0,0 +1,304 @@
++/* Copyright (C) 2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module implementing an ip+port+net hash set */
++
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/ip.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++#include <linux/random.h>
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++#include <net/ip.h>
++
++#include <linux/netfilter_ipv4/ip_set_ipportnethash.h>
++#include <linux/netfilter_ipv4/ip_set_getport.h>
++
++static int limit = MAX_RANGE;
++
++#define jhash_ip2(map, i, ipport, ip1) \
++ jhash_2words(ipport, ip1, *(map->initval + i))
++
++static inline __u32
++ipportnethash_id_cidr(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port,
++ ip_set_ip_t ip1, uint8_t cidr)
+{
-+ struct ip_set_req_ipporthash_create *req =
-+ (struct ip_set_req_ipporthash_create *) data;
-+ struct ip_set_ipporthash *map;
-+ uint16_t i;
++ struct ip_set_ipportnethash *map = set->data;
++ __u32 id;
++ u_int16_t i;
++ struct ipportip *elem;
+
-+ if (size != sizeof(struct ip_set_req_ipporthash_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipporthash_create),
-+ size);
-+ return -EINVAL;
++ *hash_ip = pack_ip_port(map, ip, port);
++ DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
++ set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
++ ip1 = pack_ip_cidr(ip1, cidr);
++ if (!(*hash_ip || ip1))
++ return UINT_MAX;
++
++ for (i = 0; i < map->probes; i++) {
++ id = jhash_ip2(map, i, *hash_ip, ip1) % map->hashsize;
++ DP("hash key: %u", id);
++ elem = HARRAY_ELEM(map->members, struct ipportip *, id);
++ if (elem->ip == *hash_ip && elem->ip1 == ip1)
++ return id;
++ /* No shortcut - there can be deleted entries. */
+ }
++ return UINT_MAX;
++}
+
-+ if (req->hashsize < 1) {
-+ ip_set_printk("hashsize too small");
-+ return -ENOEXEC;
-+ }
++static inline __u32
++ipportnethash_id(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
++{
++ struct ip_set_ipportnethash *map = set->data;
++ __u32 id = UINT_MAX;
++ int i;
+
-+ if (req->probes < 1) {
-+ ip_set_printk("probes too small");
-+ return -ENOEXEC;
++ for (i = 0; i < 30 && map->cidr[i]; i++) {
++ id = ipportnethash_id_cidr(set, hash_ip, ip, port, ip1,
++ map->cidr[i]);
++ if (id != UINT_MAX)
++ break;
+ }
++ return id;
++}
+
-+ map = kmalloc(sizeof(struct ip_set_ipporthash)
-+ + req->probes * sizeof(uint32_t), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_ipporthash)
-+ + req->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ for (i = 0; i < req->probes; i++)
-+ get_random_bytes(((uint32_t *) map->initval)+i, 4);
-+ map->elements = 0;
-+ map->hashsize = req->hashsize;
-+ map->probes = req->probes;
-+ map->resize = req->resize;
-+ map->first_ip = req->from;
-+ map->last_ip = req->to;
-+ map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
-+ kfree(map);
-+ return -ENOMEM;
-+ }
++static inline int
++ipportnethash_test_cidr(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port,
++ ip_set_ip_t ip1, uint8_t cidr)
++{
++ struct ip_set_ipportnethash *map = set->data;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
+
-+ set->data = map;
-+ return 0;
++ return (ipportnethash_id_cidr(set, hash_ip, ip, port, ip1,
++ cidr) != UINT_MAX);
+}
+
-+static void destroy(struct ip_set *set)
++static inline int
++ipportnethash_test(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+
-+ harray_free(map->members);
-+ kfree(map);
++ struct ip_set_ipportnethash *map = set->data;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
+
-+ set->data = NULL;
++ return (ipportnethash_id(set, hash_ip, ip, port, ip1) != UINT_MAX);
+}
+
-+static void flush(struct ip_set *set)
++static int
++ipportnethash_utest(struct ip_set *set, const void *data, u_int32_t size,
++ ip_set_ip_t *hash_ip)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
-+ map->elements = 0;
++ const struct ip_set_req_ipportnethash *req = data;
++
++ if (req->cidr <= 0 || req->cidr > 32)
++ return -EINVAL;
++ return (req->cidr == 32
++ ? ipportnethash_test(set, hash_ip, req->ip, req->port,
++ req->ip1)
++ : ipportnethash_test_cidr(set, hash_ip, req->ip, req->port,
++ req->ip1, req->cidr));
+}
+
-+static void list_header(const struct ip_set *set, void *data)
++#define KADT_CONDITION \
++ ip_set_ip_t port, ip1; \
++ \
++ if (flags[index+2] == 0) \
++ return 0; \
++ \
++ port = get_port(skb, flags[index+1]); \
++ ip1 = ipaddr(skb, flags[index+2]); \
++ \
++ if (port == INVALID_PORT) \
++ return 0;
++
++KADT(ipportnethash, test, ipaddr, port, ip1)
++
++static inline int
++__ipportnet_add(struct ip_set_ipportnethash *map,
++ ip_set_ip_t hash_ip, ip_set_ip_t ip1)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ struct ip_set_req_ipporthash_create *header =
-+ (struct ip_set_req_ipporthash_create *) data;
++ __u32 probe;
++ u_int16_t i;
++ struct ipportip *elem, *slot = NULL;
+
-+ header->hashsize = map->hashsize;
-+ header->probes = map->probes;
-+ header->resize = map->resize;
-+ header->from = map->first_ip;
-+ header->to = map->last_ip;
++ for (i = 0; i < map->probes; i++) {
++ probe = jhash_ip2(map, i, hash_ip, ip1) % map->hashsize;
++ elem = HARRAY_ELEM(map->members, struct ipportip *, probe);
++ if (elem->ip == hash_ip && elem->ip1 == ip1)
++ return -EEXIST;
++ if (!(slot || elem->ip || elem->ip1))
++ slot = elem;
++ /* There can be deleted entries, must check all slots */
++ }
++ if (slot) {
++ slot->ip = hash_ip;
++ slot->ip1 = ip1;
++ map->elements++;
++ return 0;
++ }
++ /* Trigger rehashing */
++ return -EAGAIN;
+}
+
-+static int list_members_size(const struct ip_set *set)
++static inline int
++__ipportnethash_add(struct ip_set_ipportnethash *map,
++ struct ipportip *elem)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+
-+ return (map->hashsize * sizeof(ip_set_ip_t));
++ return __ipportnet_add(map, elem->ip, elem->ip1);
+}
+
-+static void list_members(const struct ip_set *set, void *data)
++static inline int
++ipportnethash_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port,
++ ip_set_ip_t ip1, uint8_t cidr)
+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ ip_set_ip_t i, *elem;
++ struct ip_set_ipportnethash *map = set->data;
++ struct ipportip;
++ int ret;
++
++ if (map->elements > limit)
++ return -ERANGE;
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++ if (cidr <= 0 || cidr >= 32)
++ return -EINVAL;
++ if (map->nets[cidr-1] == UINT16_MAX)
++ return -ERANGE;
+
-+ for (i = 0; i < map->hashsize; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ ((ip_set_ip_t *)data)[i] = *elem;
++ *hash_ip = pack_ip_port(map, ip, port);
++ ip1 = pack_ip_cidr(ip1, cidr);
++ if (!(*hash_ip || ip1))
++ return -ERANGE;
++
++ ret = __ipportnet_add(map, *hash_ip, ip1);
++ if (ret == 0) {
++ if (!map->nets[cidr-1]++)
++ add_cidr_size(map->cidr, cidr);
++ map->elements++;
+ }
++ return ret;
+}
+
-+static struct ip_set_type ip_set_ipporthash = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_DATA_DOUBLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_ipporthash),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .retry = &retry,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_ipporthash_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++#undef KADT_CONDITION
++#define KADT_CONDITION \
++ struct ip_set_ipportnethash *map = set->data; \
++ uint8_t cidr = map->cidr[0] ? map->cidr[0] : 31; \
++ ip_set_ip_t port, ip1; \
++ \
++ if (flags[index+2] == 0) \
++ return 0; \
++ \
++ port = get_port(skb, flags[index+1]); \
++ ip1 = ipaddr(skb, flags[index+2]); \
++ \
++ if (port == INVALID_PORT) \
++ return 0;
+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("ipporthash type of IP sets");
-+module_param(limit, int, 0600);
-+MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
++UADT(ipportnethash, add, req->port, req->ip1, req->cidr)
++KADT(ipportnethash, add, ipaddr, port, ip1, cidr)
+
-+static int __init ip_set_ipporthash_init(void)
++static inline void
++__ipportnethash_retry(struct ip_set_ipportnethash *tmp,
++ struct ip_set_ipportnethash *map)
+{
-+ return ip_set_register_set_type(&ip_set_ipporthash);
++ tmp->first_ip = map->first_ip;
++ tmp->last_ip = map->last_ip;
++ memcpy(tmp->cidr, map->cidr, sizeof(tmp->cidr));
++ memcpy(tmp->nets, map->nets, sizeof(tmp->nets));
+}
+
-+static void __exit ip_set_ipporthash_fini(void)
++HASH_RETRY2(ipportnethash, struct ipportip)
++
++static inline int
++ipportnethash_del(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, ip_set_ip_t port,
++ ip_set_ip_t ip1, uint8_t cidr)
+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_ipporthash);
++ struct ip_set_ipportnethash *map = set->data;
++ ip_set_ip_t id;
++ struct ipportip *elem;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++ if (!ip)
++ return -ERANGE;
++ if (cidr <= 0 || cidr >= 32)
++ return -EINVAL;
++
++ id = ipportnethash_id_cidr(set, hash_ip, ip, port, ip1, cidr);
++
++ if (id == UINT_MAX)
++ return -EEXIST;
++
++ elem = HARRAY_ELEM(map->members, struct ipportip *, id);
++ elem->ip = elem->ip1 = 0;
++ map->elements--;
++ if (!map->nets[cidr-1]--)
++ del_cidr_size(map->cidr, cidr);
++
++ return 0;
++}
++
++UADT(ipportnethash, del, req->port, req->ip1, req->cidr)
++KADT(ipportnethash, del, ipaddr, port, ip1, cidr)
++
++static inline int
++__ipportnethash_create(const struct ip_set_req_ipportnethash_create *req,
++ struct ip_set_ipportnethash *map)
++{
++ if (req->to - req->from > MAX_RANGE) {
++ ip_set_printk("range too big, %d elements (max %d)",
++ req->to - req->from + 1, MAX_RANGE+1);
++ return -ENOEXEC;
++ }
++ map->first_ip = req->from;
++ map->last_ip = req->to;
++ memset(map->cidr, 0, sizeof(map->cidr));
++ memset(map->nets, 0, sizeof(map->nets));
++ return 0;
+}
+
-+module_init(ip_set_ipporthash_init);
-+module_exit(ip_set_ipporthash_fini);
++HASH_CREATE(ipportnethash, struct ipportip)
++HASH_DESTROY(ipportnethash)
++HASH_FLUSH_CIDR(ipportnethash, struct ipportip);
++
++static inline void
++__ipportnethash_list_header(const struct ip_set_ipportnethash *map,
++ struct ip_set_req_ipportnethash_create *header)
++{
++ header->from = map->first_ip;
++ header->to = map->last_ip;
++}
++
++HASH_LIST_HEADER(ipportnethash)
++
++HASH_LIST_MEMBERS_SIZE(ipportnethash, struct ipportip)
++HASH_LIST_MEMBERS_MEMCPY(ipportnethash, struct ipportip)
++
++IP_SET_RTYPE(ipportnethash, IPSET_TYPE_IP | IPSET_TYPE_PORT
++ | IPSET_TYPE_IP1 | IPSET_DATA_TRIPLE)
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("ipportnethash type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
++
++REGISTER_MODULE(ipportnethash)
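
Note: ipportnethash keeps two pieces of per-prefix bookkeeping: nets[cidr-1] counts entries stored with a given prefix length, and cidr[] lists the prefix lengths currently in use so that ipportnethash_id() only probes netmasks that can actually match. The add_cidr_size()/del_cidr_size() helpers are defined elsewhere in this patch; the sketch below is an assumption about what they maintain (a descending-order list, so more specific prefixes are presumably tried first), not the real implementation:

/* Hedged sketch (assumption) of the cidr[] bookkeeping */
static inline void
add_cidr_size_sketch(uint8_t *cidr, uint8_t size)
{
	int i;

	for (i = 0; i < 30 && cidr[i]; i++) {
		if (cidr[i] < size) {
			/* bubble the new prefix length into place */
			uint8_t tmp = cidr[i];
			cidr[i] = size;
			size = tmp;
		}
	}
	if (i < 30)		/* array full: value silently dropped */
		cidr[i] = size;
}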
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_iptree.c
-@@ -0,0 +1,612 @@
-+/* Copyright (C) 2005 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+@@ -0,0 +1,466 @@
++/* Copyright (C) 2005-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
@@ -4525,24 +5067,20 @@
+
+/* Kernel module implementing an IP set type: the iptree type */
+
-+#include <linux/version.h>
+#include <linux/module.h>
++#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
++#include <linux/timer.h>
+
-+/* Backward compatibility */
-+#ifndef __nocast
-+#define __nocast
-+#endif
-+
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
+#include <linux/netfilter_ipv4/ip_set_iptree.h>
+
+static int limit = MAX_RANGE;
@@ -4553,13 +5091,9 @@
+ * to delete the gc timer at destroying/flushing a set */
+#define IPTREE_DESTROY_SLEEP 100
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+static struct kmem_cache *branch_cachep;
-+static struct kmem_cache *leaf_cachep;
-+#else
-+static kmem_cache_t *branch_cachep;
-+static kmem_cache_t *leaf_cachep;
-+#endif
++static __KMEM_CACHE_T__ *branch_cachep;
++static __KMEM_CACHE_T__ *leaf_cachep;
++
+
+#if defined(__LITTLE_ENDIAN)
+#define ABCD(a,b,c,d,addrp) do { \
@@ -4587,9 +5121,9 @@
+} while (0)
+
+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++iptree_test(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptree *map = set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
@@ -4597,7 +5131,7 @@
+
+ if (!ip)
+ return -ERANGE;
-+
++
+ *hash_ip = ip;
+ ABCD(a, b, c, d, hash_ip);
+ DP("%u %u %u %u timeout %u", a, b, c, d, map->timeout);
@@ -4610,53 +5144,10 @@
+ || time_after(dtree->expires[d], jiffies));
+}
+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iptree *req =
-+ (struct ip_set_req_iptree *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptree)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iptree),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ int res;
-+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ NIPQUAD(ip_hdr(skb)->saddr),
-+ NIPQUAD(ip_hdr(skb)->daddr));
-+#else
-+ NIPQUAD(skb->nh.iph->saddr),
-+ NIPQUAD(skb->nh.iph->daddr));
-+#endif
++#define KADT_CONDITION
+
-+ res = __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+ return (res < 0 ? 0 : res);
-+}
++UADT(iptree, test)
++KADT(iptree, test, ipaddr)
+
+#define ADDIP_WALK(map, elem, branch, type, cachep) do { \
+ if ((map)->tree[elem]) { \
@@ -4671,24 +5162,24 @@
+ (map)->tree[elem] = branch; \
+ DP("alloc %u", elem); \
+ } \
-+} while (0)
++} while (0)
+
+static inline int
-+__addip(struct ip_set *set, ip_set_ip_t ip, unsigned int timeout,
-+ ip_set_ip_t *hash_ip)
++iptree_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, unsigned int timeout)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptree *map = set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
+ unsigned char a,b,c,d;
+ int ret = 0;
-+
++
+ if (!ip || map->elements >= limit)
+ /* We could call the garbage collector
+ * but it's probably overkill */
+ return -ERANGE;
-+
++
+ *hash_ip = ip;
+ ABCD(a, b, c, d, hash_ip);
+ DP("%u %u %u %u timeout %u", a, b, c, d, timeout);
@@ -4698,6 +5189,8 @@
+ if (dtree->expires[d]
+ && (!map->timeout || time_after(dtree->expires[d], jiffies)))
+ ret = -EEXIST;
++ if (map->timeout && timeout == 0)
++ timeout = map->timeout;
+ dtree->expires[d] = map->timeout ? (timeout * HZ + jiffies) : 1;
+ /* Lottery: I won! */
+ if (dtree->expires[d] == 0)
@@ -4708,47 +5201,8 @@
+ return ret;
+}
+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_req_iptree *req =
-+ (struct ip_set_req_iptree *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptree)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iptree),
-+ size);
-+ return -EINVAL;
-+ }
-+ DP("%u.%u.%u.%u %u", HIPQUAD(req->ip), req->timeout);
-+ return __addip(set, req->ip,
-+ req->timeout ? req->timeout : map->timeout,
-+ hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+
-+ return __addip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ map->timeout,
-+ hash_ip);
-+}
++UADT(iptree, add, req->timeout)
++KADT(iptree, add, ipaddr, 0)
+
+#define DELIP_WALK(map, elem, branch) do { \
+ if ((map)->tree[elem]) { \
@@ -4758,17 +5212,17 @@
+} while (0)
+
+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++iptree_del(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptree *map = set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
+ unsigned char a,b,c,d;
-+
++
+ if (!ip)
+ return -ERANGE;
-+
++
+ *hash_ip = ip;
+ ABCD(a, b, c, d, hash_ip);
+ DELIP_WALK(map, a, btree);
@@ -4783,40 +5237,8 @@
+ return -EEXIST;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iptree *req =
-+ (struct ip_set_req_iptree *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptree)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iptree),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT(iptree, del)
++KADT(iptree, del, ipaddr)
+
+#define LOOP_WALK_BEGIN(map, i, branch) \
+ for (i = 0; i < 256; i++) { \
@@ -4826,10 +5248,11 @@
+
+#define LOOP_WALK_END }
+
-+static void ip_tree_gc(unsigned long ul_set)
++static void
++ip_tree_gc(unsigned long ul_set)
+{
-+ struct ip_set *set = (void *) ul_set;
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set *set = (struct ip_set *) ul_set;
++ struct ip_set_iptree *map = set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
@@ -4891,14 +5314,15 @@
+ }
+ LOOP_WALK_END;
+ write_unlock_bh(&set->lock);
-+
++
+ map->gc.expires = jiffies + map->gc_interval * HZ;
+ add_timer(&map->gc);
+}
+
-+static inline void init_gc_timer(struct ip_set *set)
++static inline void
++init_gc_timer(struct ip_set *set)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptree *map = set->data;
+
+ /* Even if there is no timeout for the entries,
+ * we still have to call gc because delete
@@ -4911,22 +5335,22 @@
+ add_timer(&map->gc);
+}
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static int
++iptree_create(struct ip_set *set, const void *data, u_int32_t size)
+{
-+ struct ip_set_req_iptree_create *req =
-+ (struct ip_set_req_iptree_create *) data;
++ const struct ip_set_req_iptree_create *req = data;
+ struct ip_set_iptree *map;
+
+ if (size != sizeof(struct ip_set_req_iptree_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
++ ip_set_printk("data length wrong (want %zu, have %lu)",
+ sizeof(struct ip_set_req_iptree_create),
-+ size);
++ (unsigned long)size);
+ return -EINVAL;
+ }
+
+ map = kmalloc(sizeof(struct ip_set_iptree), GFP_KERNEL);
+ if (!map) {
-+ DP("out of memory for %d bytes",
++ DP("out of memory for %zu bytes",
+ sizeof(struct ip_set_iptree));
+ return -ENOMEM;
+ }
@@ -4940,7 +5364,8 @@
+ return 0;
+}
+
-+static void __flush(struct ip_set_iptree *map)
++static inline void
++__flush(struct ip_set_iptree *map)
+{
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
@@ -4959,9 +5384,10 @@
+ map->elements = 0;
+}
+
-+static void destroy(struct ip_set *set)
++static void
++iptree_destroy(struct ip_set *set)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptree *map = set->data;
+
+ /* gc might be running */
+ while (!del_timer(&map->gc))
@@ -4971,11 +5397,12 @@
+ set->data = NULL;
+}
+
-+static void flush(struct ip_set *set)
++static void
++iptree_flush(struct ip_set *set)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptree *map = set->data;
+ unsigned int timeout = map->timeout;
-+
++
+ /* gc might be running */
+ while (!del_timer(&map->gc))
+ msleep(IPTREE_DESTROY_SLEEP);
@@ -4986,18 +5413,19 @@
+ init_gc_timer(set);
+}
+
-+static void list_header(const struct ip_set *set, void *data)
++static void
++iptree_list_header(const struct ip_set *set, void *data)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_req_iptree_create *header =
-+ (struct ip_set_req_iptree_create *) data;
++ const struct ip_set_iptree *map = set->data;
++ struct ip_set_req_iptree_create *header = data;
+
+ header->timeout = map->timeout;
+}
+
-+static int list_members_size(const struct ip_set *set)
++static int
++iptree_list_members_size(const struct ip_set *set)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ const struct ip_set_iptree *map = set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
@@ -5020,9 +5448,10 @@
+ return (count * sizeof(struct ip_set_req_iptree));
+}
+
-+static void list_members(const struct ip_set *set, void *data)
++static void
++iptree_list_members(const struct ip_set *set, void *data)
+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ const struct ip_set_iptree *map = set->data;
+ struct ip_set_iptreeb *btree;
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
@@ -5036,7 +5465,7 @@
+ for (d = 0; d < 256; d++) {
+ if (dtree->expires[d]
+ && (!map->timeout || time_after(dtree->expires[d], jiffies))) {
-+ entry = (struct ip_set_req_iptree *)(data + offset);
++ entry = data + offset;
+ entry->ip = ((a << 24) | (b << 16) | (c << 8) | d);
+ entry->timeout = !map->timeout ? 0
+ : (dtree->expires[d] - jiffies)/HZ;
@@ -5048,26 +5477,7 @@
+ LOOP_WALK_END;
+}
+
-+static struct ip_set_type ip_set_iptree = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_iptree),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_iptree_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_TYPE(iptree, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -5078,30 +5488,16 @@
+static int __init ip_set_iptree_init(void)
+{
+ int ret;
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ branch_cachep = kmem_cache_create("ip_set_iptreeb",
-+ sizeof(struct ip_set_iptreeb),
-+ 0, 0, NULL);
-+#else
-+ branch_cachep = kmem_cache_create("ip_set_iptreeb",
-+ sizeof(struct ip_set_iptreeb),
-+ 0, 0, NULL, NULL);
-+#endif
++
++ branch_cachep = KMEM_CACHE_CREATE("ip_set_iptreeb",
++ sizeof(struct ip_set_iptreeb));
+ if (!branch_cachep) {
+ printk(KERN_ERR "Unable to create ip_set_iptreeb slab cache\n");
+ ret = -ENOMEM;
+ goto out;
+ }
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ leaf_cachep = kmem_cache_create("ip_set_iptreed",
-+ sizeof(struct ip_set_iptreed),
-+ 0, 0, NULL);
-+#else
-+ leaf_cachep = kmem_cache_create("ip_set_iptreed",
-+ sizeof(struct ip_set_iptreed),
-+ 0, 0, NULL, NULL);
-+#endif
++ leaf_cachep = KMEM_CACHE_CREATE("ip_set_iptreed",
++ sizeof(struct ip_set_iptreed));
+ if (!leaf_cachep) {
+ printk(KERN_ERR "Unable to create ip_set_iptreed slab cache\n");
+ ret = -ENOMEM;
@@ -5112,7 +5508,7 @@
+ goto out;
+
+ kmem_cache_destroy(leaf_cachep);
-+ free_branch:
++ free_branch:
+ kmem_cache_destroy(branch_cachep);
+ out:
+ return ret;
@@ -5130,7 +5526,7 @@
+module_exit(ip_set_iptree_fini);
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_iptreemap.c
-@@ -0,0 +1,829 @@
+@@ -0,0 +1,708 @@
+/* Copyright (C) 2007 Sven Wegener <sven.wegener@stealer.net>
+ *
+ * This program is free software; you can redistribute it and/or modify it
@@ -5139,38 +5535,33 @@
+ */
+
+/* This module implements the iptreemap ipset type. It uses bitmaps to
-+ * represent every single IPv4 address as a single bit. The bitmaps are managed
-+ * in a tree structure, where the first three octets of an addresses are used
-+ * as an index to find the bitmap and the last octet is used as the bit number.
++ * represent every single IPv4 address as a bit. The bitmaps are managed in a
++ * tree structure, where the first three octets of an address are used as an
++ * index to find the bitmap and the last octet is used as the bit number.
+ */
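
Note: a small illustration of the decomposition described above, using the a/b/c/d naming convention of this file; the address value is in host byte order, as produced by the ntohl() conversions in the lookup paths:

/* Illustration only: how 192.0.2.130 lands in the tree */
ip_set_ip_t ip = (192 << 24) | (0 << 16) | (2 << 8) | 130;
unsigned char a = ip >> 24;		/* 192: index into the top level      */
unsigned char b = (ip >> 16) & 0xff;	/*   0: index into the second level   */
unsigned char c = (ip >> 8) & 0xff;	/*   2: selects the leaf bitmap       */
unsigned char d = ip & 0xff;		/* 130: bit number within that bitmap */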
+
-+#include <linux/version.h>
++#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
++#include <linux/timer.h>
+
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
+#include <linux/netfilter_ipv4/ip_set_iptreemap.h>
+
+#define IPTREEMAP_DEFAULT_GC_TIME (5 * 60)
+#define IPTREEMAP_DESTROY_SLEEP (100)
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+static struct kmem_cache *cachep_b;
-+static struct kmem_cache *cachep_c;
-+static struct kmem_cache *cachep_d;
-+#else
-+static kmem_cache_t *cachep_b;
-+static kmem_cache_t *cachep_c;
-+static kmem_cache_t *cachep_d;
-+#endif
++static __KMEM_CACHE_T__ *cachep_b;
++static __KMEM_CACHE_T__ *cachep_c;
++static __KMEM_CACHE_T__ *cachep_d;
+
+static struct ip_set_iptreemap_d *fullbitmap_d;
+static struct ip_set_iptreemap_c *fullbitmap_c;
@@ -5319,9 +5710,6 @@
+#define LOOP_WALK_END_COUNT() \
+ }
+
-+#define MIN(a, b) (a < b ? a : b)
-+#define MAX(a, b) (a > b ? a : b)
-+
+#define GETVALUE1(a, a1, b1, r) \
+ (a == a1 ? b1 : r)
+
@@ -5391,9 +5779,9 @@
+}
+
+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++iptreemap_test(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5410,40 +5798,13 @@
+ return !!test_bit(d, (void *) dtree->bitmap);
+}
+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptreemap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
-+ return -EINVAL;
-+ }
-+
-+ return __testip(set, req->start, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
-+{
-+ int res;
++#define KADT_CONDITION
+
-+ res = __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+
-+ return (res < 0 ? 0 : res);
-+}
++UADT(iptreemap, test)
++KADT(iptreemap, test, ipaddr)
+
+static inline int
-+__addip_single(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++__addip_single(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
+ struct ip_set_iptreemap_b *btree;
@@ -5459,18 +5820,19 @@
+ ADDIP_WALK(btree, b, ctree, struct ip_set_iptreemap_c, cachep_c, fullbitmap_c);
+ ADDIP_WALK(ctree, c, dtree, struct ip_set_iptreemap_d, cachep_d, fullbitmap_d);
+
-+ if (test_and_set_bit(d, (void *) dtree->bitmap))
++ if (__test_and_set_bit(d, (void *) dtree->bitmap))
+ return -EEXIST;
+
-+ set_bit(b, (void *) btree->dirty);
++ __set_bit(b, (void *) btree->dirty);
+
+ return 0;
+}
+
+static inline int
-+__addip_range(struct ip_set *set, ip_set_ip_t start, ip_set_ip_t end, ip_set_ip_t *hash_ip)
++iptreemap_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t start, ip_set_ip_t end)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5479,7 +5841,7 @@
+ unsigned char a2, b2, c2, d2;
+
+ if (start == end)
-+ return __addip_single(set, start, hash_ip);
++ return __addip_single(set, hash_ip, start);
+
+ *hash_ip = start;
+
@@ -5491,8 +5853,8 @@
+ ADDIP_RANGE_LOOP(btree, b, GETVALUE1(a, a1, b1, 0), GETVALUE1(a, a2, b2, 255), CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2), ctree, fullbitmap_c, cachep_c, free_c) {
+ ADDIP_RANGE_LOOP(ctree, c, GETVALUE2(a, b, a1, b1, c1, 0), GETVALUE2(a, b, a2, b2, c2, 255), CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2), dtree, fullbitmap_d, cachep_d, free_d) {
+ for (d = GETVALUE3(a, b, c, a1, b1, c1, d1, 0); d <= GETVALUE3(a, b, c, a2, b2, c2, d2, 255); d++)
-+ set_bit(d, (void *) dtree->bitmap);
-+ set_bit(b, (void *) btree->dirty);
++ __set_bit(d, (void *) dtree->bitmap);
++ __set_bit(b, (void *) btree->dirty);
+ } ADDIP_RANGE_LOOP_END();
+ } ADDIP_RANGE_LOOP_END();
+ } ADDIP_RANGE_LOOP_END();
@@ -5500,39 +5862,14 @@
+ return 0;
+}
+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptreemap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
-+ return -EINVAL;
-+ }
-+
-+ return __addip_range(set, MIN(req->start, req->end), MAX(req->start, req->end), hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
-+{
-+
-+ return __addip_single(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++UADT0(iptreemap, add, min(req->ip, req->end), max(req->ip, req->end))
++KADT(iptreemap, add, ipaddr, ip)
+
+static inline int
-+__delip_single(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip, unsigned int __nocast flags)
++__delip_single(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, gfp_t flags)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5546,18 +5883,19 @@
+ DELIP_WALK(btree, b, ctree, cachep_c, fullbitmap_c, flags);
+ DELIP_WALK(ctree, c, dtree, cachep_d, fullbitmap_d, flags);
+
-+ if (!test_and_clear_bit(d, (void *) dtree->bitmap))
++ if (!__test_and_clear_bit(d, (void *) dtree->bitmap))
+ return -EEXIST;
+
-+ set_bit(b, (void *) btree->dirty);
++ __set_bit(b, (void *) btree->dirty);
+
+ return 0;
+}
+
+static inline int
-+__delip_range(struct ip_set *set, ip_set_ip_t start, ip_set_ip_t end, ip_set_ip_t *hash_ip, unsigned int __nocast flags)
++iptreemap_del(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t start, ip_set_ip_t end, gfp_t flags)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5566,7 +5904,7 @@
+ unsigned char a2, b2, c2, d2;
+
+ if (start == end)
-+ return __delip_single(set, start, hash_ip, flags);
++ return __delip_single(set, hash_ip, start, flags);
+
+ *hash_ip = start;
+
@@ -5578,8 +5916,8 @@
+ DELIP_RANGE_LOOP(btree, b, GETVALUE1(a, a1, b1, 0), GETVALUE1(a, a2, b2, 255), CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2), ctree, fullbitmap_c, cachep_c, free_c, flags) {
+ DELIP_RANGE_LOOP(ctree, c, GETVALUE2(a, b, a1, b1, c1, 0), GETVALUE2(a, b, a2, b2, c2, 255), CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2), dtree, fullbitmap_d, cachep_d, free_d, flags) {
+ for (d = GETVALUE3(a, b, c, a1, b1, c1, d1, 0); d <= GETVALUE3(a, b, c, a2, b2, c2, d2, 255); d++)
-+ clear_bit(d, (void *) dtree->bitmap);
-+ set_bit(b, (void *) btree->dirty);
++ __clear_bit(d, (void *) dtree->bitmap);
++ __set_bit(b, (void *) btree->dirty);
+ } DELIP_RANGE_LOOP_END();
+ } DELIP_RANGE_LOOP_END();
+ } DELIP_RANGE_LOOP_END();
@@ -5587,34 +5925,8 @@
+ return 0;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptreemap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
-+ return -EINVAL;
-+ }
-+
-+ return __delip_range(set, MIN(req->start, req->end), MAX(req->start, req->end), hash_ip, GFP_KERNEL);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
-+{
-+ return __delip_single(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip,
-+ GFP_ATOMIC);
-+}
++UADT0(iptreemap, del, min(req->ip, req->end), max(req->ip, req->end), GFP_KERNEL)
++KADT(iptreemap, del, ipaddr, ip, GFP_ATOMIC)
+
+/* Check the status of the bitmap
+ * -1 == all bits cleared
@@ -5638,7 +5950,7 @@
+gc(unsigned long addr)
+{
+ struct ip_set *set = (struct ip_set *) addr;
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5649,7 +5961,7 @@
+
+ LOOP_WALK_BEGIN_GC(map, a, btree, fullbitmap_b, cachep_b, i) {
+ LOOP_WALK_BEGIN_GC(btree, b, ctree, fullbitmap_c, cachep_c, j) {
-+ if (!test_and_clear_bit(b, (void *) btree->dirty))
++ if (!__test_and_clear_bit(b, (void *) btree->dirty))
+ continue;
+ LOOP_WALK_BEGIN_GC(ctree, c, dtree, fullbitmap_d, cachep_d, k) {
+ switch (bitmap_status(dtree)) {
@@ -5677,7 +5989,7 @@
+static inline void
+init_gc_timer(struct ip_set *set)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+
+ init_timer(&map->gc);
+ map->gc.data = (unsigned long) set;
@@ -5686,16 +5998,12 @@
+ add_timer(&map->gc);
+}
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static int
++iptreemap_create(struct ip_set *set, const void *data, u_int32_t size)
+{
-+ struct ip_set_req_iptreemap_create *req = (struct ip_set_req_iptreemap_create *) data;
++ const struct ip_set_req_iptreemap_create *req = data;
+ struct ip_set_iptreemap *map;
+
-+ if (size != sizeof(struct ip_set_req_iptreemap_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap_create), size);
-+ return -EINVAL;
-+ }
-+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
@@ -5708,7 +6016,8 @@
+ return 0;
+}
+
-+static inline void __flush(struct ip_set_iptreemap *map)
++static inline void
++__flush(struct ip_set_iptreemap *map)
+{
+ struct ip_set_iptreemap_b *btree;
+ unsigned int a;
@@ -5719,9 +6028,10 @@
+ LOOP_WALK_END();
+}
+
-+static void destroy(struct ip_set *set)
++static void
++iptreemap_destroy(struct ip_set *set)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+
+ while (!del_timer(&map->gc))
+ msleep(IPTREEMAP_DESTROY_SLEEP);
@@ -5732,9 +6042,10 @@
+ set->data = NULL;
+}
+
-+static void flush(struct ip_set *set)
++static void
++iptreemap_flush(struct ip_set *set)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+
+ while (!del_timer(&map->gc))
+ msleep(IPTREEMAP_DESTROY_SLEEP);
@@ -5746,17 +6057,19 @@
+ init_gc_timer(set);
+}
+
-+static void list_header(const struct ip_set *set, void *data)
++static void
++iptreemap_list_header(const struct ip_set *set, void *data)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
-+ struct ip_set_req_iptreemap_create *header = (struct ip_set_req_iptreemap_create *) data;
++ struct ip_set_iptreemap *map = set->data;
++ struct ip_set_req_iptreemap_create *header = data;
+
+ header->gc_interval = map->gc_interval;
+}
+
-+static int list_members_size(const struct ip_set *set)
++static int
++iptreemap_list_members_size(const struct ip_set *set)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5783,19 +6096,21 @@
+ return (count * sizeof(struct ip_set_req_iptreemap));
+}
+
-+static inline size_t add_member(void *data, size_t offset, ip_set_ip_t start, ip_set_ip_t end)
++static inline u_int32_t
++add_member(void *data, size_t offset, ip_set_ip_t start, ip_set_ip_t end)
+{
-+ struct ip_set_req_iptreemap *entry = (struct ip_set_req_iptreemap *) (data + offset);
++ struct ip_set_req_iptreemap *entry = data + offset;
+
-+ entry->start = start;
++ entry->ip = start;
+ entry->end = end;
+
+ return sizeof(*entry);
+}
+
-+static void list_members(const struct ip_set *set, void *data)
++static void
++iptreemap_list_members(const struct ip_set *set, void *data)
+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap *map = set->data;
+ struct ip_set_iptreemap_b *btree;
+ struct ip_set_iptreemap_c *ctree;
+ struct ip_set_iptreemap_d *dtree;
@@ -5830,26 +6145,7 @@
+ add_member(data, offset, start, end);
+}
+
-+static struct ip_set_type ip_set_iptreemap = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = create,
-+ .destroy = destroy,
-+ .flush = flush,
-+ .reqsize = sizeof(struct ip_set_req_iptreemap),
-+ .addip = addip,
-+ .addip_kernel = addip_kernel,
-+ .delip = delip,
-+ .delip_kernel = delip_kernel,
-+ .testip = testip,
-+ .testip_kernel = testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_iptreemap_create),
-+ .list_header = list_header,
-+ .list_members_size = list_members_size,
-+ .list_members = list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_TYPE(iptreemap, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sven Wegener <sven.wegener@stealer.net>");
@@ -5860,43 +6156,22 @@
+ int ret = -ENOMEM;
+ int a;
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ cachep_b = kmem_cache_create("ip_set_iptreemap_b",
-+ sizeof(struct ip_set_iptreemap_b),
-+ 0, 0, NULL);
-+#else
-+ cachep_b = kmem_cache_create("ip_set_iptreemap_b",
-+ sizeof(struct ip_set_iptreemap_b),
-+ 0, 0, NULL, NULL);
-+#endif
++ cachep_b = KMEM_CACHE_CREATE("ip_set_iptreemap_b",
++ sizeof(struct ip_set_iptreemap_b));
+ if (!cachep_b) {
+ ip_set_printk("Unable to create ip_set_iptreemap_b slab cache");
+ goto out;
+ }
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ cachep_c = kmem_cache_create("ip_set_iptreemap_c",
-+ sizeof(struct ip_set_iptreemap_c),
-+ 0, 0, NULL);
-+#else
-+ cachep_c = kmem_cache_create("ip_set_iptreemap_c",
-+ sizeof(struct ip_set_iptreemap_c),
-+ 0, 0, NULL, NULL);
-+#endif
++ cachep_c = KMEM_CACHE_CREATE("ip_set_iptreemap_c",
++ sizeof(struct ip_set_iptreemap_c));
+ if (!cachep_c) {
+ ip_set_printk("Unable to create ip_set_iptreemap_c slab cache");
+ goto outb;
+ }
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ cachep_d = kmem_cache_create("ip_set_iptreemap_d",
-+ sizeof(struct ip_set_iptreemap_d),
-+ 0, 0, NULL);
-+#else
-+ cachep_d = kmem_cache_create("ip_set_iptreemap_d",
-+ sizeof(struct ip_set_iptreemap_d),
-+ 0, 0, NULL, NULL);
-+#endif
++ cachep_d = KMEM_CACHE_CREATE("ip_set_iptreemap_d",
++ sizeof(struct ip_set_iptreemap_d));
+ if (!cachep_d) {
+ ip_set_printk("Unable to create ip_set_iptreemap_d slab cache");
+ goto outc;
@@ -5962,11 +6237,11 @@
+module_exit(ip_set_iptreemap_fini);
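
Note: KMEM_CACHE_CREATE() and __KMEM_CACHE_T__ replace the per-kernel-version #if blocks that were open-coded here before. Inferred from the removed code, the compat definitions (kept elsewhere in this patch, presumably in a shared compat header — an assumption) amount to roughly:

/* Hedged sketch of the compat helpers, inferred from the removed #ifdefs */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
#define __KMEM_CACHE_T__	struct kmem_cache
#else
#define __KMEM_CACHE_T__	kmem_cache_t
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
#define KMEM_CACHE_CREATE(name, size)	\
	kmem_cache_create(name, size, 0, 0, NULL)
#else
#define KMEM_CACHE_CREATE(name, size)	\
	kmem_cache_create(name, size, 0, 0, NULL, NULL)
#endif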
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_macipmap.c
-@@ -0,0 +1,375 @@
+@@ -0,0 +1,164 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Martin Josefsson <gandalf@wlug.westbo.se>
-+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ * Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
@@ -5978,41 +6253,29 @@
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
+#include <linux/if_ether.h>
-+#include <linux/vmalloc.h>
+
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
+#include <linux/netfilter_ipv4/ip_set_macipmap.h>
+
+static int
-+testip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
++macipmap_utest(struct ip_set *set, const void *data, u_int32_t size,
++ ip_set_ip_t *hash_ip)
+{
-+ struct ip_set_macipmap *map = (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table = (struct ip_set_macip *) map->members;
-+ struct ip_set_req_macipmap *req = (struct ip_set_req_macipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_macipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_macipmap),
-+ size);
-+ return -EINVAL;
-+ }
++ const struct ip_set_macipmap *map = set->data;
++ const struct ip_set_macip *table = map->members;
++ const struct ip_set_req_macipmap *req = data;
+
+ if (req->ip < map->first_ip || req->ip > map->last_ip)
+ return -ERANGE;
+
+ *hash_ip = req->ip;
+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
-+ set->name, HIPQUAD(req->ip), HIPQUAD(*hash_ip));
-+ if (test_bit(IPSET_MACIP_ISSET,
-+ (void *) &table[req->ip - map->first_ip].flags)) {
++ set->name, HIPQUAD(req->ip), HIPQUAD(*hash_ip));
++ if (table[req->ip - map->first_ip].match) {
+ return (memcmp(req->ethernet,
+ &table[req->ip - map->first_ip].ethernet,
+ ETH_ALEN) == 0);
@@ -6022,44 +6285,29 @@
+}
+
+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table =
-+ (struct ip_set_macip *) map->members;
++macipmap_ktest(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ const struct ip_set_macipmap *map = set->data;
++ const struct ip_set_macip *table = map->members;
+ ip_set_ip_t ip;
-+
-+ ip = ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr);
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr);
-+#endif
++
++ ip = ipaddr(skb, flags[index]);
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return 0;
+
-+ *hash_ip = ip;
++ *hash_ip = ip;
+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
-+ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
-+ if (test_bit(IPSET_MACIP_ISSET,
-+ (void *) &table[ip - map->first_ip].flags)) {
++ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
++ if (table[ip - map->first_ip].match) {
+ /* Is mac pointer valid?
+ * If so, compare... */
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
+ return (skb_mac_header(skb) >= skb->head
+ && (skb_mac_header(skb) + ETH_HLEN) <= skb->data
-+#else
-+ return (skb->mac.raw >= skb->head
-+ && (skb->mac.raw + ETH_HLEN) <= skb->data
-+#endif
+ && (memcmp(eth_hdr(skb)->h_source,
+ &table[ip - map->first_ip].ethernet,
+ ETH_ALEN) == 0));
@@ -6070,278 +6318,94 @@
+
+/* returns 0 on success */
+static inline int
-+__addip(struct ip_set *set,
-+ ip_set_ip_t ip, unsigned char *ethernet, ip_set_ip_t *hash_ip)
++macipmap_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, const unsigned char *ethernet)
+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table =
-+ (struct ip_set_macip *) map->members;
++ struct ip_set_macipmap *map = set->data;
++ struct ip_set_macip *table = map->members;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
-+ if (test_and_set_bit(IPSET_MACIP_ISSET,
-+ (void *) &table[ip - map->first_ip].flags))
++ if (table[ip - map->first_ip].match)
+ return -EEXIST;
+
+ *hash_ip = ip;
+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
+ memcpy(&table[ip - map->first_ip].ethernet, ethernet, ETH_ALEN);
++ table[ip - map->first_ip].match = IPSET_MACIP_ISSET;
+ return 0;
+}
+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_macipmap *req =
-+ (struct ip_set_req_macipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_macipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_macipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __addip(set, req->ip, req->ethernet, hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ ip_set_ip_t ip;
-+
-+ ip = ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr);
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr);
-+#endif
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ if (!(skb_mac_header(skb) >= skb->head
-+ && (skb_mac_header(skb) + ETH_HLEN) <= skb->data))
-+#else
-+ if (!(skb->mac.raw >= skb->head
-+ && (skb->mac.raw + ETH_HLEN) <= skb->data))
-+#endif
++#define KADT_CONDITION \
++ if (!(skb_mac_header(skb) >= skb->head \
++ && (skb_mac_header(skb) + ETH_HLEN) <= skb->data))\
+ return -EINVAL;
+
-+ return __addip(set, ip, eth_hdr(skb)->h_source, hash_ip);
-+}
++UADT(macipmap, add, req->ethernet)
++KADT(macipmap, add, ipaddr, eth_hdr(skb)->h_source)
+
+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++macipmap_del(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table =
-+ (struct ip_set_macip *) map->members;
++ struct ip_set_macipmap *map = set->data;
++ struct ip_set_macip *table = map->members;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
-+ if (!test_and_clear_bit(IPSET_MACIP_ISSET,
-+ (void *)&table[ip - map->first_ip].flags))
++ if (!table[ip - map->first_ip].match)
+ return -EEXIST;
+
+ *hash_ip = ip;
++ table[ip - map->first_ip].match = 0;
+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
+ return 0;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_macipmap *req =
-+ (struct ip_set_req_macipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_macipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_macipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++#undef KADT_CONDITION
++#define KADT_CONDITION
+
-+static inline size_t members_size(ip_set_id_t from, ip_set_id_t to)
-+{
-+ return (size_t)((to - from + 1) * sizeof(struct ip_set_macip));
-+}
++UADT(macipmap, del)
++KADT(macipmap, del, ipaddr)
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static inline int
++__macipmap_create(const struct ip_set_req_macipmap_create *req,
++ struct ip_set_macipmap *map)
+{
-+ int newbytes;
-+ struct ip_set_req_macipmap_create *req =
-+ (struct ip_set_req_macipmap_create *) data;
-+ struct ip_set_macipmap *map;
-+
-+ if (size != sizeof(struct ip_set_req_macipmap_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_macipmap_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ DP("from %u.%u.%u.%u to %u.%u.%u.%u",
-+ HIPQUAD(req->from), HIPQUAD(req->to));
-+
-+ if (req->from > req->to) {
-+ DP("bad ip range");
-+ return -ENOEXEC;
-+ }
-+
+ if (req->to - req->from > MAX_RANGE) {
-+ ip_set_printk("range too big (max %d addresses)",
-+ MAX_RANGE+1);
++ ip_set_printk("range too big, %d elements (max %d)",
++ req->to - req->from + 1, MAX_RANGE+1);
+ return -ENOEXEC;
+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_macipmap), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_macipmap));
-+ return -ENOMEM;
-+ }
+ map->flags = req->flags;
-+ map->first_ip = req->from;
-+ map->last_ip = req->to;
-+ newbytes = members_size(map->first_ip, map->last_ip);
-+ map->members = ip_set_malloc(newbytes);
-+ DP("members: %u %p", newbytes, map->members);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", newbytes);
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+ memset(map->members, 0, newbytes);
-+
-+ set->data = map;
-+ return 0;
-+}
-+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+
-+ ip_set_free(map->members, members_size(map->first_ip, map->last_ip));
-+ kfree(map);
-+
-+ set->data = NULL;
++ return (req->to - req->from + 1) * sizeof(struct ip_set_macip);
+}
+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ memset(map->members, 0, members_size(map->first_ip, map->last_ip));
-+}
++BITMAP_CREATE(macipmap)
++BITMAP_DESTROY(macipmap)
++BITMAP_FLUSH(macipmap)
+
-+static void list_header(const struct ip_set *set, void *data)
++static inline void
++__macipmap_list_header(const struct ip_set_macipmap *map,
++ struct ip_set_req_macipmap_create *header)
+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ struct ip_set_req_macipmap_create *header =
-+ (struct ip_set_req_macipmap_create *) data;
-+
-+ DP("list_header %x %x %u", map->first_ip, map->last_ip,
-+ map->flags);
-+
-+ header->from = map->first_ip;
-+ header->to = map->last_ip;
+ header->flags = map->flags;
+}
+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+
-+ DP("%u", members_size(map->first_ip, map->last_ip));
-+ return members_size(map->first_ip, map->last_ip);
-+}
-+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+
-+ int bytes = members_size(map->first_ip, map->last_ip);
-+
-+ DP("members: %u %p", bytes, map->members);
-+ memcpy(data, map->members, bytes);
-+}
++BITMAP_LIST_HEADER(macipmap)
++BITMAP_LIST_MEMBERS_SIZE(macipmap)
++BITMAP_LIST_MEMBERS(macipmap)
+
-+static struct ip_set_type ip_set_macipmap = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_macipmap),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_macipmap_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_TYPE(macipmap, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("macipmap type of IP sets");
+
-+static int __init ip_set_macipmap_init(void)
-+{
-+ init_max_malloc_size();
-+ return ip_set_register_set_type(&ip_set_macipmap);
-+}
-+
-+static void __exit ip_set_macipmap_fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_macipmap);
-+}
-+
-+module_init(ip_set_macipmap_init);
-+module_exit(ip_set_macipmap_fini);
++REGISTER_MODULE(macipmap)
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_nethash.c
-@@ -0,0 +1,497 @@
-+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+@@ -0,0 +1,225 @@
++/* Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
@@ -6351,63 +6415,56 @@
+/* Kernel module implementing a cidr nethash set */
+
+#include <linux/module.h>
++#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+#include <linux/jhash.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
-+#include <linux/vmalloc.h>
+#include <linux/random.h>
+
+#include <net/ip.h>
+
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
+#include <linux/netfilter_ipv4/ip_set_nethash.h>
+
+static int limit = MAX_RANGE;
+
+static inline __u32
-+jhash_ip(const struct ip_set_nethash *map, uint16_t i, ip_set_ip_t ip)
-+{
-+ return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
-+}
-+
-+static inline __u32
-+hash_id_cidr(struct ip_set_nethash *map,
-+ ip_set_ip_t ip,
-+ unsigned char cidr,
-+ ip_set_ip_t *hash_ip)
++nethash_id_cidr(const struct ip_set_nethash *map,
++ ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip,
++ uint8_t cidr)
+{
+ __u32 id;
+ u_int16_t i;
+ ip_set_ip_t *elem;
+
-+ *hash_ip = pack(ip, cidr);
-+
++ *hash_ip = pack_ip_cidr(ip, cidr);
++ if (!*hash_ip)
++ return MAX_RANGE;
++
+ for (i = 0; i < map->probes; i++) {
+ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
+ DP("hash key: %u", id);
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
+ if (*elem == *hash_ip)
+ return id;
++ /* No shortcut - there can be deleted entries. */
+ }
+ return UINT_MAX;
+}
+
+static inline __u32
-+hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++nethash_id(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++ const struct ip_set_nethash *map = set->data;
+ __u32 id = UINT_MAX;
+ int i;
+
+ for (i = 0; i < 30 && map->cidr[i]; i++) {
-+ id = hash_id_cidr(map, ip, map->cidr[i], hash_ip);
++ id = nethash_id_cidr(map, hash_ip, ip, map->cidr[i]);
+ if (id != UINT_MAX)
+ break;
+ }
@@ -6415,409 +6472,156 @@
+}
+
+static inline int
-+__testip_cidr(struct ip_set *set, ip_set_ip_t ip, unsigned char cidr,
-+ ip_set_ip_t *hash_ip)
++nethash_test_cidr(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, uint8_t cidr)
+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++ const struct ip_set_nethash *map = set->data;
+
-+ return (ip && hash_id_cidr(map, ip, cidr, hash_ip) != UINT_MAX);
++ return (nethash_id_cidr(map, hash_ip, ip, cidr) != UINT_MAX);
+}
+
+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++nethash_test(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
+{
-+ return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
++ return (nethash_id(set, hash_ip, ip) != UINT_MAX);
+}
+
+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
++nethash_utest(struct ip_set *set, const void *data, u_int32_t size,
++ ip_set_ip_t *hash_ip)
+{
-+ struct ip_set_req_nethash *req =
-+ (struct ip_set_req_nethash *) data;
++ const struct ip_set_req_nethash *req = data;
+
-+ if (size != sizeof(struct ip_set_req_nethash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_nethash),
-+ size);
++ if (req->cidr <= 0 || req->cidr > 32)
+ return -EINVAL;
-+ }
-+ return (req->cidr == 32 ? __testip(set, req->ip, hash_ip)
-+ : __testip_cidr(set, req->ip, req->cidr, hash_ip));
++ return (req->cidr == 32 ? nethash_test(set, hash_ip, req->ip)
++ : nethash_test_cidr(set, hash_ip, req->ip, req->cidr));
+}
+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+#endif
-+ hash_ip);
-+}
++#define KADT_CONDITION
++
++KADT(nethash, test, ipaddr)
+
+static inline int
-+__addip_base(struct ip_set_nethash *map, ip_set_ip_t ip)
++__nethash_add(struct ip_set_nethash *map, ip_set_ip_t *ip)
+{
+ __u32 probe;
+ u_int16_t i;
-+ ip_set_ip_t *elem;
-+
++ ip_set_ip_t *elem, *slot = NULL;
++
+ for (i = 0; i < map->probes; i++) {
-+ probe = jhash_ip(map, i, ip) % map->hashsize;
++ probe = jhash_ip(map, i, *ip) % map->hashsize;
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
-+ if (*elem == ip)
++ if (*elem == *ip)
+ return -EEXIST;
-+ if (!*elem) {
-+ *elem = ip;
-+ map->elements++;
-+ return 0;
-+ }
++ if (!(slot || *elem))
++ slot = elem;
++ /* There can be deleted entries, must check all slots */
++ }
++ if (slot) {
++ *slot = *ip;
++ map->elements++;
++ return 0;
+ }
+ /* Trigger rehashing */
+ return -EAGAIN;
+}
+
+static inline int
-+__addip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
-+ ip_set_ip_t *hash_ip)
-+{
-+ if (!ip || map->elements >= limit)
-+ return -ERANGE;
-+
-+ *hash_ip = pack(ip, cidr);
-+ DP("%u.%u.%u.%u/%u, %u.%u.%u.%u", HIPQUAD(ip), cidr, HIPQUAD(*hash_ip));
-+
-+ return __addip_base(map, *hash_ip);
-+}
-+
-+static void
-+update_cidr_sizes(struct ip_set_nethash *map, unsigned char cidr)
-+{
-+ unsigned char next;
-+ int i;
-+
-+ for (i = 0; i < 30 && map->cidr[i]; i++) {
-+ if (map->cidr[i] == cidr) {
-+ return;
-+ } else if (map->cidr[i] < cidr) {
-+ next = map->cidr[i];
-+ map->cidr[i] = cidr;
-+ cidr = next;
-+ }
-+ }
-+ if (i < 30)
-+ map->cidr[i] = cidr;
-+}
-+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
++nethash_add(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, uint8_t cidr)
+{
-+ struct ip_set_req_nethash *req =
-+ (struct ip_set_req_nethash *) data;
++ struct ip_set_nethash *map = set->data;
+ int ret;
-+
-+ if (size != sizeof(struct ip_set_req_nethash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_nethash),
-+ size);
++
++ if (map->elements >= limit || map->nets[cidr-1] == UINT16_MAX)
++ return -ERANGE;
++ if (cidr <= 0 || cidr >= 32)
+ return -EINVAL;
-+ }
-+ ret = __addip((struct ip_set_nethash *) set->data,
-+ req->ip, req->cidr, hash_ip);
-+
-+ if (ret == 0)
-+ update_cidr_sizes((struct ip_set_nethash *) set->data,
-+ req->cidr);
+
++ *hash_ip = pack_ip_cidr(ip, cidr);
++ DP("%u.%u.%u.%u/%u, %u.%u.%u.%u", HIPQUAD(ip), cidr, HIPQUAD(*hash_ip));
++ if (!*hash_ip)
++ return -ERANGE;
++
++ ret = __nethash_add(map, hash_ip);
++ if (ret == 0) {
++ if (!map->nets[cidr-1]++)
++ add_cidr_size(map->cidr, cidr);
++ map->elements++;
++ }
++
+ return ret;
+}
+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ int ret = -ERANGE;
-+ ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr);
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr);
-+#endif
++#undef KADT_CONDITION
++#define KADT_CONDITION \
++ struct ip_set_nethash *map = set->data; \
++ uint8_t cidr = map->cidr[0] ? map->cidr[0] : 31;
+
-+ if (map->cidr[0])
-+ ret = __addip(map, ip, map->cidr[0], hash_ip);
++UADT(nethash, add, req->cidr)
++KADT(nethash, add, ipaddr, cidr)
+
-+ return ret;
-+}
-+
-+static int retry(struct ip_set *set)
++static inline void
++__nethash_retry(struct ip_set_nethash *tmp, struct ip_set_nethash *map)
+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ ip_set_ip_t *elem;
-+ void *members;
-+ u_int32_t i, hashsize = map->hashsize;
-+ int res;
-+ struct ip_set_nethash *tmp;
-+
-+ if (map->resize == 0)
-+ return -ERANGE;
-+
-+ again:
-+ res = 0;
-+
-+ /* Calculate new parameters */
-+ hashsize += (hashsize * map->resize)/100;
-+ if (hashsize == map->hashsize)
-+ hashsize++;
-+
-+ ip_set_printk("rehashing of set %s triggered: "
-+ "hashsize grows from %u to %u",
-+ set->name, map->hashsize, hashsize);
-+
-+ tmp = kmalloc(sizeof(struct ip_set_nethash)
-+ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
-+ if (!tmp) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_nethash)
-+ + map->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
-+ if (!tmp->members) {
-+ DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
-+ kfree(tmp);
-+ return -ENOMEM;
-+ }
-+ tmp->hashsize = hashsize;
-+ tmp->elements = 0;
-+ tmp->probes = map->probes;
-+ tmp->resize = map->resize;
-+ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
-+ memcpy(tmp->cidr, map->cidr, 30 * sizeof(unsigned char));
-+
-+ write_lock_bh(&set->lock);
-+ map = (struct ip_set_nethash *) set->data; /* Play safe */
-+ for (i = 0; i < map->hashsize && res == 0; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ if (*elem)
-+ res = __addip_base(tmp, *elem);
-+ }
-+ if (res) {
-+ /* Failure, try again */
-+ write_unlock_bh(&set->lock);
-+ harray_free(tmp->members);
-+ kfree(tmp);
-+ goto again;
-+ }
-+
-+ /* Success at resizing! */
-+ members = map->members;
-+
-+ map->hashsize = tmp->hashsize;
-+ map->members = tmp->members;
-+ write_unlock_bh(&set->lock);
-+
-+ harray_free(members);
-+ kfree(tmp);
-+
-+ return 0;
++ memcpy(tmp->cidr, map->cidr, sizeof(tmp->cidr));
++ memcpy(tmp->nets, map->nets, sizeof(tmp->nets));
+}
+
++HASH_RETRY(nethash, ip_set_ip_t)
++
+static inline int
-+__delip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
-+ ip_set_ip_t *hash_ip)
++nethash_del(struct ip_set *set, ip_set_ip_t *hash_ip,
++ ip_set_ip_t ip, uint8_t cidr)
+{
++ struct ip_set_nethash *map = set->data;
+ ip_set_ip_t id, *elem;
+
-+ if (!ip)
-+ return -ERANGE;
-+
-+ id = hash_id_cidr(map, ip, cidr, hash_ip);
++ if (cidr <= 0 || cidr >= 32)
++ return -EINVAL;
++
++ id = nethash_id_cidr(map, hash_ip, ip, cidr);
+ if (id == UINT_MAX)
+ return -EEXIST;
-+
++
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
+ *elem = 0;
+ map->elements--;
++ if (!map->nets[cidr-1]--)
++ del_cidr_size(map->cidr, cidr);
+ return 0;
+}
+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_nethash *req =
-+ (struct ip_set_req_nethash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_nethash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_nethash),
-+ size);
-+ return -EINVAL;
-+ }
-+ /* TODO: no garbage collection in map->cidr */
-+ return __delip((struct ip_set_nethash *) set->data,
-+ req->ip, req->cidr, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ int ret = -ERANGE;
-+ ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr);
-+#else
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr);
-+#endif
-+
-+ if (map->cidr[0])
-+ ret = __delip(map, ip, map->cidr[0], hash_ip);
-+
-+ return ret;
-+}
++UADT(nethash, del, req->cidr)
++KADT(nethash, del, ipaddr, cidr)
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static inline int
++__nethash_create(const struct ip_set_req_nethash_create *req,
++ struct ip_set_nethash *map)
+{
-+ struct ip_set_req_nethash_create *req =
-+ (struct ip_set_req_nethash_create *) data;
-+ struct ip_set_nethash *map;
-+ uint16_t i;
-+
-+ if (size != sizeof(struct ip_set_req_nethash_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_nethash_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ if (req->hashsize < 1) {
-+ ip_set_printk("hashsize too small");
-+ return -ENOEXEC;
-+ }
-+ if (req->probes < 1) {
-+ ip_set_printk("probes too small");
-+ return -ENOEXEC;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_nethash)
-+ + req->probes * sizeof(uint32_t), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_nethash)
-+ + req->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ for (i = 0; i < req->probes; i++)
-+ get_random_bytes(((uint32_t *) map->initval)+i, 4);
-+ map->elements = 0;
-+ map->hashsize = req->hashsize;
-+ map->probes = req->probes;
-+ map->resize = req->resize;
-+ memset(map->cidr, 0, 30 * sizeof(unsigned char));
-+ map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+
-+ set->data = map;
++ memset(map->cidr, 0, sizeof(map->cidr));
++ memset(map->nets, 0, sizeof(map->nets));
++
+ return 0;
+}
+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+
-+ harray_free(map->members);
-+ kfree(map);
-+
-+ set->data = NULL;
-+}
-+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
-+ memset(map->cidr, 0, 30 * sizeof(unsigned char));
-+ map->elements = 0;
-+}
-+
-+static void list_header(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ struct ip_set_req_nethash_create *header =
-+ (struct ip_set_req_nethash_create *) data;
-+
-+ header->hashsize = map->hashsize;
-+ header->probes = map->probes;
-+ header->resize = map->resize;
-+}
++HASH_CREATE(nethash, ip_set_ip_t)
++HASH_DESTROY(nethash)
+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++HASH_FLUSH_CIDR(nethash, ip_set_ip_t)
+
-+ return (map->hashsize * sizeof(ip_set_ip_t));
++static inline void
++__nethash_list_header(const struct ip_set_nethash *map,
++ struct ip_set_req_nethash_create *header)
++{
+}
+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ ip_set_ip_t i, *elem;
++HASH_LIST_HEADER(nethash)
++HASH_LIST_MEMBERS_SIZE(nethash, ip_set_ip_t)
++HASH_LIST_MEMBERS(nethash, ip_set_ip_t)
+
-+ for (i = 0; i < map->hashsize; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ ((ip_set_ip_t *)data)[i] = *elem;
-+ }
-+}
-+
-+static struct ip_set_type ip_set_nethash = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_nethash),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .retry = &retry,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_nethash_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_RTYPE(nethash, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -6825,23 +6629,11 @@
+module_param(limit, int, 0600);
+MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
+
-+static int __init ip_set_nethash_init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_nethash);
-+}
-+
-+static void __exit ip_set_nethash_fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_nethash);
-+}
-+
-+module_init(ip_set_nethash_init);
-+module_exit(ip_set_nethash_fini);
++REGISTER_MODULE(nethash)
--- /dev/null
+++ b/net/ipv4/netfilter/ip_set_portmap.c
-@@ -0,0 +1,346 @@
-+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+@@ -0,0 +1,114 @@
++/* Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
@@ -6855,9 +6647,6 @@
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
@@ -6866,330 +6655,434 @@
+#include <net/ip.h>
+
+#include <linux/netfilter_ipv4/ip_set_portmap.h>
++#include <linux/netfilter_ipv4/ip_set_getport.h>
+
-+/* We must handle non-linear skbs */
-+static inline ip_set_ip_t
-+get_port(const struct sk_buff *skb, u_int32_t flags)
++static inline int
++portmap_test(const struct ip_set *set, ip_set_ip_t *hash_port,
++ ip_set_ip_t port)
+{
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ struct iphdr *iph = ip_hdr(skb);
-+#else
-+ struct iphdr *iph = skb->nh.iph;
-+#endif
-+ u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
-+ switch (iph->protocol) {
-+ case IPPROTO_TCP: {
-+ struct tcphdr tcph;
-+
-+ /* See comments at tcp_match in ip_tables.c */
-+ if (offset)
-+ return INVALID_PORT;
++ const struct ip_set_portmap *map = set->data;
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
-+#else
-+ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &tcph, sizeof(tcph)) < 0)
-+#endif
-+ /* No choice either */
-+ return INVALID_PORT;
++ if (port < map->first_ip || port > map->last_ip)
++ return -ERANGE;
++
++ *hash_port = port;
++ DP("set: %s, port:%u, %u", set->name, port, *hash_port);
++ return !!test_bit(port - map->first_ip, map->members);
++}
+
-+ return ntohs(flags & IPSET_SRC ?
-+ tcph.source : tcph.dest);
-+ }
-+ case IPPROTO_UDP: {
-+ struct udphdr udph;
++#define KADT_CONDITION \
++ if (ip == INVALID_PORT) \
++ return 0;
+
-+ if (offset)
-+ return INVALID_PORT;
++UADT(portmap, test)
++KADT(portmap, test, get_port)
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
-+#else
-+ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &udph, sizeof(udph)) < 0)
-+#endif
-+ /* No choice either */
-+ return INVALID_PORT;
++static inline int
++portmap_add(struct ip_set *set, ip_set_ip_t *hash_port, ip_set_ip_t port)
++{
++ struct ip_set_portmap *map = set->data;
+
-+ return ntohs(flags & IPSET_SRC ?
-+ udph.source : udph.dest);
-+ }
-+ default:
-+ return INVALID_PORT;
-+ }
++ if (port < map->first_ip || port > map->last_ip)
++ return -ERANGE;
++ if (test_and_set_bit(port - map->first_ip, map->members))
++ return -EEXIST;
++
++ *hash_port = port;
++ DP("port %u", port);
++ return 0;
+}
+
++UADT(portmap, add)
++KADT(portmap, add, get_port)
++
+static inline int
-+__testport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
++portmap_del(struct ip_set *set, ip_set_ip_t *hash_port, ip_set_ip_t port)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++ struct ip_set_portmap *map = set->data;
+
-+ if (port < map->first_port || port > map->last_port)
++ if (port < map->first_ip || port > map->last_ip)
+ return -ERANGE;
-+
++ if (!test_and_clear_bit(port - map->first_ip, map->members))
++ return -EEXIST;
++
+ *hash_port = port;
-+ DP("set: %s, port:%u, %u", set->name, port, *hash_port);
-+ return !!test_bit(port - map->first_port, map->members);
++ DP("port %u", port);
++ return 0;
+}
+
-+static int
-+testport(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_port)
-+{
-+ struct ip_set_req_portmap *req =
-+ (struct ip_set_req_portmap *) data;
++UADT(portmap, del)
++KADT(portmap, del, get_port)
+
-+ if (size != sizeof(struct ip_set_req_portmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_portmap),
-+ size);
-+ return -EINVAL;
++static inline int
++__portmap_create(const struct ip_set_req_portmap_create *req,
++ struct ip_set_portmap *map)
++{
++ if (req->to - req->from > MAX_RANGE) {
++ ip_set_printk("range too big, %d elements (max %d)",
++ req->to - req->from + 1, MAX_RANGE+1);
++ return -ENOEXEC;
+ }
-+ return __testport(set, req->port, hash_port);
++ return bitmap_bytes(req->from, req->to);
+}
+
-+static int
-+testport_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_port,
-+ const u_int32_t *flags,
-+ unsigned char index)
++BITMAP_CREATE(portmap)
++BITMAP_DESTROY(portmap)
++BITMAP_FLUSH(portmap)
++
++static inline void
++__portmap_list_header(const struct ip_set_portmap *map,
++ struct ip_set_req_portmap_create *header)
+{
-+ int res;
-+ ip_set_ip_t port = get_port(skb, flags[index]);
++}
+
-+ DP("flag %s port %u", flags[index] & IPSET_SRC ? "SRC" : "DST", port);
-+ if (port == INVALID_PORT)
-+ return 0;
++BITMAP_LIST_HEADER(portmap)
++BITMAP_LIST_MEMBERS_SIZE(portmap)
++BITMAP_LIST_MEMBERS(portmap)
+
-+ res = __testport(set, port, hash_port);
++IP_SET_TYPE(portmap, IPSET_TYPE_PORT | IPSET_DATA_SINGLE)
+
-+ return (res < 0 ? 0 : res);
-+}
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("portmap type of IP sets");
+
-+static inline int
-+__addport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
-+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++REGISTER_MODULE(portmap)
+--- /dev/null
++++ b/net/ipv4/netfilter/ip_set_setlist.c
+@@ -0,0 +1,330 @@
++/* Copyright (C) 2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
+
-+ if (port < map->first_port || port > map->last_port)
-+ return -ERANGE;
-+ if (test_and_set_bit(port - map->first_port, map->members))
-+ return -EEXIST;
++/* Kernel module implementing an IP set type: the setlist type */
+
-+ *hash_port = port;
-+ DP("port %u", port);
-+ return 0;
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/errno.h>
++
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
++#include <linux/netfilter_ipv4/ip_set_setlist.h>
++
++/*
++ * before ==> index, ref
++ * after ==> ref, index
++ */
++
++static inline int
++next_index_eq(const struct ip_set_setlist *map, int i, ip_set_id_t index)
++{
++ return i < map->size && map->index[i] == index;
+}
+
+static int
-+addport(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_port)
-+{
-+ struct ip_set_req_portmap *req =
-+ (struct ip_set_req_portmap *) data;
++setlist_utest(struct ip_set *set, const void *data, u_int32_t size,
++ ip_set_ip_t *hash_ip)
++{
++ const struct ip_set_setlist *map = set->data;
++ const struct ip_set_req_setlist *req = data;
++ ip_set_id_t index, ref = IP_SET_INVALID_ID;
++ int i, res = 0;
++ struct ip_set *s;
++
++ if (req->before && req->ref[0] == '\0')
++ return 0;
+
-+ if (size != sizeof(struct ip_set_req_portmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_portmap),
-+ size);
-+ return -EINVAL;
++ index = __ip_set_get_byname(req->name, &s);
++ if (index == IP_SET_INVALID_ID)
++ return 0;
++ if (req->ref[0] != '\0') {
++ ref = __ip_set_get_byname(req->ref, &s);
++ if (ref == IP_SET_INVALID_ID)
++ goto finish;
++ }
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID; i++) {
++ if (req->before && map->index[i] == index) {
++ res = next_index_eq(map, i + 1, ref);
++ break;
++ } else if (!req->before) {
++ if ((ref == IP_SET_INVALID_ID
++ && map->index[i] == index)
++ || (map->index[i] == ref
++ && next_index_eq(map, i + 1, index))) {
++ res = 1;
++ break;
++ }
++ }
+ }
-+ return __addport(set, req->port, hash_port);
++ if (ref != IP_SET_INVALID_ID)
++ __ip_set_put_byindex(ref);
++finish:
++ __ip_set_put_byindex(index);
++ return res;
+}
+
+static int
-+addport_kernel(struct ip_set *set,
++setlist_ktest(struct ip_set *set,
+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_port,
++ ip_set_ip_t *hash_ip,
+ const u_int32_t *flags,
+ unsigned char index)
+{
-+ ip_set_ip_t port = get_port(skb, flags[index]);
-+
-+ if (port == INVALID_PORT)
-+ return -EINVAL;
-+
-+ return __addport(set, port, hash_port);
++ struct ip_set_setlist *map = set->data;
++ int i, res = 0;
++
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID
++ && res == 0; i++)
++ res = ip_set_testip_kernel(map->index[i], skb, flags);
++ return res;
+}
+
+static inline int
-+__delport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
++insert_setlist(struct ip_set_setlist *map, int i, ip_set_id_t index)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++ ip_set_id_t tmp;
++ int j;
+
-+ if (port < map->first_port || port > map->last_port)
++ DP("i: %u, last %u\n", i, map->index[map->size - 1]);
++ if (i >= map->size || map->index[map->size - 1] != IP_SET_INVALID_ID)
+ return -ERANGE;
-+ if (!test_and_clear_bit(port - map->first_port, map->members))
-+ return -EEXIST;
-+
-+ *hash_port = port;
-+ DP("port %u", port);
++
++ for (j = i; j < map->size
++ && index != IP_SET_INVALID_ID; j++) {
++ tmp = map->index[j];
++ map->index[j] = index;
++ index = tmp;
++ }
+ return 0;
+}
+
+static int
-+delport(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_port)
++setlist_uadd(struct ip_set *set, const void *data, u_int32_t size,
++ ip_set_ip_t *hash_ip)
+{
-+ struct ip_set_req_portmap *req =
-+ (struct ip_set_req_portmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_portmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_portmap),
-+ size);
++ struct ip_set_setlist *map = set->data;
++ const struct ip_set_req_setlist *req = data;
++ ip_set_id_t index, ref = IP_SET_INVALID_ID;
++ int i, res = -ERANGE;
++ struct ip_set *s;
++
++ if (req->before && req->ref[0] == '\0')
+ return -EINVAL;
++
++ index = __ip_set_get_byname(req->name, &s);
++ if (index == IP_SET_INVALID_ID)
++ return -EEXIST;
++ /* "Loop detection" */
++ if (strcmp(s->type->typename, "setlist") == 0)
++ goto finish;
++
++ if (req->ref[0] != '\0') {
++ ref = __ip_set_get_byname(req->ref, &s);
++ if (ref == IP_SET_INVALID_ID) {
++ res = -EEXIST;
++ goto finish;
++ }
+ }
-+ return __delport(set, req->port, hash_port);
++ for (i = 0; i < map->size; i++) {
++ if (map->index[i] != ref)
++ continue;
++ if (req->before)
++ res = insert_setlist(map, i, index);
++ else
++ res = insert_setlist(map,
++ ref == IP_SET_INVALID_ID ? i : i + 1,
++ index);
++ break;
++ }
++ if (ref != IP_SET_INVALID_ID)
++ __ip_set_put_byindex(ref);
++ /* In case of success, we keep the reference to the set */
++finish:
++ if (res != 0)
++ __ip_set_put_byindex(index);
++ return res;
+}
+
+static int
-+delport_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_port,
-+ const u_int32_t *flags,
-+ unsigned char index)
++setlist_kadd(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
+{
-+ ip_set_ip_t port = get_port(skb, flags[index]);
-+
-+ if (port == INVALID_PORT)
-+ return -EINVAL;
-+
-+ return __delport(set, port, hash_port);
++ struct ip_set_setlist *map = set->data;
++ int i, res = -EINVAL;
++
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID
++ && res != 0; i++)
++ res = ip_set_addip_kernel(map->index[i], skb, flags);
++ return res;
+}
+
-+static int create(struct ip_set *set, const void *data, size_t size)
++static inline int
++unshift_setlist(struct ip_set_setlist *map, int i)
+{
-+ int newbytes;
-+ struct ip_set_req_portmap_create *req =
-+ (struct ip_set_req_portmap_create *) data;
-+ struct ip_set_portmap *map;
++ int j;
++
++ for (j = i; j < map->size - 1; j++)
++ map->index[j] = map->index[j+1];
++ map->index[map->size-1] = IP_SET_INVALID_ID;
++ return 0;
++}
+
-+ if (size != sizeof(struct ip_set_req_portmap_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_portmap_create),
-+ size);
++static int
++setlist_udel(struct ip_set *set, const void *data, u_int32_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_setlist *map = set->data;
++ const struct ip_set_req_setlist *req = data;
++ ip_set_id_t index, ref = IP_SET_INVALID_ID;
++ int i, res = -EEXIST;
++ struct ip_set *s;
++
++ if (req->before && req->ref[0] == '\0')
+ return -EINVAL;
-+ }
+
-+ DP("from %u to %u", req->from, req->to);
-+
-+ if (req->from > req->to) {
-+ DP("bad port range");
-+ return -ENOEXEC;
++ index = __ip_set_get_byname(req->name, &s);
++ if (index == IP_SET_INVALID_ID)
++ return -EEXIST;
++ if (req->ref[0] != '\0') {
++ ref = __ip_set_get_byname(req->ref, &s);
++ if (ref == IP_SET_INVALID_ID)
++ goto finish;
++ }
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID; i++) {
++ if (req->before) {
++ if (map->index[i] == index
++ && next_index_eq(map, i + 1, ref)) {
++ res = unshift_setlist(map, i);
++ break;
++ }
++ } else if (ref == IP_SET_INVALID_ID) {
++ if (map->index[i] == index) {
++ res = unshift_setlist(map, i);
++ break;
++ }
++ } else if (map->index[i] == ref
++ && next_index_eq(map, i + 1, index)) {
++ res = unshift_setlist(map, i + 1);
++ break;
++ }
+ }
++ if (ref != IP_SET_INVALID_ID)
++ __ip_set_put_byindex(ref);
++finish:
++ __ip_set_put_byindex(index);
++ /* In case of success, release the reference to the set */
++ if (res == 0)
++ __ip_set_put_byindex(index);
++ return res;
++}
+
-+ if (req->to - req->from > MAX_RANGE) {
-+ ip_set_printk("range too big (max %d ports)",
-+ MAX_RANGE+1);
-+ return -ENOEXEC;
-+ }
++static int
++setlist_kdel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ struct ip_set_setlist *map = set->data;
++ int i, res = -EINVAL;
++
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID
++ && res != 0; i++)
++ res = ip_set_delip_kernel(map->index[i], skb, flags);
++ return res;
++}
+
-+ map = kmalloc(sizeof(struct ip_set_portmap), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_portmap));
-+ return -ENOMEM;
-+ }
-+ map->first_port = req->from;
-+ map->last_port = req->to;
-+ newbytes = bitmap_bytes(req->from, req->to);
-+ map->members = kmalloc(newbytes, GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", newbytes);
-+ kfree(map);
++static int
++setlist_create(struct ip_set *set, const void *data, u_int32_t size)
++{
++ struct ip_set_setlist *map;
++ const struct ip_set_req_setlist_create *req = data;
++ int i;
++
++ map = kmalloc(sizeof(struct ip_set_setlist) +
++ req->size * sizeof(ip_set_id_t), GFP_KERNEL);
++ if (!map)
+ return -ENOMEM;
-+ }
-+ memset(map->members, 0, newbytes);
-+
++ map->size = req->size;
++ for (i = 0; i < map->size; i++)
++ map->index[i] = IP_SET_INVALID_ID;
++
+ set->data = map;
+ return 0;
-+}
++}
+
-+static void destroy(struct ip_set *set)
++static void
++setlist_destroy(struct ip_set *set)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++ struct ip_set_setlist *map = set->data;
++ int i;
++
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID; i++)
++ __ip_set_put_byindex(map->index[i]);
+
-+ kfree(map->members);
+ kfree(map);
-+
+ set->data = NULL;
+}
+
-+static void flush(struct ip_set *set)
++static void
++setlist_flush(struct ip_set *set)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+ memset(map->members, 0, bitmap_bytes(map->first_port, map->last_port));
++ struct ip_set_setlist *map = set->data;
++ int i;
++
++ for (i = 0; i < map->size
++ && map->index[i] != IP_SET_INVALID_ID; i++) {
++ __ip_set_put_byindex(map->index[i]);
++ map->index[i] = IP_SET_INVALID_ID;
++ }
+}
+
-+static void list_header(const struct ip_set *set, void *data)
++static void
++setlist_list_header(const struct ip_set *set, void *data)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+ struct ip_set_req_portmap_create *header =
-+ (struct ip_set_req_portmap_create *) data;
-+
-+ DP("list_header %u %u", map->first_port, map->last_port);
-+
-+ header->from = map->first_port;
-+ header->to = map->last_port;
++ const struct ip_set_setlist *map = set->data;
++ struct ip_set_req_setlist_create *header = data;
++
++ header->size = map->size;
+}
+
-+static int list_members_size(const struct ip_set *set)
++static int
++setlist_list_members_size(const struct ip_set *set)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+
-+ return bitmap_bytes(map->first_port, map->last_port);
++ const struct ip_set_setlist *map = set->data;
++
++ return map->size * sizeof(ip_set_id_t);
+}
+
-+static void list_members(const struct ip_set *set, void *data)
++static void
++setlist_list_members(const struct ip_set *set, void *data)
+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+ int bytes = bitmap_bytes(map->first_port, map->last_port);
-+
-+ memcpy(data, map->members, bytes);
++ struct ip_set_setlist *map = set->data;
++ int i;
++
++ for (i = 0; i < map->size; i++)
++ *((ip_set_id_t *)data + i) = ip_set_id(map->index[i]);
+}
+
-+static struct ip_set_type ip_set_portmap = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_PORT | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_portmap),
-+ .addip = &addport,
-+ .addip_kernel = &addport_kernel,
-+ .delip = &delport,
-+ .delip_kernel = &delport_kernel,
-+ .testip = &testport,
-+ .testip_kernel = &testport_kernel,
-+ .header_size = sizeof(struct ip_set_req_portmap_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
++IP_SET_TYPE(setlist, IPSET_TYPE_SETNAME | IPSET_DATA_SINGLE)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("portmap type of IP sets");
++MODULE_DESCRIPTION("setlist type of IP sets");
+
-+static int __init ip_set_portmap_init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_portmap);
-+}
-+
-+static void __exit ip_set_portmap_fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_portmap);
-+}
-+
-+module_init(ip_set_portmap_init);
-+module_exit(ip_set_portmap_fini);
++REGISTER_MODULE(setlist)
--- /dev/null
+++ b/net/ipv4/netfilter/ipt_set.c
-@@ -0,0 +1,160 @@
+@@ -0,0 +1,238 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Martin Josefsson <gandalf@wlug.westbo.se>
@@ -7207,7 +7100,14 @@
+#include <linux/skbuff.h>
+#include <linux/version.h>
+
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
+#include <linux/netfilter_ipv4/ip_tables.h>
++#define xt_register_match ipt_register_match
++#define xt_unregister_match ipt_unregister_match
++#define xt_match ipt_match
++#else
++#include <linux/netfilter/x_tables.h>
++#endif
+#include <linux/netfilter_ipv4/ip_set.h>
+#include <linux/netfilter_ipv4/ipt_set.h>
+
@@ -7215,64 +7115,125 @@
+match_set(const struct ipt_set_info *info,
+ const struct sk_buff *skb,
+ int inv)
-+{
++{
+ if (ip_set_testip_kernel(info->index, skb, info->flags))
+ inv = !inv;
+ return inv;
+}
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+static bool
-+#else
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++static int
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *matchinfo,
++ int offset,
++ const void *hdr,
++ u_int16_t datalen,
++ int *hotdrop)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
++static int
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *matchinfo,
++ int offset,
++ int *hotdrop)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static int
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *matchinfo,
++ int offset,
++ unsigned int protoff,
++ int *hotdrop)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
+static int
-+#endif
+match(const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+ const struct xt_match *match,
-+#endif
+ const void *matchinfo,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ int offset, unsigned int protoff, bool *hotdrop)
-+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-+ int offset, unsigned int protoff, int *hotdrop)
-+#else
-+ int offset, int *hotdrop)
++ int offset,
++ unsigned int protoff,
++ int *hotdrop)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++static bool
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const struct xt_match *match,
++ const void *matchinfo,
++ int offset,
++ unsigned int protoff,
++ bool *hotdrop)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
++static bool
++match(const struct sk_buff *skb,
++ const struct xt_match_param *par)
+#endif
+{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+ const struct ipt_set_info_match *info = matchinfo;
-+
++#else
++ const struct ipt_set_info_match *info = par->matchinfo;
++#endif
++
+ return match_set(&info->match_set,
+ skb,
+ info->match_set.flags[0] & IPSET_MATCH_INV);
+}
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+bool
-+#else
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
+static int
-+#endif
+checkentry(const char *tablename,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-+ const void *inf,
-+#else
+ const struct ipt_ip *ip,
-+#endif
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++ void *matchinfo,
++ unsigned int matchsize,
++ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static int
++checkentry(const char *tablename,
++ const void *inf,
++ void *matchinfo,
++ unsigned int matchsize,
++ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++static int
++checkentry(const char *tablename,
++ const void *inf,
+ const struct xt_match *match,
-+#endif
+ void *matchinfo,
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+ unsigned int matchsize,
-+#endif
+ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
++static int
++checkentry(const char *tablename,
++ const void *inf,
++ const struct xt_match *match,
++ void *matchinfo,
++ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++static bool
++checkentry(const char *tablename,
++ const void *inf,
++ const struct xt_match *match,
++ void *matchinfo,
++ unsigned int hook_mask)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
++static bool
++checkentry(const struct xt_mtchk_param *par)
++#endif
+{
-+ struct ipt_set_info_match *info =
-+ (struct ipt_set_info_match *) matchinfo;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++ struct ipt_set_info_match *info = matchinfo;
++#else
++ struct ipt_set_info_match *info = par->matchinfo;
++#endif
+ ip_set_id_t index;
+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
+ if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
+ ip_set_printk("invalid matchsize %d", matchsize);
+ return 0;
@@ -7280,7 +7241,7 @@
+#endif
+
+ index = ip_set_get_byindex(info->match_set.index);
-+
++
+ if (index == IP_SET_INVALID_ID) {
+ ip_set_printk("Cannot find set indentified by id %u to match",
+ info->match_set.index);
@@ -7294,65 +7255,75 @@
+ return 1;
+}
+
-+static void destroy(
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-+ const struct xt_match *match,
-+#endif
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ void *matchinfo, unsigned int matchsize)
-+#else
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static void destroy(void *matchinfo,
++ unsigned int matchsize)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++static void destroy(const struct xt_match *match,
++ void *matchinfo,
++ unsigned int matchsize)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++static void destroy(const struct xt_match *match,
+ void *matchinfo)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
++static void destroy(const struct xt_mtdtor_param *par)
+#endif
+{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+ struct ipt_set_info_match *info = matchinfo;
++#else
++ struct ipt_set_info_match *info = par->matchinfo;
++#endif
+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
+ if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
+ ip_set_printk("invalid matchsize %d", matchsize);
+ return;
+ }
+#endif
-+ ip_set_put(info->match_set.index);
++ ip_set_put_byindex(info->match_set.index);
+}
+
-+static struct ipt_match set_match = {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static struct xt_match set_match = {
++ .name = "set",
++ .match = &match,
++ .checkentry = &checkentry,
++ .destroy = &destroy,
++ .me = THIS_MODULE
++};
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17) */
++static struct xt_match set_match = {
+ .name = "set",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
+ .family = AF_INET,
-+#endif
+ .match = &match,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+ .matchsize = sizeof(struct ipt_set_info_match),
-+#endif
+ .checkentry = &checkentry,
+ .destroy = &destroy,
+ .me = THIS_MODULE
+};
++#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("iptables IP set match module");
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+#define ipt_register_match xt_register_match
-+#define ipt_unregister_match xt_unregister_match
-+#endif
-+
+static int __init ipt_ipset_init(void)
+{
-+ return ipt_register_match(&set_match);
++ return xt_register_match(&set_match);
+}
+
+static void __exit ipt_ipset_fini(void)
+{
-+ ipt_unregister_match(&set_match);
++ xt_unregister_match(&set_match);
+}
+
+module_init(ipt_ipset_init);
+module_exit(ipt_ipset_fini);
--- /dev/null
+++ b/net/ipv4/netfilter/ipt_SET.c
-@@ -0,0 +1,179 @@
+@@ -0,0 +1,242 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Martin Josefsson <gandalf@wlug.westbo.se>
@@ -7365,45 +7336,75 @@
+
+/* ipt_SET.c - netfilter target to manipulate IP sets */
+
-+#include <linux/types.h>
-+#include <linux/ip.h>
-+#include <linux/timer.h>
+#include <linux/module.h>
-+#include <linux/netfilter.h>
-+#include <linux/netdevice.h>
-+#include <linux/if.h>
-+#include <linux/inetdevice.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
+#include <linux/version.h>
-+#include <net/protocol.h>
-+#include <net/checksum.h>
++
+#include <linux/netfilter_ipv4.h>
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
+#include <linux/netfilter_ipv4/ip_tables.h>
++#define xt_register_target ipt_register_target
++#define xt_unregister_target ipt_unregister_target
++#define xt_target ipt_target
++#define XT_CONTINUE IPT_CONTINUE
++#else
++#include <linux/netfilter/x_tables.h>
++#endif
+#include <linux/netfilter_ipv4/ipt_set.h>
+
+static unsigned int
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
-+target(struct sk_buff *skb,
-+#else
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++target(struct sk_buff **pskb,
++ unsigned int hooknum,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *targinfo,
++ void *userinfo)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++target(struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ unsigned int hooknum,
++ const void *targinfo,
++ void *userinfo)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+target(struct sk_buff **pskb,
-+#endif
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int hooknum,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+ const struct xt_target *target,
-+#endif
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+ const void *targinfo,
+ void *userinfo)
-+#else
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
++target(struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ unsigned int hooknum,
++ const struct xt_target *target,
++ const void *targinfo)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++target(struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ unsigned int hooknum,
++ const struct xt_target *target,
+ const void *targinfo)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
++target(struct sk_buff *skb,
++ const struct xt_target_param *par)
+#endif
+{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+ const struct ipt_set_info_target *info = targinfo;
++#else
++ const struct ipt_set_info_target *info = par->targinfo;
++#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+ struct sk_buff *skb = *pskb;
+#endif
+
++
+ if (info->add_set.index != IP_SET_INVALID_ID)
+ ip_set_addip_kernel(info->add_set.index,
+ skb,
@@ -7413,34 +7414,58 @@
+ skb,
+ info->del_set.flags);
+
-+ return IPT_CONTINUE;
++ return XT_CONTINUE;
+}
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+static bool
-+#else
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
+static int
-+#endif
+checkentry(const char *tablename,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-+ const void *e,
-+#else
+ const struct ipt_entry *e,
-+#endif
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++ void *targinfo,
++ unsigned int targinfosize,
++ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static int
++checkentry(const char *tablename,
++ const void *e,
++ void *targinfo,
++ unsigned int targinfosize,
++ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++static int
++checkentry(const char *tablename,
++ const void *e,
+ const struct xt_target *target,
-+#endif
+ void *targinfo,
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+ unsigned int targinfosize,
-+#endif
+ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
++static int
++checkentry(const char *tablename,
++ const void *e,
++ const struct xt_target *target,
++ void *targinfo,
++ unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++static bool
++checkentry(const char *tablename,
++ const void *e,
++ const struct xt_target *target,
++ void *targinfo,
++ unsigned int hook_mask)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
++static bool
++checkentry(const struct xt_tgchk_param *par)
++#endif
+{
-+ struct ipt_set_info_target *info =
-+ (struct ipt_set_info_target *) targinfo;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++ const struct ipt_set_info_target *info = targinfo;
++#else
++ const struct ipt_set_info_target *info = par->targinfo;
++#endif
+ ip_set_id_t index;
+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
+ if (targinfosize != IPT_ALIGN(sizeof(*info))) {
+ DP("bad target info size %u", targinfosize);
+ return 0;
@@ -7473,68 +7498,77 @@
+ return 1;
+}
+
-+static void destroy(
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-+ const struct xt_target *target,
-+#endif
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ void *targetinfo, unsigned int targetsize)
-+#else
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static void destroy(void *targetinfo,
++ unsigned int targetsize)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++static void destroy(const struct xt_target *target,
++ void *targetinfo,
++ unsigned int targetsize)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++static void destroy(const struct xt_target *target,
+ void *targetinfo)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
++static void destroy(const struct xt_tgdtor_param *par)
+#endif
+{
-+ struct ipt_set_info_target *info = targetinfo;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++ const struct ipt_set_info_target *info = targetinfo;
++#else
++ const struct ipt_set_info_target *info = par->targinfo;
++#endif
+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
+ if (targetsize != IPT_ALIGN(sizeof(struct ipt_set_info_target))) {
+ ip_set_printk("invalid targetsize %d", targetsize);
+ return;
+ }
+#endif
+ if (info->add_set.index != IP_SET_INVALID_ID)
-+ ip_set_put(info->add_set.index);
++ ip_set_put_byindex(info->add_set.index);
+ if (info->del_set.index != IP_SET_INVALID_ID)
-+ ip_set_put(info->del_set.index);
++ ip_set_put_byindex(info->del_set.index);
+}
+
-+static struct ipt_target SET_target = {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static struct xt_target SET_target = {
++ .name = "SET",
++ .target = target,
++ .checkentry = checkentry,
++ .destroy = destroy,
++ .me = THIS_MODULE
++};
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17) */
++static struct xt_target SET_target = {
+ .name = "SET",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
+ .family = AF_INET,
-+#endif
+ .target = target,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+ .targetsize = sizeof(struct ipt_set_info_target),
-+#endif
+ .checkentry = checkentry,
+ .destroy = destroy,
+ .me = THIS_MODULE
+};
++#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("iptables IP set target module");
+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+#define ipt_register_target xt_register_target
-+#define ipt_unregister_target xt_unregister_target
-+#endif
-+
+static int __init ipt_SET_init(void)
+{
-+ return ipt_register_target(&SET_target);
++ return xt_register_target(&SET_target);
+}
+
+static void __exit ipt_SET_fini(void)
+{
-+ ipt_unregister_target(&SET_target);
++ xt_unregister_target(&SET_target);
+}
+
+module_init(ipt_SET_init);
+module_exit(ipt_SET_fini);
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
-@@ -388,5 +388,122 @@ config IP_NF_ARP_MANGLE
+@@ -388,5 +388,146 @@ config IP_NF_ARP_MANGLE
endif # IP_NF_ARPTABLES
@@ -7619,6 +7653,22 @@
+
+ To compile it as a module, choose M here. If unsure, say N.
+
++config IP_NF_SET_IPPORTIPHASH
++ tristate "ipportiphash set support"
++ depends on IP_NF_SET
++ help
++ This option adds the ipportiphash set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config IP_NF_SET_IPPORTNETHASH
++ tristate "ipportnethash set support"
++ depends on IP_NF_SET
++ help
++ This option adds the ipportnethash set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
+config IP_NF_SET_IPTREE
+ tristate "iptree set support"
+ depends on IP_NF_SET
@@ -7635,6 +7685,14 @@
+
+ To compile it as a module, choose M here. If unsure, say N.
+
++config IP_NF_SET_SETLIST
++ tristate "setlist set support"
++ depends on IP_NF_SET
++ help
++ This option adds the setlist set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
+config IP_NF_MATCH_SET
+ tristate "set match support"
+ depends on IP_NF_SET
@@ -7667,7 +7725,7 @@
# targets
obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
-@@ -61,6 +62,18 @@ obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt
+@@ -61,6 +62,21 @@ obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt
obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o
obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
@@ -7681,8 +7739,11 @@
+obj-$(CONFIG_IP_NF_SET_IPHASH) += ip_set_iphash.o
+obj-$(CONFIG_IP_NF_SET_NETHASH) += ip_set_nethash.o
+obj-$(CONFIG_IP_NF_SET_IPPORTHASH) += ip_set_ipporthash.o
++obj-$(CONFIG_IP_NF_SET_IPPORTIPHASH) += ip_set_ipportiphash.o
++obj-$(CONFIG_IP_NF_SET_IPPORTNETHASH) += ip_set_ipportnethash.o
+obj-$(CONFIG_IP_NF_SET_IPTREE) += ip_set_iptree.o
+obj-$(CONFIG_IP_NF_SET_IPTREEMAP) += ip_set_iptreemap.o
++obj-$(CONFIG_IP_NF_SET_SETLIST) += ip_set_setlist.o
# generic ARP tables
obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o