Diffstat (limited to 'openwrt/target/linux/linux-2.6/patches/generic')
-rw-r--r-- | openwrt/target/linux/linux-2.6/patches/generic/000-reenable_devfs.patch | 219
-rw-r--r-- | openwrt/target/linux/linux-2.6/patches/generic/003-net-b44-1.patch | 807
-rw-r--r-- | openwrt/target/linux/linux-2.6/patches/generic/003-net-b44-2.patch (renamed from openwrt/target/linux/linux-2.6/patches/generic/003-net-b44.patch) | 30
-rw-r--r-- | openwrt/target/linux/linux-2.6/patches/generic/100-netfilter_layer7.patch | 158
-rw-r--r-- | openwrt/target/linux/linux-2.6/patches/generic/101-mppe-mppc-1.3.patch | 1559
-rw-r--r-- | openwrt/target/linux/linux-2.6/patches/generic/104-pf_ring.patch | 5299
6 files changed, 1124 insertions(+), 6948 deletions(-)
diff --git a/openwrt/target/linux/linux-2.6/patches/generic/000-reenable_devfs.patch b/openwrt/target/linux/linux-2.6/patches/generic/000-reenable_devfs.patch new file mode 100644 index 0000000000..ce98def5da --- /dev/null +++ b/openwrt/target/linux/linux-2.6/patches/generic/000-reenable_devfs.patch @@ -0,0 +1,219 @@ +diff -ur linux-2.6.15-rc5/drivers/mtd/mtd_blkdevs.c linux-2.6.15-rc5-openwrt/drivers/mtd/mtd_blkdevs.c +--- linux-2.6.15-rc5/drivers/mtd/mtd_blkdevs.c 2005-12-04 06:10:42.000000000 +0100 ++++ linux-2.6.15-rc5-openwrt/drivers/mtd/mtd_blkdevs.c 2005-12-15 07:53:20.000000000 +0100 +@@ -21,6 +21,9 @@ + #include <linux/init.h> + #include <asm/semaphore.h> + #include <asm/uaccess.h> ++#ifdef CONFIG_DEVFS_FS ++#include <linux/devfs_fs_kernel.h> ++#endif + + static LIST_HEAD(blktrans_majors); + +@@ -302,6 +305,11 @@ + snprintf(gd->disk_name, sizeof(gd->disk_name), + "%s%d", tr->name, new->devnum); + ++#ifdef CONFIG_DEVFS_FS ++ snprintf(gd->devfs_name, sizeof(gd->devfs_name), ++ "%s/%c", tr->name, (tr->part_bits?'a':'0') + new->devnum); ++#endif ++ + /* 2.5 has capacity in units of 512 bytes while still + having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */ + set_capacity(gd, (new->size * new->blksize) >> 9); +@@ -418,6 +426,10 @@ + return ret; + } + ++#ifdef CONFIG_DEVFS_FS ++ devfs_mk_dir(tr->name); ++#endif ++ + INIT_LIST_HEAD(&tr->devs); + list_add(&tr->list, &blktrans_majors); + +@@ -450,6 +462,10 @@ + tr->remove_dev(dev); + } + ++#ifdef CONFIG_DEVFS_FS ++ devfs_remove(tr->name); ++#endif ++ + blk_cleanup_queue(tr->blkcore_priv->rq); + unregister_blkdev(tr->major, tr->name); + +diff -ur linux-2.6.15-rc5/drivers/mtd/mtdchar.c linux-2.6.15-rc5-openwrt/drivers/mtd/mtdchar.c +--- linux-2.6.15-rc5/drivers/mtd/mtdchar.c 2005-12-04 06:10:42.000000000 +0100 ++++ linux-2.6.15-rc5-openwrt/drivers/mtd/mtdchar.c 2005-12-15 07:49:15.000000000 +0100 +@@ -6,7 +6,6 @@ + */ + + #include <linux/config.h> +-#include <linux/device.h> + #include <linux/fs.h> + #include <linux/init.h> + #include <linux/kernel.h> +@@ -19,19 +18,33 @@ + + #include <asm/uaccess.h> + ++#ifdef CONFIG_DEVFS_FS ++#include <linux/devfs_fs_kernel.h> ++#else ++#include <linux/device.h> ++ + static struct class *mtd_class; ++#endif + + static void mtd_notify_add(struct mtd_info* mtd) + { + if (!mtd) + return; + ++#ifdef CONFIG_DEVFS_FS ++ devfs_mk_cdev(MKDEV(MTD_CHAR_MAJOR, mtd->index*2), ++ S_IFCHR | S_IRUGO | S_IWUGO, "mtd/%d", mtd->index); ++ ++ devfs_mk_cdev(MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1), ++ S_IFCHR | S_IRUGO, "mtd/%dro", mtd->index); ++#else + class_device_create(mtd_class, NULL, MKDEV(MTD_CHAR_MAJOR, mtd->index*2), + NULL, "mtd%d", mtd->index); + + class_device_create(mtd_class, NULL, + MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1), + NULL, "mtd%dro", mtd->index); ++#endif + } + + static void mtd_notify_remove(struct mtd_info* mtd) +@@ -39,8 +52,13 @@ + if (!mtd) + return; + ++#ifdef CONFIG_DEVFS_FS ++ devfs_remove("mtd/%d", mtd->index); ++ devfs_remove("mtd/%dro", mtd->index); ++#else + class_device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2)); + class_device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1)); ++#endif + } + + static struct mtd_notifier notifier = { +@@ -48,6 +66,22 @@ + .remove = mtd_notify_remove, + }; + ++#ifdef CONFIG_DEVFS_FS ++ static inline void mtdchar_devfs_init(void) ++ { ++ devfs_mk_dir("mtd"); ++ register_mtd_user(¬ifier); ++ } ++ static inline void mtdchar_devfs_exit(void) ++ { ++ unregister_mtd_user(¬ifier); ++ devfs_remove("mtd"); ++ } ++ #else /* !DEVFS */ ++ 
#define mtdchar_devfs_init() do { } while(0) ++ #define mtdchar_devfs_exit() do { } while(0) ++#endif ++ + /* + * We use file->private_data to store a pointer to the MTDdevice. + * Since alighment is at least 32 bits, we have 2 bits free for OTP +@@ -643,6 +677,9 @@ + return -EAGAIN; + } + ++#ifdef CONFIG_DEVFS_FS ++ mtdchar_devfs_init(); ++#else + mtd_class = class_create(THIS_MODULE, "mtd"); + + if (IS_ERR(mtd_class)) { +@@ -652,13 +689,19 @@ + } + + register_mtd_user(¬ifier); ++#endif + return 0; + } + + static void __exit cleanup_mtdchar(void) + { ++ ++#ifdef CONFIG_DEVFS_FS ++ mtdchar_devfs_exit(); ++#else + unregister_mtd_user(¬ifier); + class_destroy(mtd_class); ++#endif + unregister_chrdev(MTD_CHAR_MAJOR, "mtd"); + } + +diff -ur linux-2.6.15-rc5/fs/Kconfig linux-2.6.15-rc5-openwrt/fs/Kconfig +--- linux-2.6.15-rc5/fs/Kconfig 2005-12-04 06:10:42.000000000 +0100 ++++ linux-2.6.15-rc5-openwrt/fs/Kconfig 2005-12-15 07:44:01.000000000 +0100 +@@ -772,6 +772,56 @@ + help + Exports the dump image of crashed kernel in ELF format. + ++config DEVFS_FS ++ bool "/dev file system support (OBSOLETE)" ++ depends on EXPERIMENTAL ++ help ++ This is support for devfs, a virtual file system (like /proc) which ++ provides the file system interface to device drivers, normally found ++ in /dev. Devfs does not depend on major and minor number ++ allocations. Device drivers register entries in /dev which then ++ appear automatically, which means that the system administrator does ++ not have to create character and block special device files in the ++ /dev directory using the mknod command (or MAKEDEV script) anymore. ++ ++ This is work in progress. If you want to use this, you *must* read ++ the material in <file:Documentation/filesystems/devfs/>, especially ++ the file README there. ++ ++ Note that devfs no longer manages /dev/pts! If you are using UNIX98 ++ ptys, you will also need to mount the /dev/pts filesystem (devpts). ++ ++ Note that devfs has been obsoleted by udev, ++ <http://www.kernel.org/pub/linux/utils/kernel/hotplug/>. ++ It has been stripped down to a bare minimum and is only provided for ++ legacy installations that use its naming scheme which is ++ unfortunately different from the names normal Linux installations ++ use. ++ ++ If unsure, say N. ++ ++config DEVFS_MOUNT ++ bool "Automatically mount at boot" ++ depends on DEVFS_FS ++ help ++ This option appears if you have CONFIG_DEVFS_FS enabled. Setting ++ this to 'Y' will make the kernel automatically mount devfs onto /dev ++ when the system is booted, before the init thread is started. ++ You can override this with the "devfs=nomount" boot option. ++ ++ If unsure, say N. ++ ++config DEVFS_DEBUG ++ bool "Debug devfs" ++ depends on DEVFS_FS ++ help ++ If you say Y here, then the /dev file system code will generate ++ debugging messages. See the file ++ <file:Documentation/filesystems/devfs/boot-options> for more ++ details. ++ ++ If unsure, say N. 
++ + config SYSFS + bool "sysfs file system support" if EMBEDDED + default y diff --git a/openwrt/target/linux/linux-2.6/patches/generic/003-net-b44-1.patch b/openwrt/target/linux/linux-2.6/patches/generic/003-net-b44-1.patch new file mode 100644 index 0000000000..726ea542a3 --- /dev/null +++ b/openwrt/target/linux/linux-2.6/patches/generic/003-net-b44-1.patch @@ -0,0 +1,807 @@ +diff -ur linux-2.6.15-rc5/drivers/net/b44.c linux-2.6.15-rc5-openwrt/drivers/net/b44.c +--- linux-2.6.15-rc5/drivers/net/b44.c 2005-12-04 06:10:42.000000000 +0100 ++++ linux-2.6.15-rc5-openwrt/drivers/net/b44.c 2005-08-15 02:20:18.000000000 +0200 +@@ -18,7 +18,7 @@ + #include <linux/pci.h> + #include <linux/delay.h> + #include <linux/init.h> +-#include <linux/dma-mapping.h> ++#include <linux/version.h> + + #include <asm/uaccess.h> + #include <asm/io.h> +@@ -28,8 +28,8 @@ + + #define DRV_MODULE_NAME "b44" + #define PFX DRV_MODULE_NAME ": " +-#define DRV_MODULE_VERSION "0.97" +-#define DRV_MODULE_RELDATE "Nov 30, 2005" ++#define DRV_MODULE_VERSION "0.95" ++#define DRV_MODULE_RELDATE "Aug 3, 2004" + + #define B44_DEF_MSG_ENABLE \ + (NETIF_MSG_DRV | \ +@@ -101,35 +101,10 @@ + static void b44_halt(struct b44 *); + static void b44_init_rings(struct b44 *); + static void b44_init_hw(struct b44 *); +- +-static int dma_desc_align_mask; +-static int dma_desc_sync_size; +- +-static const char b44_gstrings[][ETH_GSTRING_LEN] = { +-#define _B44(x...) # x, +-B44_STAT_REG_DECLARE +-#undef _B44 +-}; +- +-static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev, +- dma_addr_t dma_base, +- unsigned long offset, +- enum dma_data_direction dir) +-{ +- dma_sync_single_range_for_device(&pdev->dev, dma_base, +- offset & dma_desc_align_mask, +- dma_desc_sync_size, dir); +-} +- +-static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev, +- dma_addr_t dma_base, +- unsigned long offset, +- enum dma_data_direction dir) +-{ +- dma_sync_single_range_for_cpu(&pdev->dev, dma_base, +- offset & dma_desc_align_mask, +- dma_desc_sync_size, dir); +-} ++static int b44_poll(struct net_device *dev, int *budget); ++#ifdef CONFIG_NET_POLL_CONTROLLER ++static void b44_poll_controller(struct net_device *dev); ++#endif + + static inline unsigned long br32(const struct b44 *bp, unsigned long reg) + { +@@ -503,10 +478,7 @@ + for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) { + *val++ += br32(bp, reg); + } +- +- /* Pad */ +- reg += 8*4UL; +- ++ val = &bp->hw_stats.rx_good_octets; + for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) { + *val++ += br32(bp, reg); + } +@@ -657,7 +629,7 @@ + + /* Hardware bug work-around, the chip is unable to do PCI DMA + to/from anything above 1GB :-( */ +- if (mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) { ++ if(mapping+RX_PKT_BUF_SZ > B44_DMA_MASK) { + /* Sigh... 
*/ + pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE); + dev_kfree_skb_any(skb); +@@ -667,7 +639,7 @@ + mapping = pci_map_single(bp->pdev, skb->data, + RX_PKT_BUF_SZ, + PCI_DMA_FROMDEVICE); +- if (mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) { ++ if(mapping+RX_PKT_BUF_SZ > B44_DMA_MASK) { + pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE); + dev_kfree_skb_any(skb); + return -ENOMEM; +@@ -696,11 +668,6 @@ + dp->ctrl = cpu_to_le32(ctrl); + dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset); + +- if (bp->flags & B44_FLAG_RX_RING_HACK) +- b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma, +- dest_idx * sizeof(dp), +- DMA_BIDIRECTIONAL); +- + return RX_PKT_BUF_SZ; + } + +@@ -725,11 +692,6 @@ + pci_unmap_addr_set(dest_map, mapping, + pci_unmap_addr(src_map, mapping)); + +- if (bp->flags & B44_FLAG_RX_RING_HACK) +- b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma, +- src_idx * sizeof(src_desc), +- DMA_BIDIRECTIONAL); +- + ctrl = src_desc->ctrl; + if (dest_idx == (B44_RX_RING_SIZE - 1)) + ctrl |= cpu_to_le32(DESC_CTRL_EOT); +@@ -738,14 +700,8 @@ + + dest_desc->ctrl = ctrl; + dest_desc->addr = src_desc->addr; +- + src_map->skb = NULL; + +- if (bp->flags & B44_FLAG_RX_RING_HACK) +- b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma, +- dest_idx * sizeof(dest_desc), +- DMA_BIDIRECTIONAL); +- + pci_dma_sync_single_for_device(bp->pdev, src_desc->addr, + RX_PKT_BUF_SZ, + PCI_DMA_FROMDEVICE); +@@ -894,10 +850,11 @@ + { + struct net_device *dev = dev_id; + struct b44 *bp = netdev_priv(dev); ++ unsigned long flags; + u32 istat, imask; + int handled = 0; + +- spin_lock(&bp->lock); ++ spin_lock_irqsave(&bp->lock, flags); + + istat = br32(bp, B44_ISTAT); + imask = br32(bp, B44_IMASK); +@@ -908,12 +865,6 @@ + istat &= imask; + if (istat) { + handled = 1; +- +- if (unlikely(!netif_running(dev))) { +- printk(KERN_INFO "%s: late interrupt.\n", dev->name); +- goto irq_ack; +- } +- + if (netif_rx_schedule_prep(dev)) { + /* NOTE: These writes are posted by the readback of + * the ISTAT register below. +@@ -926,11 +877,10 @@ + dev->name); + } + +-irq_ack: + bw32(bp, B44_ISTAT, istat); + br32(bp, B44_ISTAT); + } +- spin_unlock(&bp->lock); ++ spin_unlock_irqrestore(&bp->lock, flags); + return IRQ_RETVAL(handled); + } + +@@ -958,7 +908,6 @@ + { + struct b44 *bp = netdev_priv(dev); + struct sk_buff *bounce_skb; +- int rc = NETDEV_TX_OK; + dma_addr_t mapping; + u32 len, entry, ctrl; + +@@ -968,28 +917,29 @@ + /* This is a hard error, log it. */ + if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) { + netif_stop_queue(dev); ++ spin_unlock_irq(&bp->lock); + printk(KERN_ERR PFX "%s: BUG! 
Tx Ring full when queue awake!\n", + dev->name); +- goto err_out; ++ return 1; + } + + mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE); +- if (mapping + len > B44_DMA_MASK) { ++ if(mapping+len > B44_DMA_MASK) { + /* Chip can't handle DMA to/from >1GB, use bounce buffer */ + pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE); + + bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ, + GFP_ATOMIC|GFP_DMA); + if (!bounce_skb) +- goto err_out; ++ return NETDEV_TX_BUSY; + + mapping = pci_map_single(bp->pdev, bounce_skb->data, + len, PCI_DMA_TODEVICE); +- if (mapping + len > B44_DMA_MASK) { ++ if(mapping+len > B44_DMA_MASK) { + pci_unmap_single(bp->pdev, mapping, + len, PCI_DMA_TODEVICE); + dev_kfree_skb_any(bounce_skb); +- goto err_out; ++ return NETDEV_TX_BUSY; + } + + memcpy(skb_put(bounce_skb, len), skb->data, skb->len); +@@ -1009,11 +959,6 @@ + bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl); + bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset); + +- if (bp->flags & B44_FLAG_TX_RING_HACK) +- b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma, +- entry * sizeof(bp->tx_ring[0]), +- DMA_TO_DEVICE); +- + entry = NEXT_TX(entry); + + bp->tx_prod = entry; +@@ -1029,16 +974,11 @@ + if (TX_BUFFS_AVAIL(bp) < 1) + netif_stop_queue(dev); + +- dev->trans_start = jiffies; +- +-out_unlock: + spin_unlock_irq(&bp->lock); + +- return rc; ++ dev->trans_start = jiffies; + +-err_out: +- rc = NETDEV_TX_BUSY; +- goto out_unlock; ++ return 0; + } + + static int b44_change_mtu(struct net_device *dev, int new_mtu) +@@ -1112,7 +1052,8 @@ + * + * The chip has been shut down and the driver detached from + * the networking, so no interrupts or new tx packets will +- * end up in the driver. ++ * end up in the driver. bp->lock is not held and we are not ++ * in an interrupt context and thus may sleep. 
+ */ + static void b44_init_rings(struct b44 *bp) + { +@@ -1123,16 +1064,6 @@ + memset(bp->rx_ring, 0, B44_RX_RING_BYTES); + memset(bp->tx_ring, 0, B44_TX_RING_BYTES); + +- if (bp->flags & B44_FLAG_RX_RING_HACK) +- dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma, +- DMA_TABLE_BYTES, +- PCI_DMA_BIDIRECTIONAL); +- +- if (bp->flags & B44_FLAG_TX_RING_HACK) +- dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma, +- DMA_TABLE_BYTES, +- PCI_DMA_TODEVICE); +- + for (i = 0; i < bp->rx_pending; i++) { + if (b44_alloc_rx_skb(bp, -1, i) < 0) + break; +@@ -1145,33 +1076,23 @@ + */ + static void b44_free_consistent(struct b44 *bp) + { +- kfree(bp->rx_buffers); +- bp->rx_buffers = NULL; +- kfree(bp->tx_buffers); +- bp->tx_buffers = NULL; ++ if (bp->rx_buffers) { ++ kfree(bp->rx_buffers); ++ bp->rx_buffers = NULL; ++ } ++ if (bp->tx_buffers) { ++ kfree(bp->tx_buffers); ++ bp->tx_buffers = NULL; ++ } + if (bp->rx_ring) { +- if (bp->flags & B44_FLAG_RX_RING_HACK) { +- dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma, +- DMA_TABLE_BYTES, +- DMA_BIDIRECTIONAL); +- kfree(bp->rx_ring); +- } else +- pci_free_consistent(bp->pdev, DMA_TABLE_BYTES, +- bp->rx_ring, bp->rx_ring_dma); ++ pci_free_consistent(bp->pdev, DMA_TABLE_BYTES, ++ bp->rx_ring, bp->rx_ring_dma); + bp->rx_ring = NULL; +- bp->flags &= ~B44_FLAG_RX_RING_HACK; + } + if (bp->tx_ring) { +- if (bp->flags & B44_FLAG_TX_RING_HACK) { +- dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma, +- DMA_TABLE_BYTES, +- DMA_TO_DEVICE); +- kfree(bp->tx_ring); +- } else +- pci_free_consistent(bp->pdev, DMA_TABLE_BYTES, +- bp->tx_ring, bp->tx_ring_dma); ++ pci_free_consistent(bp->pdev, DMA_TABLE_BYTES, ++ bp->tx_ring, bp->tx_ring_dma); + bp->tx_ring = NULL; +- bp->flags &= ~B44_FLAG_TX_RING_HACK; + } + } + +@@ -1184,67 +1105,25 @@ + int size; + + size = B44_RX_RING_SIZE * sizeof(struct ring_info); +- bp->rx_buffers = kzalloc(size, GFP_KERNEL); ++ bp->rx_buffers = kmalloc(size, GFP_KERNEL); + if (!bp->rx_buffers) + goto out_err; ++ memset(bp->rx_buffers, 0, size); + + size = B44_TX_RING_SIZE * sizeof(struct ring_info); +- bp->tx_buffers = kzalloc(size, GFP_KERNEL); ++ bp->tx_buffers = kmalloc(size, GFP_KERNEL); + if (!bp->tx_buffers) + goto out_err; ++ memset(bp->tx_buffers, 0, size); + + size = DMA_TABLE_BYTES; + bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma); +- if (!bp->rx_ring) { +- /* Allocation may have failed due to pci_alloc_consistent +- insisting on use of GFP_DMA, which is more restrictive +- than necessary... */ +- struct dma_desc *rx_ring; +- dma_addr_t rx_ring_dma; +- +- rx_ring = kzalloc(size, GFP_KERNEL); +- if (!rx_ring) +- goto out_err; +- +- rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring, +- DMA_TABLE_BYTES, +- DMA_BIDIRECTIONAL); +- +- if (rx_ring_dma + size > B44_DMA_MASK) { +- kfree(rx_ring); +- goto out_err; +- } +- +- bp->rx_ring = rx_ring; +- bp->rx_ring_dma = rx_ring_dma; +- bp->flags |= B44_FLAG_RX_RING_HACK; +- } ++ if (!bp->rx_ring) ++ goto out_err; + + bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma); +- if (!bp->tx_ring) { +- /* Allocation may have failed due to pci_alloc_consistent +- insisting on use of GFP_DMA, which is more restrictive +- than necessary... 
*/ +- struct dma_desc *tx_ring; +- dma_addr_t tx_ring_dma; +- +- tx_ring = kzalloc(size, GFP_KERNEL); +- if (!tx_ring) +- goto out_err; +- +- tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring, +- DMA_TABLE_BYTES, +- DMA_TO_DEVICE); +- +- if (tx_ring_dma + size > B44_DMA_MASK) { +- kfree(tx_ring); +- goto out_err; +- } +- +- bp->tx_ring = tx_ring; +- bp->tx_ring_dma = tx_ring_dma; +- bp->flags |= B44_FLAG_TX_RING_HACK; +- } ++ if (!bp->tx_ring) ++ goto out_err; + + return 0; + +@@ -1394,21 +1273,19 @@ + + err = b44_alloc_consistent(bp); + if (err) +- goto out; ++ return err; ++ ++ err = request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev); ++ if (err) ++ goto err_out_free; ++ ++ spin_lock_irq(&bp->lock); + + b44_init_rings(bp); + b44_init_hw(bp); ++ bp->flags |= B44_FLAG_INIT_COMPLETE; + +- netif_carrier_off(dev); +- b44_check_phy(bp); +- +- err = request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev); +- if (unlikely(err < 0)) { +- b44_chip_reset(bp); +- b44_free_rings(bp); +- b44_free_consistent(bp); +- goto out; +- } ++ spin_unlock_irq(&bp->lock); + + init_timer(&bp->timer); + bp->timer.expires = jiffies + HZ; +@@ -1417,8 +1294,11 @@ + add_timer(&bp->timer); + + b44_enable_ints(bp); +- netif_start_queue(dev); +-out: ++ ++ return 0; ++ ++err_out_free: ++ b44_free_consistent(bp); + return err; + } + +@@ -1453,8 +1333,6 @@ + + netif_stop_queue(dev); + +- netif_poll_disable(dev); +- + del_timer_sync(&bp->timer); + + spin_lock_irq(&bp->lock); +@@ -1464,14 +1342,13 @@ + #endif + b44_halt(bp); + b44_free_rings(bp); ++ bp->flags &= ~B44_FLAG_INIT_COMPLETE; + netif_carrier_off(bp->dev); + + spin_unlock_irq(&bp->lock); + + free_irq(dev->irq, dev); + +- netif_poll_enable(dev); +- + b44_free_consistent(bp); + + return 0; +@@ -1536,6 +1413,8 @@ + { + struct b44 *bp = netdev_priv(dev); + u32 val; ++ int i=0; ++ unsigned char zero[6] = {0,0,0,0,0,0}; + + val = br32(bp, B44_RXCONFIG); + val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI); +@@ -1543,17 +1422,14 @@ + val |= RXCONFIG_PROMISC; + bw32(bp, B44_RXCONFIG, val); + } else { +- unsigned char zero[6] = {0, 0, 0, 0, 0, 0}; +- int i = 0; +- + __b44_set_mac_addr(bp); + + if (dev->flags & IFF_ALLMULTI) + val |= RXCONFIG_ALLMULTI; + else +- i = __b44_load_mcast(bp, dev); ++ i=__b44_load_mcast(bp, dev); + +- for (; i < 64; i++) { ++ for(;i<64;i++) { + __b44_cam_write(bp, zero, i); + } + bw32(bp, B44_RXCONFIG, val); +@@ -1617,7 +1493,7 @@ + { + struct b44 *bp = netdev_priv(dev); + +- if (!netif_running(dev)) ++ if (!(bp->flags & B44_FLAG_INIT_COMPLETE)) + return -EAGAIN; + cmd->supported = (SUPPORTED_Autoneg); + cmd->supported |= (SUPPORTED_100baseT_Half | +@@ -1628,14 +1504,14 @@ + + cmd->advertising = 0; + if (bp->flags & B44_FLAG_ADV_10HALF) +- cmd->advertising |= ADVERTISED_10baseT_Half; ++ cmd->advertising |= ADVERTISE_10HALF; + if (bp->flags & B44_FLAG_ADV_10FULL) +- cmd->advertising |= ADVERTISED_10baseT_Full; ++ cmd->advertising |= ADVERTISE_10FULL; + if (bp->flags & B44_FLAG_ADV_100HALF) +- cmd->advertising |= ADVERTISED_100baseT_Half; ++ cmd->advertising |= ADVERTISE_100HALF; + if (bp->flags & B44_FLAG_ADV_100FULL) +- cmd->advertising |= ADVERTISED_100baseT_Full; +- cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; ++ cmd->advertising |= ADVERTISE_100FULL; ++ cmd->advertising |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ? + SPEED_100 : SPEED_10; + cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ? 
+@@ -1655,7 +1531,7 @@ + { + struct b44 *bp = netdev_priv(dev); + +- if (!netif_running(dev)) ++ if (!(bp->flags & B44_FLAG_INIT_COMPLETE)) + return -EAGAIN; + + /* We do not support gigabit. */ +@@ -1785,37 +1661,6 @@ + return 0; + } + +-static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data) +-{ +- switch(stringset) { +- case ETH_SS_STATS: +- memcpy(data, *b44_gstrings, sizeof(b44_gstrings)); +- break; +- } +-} +- +-static int b44_get_stats_count(struct net_device *dev) +-{ +- return ARRAY_SIZE(b44_gstrings); +-} +- +-static void b44_get_ethtool_stats(struct net_device *dev, +- struct ethtool_stats *stats, u64 *data) +-{ +- struct b44 *bp = netdev_priv(dev); +- u32 *val = &bp->hw_stats.tx_good_octets; +- u32 i; +- +- spin_lock_irq(&bp->lock); +- +- b44_stats_update(bp); +- +- for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++) +- *data++ = *val++; +- +- spin_unlock_irq(&bp->lock); +-} +- + static struct ethtool_ops b44_ethtool_ops = { + .get_drvinfo = b44_get_drvinfo, + .get_settings = b44_get_settings, +@@ -1828,25 +1673,18 @@ + .set_pauseparam = b44_set_pauseparam, + .get_msglevel = b44_get_msglevel, + .set_msglevel = b44_set_msglevel, +- .get_strings = b44_get_strings, +- .get_stats_count = b44_get_stats_count, +- .get_ethtool_stats = b44_get_ethtool_stats, +- .get_perm_addr = ethtool_op_get_perm_addr, + }; + + static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) + { + struct mii_ioctl_data *data = if_mii(ifr); + struct b44 *bp = netdev_priv(dev); +- int err = -EINVAL; +- +- if (!netif_running(dev)) +- goto out; ++ int err; + + spin_lock_irq(&bp->lock); + err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL); + spin_unlock_irq(&bp->lock); +-out: ++ + return err; + } + +@@ -1877,7 +1715,6 @@ + bp->dev->dev_addr[3] = eeprom[80]; + bp->dev->dev_addr[4] = eeprom[83]; + bp->dev->dev_addr[5] = eeprom[82]; +- memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len); + + bp->phy_addr = eeprom[90] & 0x1f; + +@@ -1942,9 +1779,9 @@ + + err = pci_set_consistent_dma_mask(pdev, (u64) B44_DMA_MASK); + if (err) { +- printk(KERN_ERR PFX "No usable DMA configuration, " +- "aborting.\n"); +- goto err_out_free_res; ++ printk(KERN_ERR PFX "No usable DMA configuration, " ++ "aborting.\n"); ++ goto err_out_free_res; + } + + b44reg_base = pci_resource_start(pdev, 0); +@@ -1966,8 +1803,10 @@ + bp = netdev_priv(dev); + bp->pdev = pdev; + bp->dev = dev; +- +- bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE); ++ if (b44_debug >= 0) ++ bp->msg_enable = (1 << b44_debug) - 1; ++ else ++ bp->msg_enable = B44_DEF_MSG_ENABLE; + + spin_lock_init(&bp->lock); + +@@ -2057,14 +1896,17 @@ + static void __devexit b44_remove_one(struct pci_dev *pdev) + { + struct net_device *dev = pci_get_drvdata(pdev); +- struct b44 *bp = netdev_priv(dev); + +- unregister_netdev(dev); +- iounmap(bp->regs); +- free_netdev(dev); +- pci_release_regions(pdev); +- pci_disable_device(pdev); +- pci_set_drvdata(pdev, NULL); ++ if (dev) { ++ struct b44 *bp = netdev_priv(dev); ++ ++ unregister_netdev(dev); ++ iounmap(bp->regs); ++ free_netdev(dev); ++ pci_release_regions(pdev); ++ pci_disable_device(pdev); ++ pci_set_drvdata(pdev, NULL); ++ } + } + + static int b44_suspend(struct pci_dev *pdev, pm_message_t state) +@@ -2085,9 +1927,6 @@ + b44_free_rings(bp); + + spin_unlock_irq(&bp->lock); +- +- free_irq(dev->irq, dev); +- pci_disable_device(pdev); + return 0; + } + +@@ -2097,15 +1936,10 @@ + struct b44 *bp = netdev_priv(dev); + + pci_restore_state(pdev); +- pci_enable_device(pdev); +- 
pci_set_master(pdev); + + if (!netif_running(dev)) + return 0; + +- if (request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev)) +- printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name); +- + spin_lock_irq(&bp->lock); + + b44_init_rings(bp); +@@ -2117,7 +1951,6 @@ + add_timer(&bp->timer); + + b44_enable_ints(bp); +- netif_wake_queue(dev); + return 0; + } + +@@ -2132,12 +1965,6 @@ + + static int __init b44_init(void) + { +- unsigned int dma_desc_align_size = dma_get_cache_alignment(); +- +- /* Setup paramaters for syncing RX/TX DMA descriptors */ +- dma_desc_align_mask = ~(dma_desc_align_size - 1); +- dma_desc_sync_size = max(dma_desc_align_size, sizeof(struct dma_desc)); +- + return pci_module_init(&b44_driver); + } + +diff -ur linux-2.6.15-rc5/drivers/net/b44.h linux-2.6.15-rc5-openwrt/drivers/net/b44.h +--- linux-2.6.15-rc5/drivers/net/b44.h 2005-12-04 06:10:42.000000000 +0100 ++++ linux-2.6.15-rc5-openwrt/drivers/net/b44.h 2005-08-15 02:20:18.000000000 +0200 +@@ -346,63 +346,29 @@ + + #define B44_MCAST_TABLE_SIZE 32 + +-#define B44_STAT_REG_DECLARE \ +- _B44(tx_good_octets) \ +- _B44(tx_good_pkts) \ +- _B44(tx_octets) \ +- _B44(tx_pkts) \ +- _B44(tx_broadcast_pkts) \ +- _B44(tx_multicast_pkts) \ +- _B44(tx_len_64) \ +- _B44(tx_len_65_to_127) \ +- _B44(tx_len_128_to_255) \ +- _B44(tx_len_256_to_511) \ +- _B44(tx_len_512_to_1023) \ +- _B44(tx_len_1024_to_max) \ +- _B44(tx_jabber_pkts) \ +- _B44(tx_oversize_pkts) \ +- _B44(tx_fragment_pkts) \ +- _B44(tx_underruns) \ +- _B44(tx_total_cols) \ +- _B44(tx_single_cols) \ +- _B44(tx_multiple_cols) \ +- _B44(tx_excessive_cols) \ +- _B44(tx_late_cols) \ +- _B44(tx_defered) \ +- _B44(tx_carrier_lost) \ +- _B44(tx_pause_pkts) \ +- _B44(rx_good_octets) \ +- _B44(rx_good_pkts) \ +- _B44(rx_octets) \ +- _B44(rx_pkts) \ +- _B44(rx_broadcast_pkts) \ +- _B44(rx_multicast_pkts) \ +- _B44(rx_len_64) \ +- _B44(rx_len_65_to_127) \ +- _B44(rx_len_128_to_255) \ +- _B44(rx_len_256_to_511) \ +- _B44(rx_len_512_to_1023) \ +- _B44(rx_len_1024_to_max) \ +- _B44(rx_jabber_pkts) \ +- _B44(rx_oversize_pkts) \ +- _B44(rx_fragment_pkts) \ +- _B44(rx_missed_pkts) \ +- _B44(rx_crc_align_errs) \ +- _B44(rx_undersize) \ +- _B44(rx_crc_errs) \ +- _B44(rx_align_errs) \ +- _B44(rx_symbol_errs) \ +- _B44(rx_pause_pkts) \ +- _B44(rx_nonpause_pkts) +- + /* SW copy of device statistics, kept up to date by periodic timer +- * which probes HW values. Check b44_stats_update if you mess with +- * the layout ++ * which probes HW values. Must have same relative layout as HW ++ * register above, because b44_stats_update depends upon this. 
+ */ + struct b44_hw_stats { +-#define _B44(x) u32 x; +-B44_STAT_REG_DECLARE +-#undef _B44 ++ u32 tx_good_octets, tx_good_pkts, tx_octets; ++ u32 tx_pkts, tx_broadcast_pkts, tx_multicast_pkts; ++ u32 tx_len_64, tx_len_65_to_127, tx_len_128_to_255; ++ u32 tx_len_256_to_511, tx_len_512_to_1023, tx_len_1024_to_max; ++ u32 tx_jabber_pkts, tx_oversize_pkts, tx_fragment_pkts; ++ u32 tx_underruns, tx_total_cols, tx_single_cols; ++ u32 tx_multiple_cols, tx_excessive_cols, tx_late_cols; ++ u32 tx_defered, tx_carrier_lost, tx_pause_pkts; ++ u32 __pad1[8]; ++ ++ u32 rx_good_octets, rx_good_pkts, rx_octets; ++ u32 rx_pkts, rx_broadcast_pkts, rx_multicast_pkts; ++ u32 rx_len_64, rx_len_65_to_127, rx_len_128_to_255; ++ u32 rx_len_256_to_511, rx_len_512_to_1023, rx_len_1024_to_max; ++ u32 rx_jabber_pkts, rx_oversize_pkts, rx_fragment_pkts; ++ u32 rx_missed_pkts, rx_crc_align_errs, rx_undersize; ++ u32 rx_crc_errs, rx_align_errs, rx_symbol_errs; ++ u32 rx_pause_pkts, rx_nonpause_pkts; + }; + + struct b44 { +@@ -420,6 +386,7 @@ + + u32 dma_offset; + u32 flags; ++#define B44_FLAG_INIT_COMPLETE 0x00000001 + #define B44_FLAG_BUGGY_TXPTR 0x00000002 + #define B44_FLAG_REORDER_BUG 0x00000004 + #define B44_FLAG_PAUSE_AUTO 0x00008000 +@@ -433,8 +400,6 @@ + #define B44_FLAG_ADV_100HALF 0x04000000 + #define B44_FLAG_ADV_100FULL 0x08000000 + #define B44_FLAG_INTERNAL_PHY 0x10000000 +-#define B44_FLAG_RX_RING_HACK 0x20000000 +-#define B44_FLAG_TX_RING_HACK 0x40000000 + + u32 rx_offset; + diff --git a/openwrt/target/linux/linux-2.6/patches/generic/003-net-b44.patch b/openwrt/target/linux/linux-2.6/patches/generic/003-net-b44-2.patch index 17f82b69a6..8bfe429698 100644 --- a/openwrt/target/linux/linux-2.6/patches/generic/003-net-b44.patch +++ b/openwrt/target/linux/linux-2.6/patches/generic/003-net-b44-2.patch @@ -1,6 +1,6 @@ -diff -Nur linux-2.6.12.5/drivers/net/b44.c linux-2.6.12.5-b44/drivers/net/b44.c ---- linux-2.6.12.5/drivers/net/b44.c 2005-08-15 02:20:18.000000000 +0200 -+++ linux-2.6.12.5-b44/drivers/net/b44.c 2005-09-16 22:19:33.841633800 +0200 +diff -ur linux-2.6.14.3/drivers/net/b44.c linux-2.6.14.3-openwrt/drivers/net/b44.c +--- linux-2.6.14.3/drivers/net/b44.c 2005-11-24 23:10:21.000000000 +0100 ++++ linux-2.6.14.3-openwrt/drivers/net/b44.c 2005-12-08 13:24:35.000000000 +0100 @@ -1,7 +1,8 @@ -/* b44.c: Broadcom 4400 device driver. +/* b44.c: Broadcom 4400/47xx device driver. @@ -615,7 +615,7 @@ diff -Nur linux-2.6.12.5/drivers/net/b44.c linux-2.6.12.5-b44/drivers/net/b44.c } static int b44_open(struct net_device *dev) -@@ -1416,11 +1460,11 @@ +@@ -1419,11 +1463,11 @@ int i=0; unsigned char zero[6] = {0,0,0,0,0,0}; @@ -629,7 +629,7 @@ diff -Nur linux-2.6.12.5/drivers/net/b44.c linux-2.6.12.5-b44/drivers/net/b44.c } else { __b44_set_mac_addr(bp); -@@ -1432,9 +1476,9 @@ +@@ -1435,9 +1479,9 @@ for(;i<64;i++) { __b44_cam_write(bp, zero, i); } @@ -642,7 +642,7 @@ diff -Nur linux-2.6.12.5/drivers/net/b44.c linux-2.6.12.5-b44/drivers/net/b44.c } } -@@ -1675,17 +1719,288 @@ +@@ -1678,17 +1722,288 @@ .set_msglevel = b44_set_msglevel, }; @@ -935,7 +935,7 @@ diff -Nur linux-2.6.12.5/drivers/net/b44.c linux-2.6.12.5-b44/drivers/net/b44.c } /* Read 128-bytes of EEPROM. 
*/ -@@ -1695,7 +2010,7 @@ +@@ -1698,7 +2013,7 @@ u16 *ptr = (u16 *) data; for (i = 0; i < 128; i += 2) @@ -944,7 +944,7 @@ diff -Nur linux-2.6.12.5/drivers/net/b44.c linux-2.6.12.5-b44/drivers/net/b44.c return 0; } -@@ -1704,19 +2019,41 @@ +@@ -1707,19 +2022,41 @@ { u8 eeprom[128]; int err; @@ -998,7 +998,7 @@ diff -Nur linux-2.6.12.5/drivers/net/b44.c linux-2.6.12.5-b44/drivers/net/b44.c /* With this, plus the rx_header prepended to the data by the * hardware, we'll land the ethernet header on a 2-byte boundary. -@@ -1726,13 +2063,12 @@ +@@ -1729,13 +2066,12 @@ bp->imask = IMASK_DEF; bp->core_unit = ssb_core_unit(bp); @@ -1014,7 +1014,7 @@ diff -Nur linux-2.6.12.5/drivers/net/b44.c linux-2.6.12.5-b44/drivers/net/b44.c } static int __devinit b44_init_one(struct pci_dev *pdev, -@@ -1810,7 +2146,7 @@ +@@ -1813,7 +2149,7 @@ spin_lock_init(&bp->lock); @@ -1023,7 +1023,7 @@ diff -Nur linux-2.6.12.5/drivers/net/b44.c linux-2.6.12.5-b44/drivers/net/b44.c if (bp->regs == 0UL) { printk(KERN_ERR PFX "Cannot map device registers, " "aborting.\n"); -@@ -1871,15 +2207,21 @@ +@@ -1874,15 +2210,21 @@ pci_save_state(bp->pdev); @@ -1047,7 +1047,7 @@ diff -Nur linux-2.6.12.5/drivers/net/b44.c linux-2.6.12.5-b44/drivers/net/b44.c err_out_free_dev: free_netdev(dev); -@@ -1901,7 +2243,7 @@ +@@ -1904,7 +2246,7 @@ struct b44 *bp = netdev_priv(dev); unregister_netdev(dev); @@ -1056,9 +1056,9 @@ diff -Nur linux-2.6.12.5/drivers/net/b44.c linux-2.6.12.5-b44/drivers/net/b44.c free_netdev(dev); pci_release_regions(pdev); pci_disable_device(pdev); -diff -Nur linux-2.6.12.5/drivers/net/b44.h linux-2.6.12.5-b44/drivers/net/b44.h ---- linux-2.6.12.5/drivers/net/b44.h 2005-08-15 02:20:18.000000000 +0200 -+++ linux-2.6.12.5-b44/drivers/net/b44.h 2005-09-16 22:18:06.217954624 +0200 +diff -ur linux-2.6.14.3/drivers/net/b44.h linux-2.6.14.3-openwrt/drivers/net/b44.h +--- linux-2.6.14.3/drivers/net/b44.h 2005-11-24 23:10:21.000000000 +0100 ++++ linux-2.6.14.3-openwrt/drivers/net/b44.h 2005-12-08 13:24:35.000000000 +0100 @@ -292,6 +292,9 @@ #define SSB_PCI_MASK1 0xfc000000 #define SSB_PCI_MASK2 0xc0000000 diff --git a/openwrt/target/linux/linux-2.6/patches/generic/100-netfilter_layer7.patch b/openwrt/target/linux/linux-2.6/patches/generic/100-netfilter_layer7.patch index 80a7b90b85..0dd2ccf7cc 100644 --- a/openwrt/target/linux/linux-2.6/patches/generic/100-netfilter_layer7.patch +++ b/openwrt/target/linux/linux-2.6/patches/generic/100-netfilter_layer7.patch @@ -1,6 +1,6 @@ ---- linux-2.6.11.3-stock/include/linux/netfilter_ipv4/ip_conntrack.h 2005-03-13 00:44:41.000000000 -0600 -+++ linux-2.6.11.3-layer7/include/linux/netfilter_ipv4/ip_conntrack.h 2005-03-13 20:30:01.000000000 -0600 -@@ -177,6 +177,15 @@ struct ip_conntrack +--- linux-2.6.14/include/linux/netfilter_ipv4/ip_conntrack.h 2005-10-27 19:02:08.000000000 -0500 ++++ linux-2.6.14-layer7/include/linux/netfilter_ipv4/ip_conntrack.h 2005-11-12 17:31:34.000000000 -0600 +@@ -253,6 +253,15 @@ struct ip_conntrack /* Traversed often, so hopefully in different cacheline to top */ /* These are my tuples; original and reply */ struct ip_conntrack_tuple_hash tuplehash[IP_CT_DIR_MAX]; @@ -16,8 +16,8 @@ }; struct ip_conntrack_expect ---- linux-2.6.11.3-stock/include/linux/netfilter_ipv4/ipt_layer7.h 1969-12-31 18:00:00.000000000 -0600 -+++ linux-2.6.11.3-layer7/include/linux/netfilter_ipv4/ipt_layer7.h 2005-03-13 20:30:01.000000000 -0600 +--- linux-2.6.14/include/linux/netfilter_ipv4/ipt_layer7.h 1969-12-31 18:00:00.000000000 -0600 ++++ 
linux-2.6.14-layer7/include/linux/netfilter_ipv4/ipt_layer7.h 2005-11-12 17:31:34.000000000 -0600 @@ -0,0 +1,26 @@ +/* + By Matthew Strait <quadong@users.sf.net>, Dec 2003. @@ -45,9 +45,9 @@ +}; + +#endif /* _IPT_LAYER7_H */ ---- linux-2.6.11.3-stock/net/ipv4/netfilter/Kconfig 2005-03-13 00:44:38.000000000 -0600 -+++ linux-2.6.11.3-layer7/net/ipv4/netfilter/Kconfig 2005-03-13 20:30:01.000000000 -0600 -@@ -146,6 +146,33 @@ config IP_NF_MATCH_MAC +--- linux-2.6.14/net/ipv4/netfilter/Kconfig 2005-10-27 19:02:08.000000000 -0500 ++++ linux-2.6.14-layer7/net/ipv4/netfilter/Kconfig 2005-11-12 17:31:34.000000000 -0600 +@@ -205,6 +205,24 @@ config IP_NF_MATCH_MAC To compile it as a module, choose M here. If unsure, say N. @@ -69,34 +69,25 @@ + help + Say Y to get lots of debugging output. + -+config IP_NF_MATCH_LAYER7_MAXDATALEN -+ int "Buffer size for application layer data" if IP_NF_MATCH_LAYER7 -+ range 256 65536 -+ default 2048 -+ help -+ Size of the buffer that the application layer data is stored in. -+ Unless you know what you're doing, leave it at the default of 2kB. -+ -+ config IP_NF_MATCH_PKTTYPE tristate "Packet type match support" depends on IP_NF_IPTABLES ---- linux-2.6.11.3-stock/net/ipv4/netfilter/Makefile 2005-03-13 00:44:14.000000000 -0600 -+++ linux-2.6.11.3-layer7/net/ipv4/netfilter/Makefile 2005-03-13 20:30:01.000000000 -0600 -@@ -60,6 +60,8 @@ obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ip - obj-$(CONFIG_IP_NF_MATCH_PHYSDEV) += ipt_physdev.o +--- linux-2.6.14/net/ipv4/netfilter/Makefile 2005-10-27 19:02:08.000000000 -0500 ++++ linux-2.6.14-layer7/net/ipv4/netfilter/Makefile 2005-11-12 17:31:34.000000000 -0600 +@@ -74,6 +74,8 @@ obj-$(CONFIG_IP_NF_MATCH_PHYSDEV) += ipt obj-$(CONFIG_IP_NF_MATCH_COMMENT) += ipt_comment.o + obj-$(CONFIG_IP_NF_MATCH_STRING) += ipt_string.o +obj-$(CONFIG_IP_NF_MATCH_LAYER7) += ipt_layer7.o + # targets obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o obj-$(CONFIG_IP_NF_TARGET_TOS) += ipt_TOS.o ---- linux-2.6.11.3-stock/net/ipv4/netfilter/ip_conntrack_core.c 2005-03-13 00:43:57.000000000 -0600 -+++ linux-2.6.11.3-layer7/net/ipv4/netfilter/ip_conntrack_core.c 2005-03-13 22:09:32.000000000 -0600 -@@ -247,6 +247,13 @@ destroy_conntrack(struct nf_conntrack *n +--- linux-2.6.14/net/ipv4/netfilter/ip_conntrack_core.c 2005-10-27 19:02:08.000000000 -0500 ++++ linux-2.6.14-layer7/net/ipv4/netfilter/ip_conntrack_core.c 2005-11-12 17:31:34.000000000 -0600 +@@ -335,6 +335,13 @@ destroy_conntrack(struct nf_conntrack *n * too. */ - remove_expectations(ct); + ip_ct_remove_expectations(ct); + #if defined(CONFIG_IP_NF_MATCH_LAYER7) || defined(CONFIG_IP_NF_MATCH_LAYER7_MODULE) + if(ct->layer7.app_proto) @@ -108,10 +99,10 @@ /* We overload first tuple to link into unconfirmed list. 
*/ if (!is_confirmed(ct)) { BUG_ON(list_empty(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list)); ---- linux-2.6.11.3-stock/net/ipv4/netfilter/ip_conntrack_standalone.c 2005-03-13 00:44:25.000000000 -0600 -+++ linux-2.6.11.3-layer7/net/ipv4/netfilter/ip_conntrack_standalone.c 2005-03-13 20:30:01.000000000 -0600 -@@ -152,6 +152,12 @@ static int ct_seq_real_show(const struct - return 1; +--- linux-2.6.14/net/ipv4/netfilter/ip_conntrack_standalone.c 2005-10-27 19:02:08.000000000 -0500 ++++ linux-2.6.14-layer7/net/ipv4/netfilter/ip_conntrack_standalone.c 2005-11-12 17:31:34.000000000 -0600 +@@ -188,6 +188,12 @@ static int ct_seq_show(struct seq_file * + return -ENOSPC; #endif +#if defined(CONFIG_IP_NF_MATCH_LAYER7) || defined(CONFIG_IP_NF_MATCH_LAYER7_MODULE) @@ -121,11 +112,11 @@ +#endif + if (seq_printf(s, "use=%u\n", atomic_read(&conntrack->ct_general.use))) - return 1; + return -ENOSPC; ---- linux-2.6.11.3-stock/net/ipv4/netfilter/ipt_layer7.c 1969-12-31 18:00:00.000000000 -0600 -+++ linux-2.6.11.3-layer7/net/ipv4/netfilter/ipt_layer7.c 2005-03-13 20:30:01.000000000 -0600 -@@ -0,0 +1,552 @@ +--- linux-2.6.14/net/ipv4/netfilter/ipt_layer7.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-2.6.14-layer7/net/ipv4/netfilter/ipt_layer7.c 2005-11-12 17:49:24.000000000 -0600 +@@ -0,0 +1,569 @@ +/* + Kernel module to match application layer (OSI layer 7) + data in connections. @@ -151,7 +142,7 @@ +#include <linux/ctype.h> +#include <net/ip.h> +#include <net/tcp.h> -+#include <linux/netfilter_ipv4/lockhelp.h> ++#include <linux/spinlock.h> + +#include "regexp/regexp.c" + @@ -161,8 +152,13 @@ +MODULE_AUTHOR("Matthew Strait <quadong@users.sf.net>, Ethan Sommer <sommere@users.sf.net>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("iptables application layer match module"); ++MODULE_VERSION("2.0"); ++ ++static int maxdatalen = 2048; // this is the default ++module_param(maxdatalen, int, 0444); ++MODULE_PARM_DESC(maxdatalen, "maximum bytes of data looked at by l7-filter"); + -+#if defined(CONFIG_IP_NF_MATCH_LAYER7_DEBUG) ++#ifdef CONFIG_IP_NF_MATCH_LAYER7_DEBUG + #define DPRINTK(format,args...) printk(format,##args) +#else + #define DPRINTK(format,args...) @@ -173,7 +169,7 @@ + +/* Number of packets whose data we look at. +This can be modified through /proc/net/layer7_numpackets */ -+static int num_packets = 8; ++static int num_packets = 10; + +static struct pattern_cache { + char * regex_string; @@ -196,10 +192,10 @@ + time. In this case, we have to protect the conntracks and the list of + compiled patterns. +*/ -+DECLARE_RWLOCK(ct_lock); -+DECLARE_LOCK(list_lock); ++DEFINE_RWLOCK(ct_lock); ++DEFINE_SPINLOCK(list_lock); + -+#if CONFIG_IP_NF_MATCH_LAYER7_DEBUG ++#ifdef CONFIG_IP_NF_MATCH_LAYER7_DEBUG +/* Converts an unfriendly string into a friendly one by +replacing unprintables with periods and all whitespace with " ". 
*/ +static char * friendly_print(unsigned char * s) @@ -366,7 +362,7 @@ + struct ipt_layer7_info * info) +{ + /* If we're in here, throw the app data away */ -+ WRITE_LOCK(&ct_lock); ++ write_lock(&ct_lock); + if(master_conntrack->layer7.app_data != NULL) { + + #ifdef CONFIG_IP_NF_MATCH_LAYER7_DEBUG @@ -385,38 +381,38 @@ + kfree(master_conntrack->layer7.app_data); + master_conntrack->layer7.app_data = NULL; /* don't free again */ + } -+ WRITE_UNLOCK(&ct_lock); ++ write_unlock(&ct_lock); + + if(master_conntrack->layer7.app_proto){ + /* Here child connections set their .app_proto (for /proc/net/ip_conntrack) */ -+ WRITE_LOCK(&ct_lock); ++ write_lock(&ct_lock); + if(!conntrack->layer7.app_proto) { + conntrack->layer7.app_proto = kmalloc(strlen(master_conntrack->layer7.app_proto)+1, GFP_ATOMIC); + if(!conntrack->layer7.app_proto){ + if (net_ratelimit()) + printk(KERN_ERR "layer7: out of memory in match_no_append, bailing.\n"); -+ WRITE_UNLOCK(&ct_lock); ++ write_unlock(&ct_lock); + return 1; + } + strcpy(conntrack->layer7.app_proto, master_conntrack->layer7.app_proto); + } -+ WRITE_UNLOCK(&ct_lock); ++ write_unlock(&ct_lock); + + return (!strcmp(master_conntrack->layer7.app_proto, info->protocol)); + } + else { + /* If not classified, set to "unknown" to distinguish from + connections that are still being tested. */ -+ WRITE_LOCK(&ct_lock); ++ write_lock(&ct_lock); + master_conntrack->layer7.app_proto = kmalloc(strlen("unknown")+1, GFP_ATOMIC); + if(!master_conntrack->layer7.app_proto){ + if (net_ratelimit()) + printk(KERN_ERR "layer7: out of memory in match_no_append, bailing.\n"); -+ WRITE_UNLOCK(&ct_lock); ++ write_unlock(&ct_lock); + return 1; + } + strcpy(master_conntrack->layer7.app_proto, "unknown"); -+ WRITE_UNLOCK(&ct_lock); ++ write_unlock(&ct_lock); + return 0; + } +} @@ -430,7 +426,7 @@ + + /* Strip nulls. Make everything lower case (our regex lib doesn't + do case insensitivity). Add it to the end of the current data. */ -+ for(i = 0; i < CONFIG_IP_NF_MATCH_LAYER7_MAXDATALEN-oldlength-1 && ++ for(i = 0; i < maxdatalen-oldlength-1 && + i < appdatalen; i++) { + if(app_data[i] != '\0') { + master_conntrack->layer7.app_data[length+oldlength] = @@ -463,13 +459,12 @@ + return info->invert; + } + -+ /* Treat the parent and all its children together as one connection, -+ except for the purpose of setting conntrack->layer7.app_proto in the -+ actual connection. This makes /proc/net/ip_conntrack somewhat more -+ satisfying. */ -+ if(!(conntrack = ip_conntrack_get((struct sk_buff *)skb, &ctinfo)) || ++ /* Treat parent & all its children together as one connection, except ++ for the purpose of setting conntrack->layer7.app_proto in the actual ++ connection. This makes /proc/net/ip_conntrack more satisfying. 
*/ ++ if(!(conntrack = ip_conntrack_get((struct sk_buff *)skb, &ctinfo)) || + !(master_conntrack = ip_conntrack_get((struct sk_buff *)skb, &master_ctinfo))) { -+ DPRINTK("layer7: packet is not from a known connection, giving up.\n"); ++ //DPRINTK("layer7: packet is not from a known connection, giving up.\n"); + return info->invert; + } + @@ -505,25 +500,25 @@ + app_data = skb->data + app_data_offset(skb); + appdatalen = skb->tail - app_data; + -+ LOCK_BH(&list_lock); ++ spin_lock_bh(&list_lock); + /* the return value gets checked later, when we're ready to use it */ + comppattern = compile_and_cache(info->pattern, info->protocol); -+ UNLOCK_BH(&list_lock); ++ spin_unlock_bh(&list_lock); + + /* On the first packet of a connection, allocate space for app data */ -+ WRITE_LOCK(&ct_lock); ++ write_lock(&ct_lock); + if(TOTAL_PACKETS == 1 && !skb->cb[0] && !master_conntrack->layer7.app_data) { -+ master_conntrack->layer7.app_data = kmalloc(CONFIG_IP_NF_MATCH_LAYER7_MAXDATALEN, GFP_ATOMIC); ++ master_conntrack->layer7.app_data = kmalloc(maxdatalen, GFP_ATOMIC); + if(!master_conntrack->layer7.app_data){ + if (net_ratelimit()) + printk(KERN_ERR "layer7: out of memory in match, bailing.\n"); -+ WRITE_UNLOCK(&ct_lock); ++ write_unlock(&ct_lock); + return info->invert; + } + + master_conntrack->layer7.app_data[0] = '\0'; + } -+ WRITE_UNLOCK(&ct_lock); ++ write_unlock(&ct_lock); + + /* Can be here, but unallocated, if numpackets is increased near + the beginning of a connection */ @@ -532,9 +527,9 @@ + + if(!skb->cb[0]){ + int newbytes; -+ WRITE_LOCK(&ct_lock); ++ write_lock(&ct_lock); + newbytes = add_data(master_conntrack, app_data, appdatalen); -+ WRITE_UNLOCK(&ct_lock); ++ write_unlock(&ct_lock); + + if(newbytes == 0) { /* didn't add any data */ + skb->cb[0] = 1; @@ -549,21 +544,21 @@ + pattern_result = 0; + /* If the regexp failed to compile, don't bother running it */ + } else if(comppattern && regexec(comppattern, master_conntrack->layer7.app_data)) { -+ DPRINTK("layer7: regexec positive: %s!\n", info->protocol); ++ DPRINTK("layer7: matched %s\n", info->protocol); + pattern_result = 1; + } else pattern_result = 0; + + if(pattern_result) { -+ WRITE_LOCK(&ct_lock); ++ write_lock(&ct_lock); + master_conntrack->layer7.app_proto = kmalloc(strlen(info->protocol)+1, GFP_ATOMIC); + if(!master_conntrack->layer7.app_proto){ + if (net_ratelimit()) + printk(KERN_ERR "layer7: out of memory in match, bailing.\n"); -+ WRITE_UNLOCK(&ct_lock); ++ write_unlock(&ct_lock); + return (pattern_result ^ info->invert); + } + strcpy(master_conntrack->layer7.app_proto, info->protocol); -+ WRITE_UNLOCK(&ct_lock); ++ write_unlock(&ct_lock); + } + + /* mark the packet seen */ @@ -632,7 +627,10 @@ + return count; + } + -+ copy_from_user(foo, buffer, count); ++ if(copy_from_user(foo, buffer, count)) { ++ return -EFAULT; ++ } ++ + + num_packets = my_atoi(foo); + kfree (foo); @@ -667,6 +665,16 @@ +static int __init init(void) +{ + layer7_init_proc(); ++ if(maxdatalen < 1) { ++ printk(KERN_WARNING "layer7: maxdatalen can't be < 1, using 1\n"); ++ maxdatalen = 1; ++ } ++ /* This is not a hard limit. It's just here to prevent people from ++ bringing their slow machines to a grinding halt. 
*/ ++ else if(maxdatalen > 65536) { ++ printk(KERN_WARNING "layer7: maxdatalen can't be > 65536, using 65536\n"); ++ maxdatalen = 65536; ++ } + return ipt_register_match(&layer7_match); +} + @@ -678,8 +686,8 @@ + +module_init(init); +module_exit(fini); ---- linux-2.6.11.3-stock/net/ipv4/netfilter/regexp/regexp.c 1969-12-31 18:00:00.000000000 -0600 -+++ linux-2.6.11.3-layer7/net/ipv4/netfilter/regexp/regexp.c 2005-03-13 20:30:01.000000000 -0600 +--- linux-2.6.14/net/ipv4/netfilter/regexp/regexp.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-2.6.14-layer7/net/ipv4/netfilter/regexp/regexp.c 2005-11-12 17:31:34.000000000 -0600 @@ -0,0 +1,1195 @@ +/* + * regcomp and regexec -- regsub and regerror are elsewhere @@ -1876,8 +1884,8 @@ +#endif + + ---- linux-2.6.11.3-stock/net/ipv4/netfilter/regexp/regexp.h 1969-12-31 18:00:00.000000000 -0600 -+++ linux-2.6.11.3-layer7/net/ipv4/netfilter/regexp/regexp.h 2005-03-13 20:30:01.000000000 -0600 +--- linux-2.6.14/net/ipv4/netfilter/regexp/regexp.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-2.6.14-layer7/net/ipv4/netfilter/regexp/regexp.h 2005-11-12 17:31:34.000000000 -0600 @@ -0,0 +1,41 @@ +/* + * Definitions etc. for regexp(3) routines. @@ -1920,16 +1928,16 @@ +void regerror(char *s); + +#endif ---- linux-2.6.11.3-stock/net/ipv4/netfilter/regexp/regmagic.h 1969-12-31 18:00:00.000000000 -0600 -+++ linux-2.6.11.3-layer7/net/ipv4/netfilter/regexp/regmagic.h 2005-03-13 20:30:01.000000000 -0600 +--- linux-2.6.14/net/ipv4/netfilter/regexp/regmagic.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-2.6.14-layer7/net/ipv4/netfilter/regexp/regmagic.h 2005-11-12 17:31:34.000000000 -0600 @@ -0,0 +1,5 @@ +/* + * The first byte of the regexp internal "program" is actually this magic + * number; the start node begins in the second byte. + */ +#define MAGIC 0234 ---- linux-2.6.11.3-stock/net/ipv4/netfilter/regexp/regsub.c 1969-12-31 18:00:00.000000000 -0600 -+++ linux-2.6.11.3-layer7/net/ipv4/netfilter/regexp/regsub.c 2005-03-13 20:30:01.000000000 -0600 +--- linux-2.6.14/net/ipv4/netfilter/regexp/regsub.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-2.6.14-layer7/net/ipv4/netfilter/regexp/regsub.c 2005-11-12 17:31:34.000000000 -0600 @@ -0,0 +1,95 @@ +/* + * regsub diff --git a/openwrt/target/linux/linux-2.6/patches/generic/101-mppe-mppc-1.3.patch b/openwrt/target/linux/linux-2.6/patches/generic/101-mppe-mppc-1.3.patch deleted file mode 100644 index aa430252c7..0000000000 --- a/openwrt/target/linux/linux-2.6/patches/generic/101-mppe-mppc-1.3.patch +++ /dev/null @@ -1,1559 +0,0 @@ -diff -ruN linux-2.6.12.orig/drivers/net/Kconfig linux-2.6.12/drivers/net/Kconfig ---- linux-2.6.12.orig/drivers/net/Kconfig 2005-06-28 19:57:16.000000000 +0200 -+++ linux-2.6.12/drivers/net/Kconfig 2005-06-28 20:07:01.000000000 +0200 -@@ -2417,6 +2417,32 @@ - module; it is called bsd_comp and will show up in the directory - modules once you have said "make modules". If unsure, say N. - -+config PPP_MPPE_MPPC -+ tristate "Microsoft PPP compression/encryption (MPPC/MPPE)" -+ depends on PPP -+ select CRYPTO_SHA1 -+ select CRYPTO_ARC4 -+ ---help--- -+ Support for the Microsoft Point-To-Point Compression (RFC2118) and -+ Microsoft Point-To-Point Encryption (RFC3078). These protocols are -+ supported by Microsoft Windows and wide range of "hardware" access -+ servers. MPPE is common protocol in Virtual Private Networks. According -+ to RFC3078, MPPE supports 40, 56 and 128-bit key lengths. 
Depending on -+ PPP daemon configuration on both ends of the link, following scenarios -+ are possible: -+ - only compression (MPPC) is used, -+ - only encryption (MPPE) is used, -+ - compression and encryption (MPPC+MPPE) are used. -+ -+ Please note that Hi/Fn (http://www.hifn.com) holds patent on MPPC so -+ you should check if this patent is valid in your country in order to -+ avoid legal problems. -+ -+ For more information please visit http://free.polbox.pl/h/hs001 -+ -+ To compile this driver as a module, choose M here. The module will -+ be called ppp_mppe_mppc.ko. -+ - config PPPOE - tristate "PPP over Ethernet (EXPERIMENTAL)" - depends on EXPERIMENTAL && PPP -diff -ruN linux-2.6.12.orig/drivers/net/Makefile linux-2.6.12/drivers/net/Makefile ---- linux-2.6.12.orig/drivers/net/Makefile 2005-06-28 19:57:16.000000000 +0200 -+++ linux-2.6.12/drivers/net/Makefile 2005-06-28 20:07:01.000000000 +0200 -@@ -105,6 +105,7 @@ - obj-$(CONFIG_PPP_SYNC_TTY) += ppp_synctty.o - obj-$(CONFIG_PPP_DEFLATE) += ppp_deflate.o - obj-$(CONFIG_PPP_BSDCOMP) += bsd_comp.o -+obj-$(CONFIG_PPP_MPPE_MPPC) += ppp_mppe_mppc.o - obj-$(CONFIG_PPPOE) += pppox.o pppoe.o - - obj-$(CONFIG_SLIP) += slip.o -diff -ruN linux-2.6.12.orig/drivers/net/ppp_generic.c linux-2.6.12/drivers/net/ppp_generic.c ---- linux-2.6.12.orig/drivers/net/ppp_generic.c 2005-06-28 19:57:20.000000000 +0200 -+++ linux-2.6.12/drivers/net/ppp_generic.c 2005-06-28 20:07:01.000000000 +0200 -@@ -19,7 +19,7 @@ - * PPP driver, written by Michael Callahan and Al Longyear, and - * subsequently hacked by Paul Mackerras. - * -- * ==FILEVERSION 20041108== -+ * ==FILEVERSION 20050110== - */ - - #include <linux/config.h> -@@ -105,6 +105,7 @@ - spinlock_t rlock; /* lock for receive side 58 */ - spinlock_t wlock; /* lock for transmit side 5c */ - int mru; /* max receive unit 60 */ -+ int mru_alloc; /* MAX(1500,MRU) for dev_alloc_skb() */ - unsigned int flags; /* control bits 64 */ - unsigned int xstate; /* transmit state bits 68 */ - unsigned int rstate; /* receive state bits 6c */ -@@ -632,7 +633,9 @@ - case PPPIOCSMRU: - if (get_user(val, p)) - break; -- ppp->mru = val; -+ ppp->mru_alloc = ppp->mru = val; -+ if (ppp->mru_alloc < PPP_MRU) -+ ppp->mru_alloc = PPP_MRU; /* increase for broken peers */ - err = 0; - break; - -@@ -1107,14 +1110,37 @@ - case PPP_CCP: - /* peek at outbound CCP frames */ - ppp_ccp_peek(ppp, skb, 0); -+ /* -+ * When LZS or MPPE/MPPC has been negotiated we don't send -+ * CCP_RESETACK after receiving CCP_RESETREQ; in fact pppd -+ * sends such a packet but we silently discard it here -+ */ -+ if (CCP_CODE(skb->data+2) == CCP_RESETACK -+ && (ppp->xcomp->compress_proto == CI_MPPE -+ || ppp->xcomp->compress_proto == CI_LZS)) { -+ --ppp->stats.tx_packets; -+ ppp->stats.tx_bytes -= skb->len - 2; -+ kfree_skb(skb); -+ return; -+ } - break; - } - - /* try to do packet compression */ - if ((ppp->xstate & SC_COMP_RUN) && ppp->xc_state != 0 - && proto != PPP_LCP && proto != PPP_CCP) { -- new_skb = alloc_skb(ppp->dev->mtu + ppp->dev->hard_header_len, -- GFP_ATOMIC); -+ int comp_ovhd = 0; -+ /* -+ * because of possible data expansion when MPPC or LZS -+ * is used, allocate compressor's buffer 12.5% bigger -+ * than MTU -+ */ -+ if (ppp->xcomp->compress_proto == CI_MPPE) -+ comp_ovhd = ((ppp->dev->mtu * 9) / 8) + 1 + MPPE_OVHD; -+ else if (ppp->xcomp->compress_proto == CI_LZS) -+ comp_ovhd = ((ppp->dev->mtu * 9) / 8) + 1 + LZS_OVHD; -+ new_skb = alloc_skb(ppp->dev->mtu + ppp->dev->hard_header_len -+ + comp_ovhd, GFP_ATOMIC); - if (new_skb == 0) { - 
printk(KERN_ERR "PPP: no memory (comp pkt)\n"); - goto drop; -@@ -1132,9 +1158,21 @@ - skb = new_skb; - skb_put(skb, len); - skb_pull(skb, 2); /* pull off A/C bytes */ -- } else { -+ } else if (len == 0) { - /* didn't compress, or CCP not up yet */ - kfree_skb(new_skb); -+ } else { -+ /* -+ * (len < 0) -+ * MPPE requires that we do not send unencrypted -+ * frames. The compressor will return -1 if we -+ * should drop the frame. We cannot simply test -+ * the compress_proto because MPPE and MPPC share -+ * the same number. -+ */ -+ printk(KERN_ERR "ppp: compressor dropped pkt\n"); -+ kfree_skb(new_skb); -+ goto drop; - } - } - -@@ -1640,14 +1678,15 @@ - goto err; - - if (proto == PPP_COMP) { -- ns = dev_alloc_skb(ppp->mru + PPP_HDRLEN); -+ ns = dev_alloc_skb(ppp->mru_alloc + PPP_HDRLEN); - if (ns == 0) { - printk(KERN_ERR "ppp_decompress_frame: no memory\n"); - goto err; - } - /* the decompressor still expects the A/C bytes in the hdr */ - len = ppp->rcomp->decompress(ppp->rc_state, skb->data - 2, -- skb->len + 2, ns->data, ppp->mru + PPP_HDRLEN); -+ skb->len + 2, ns->data, -+ ppp->mru_alloc + PPP_HDRLEN); - if (len < 0) { - /* Pass the compressed frame to pppd as an - error indication. */ -@@ -1673,7 +1712,14 @@ - return skb; - - err: -- ppp->rstate |= SC_DC_ERROR; -+ if (ppp->rcomp->compress_proto != CI_MPPE -+ && ppp->rcomp->compress_proto != CI_LZS) { -+ /* -+ * If decompression protocol isn't MPPE/MPPC or LZS, we set -+ * SC_DC_ERROR flag and wait for CCP_RESETACK -+ */ -+ ppp->rstate |= SC_DC_ERROR; -+ } - ppp_receive_error(ppp); - return skb; - } -@@ -2349,6 +2395,7 @@ - memset(ppp, 0, sizeof(struct ppp)); - - ppp->mru = PPP_MRU; -+ ppp->mru_alloc = PPP_MRU; - init_ppp_file(&ppp->file, INTERFACE); - ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */ - for (i = 0; i < NUM_NP; ++i) -diff -ruN linux-2.6.12.orig/drivers/net/ppp_mppe_mppc.c linux-2.6.12/drivers/net/ppp_mppe_mppc.c ---- linux-2.6.12.orig/drivers/net/ppp_mppe_mppc.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-2.6.12/drivers/net/ppp_mppe_mppc.c 2005-06-28 20:07:01.000000000 +0200 -@@ -0,0 +1,1299 @@ -+/* -+ * ppp_mppe_mppc.c - MPPC/MPPE "compressor/decompressor" module. -+ * -+ * Copyright (c) 1994 Árpád Magosányi <mag@bunuel.tii.matav.hu> -+ * Copyright (c) 1999 Tim Hockin, Cobalt Networks Inc. <thockin@cobaltnet.com> -+ * Copyright (c) 2002-2004 Jan Dubiec <jdx@slackware.pl> -+ * -+ * Permission to use, copy, modify, and distribute this software and its -+ * documentation is hereby granted, provided that the above copyright -+ * notice appears in all copies. This software is provided without any -+ * warranty, express or implied. -+ * -+ * The code is based on MPPE kernel module written by Árpád Magosányi and -+ * Tim Hockin which can be found on http://planetmirror.com/pub/mppe/. -+ * I have added MPPC and 56 bit session keys support in MPPE. -+ * -+ * WARNING! Although this is open source code, its usage in some countries -+ * (in particular in the USA) may violate Stac Inc. patent for MPPC. -+ * -+ * ==FILEVERSION 20041123== -+ * -+ */ -+ -+#include <linux/init.h> -+#include <linux/module.h> -+#include <linux/mm.h> -+#include <linux/slab.h> -+#include <asm/scatterlist.h> -+#include <linux/vmalloc.h> -+#include <linux/crypto.h> -+ -+#include <linux/ppp_defs.h> -+#include <linux/ppp-comp.h> -+ -+/* -+ * State for a mppc/mppe "(de)compressor". 
-+ */ -+struct ppp_mppe_state { -+ struct crypto_tfm *arc4_tfm; -+ struct crypto_tfm *sha1_tfm; -+ u8 *sha1_digest; -+ u8 master_key[MPPE_MAX_KEY_LEN]; -+ u8 session_key[MPPE_MAX_KEY_LEN]; -+ u8 mppc; /* do we use compression (MPPC)? */ -+ u8 mppe; /* do we use encryption (MPPE)? */ -+ u8 keylen; /* key length in bytes */ -+ u8 bitkeylen; /* key length in bits */ -+ u16 ccount; /* coherency counter */ -+ u16 bits; /* MPPC/MPPE control bits */ -+ u8 stateless; /* do we use stateless mode? */ -+ u8 nextflushed; /* set A bit in the next outgoing packet; -+ used only by compressor*/ -+ u8 flushexpected; /* drop packets until A bit is received; -+ used only by decompressor*/ -+ u8 *hist; /* MPPC history */ -+ u16 *hash; /* Hash table; used only by compressor */ -+ u16 histptr; /* history "cursor" */ -+ int unit; -+ int debug; -+ int mru; -+ struct compstat stats; -+}; -+ -+#define MPPE_HIST_LEN 8192 /* MPPC history size */ -+#define MPPE_MAX_CCOUNT 0x0FFF /* max. coherency counter value */ -+ -+#define MPPE_BIT_FLUSHED 0x80 /* bit A */ -+#define MPPE_BIT_RESET 0x40 /* bit B */ -+#define MPPE_BIT_COMP 0x20 /* bit C */ -+#define MPPE_BIT_ENCRYPTED 0x10 /* bit D */ -+ -+#define MPPE_SALT0 0xD1 /* values used in MPPE key derivation */ -+#define MPPE_SALT1 0x26 /* according to RFC3079 */ -+#define MPPE_SALT2 0x9E -+ -+#define MPPE_CCOUNT(x) ((((x)[4] & 0x0f) << 8) + (x)[5]) -+#define MPPE_BITS(x) ((x)[4] & 0xf0) -+#define MPPE_CTRLHI(x) ((((x)->ccount & 0xf00)>>8)|((x)->bits)) -+#define MPPE_CTRLLO(x) ((x)->ccount & 0xff) -+ -+/* -+ * Kernel Crypto API needs its arguments to be in kmalloc'd memory, not in the -+ * module static data area. That means sha_pad needs to be kmalloc'd. It is done -+ * in mppe_module_init(). This has been pointed out on 30th July 2004 by Oleg -+ * Makarenko on pptpclient-devel mailing list. -+ */ -+#define SHA1_PAD_SIZE 40 -+struct sha_pad { -+ unsigned char sha_pad1[SHA1_PAD_SIZE]; -+ unsigned char sha_pad2[SHA1_PAD_SIZE]; -+}; -+static struct sha_pad *sha_pad; -+ -+static inline void -+setup_sg(struct scatterlist *sg, const void *address, unsigned int length) -+{ -+ sg[0].page = virt_to_page(address); -+ sg[0].offset = offset_in_page(address); -+ sg[0].length = length; -+} -+ -+static inline void -+arc4_setkey(struct ppp_mppe_state *state, const unsigned char *key, -+ const unsigned int keylen) -+{ -+ crypto_cipher_setkey(state->arc4_tfm, key, keylen); -+} -+ -+static inline void -+arc4_encrypt(struct ppp_mppe_state *state, const unsigned char *in, -+ const unsigned int len, unsigned char *out) -+{ -+ struct scatterlist sgin[4], sgout[4]; -+ -+ setup_sg(sgin, in, len); -+ setup_sg(sgout, out, len); -+ crypto_cipher_encrypt(state->arc4_tfm, sgout, sgin, len); -+} -+ -+#define arc4_decrypt arc4_encrypt -+ -+/* -+ * Key Derivation, from RFC 3078, RFC 3079. -+ * Equivalent to Get_Key() for MS-CHAP as described in RFC 3079. 
-+ */
-+static void
-+get_new_key_from_sha(struct ppp_mppe_state *state, unsigned char *interim_key)
-+{
-+ struct scatterlist sg[4];
-+
-+ setup_sg(&sg[0], state->master_key, state->keylen);
-+ setup_sg(&sg[1], sha_pad->sha_pad1, sizeof(sha_pad->sha_pad1));
-+ setup_sg(&sg[2], state->session_key, state->keylen);
-+ setup_sg(&sg[3], sha_pad->sha_pad2, sizeof(sha_pad->sha_pad2));
-+
-+ crypto_digest_digest (state->sha1_tfm, sg, 4, state->sha1_digest);
-+
-+ memcpy(interim_key, state->sha1_digest, state->keylen);
-+}
-+
-+static void
-+mppe_change_key(struct ppp_mppe_state *state, int initialize)
-+{
-+ unsigned char interim_key[MPPE_MAX_KEY_LEN];
-+
-+ get_new_key_from_sha(state, interim_key);
-+ if (initialize) {
-+ memcpy(state->session_key, interim_key, state->keylen);
-+ } else {
-+ arc4_setkey(state, interim_key, state->keylen);
-+ arc4_encrypt(state, interim_key, state->keylen, state->session_key);
-+ }
-+ if (state->keylen == 8) {
-+ if (state->bitkeylen == 40) {
-+ state->session_key[0] = MPPE_SALT0;
-+ state->session_key[1] = MPPE_SALT1;
-+ state->session_key[2] = MPPE_SALT2;
-+ } else {
-+ state->session_key[0] = MPPE_SALT0;
-+ }
-+ }
-+ arc4_setkey(state, state->session_key, state->keylen);
-+}
-+
-+/* increase 12-bit coherency counter */
-+static inline void
-+mppe_increase_ccount(struct ppp_mppe_state *state)
-+{
-+ state->ccount = (state->ccount + 1) & MPPE_MAX_CCOUNT;
-+ if (state->mppe) {
-+ if (state->stateless) {
-+ mppe_change_key(state, 0);
-+ state->nextflushed = 1;
-+ } else {
-+ if ((state->ccount & 0xff) == 0xff) {
-+ mppe_change_key(state, 0);
-+ }
-+ }
-+ }
-+}
-+
-+/* allocate space for an MPPE/MPPC (de)compressor. */
-+/* comp != 0 -> init compressor */
-+/* comp = 0 -> init decompressor */
-+static void *
-+mppe_alloc(unsigned char *options, int opt_len, int comp)
-+{
-+ struct ppp_mppe_state *state;
-+ unsigned int digestsize;
-+ u8* fname;
-+
-+ fname = comp ? "mppe_comp_alloc" : "mppe_decomp_alloc";
-+
-+ /*
-+ * Hack warning - in addition to the standard MPPC/MPPE configuration
-+ * options, pppd passes the (de)compressor an 8 or 16 byte session key.
-+ * Therefore options[1] contains the MPPC/MPPE configuration option length
-+ * (CILEN_MPPE = 6), but the real options length, depending on the key
-+ * length, is 6+8 or 6+16.
-+ */
-+ if (opt_len < CILEN_MPPE) {
-+ printk(KERN_WARNING "%s: wrong options length: %u\n", fname, opt_len);
-+ return NULL;
-+ }
-+
-+ if (options[0] != CI_MPPE || options[1] != CILEN_MPPE ||
-+ (options[2] & ~MPPE_STATELESS) != 0 ||
-+ options[3] != 0 || options[4] != 0 ||
-+ (options[5] & ~(MPPE_128BIT|MPPE_56BIT|MPPE_40BIT|MPPE_MPPC)) != 0 ||
-+ (options[5] & (MPPE_128BIT|MPPE_56BIT|MPPE_40BIT|MPPE_MPPC)) == 0) {
-+ printk(KERN_WARNING "%s: options rejected: o[0]=%02x, o[1]=%02x, "
-+ "o[2]=%02x, o[3]=%02x, o[4]=%02x, o[5]=%02x\n", fname, options[0],
-+ options[1], options[2], options[3], options[4], options[5]);
-+ return NULL;
-+ }
-+
-+ state = (struct ppp_mppe_state *)kmalloc(sizeof(*state), GFP_KERNEL);
-+ if (state == NULL) {
-+ printk(KERN_ERR "%s: cannot allocate space for %scompressor\n", fname,
-+ comp ? "" : "de");
-+ return NULL;
-+ }
-+ memset(state, 0, sizeof(struct ppp_mppe_state));
-+
-+ state->mppc = options[5] & MPPE_MPPC; /* Do we use MPPC? */
-+ state->mppe = options[5] & (MPPE_128BIT | MPPE_56BIT |
-+ MPPE_40BIT); /* Do we use MPPE? */
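
mppe_increase_ccount() above fixes the rekey schedule: stateless mode derives a fresh session key on every increment (and forces the FLUSHED bit on the next packet), while stateful mode rekeys only when the low byte of the counter reaches 0xff, i.e. once every 256 packets. A quick simulation of both schedules, using only the counter arithmetic above:

    #include <stdio.h>

    #define MPPE_MAX_CCOUNT 0x0fff

    int main(void)
    {
        unsigned ccount, i, rekeys;

        /* stateless: mppe_change_key() runs on every single increment */
        printf("stateless: %d rekeys in 4096 packets\n", 4096);

        /* stateful: rekey only when (ccount & 0xff) == 0xff */
        ccount = MPPE_MAX_CCOUNT;   /* initial value set in mppe_init() */
        rekeys = 0;
        for (i = 0; i < 4096; i++) {
            ccount = (ccount + 1) & MPPE_MAX_CCOUNT;
            if ((ccount & 0xff) == 0xff)
                rekeys++;
        }
        printf("stateful:  %u rekeys in 4096 packets\n", rekeys);  /* 16 */
        return 0;
    }
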
-+
-+ if (state->mppc) {
-+ /* allocate MPPC history */
-+ state->hist = (u8*)vmalloc(2*MPPE_HIST_LEN*sizeof(u8));
-+ if (state->hist == NULL) {
-+ kfree(state);
-+ printk(KERN_ERR "%s: cannot allocate space for MPPC history\n",
-+ fname);
-+ return NULL;
-+ }
-+
-+ /* allocate hashtable for MPPC compressor */
-+ if (comp) {
-+ state->hash = (u16*)vmalloc(MPPE_HIST_LEN*sizeof(u16));
-+ if (state->hash == NULL) {
-+ vfree(state->hist);
-+ kfree(state);
-+ printk(KERN_ERR "%s: cannot allocate space for MPPC hash table\n",
-+ fname);
-+ return NULL;
-+ }
-+ }
-+ }
-+
-+ if (state->mppe) { /* specific for MPPE */
-+ /* Load ARC4 algorithm */
-+ state->arc4_tfm = crypto_alloc_tfm("arc4", 0);
-+ if (state->arc4_tfm == NULL) {
-+ if (state->mppc) {
-+ vfree(state->hist);
-+ if (comp)
-+ vfree(state->hash);
-+ }
-+ kfree(state);
-+ printk(KERN_ERR "%s: cannot load ARC4 module\n", fname);
-+ return NULL;
-+ }
-+
-+ /* Load SHA1 algorithm */
-+ state->sha1_tfm = crypto_alloc_tfm("sha1", 0);
-+ if (state->sha1_tfm == NULL) {
-+ crypto_free_tfm(state->arc4_tfm);
-+ if (state->mppc) {
-+ vfree(state->hist);
-+ if (comp)
-+ vfree(state->hash);
-+ }
-+ kfree(state);
-+ printk(KERN_ERR "%s: cannot load SHA1 module\n", fname);
-+ return NULL;
-+ }
-+
-+ digestsize = crypto_tfm_alg_digestsize(state->sha1_tfm);
-+ if (digestsize < MPPE_MAX_KEY_LEN) {
-+ crypto_free_tfm(state->sha1_tfm);
-+ crypto_free_tfm(state->arc4_tfm);
-+ if (state->mppc) {
-+ vfree(state->hist);
-+ if (comp)
-+ vfree(state->hash);
-+ }
-+ kfree(state);
-+ printk(KERN_ERR "%s: CryptoAPI SHA1 digest size too small\n", fname);
-+ return NULL;
-+ }
-+
-+ state->sha1_digest = kmalloc(digestsize, GFP_KERNEL);
-+ if (!state->sha1_digest) {
-+ crypto_free_tfm(state->sha1_tfm);
-+ crypto_free_tfm(state->arc4_tfm);
-+ if (state->mppc) {
-+ vfree(state->hist);
-+ if (comp)
-+ vfree(state->hash);
-+ }
-+ kfree(state);
-+ printk(KERN_ERR "%s: cannot allocate space for SHA1 digest\n", fname);
-+ return NULL;
-+ }
-+
-+ memcpy(state->master_key, options+CILEN_MPPE, MPPE_MAX_KEY_LEN);
-+ memcpy(state->session_key, state->master_key, MPPE_MAX_KEY_LEN);
-+ /* initial key generation is done in mppe_init() */
-+ }
-+
-+ return (void *) state;
-+}
-+
-+static void *
-+mppe_comp_alloc(unsigned char *options, int opt_len)
-+{
-+ return mppe_alloc(options, opt_len, 1);
-+}
-+
-+static void *
-+mppe_decomp_alloc(unsigned char *options, int opt_len)
-+{
-+ return mppe_alloc(options, opt_len, 0);
-+}
-+
-+/* cleanup the (de)compressor */
-+static void
-+mppe_comp_free(void *arg)
-+{
-+ struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
-+
-+ if (state != NULL) {
-+ if (state->mppe) {
-+ if (state->sha1_digest != NULL)
-+ kfree(state->sha1_digest);
-+ if (state->sha1_tfm != NULL)
-+ crypto_free_tfm(state->sha1_tfm);
-+ if (state->arc4_tfm != NULL)
-+ crypto_free_tfm(state->arc4_tfm);
-+ }
-+ if (state->hist != NULL)
-+ vfree(state->hist);
-+ if (state->hash != NULL)
-+ vfree(state->hash);
-+ kfree(state);
-+ }
-+}
-+
-+/* init MPPC/MPPE (de)compressor */
-+/* comp != 0 -> init compressor */
-+/* comp = 0 -> init decompressor */
-+static int
-+mppe_init(void *arg, unsigned char *options, int opt_len, int unit,
-+ int hdrlen, int mru, int debug, int comp)
-+{
-+ struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
-+ u8* fname;
-+
-+ fname = comp ?
"mppe_comp_init" : "mppe_decomp_init"; -+ -+ if (opt_len < CILEN_MPPE) { -+ if (debug) -+ printk(KERN_WARNING "%s: wrong options length: %u\n", -+ fname, opt_len); -+ return 0; -+ } -+ -+ if (options[0] != CI_MPPE || options[1] != CILEN_MPPE || -+ (options[2] & ~MPPE_STATELESS) != 0 || -+ options[3] != 0 || options[4] != 0 || -+ (options[5] & ~(MPPE_56BIT|MPPE_128BIT|MPPE_40BIT|MPPE_MPPC)) != 0 || -+ (options[5] & (MPPE_56BIT|MPPE_128BIT|MPPE_40BIT|MPPE_MPPC)) == 0) { -+ if (debug) -+ printk(KERN_WARNING "%s: options rejected: o[0]=%02x, o[1]=%02x, " -+ "o[2]=%02x, o[3]=%02x, o[4]=%02x, o[5]=%02x\n", fname, -+ options[0], options[1], options[2], options[3], options[4], -+ options[5]); -+ return 0; -+ } -+ -+ if ((options[5] & ~MPPE_MPPC) != MPPE_128BIT && -+ (options[5] & ~MPPE_MPPC) != MPPE_56BIT && -+ (options[5] & ~MPPE_MPPC) != MPPE_40BIT && -+ (options[5] & MPPE_MPPC) != MPPE_MPPC) { -+ if (debug) -+ printk(KERN_WARNING "%s: don't know what to do: o[5]=%02x\n", -+ fname, options[5]); -+ return 0; -+ } -+ -+ state->mppc = options[5] & MPPE_MPPC; /* Do we use MPPC? */ -+ state->mppe = options[5] & (MPPE_128BIT | MPPE_56BIT | -+ MPPE_40BIT); /* Do we use MPPE? */ -+ state->stateless = options[2] & MPPE_STATELESS; /* Do we use stateless mode? */ -+ -+ switch (state->mppe) { -+ case MPPE_40BIT: /* 40 bit key */ -+ state->keylen = 8; -+ state->bitkeylen = 40; -+ break; -+ case MPPE_56BIT: /* 56 bit key */ -+ state->keylen = 8; -+ state->bitkeylen = 56; -+ break; -+ case MPPE_128BIT: /* 128 bit key */ -+ state->keylen = 16; -+ state->bitkeylen = 128; -+ break; -+ default: -+ state->keylen = 0; -+ state->bitkeylen = 0; -+ } -+ -+ state->ccount = MPPE_MAX_CCOUNT; -+ state->bits = 0; -+ state->unit = unit; -+ state->debug = debug; -+ state->histptr = MPPE_HIST_LEN; -+ if (state->mppc) { /* reset history if MPPC was negotiated */ -+ memset(state->hist, 0, 2*MPPE_HIST_LEN*sizeof(u8)); -+ } -+ -+ if (state->mppe) { /* generate initial session keys */ -+ mppe_change_key(state, 1); -+ } -+ -+ if (comp) { /* specific for compressor */ -+ state->nextflushed = 1; -+ } else { /* specific for decompressor */ -+ state->mru = mru; -+ state->flushexpected = 1; -+ } -+ -+ return 1; -+} -+ -+static int -+mppe_comp_init(void *arg, unsigned char *options, int opt_len, int unit, -+ int hdrlen, int debug) -+{ -+ return mppe_init(arg, options, opt_len, unit, hdrlen, 0, debug, 1); -+} -+ -+ -+static int -+mppe_decomp_init(void *arg, unsigned char *options, int opt_len, int unit, -+ int hdrlen, int mru, int debug) -+{ -+ return mppe_init(arg, options, opt_len, unit, hdrlen, mru, debug, 0); -+} -+ -+static void -+mppe_comp_reset(void *arg) -+{ -+ struct ppp_mppe_state *state = (struct ppp_mppe_state *)arg; -+ -+ if (state->debug) -+ printk(KERN_DEBUG "%s%d: resetting MPPC/MPPE compressor\n", -+ __FUNCTION__, state->unit); -+ -+ state->nextflushed = 1; -+ if (state->mppe) -+ arc4_setkey(state, state->session_key, state->keylen); -+} -+ -+static void -+mppe_decomp_reset(void *arg) -+{ -+ /* When MPPC/MPPE is in use, we shouldn't receive any CCP Reset-Ack. -+ But when we receive such a packet, we just ignore it. 
*/ -+ return; -+} -+ -+static void -+mppe_stats(void *arg, struct compstat *stats) -+{ -+ struct ppp_mppe_state *state = (struct ppp_mppe_state *)arg; -+ -+ *stats = state->stats; -+} -+ -+/***************************/ -+/**** Compression stuff ****/ -+/***************************/ -+/* inserts 1 to 8 bits into the output buffer */ -+static inline void putbits8(u8 *buf, u32 val, const u32 n, u32 *i, u32 *l) -+{ -+ buf += *i; -+ if (*l >= n) { -+ *l = (*l) - n; -+ val <<= *l; -+ *buf = *buf | (val & 0xff); -+ if (*l == 0) { -+ *l = 8; -+ (*i)++; -+ *(++buf) = 0; -+ } -+ } else { -+ (*i)++; -+ *l = 8 - n + (*l); -+ val <<= *l; -+ *buf = *buf | ((val >> 8) & 0xff); -+ *(++buf) = val & 0xff; -+ } -+} -+ -+/* inserts 9 to 16 bits into the output buffer */ -+static inline void putbits16(u8 *buf, u32 val, const u32 n, u32 *i, u32 *l) -+{ -+ buf += *i; -+ if (*l >= n - 8) { -+ (*i)++; -+ *l = 8 - n + (*l); -+ val <<= *l; -+ *buf = *buf | ((val >> 8) & 0xff); -+ *(++buf) = val & 0xff; -+ if (*l == 0) { -+ *l = 8; -+ (*i)++; -+ *(++buf) = 0; -+ } -+ } else { -+ (*i)++; (*i)++; -+ *l = 16 - n + (*l); -+ val <<= *l; -+ *buf = *buf | ((val >> 16) & 0xff); -+ *(++buf) = (val >> 8) & 0xff; -+ *(++buf) = val & 0xff; -+ } -+} -+ -+/* inserts 17 to 24 bits into the output buffer */ -+static inline void putbits24(u8 *buf, u32 val, const u32 n, u32 *i, u32 *l) -+{ -+ buf += *i; -+ if (*l >= n - 16) { -+ (*i)++; (*i)++; -+ *l = 16 - n + (*l); -+ val <<= *l; -+ *buf = *buf | ((val >> 16) & 0xff); -+ *(++buf) = (val >> 8) & 0xff; -+ *(++buf) = val & 0xff; -+ if (*l == 0) { -+ *l = 8; -+ (*i)++; -+ *(++buf) = 0; -+ } -+ } else { -+ (*i)++; (*i)++; (*i)++; -+ *l = 24 - n + (*l); -+ val <<= *l; -+ *buf = *buf | ((val >> 24) & 0xff); -+ *(++buf) = (val >> 16) & 0xff; -+ *(++buf) = (val >> 8) & 0xff; -+ *(++buf) = val & 0xff; -+ } -+} -+ -+static int -+mppc_compress(struct ppp_mppe_state *state, unsigned char *ibuf, -+ unsigned char *obuf, int isize, int osize) -+{ -+ u32 olen, off, len, idx, i, l; -+ u8 *hist, *sbuf, *p, *q, *r, *s; -+ -+ /* -+ At this point, to avoid possible buffer overflow caused by packet -+ expansion during/after compression, we should make sure that -+ osize >= (((isize*9)/8)+1)+2, but we don't do that because in -+ ppp_generic.c we simply allocate bigger obuf. -+ -+ Maximum MPPC packet expansion is 12.5%. This is the worst case when -+ all octets in the input buffer are >= 0x80 and we cannot find any -+ repeated tokens. Additionally we have to reserve 2 bytes for MPPE/MPPC -+ status bits and coherency counter. 
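
putbits8/putbits16/putbits24 above append an n-bit code MSB-first at byte index *i, with *l counting the free bits left in the current octet; the stream starts with obuf[0] = 0 and l = 8, and each completed octet zero-initializes the next one. Copying the first two packers into a user-space harness shows the packing (buffer and code values are illustrative):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    typedef uint8_t u8;
    typedef uint32_t u32;

    /* putbits8/putbits16 copied verbatim from the patch above */
    static void putbits8(u8 *buf, u32 val, const u32 n, u32 *i, u32 *l)
    {
        buf += *i;
        if (*l >= n) {
            *l = (*l) - n;
            val <<= *l;
            *buf = *buf | (val & 0xff);
            if (*l == 0) { *l = 8; (*i)++; *(++buf) = 0; }
        } else {
            (*i)++;
            *l = 8 - n + (*l);
            val <<= *l;
            *buf = *buf | ((val >> 8) & 0xff);
            *(++buf) = val & 0xff;
        }
    }

    static void putbits16(u8 *buf, u32 val, const u32 n, u32 *i, u32 *l)
    {
        buf += *i;
        if (*l >= n - 8) {
            (*i)++;
            *l = 8 - n + (*l);
            val <<= *l;
            *buf = *buf | ((val >> 8) & 0xff);
            *(++buf) = val & 0xff;
            if (*l == 0) { *l = 8; (*i)++; *(++buf) = 0; }
        } else {
            (*i)++; (*i)++;
            *l = 16 - n + (*l);
            val <<= *l;
            *buf = *buf | ((val >> 16) & 0xff);
            *(++buf) = (val >> 8) & 0xff;
            *(++buf) = val & 0xff;
        }
    }

    int main(void)
    {
        u8 buf[8];
        u32 i = 0, l = 8, k;

        memset(buf, 0, sizeof(buf));
        putbits8(buf, 0x41, 8, &i, &l);           /* literal byte < 0x80  */
        putbits16(buf, 0x100 | 0x7f, 9, &i, &l);  /* literal 0xff, 9 bits */
        putbits16(buf, 0x3c0 | 5, 10, &i, &l);    /* 10-bit offset code 5 */
        if (l != 0 && l != 8)
            putbits8(buf, 0, l, &i, &l);          /* flush, as above      */

        for (k = 0; k < i; k++)
            printf("%02x ", buf[k]);              /* 41 bf f8 a0          */
        printf("\n");
        return 0;
    }
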
-+ */
-+
-+ hist = state->hist + MPPE_HIST_LEN;
-+ /* check if there is enough room at the end of the history */
-+ if (state->histptr + isize >= 2*MPPE_HIST_LEN) {
-+ state->bits |= MPPE_BIT_RESET;
-+ state->histptr = MPPE_HIST_LEN;
-+ memcpy(state->hist, hist, MPPE_HIST_LEN);
-+ }
-+ /* add packet to the history; isize must be <= MPPE_HIST_LEN */
-+ sbuf = state->hist + state->histptr;
-+ memcpy(sbuf, ibuf, isize);
-+ state->histptr += isize;
-+
-+ /* compress data */
-+ r = sbuf + isize;
-+ *obuf = olen = i = 0;
-+ l = 8;
-+ while (i < isize - 2) {
-+ s = q = sbuf + i;
-+ idx = ((40543*((((s[0]<<4)^s[1])<<4)^s[2]))>>4) & 0x1fff;
-+ p = hist + state->hash[idx];
-+ state->hash[idx] = (u16) (s - hist);
-+ off = s - p;
-+ if (off > MPPE_HIST_LEN - 1 || off < 1 || *p++ != *s++ || *p++ != *s++ ||
-+ *p++ != *s++) {
-+ /* no match found; encode literal byte */
-+ if (ibuf[i] < 0x80) { /* literal byte < 0x80 */
-+ putbits8(obuf, (u32) ibuf[i], 8, &olen, &l);
-+ } else { /* literal byte >= 0x80 */
-+ putbits16(obuf, (u32) (0x100|(ibuf[i]&0x7f)), 9, &olen, &l);
-+ }
-+ ++i;
-+ continue;
-+ }
-+ if (r - q >= 64) {
-+ *p++ != *s++ || *p++ != *s++ || *p++ != *s++ || *p++ != *s++ ||
-+ *p++ != *s++ || *p++ != *s++ || *p++ != *s++ || *p++ != *s++ ||
-+ *p++ != *s++ || *p++ != *s++ || *p++ != *s++ || *p++ != *s++ ||
-+ *p++ != *s++ || *p++ != *s++ || *p++ != *s++ || *p++ != *s++ ||
-+ *p++ != *s++ || *p++ != *s++ || *p++ != *s++ || *p++ != *s++ ||
-+ *p++ != *s++ || *p++ != *s++ || *p++ != *s++ || *p++ != *s++ ||
-+ *p++ != *s++ || *p++ != *s++ || *p++ != *s++ || *p++ != *s++ ||
-+ *p++ != *s++ || *p++ != *s++ || *p++ != *s++ || *p++ != *s++ ||
-+ *p++ != *s++ || *p++ != *s++ || *p++ != *s++ || *p++ != *s++ ||
-+ *p++ != *s++ || *p++ != *s++ || *p++ != *s++ || *p++ != *s++ ||
-+ *p++ != *s++ || *p++ != *s++ || *p++ != *s++ || *p++ != *s++ ||
-+ *p++ != *s++ || *p++ != *s++ || *p++ != *s++ || *p++ != *s++ ||
-+ *p++ != *s++ || *p++ != *s++ || *p++ != *s++ || *p++ != *s++ ||
-+ *p++ != *s++ || *p++ != *s++ || *p++ != *s++ || *p++ != *s++ ||
-+ *p++ != *s++ || *p++ != *s++ || *p++ != *s++ || *p++ != *s++ ||
-+ *p++ != *s++;
-+ if (s - q == 64) {
-+ p--; s--;
-+ while((*p++ == *s++) && (s < r) && (p < q));
-+ }
-+ } else {
-+ while((*p++ == *s++) && (s < r) && (p < q));
-+ }
-+ len = s - q - 1;
-+ i += len;
-+
-+ /* at least a 3-character match found; encode the data */
-+ /* encode offset */
-+ if (off < 64) { /* 10-bit offset; 0 <= offset < 64 */
-+ putbits16(obuf, 0x3c0|off, 10, &olen, &l);
-+ } else if (off < 320) { /* 12-bit offset; 64 <= offset < 320 */
-+ putbits16(obuf, 0xe00|(off-64), 12, &olen, &l);
-+ } else if (off < 8192) { /* 16-bit offset; 320 <= offset < 8192 */
-+ putbits16(obuf, 0xc000|(off-320), 16, &olen, &l);
-+ } else {
-+ /* This shouldn't happen; we return 0, which means "packet expands",
-+ and we send the packet uncompressed. */
-+ if (state->debug)
-+ printk(KERN_DEBUG "%s%d: wrong offset value: %d\n",
-+ __FUNCTION__, state->unit, off);
-+ return 0;
-+ }
-+ /* encode length of match */
-+ if (len < 4) { /* length = 3 */
-+ putbits8(obuf, 0, 1, &olen, &l);
-+ } else if (len < 8) { /* 4 <= length < 8 */
-+ putbits8(obuf, 0x08|(len&0x03), 4, &olen, &l);
-+ } else if (len < 16) { /* 8 <= length < 16 */
-+ putbits8(obuf, 0x30|(len&0x07), 6, &olen, &l);
-+ } else if (len < 32) { /* 16 <= length < 32 */
-+ putbits8(obuf, 0xe0|(len&0x0f), 8, &olen, &l);
-+ } else if (len < 64) { /* 32 <= length < 64 */
-+ putbits16(obuf, 0x3c0|(len&0x1f), 10, &olen, &l);
-+ } else if (len < 128) { /* 64 <= length < 128 */
-+ putbits16(obuf, 0xf80|(len&0x3f), 12, &olen, &l);
-+ } else if (len < 256) { /* 128 <= length < 256 */
-+ putbits16(obuf, 0x3f00|(len&0x7f), 14, &olen, &l);
-+ } else if (len < 512) { /* 256 <= length < 512 */
-+ putbits16(obuf, 0xfe00|(len&0xff), 16, &olen, &l);
-+ } else if (len < 1024) { /* 512 <= length < 1024 */
-+ putbits24(obuf, 0x3fc00|(len&0x1ff), 18, &olen, &l);
-+ } else if (len < 2048) { /* 1024 <= length < 2048 */
-+ putbits24(obuf, 0xff800|(len&0x3ff), 20, &olen, &l);
-+ } else if (len < 4096) { /* 2048 <= length < 4096 */
-+ putbits24(obuf, 0x3ff000|(len&0x7ff), 22, &olen, &l);
-+ } else if (len < 8192) { /* 4096 <= length < 8192 */
-+ putbits24(obuf, 0xffe000|(len&0xfff), 24, &olen, &l);
-+ } else {
-+ /* This shouldn't happen; we return 0, which means "packet expands",
-+ and we send the packet uncompressed. */
-+ if (state->debug)
-+ printk(KERN_DEBUG "%s%d: wrong match length value: %d\n",
-+ __FUNCTION__, state->unit, len);
-+ return 0;
-+ }
-+ }
-+
-+ /* Add remaining octets to the output */
-+ while(isize - i > 0) {
-+ if (ibuf[i] < 0x80) { /* literal byte < 0x80 */
-+ putbits8(obuf, (u32) ibuf[i++], 8, &olen, &l);
-+ } else { /* literal byte >= 0x80 */
-+ putbits16(obuf, (u32) (0x100|(ibuf[i++]&0x7f)), 9, &olen, &l);
-+ }
-+ }
-+ /* Reset unused bits of the last output octet */
-+ if ((l != 0) && (l != 8)) {
-+ putbits8(obuf, 0, l, &olen, &l);
-+ }
-+
-+ return (int) olen;
-+}
-+
-+int
-+mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
-+ int isize, int osize)
-+{
-+ struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
-+ int proto, olen, complen, off;
-+ unsigned char *wptr;
-+
-+ /* Check that the protocol is in the range we handle. */
-+ proto = PPP_PROTOCOL(ibuf);
-+ if (proto < 0x0021 || proto > 0x00fa)
-+ return 0;
-+
-+ wptr = obuf;
-+ /* Copy over the PPP header */
-+ wptr[0] = PPP_ADDRESS(ibuf);
-+ wptr[1] = PPP_CONTROL(ibuf);
-+ wptr[2] = PPP_COMP >> 8;
-+ wptr[3] = PPP_COMP;
-+ wptr += PPP_HDRLEN + (MPPE_OVHD / 2); /* Leave two octets for MPPE/MPPC bits */
-+
-+ /*
-+ * In ver. 0.99 the protocol field was compressed. Deflate and BSD compress
-+ * do PFC before actual compression, and RFC 2118 and RFC 3078 are not
-+ * precise on this topic, so I decided to do PFC. Unfortunately this change
-+ * caused incompatibility with older/other MPPE/MPPC modules. I have received
-+ * many complaints from inexperienced users, so I have decided to revert
-+ * to the previous state, i.e. the protocol field is sent uncompressed now,
-+ * although this may change in the future.
-+ *
-+ * The receiving side (mppe_decompress()) still accepts packets with
-+ * compressed and uncompressed protocol fields, so you shouldn't get
-+ * "Unsupported protocol 0x2145 received" messages anymore.
-+ */
-+ //off = (proto > 0xff) ? 2 : 3; /* PFC - skip first protocol byte if 0 */
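
The offset codes and the match-length codes above make up the MPPC tuple encoding from RFC 2118: shorter offsets and lengths get shorter prefix codes. A compact user-space helper that reproduces both tables, returning the code value and its width in bits (only the first length tiers are shown; the longer ones continue the same pattern up to 8191):

    #include <stdio.h>

    /* mirrors the offset tiers above: returns bit width, code in *code */
    static int mppc_offset_code(unsigned off, unsigned *code)
    {
        if (off < 64)   { *code = 0x3c0  | off;          return 10; }
        if (off < 320)  { *code = 0xe00  | (off - 64);   return 12; }
        if (off < 8192) { *code = 0xc000 | (off - 320);  return 16; }
        return -1;  /* impossible with an 8 KB history */
    }

    /* mirrors the first match-length tiers above (3..31) */
    static int mppc_length_code(unsigned len, unsigned *code)
    {
        if (len < 4)  { *code = 0;                   return 1; }
        if (len < 8)  { *code = 0x08 | (len & 0x03); return 4; }
        if (len < 16) { *code = 0x30 | (len & 0x07); return 6; }
        if (len < 32) { *code = 0xe0 | (len & 0x0f); return 8; }
        return -1;  /* longer tiers continue as in the patch */
    }

    int main(void)
    {
        unsigned code, off[] = { 5, 100, 1000 }, len[] = { 3, 5, 20 };
        int k, n;

        for (k = 0; k < 3; k++) {
            n = mppc_offset_code(off[k], &code);
            printf("offset %4u -> %2d bits, code 0x%04x\n", off[k], n, code);
        }
        for (k = 0; k < 3; k++) {
            n = mppc_length_code(len[k], &code);
            printf("length %4u -> %2d bits, code 0x%02x\n", len[k], n, code);
        }
        return 0;
    }
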
-+ off = 2;
-+
-+ ibuf += off;
-+
-+ mppe_increase_ccount(state);
-+
-+ if (state->nextflushed) {
-+ state->bits |= MPPE_BIT_FLUSHED;
-+ state->nextflushed = 0;
-+ if (state->mppe && !state->stateless) {
-+ /*
-+ * If this is the flag packet, the key has already been changed in
-+ * mppe_increase_ccount(), so we don't do it again.
-+ */
-+ if ((state->ccount & 0xff) != 0xff) {
-+ arc4_setkey(state, state->session_key, state->keylen);
-+ }
-+ }
-+ if (state->mppc) { /* reset history */
-+ state->bits |= MPPE_BIT_RESET;
-+ state->histptr = MPPE_HIST_LEN;
-+ memset(state->hist + MPPE_HIST_LEN, 0, MPPE_HIST_LEN*sizeof(u8));
-+ }
-+ }
-+
-+ if (state->mppc && !state->mppe) { /* Do only compression */
-+ complen = mppc_compress(state, ibuf, wptr, isize - off,
-+ osize - PPP_HDRLEN - (MPPE_OVHD / 2));
-+ /*
-+ * TODO: Implement a heuristic to handle packet expansion in a smart
-+ * way. Now, when a packet expands, we send it uncompressed and,
-+ * when the next packet is sent, we have to reset the compressor's
-+ * history. Maybe it would be better to send such a packet as
-+ * compressed in order to keep the history's continuity.
-+ */
-+ if ((complen > isize) || (complen > osize - PPP_HDRLEN) ||
-+ (complen == 0)) {
-+ /* packet expands */
-+ state->nextflushed = 1;
-+ memcpy(wptr, ibuf, isize - off);
-+ olen = isize - (off - 2) + MPPE_OVHD;
-+ (state->stats).inc_bytes += olen;
-+ (state->stats).inc_packets++;
-+ } else {
-+ state->bits |= MPPE_BIT_COMP;
-+ olen = complen + PPP_HDRLEN + (MPPE_OVHD / 2);
-+ (state->stats).comp_bytes += olen;
-+ (state->stats).comp_packets++;
-+ }
-+ } else { /* Do encryption with or without compression */
-+ state->bits |= MPPE_BIT_ENCRYPTED;
-+ if (!state->mppc && state->mppe) { /* Do only encryption */
-+ /* read from ibuf, write to wptr, adjust for PPP_HDRLEN */
-+ arc4_encrypt(state, ibuf, isize - off, wptr);
-+ olen = isize - (off - 2) + MPPE_OVHD;
-+ (state->stats).inc_bytes += olen;
-+ (state->stats).inc_packets++;
-+ } else { /* Do compression and then encryption - RFC3078 */
-+ complen = mppc_compress(state, ibuf, wptr, isize - off,
-+ osize - PPP_HDRLEN - (MPPE_OVHD / 2));
-+ /*
-+ * TODO: Implement a heuristic to handle packet expansion in a smart
-+ * way. Now, when a packet expands, we send it uncompressed and,
-+ * when the next packet is sent, we have to reset the compressor's
-+ * history. Maybe it would be better to send such a packet as
-+ * compressed in order to keep the history's continuity.
-+ */
-+ if ((complen > isize) || (complen > osize - PPP_HDRLEN) ||
-+ (complen == 0)) {
-+ /* packet expands */
-+ state->nextflushed = 1;
-+ arc4_encrypt(state, ibuf, isize - off, wptr);
-+ olen = isize - (off - 2) + MPPE_OVHD;
-+ (state->stats).inc_bytes += olen;
-+ (state->stats).inc_packets++;
-+ } else {
-+ state->bits |= MPPE_BIT_COMP;
-+ /* Hack warning!!! The RC4 implementation we use does
-+ encryption "in place" - input and output buffers can be
-+ *the same* memory area. Therefore we don't need a
-+ temporary buffer. But be careful - other implementations
-+ don't have to be so nice.
-+ I used to use ibuf as a temporary buffer here, but it led
-+ packet sniffers into error. Thanks to Wilfried Weissmann
-+ for pointing that out.
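
The in-place arc4_encrypt(state, wptr, complen, wptr) call just below relies on a general property of stream ciphers: each output byte depends only on the keystream and the current input byte, so input and output may alias. A toy XOR keystream (not RC4, and only illustrating the aliasing assumption):

    #include <stdio.h>

    /* toy keystream cipher; stands in for ARC4 only to show aliasing safety */
    static void toy_encrypt(const unsigned char *in, unsigned len,
                            unsigned char *out)
    {
        unsigned char ks = 0x5a;
        unsigned k;

        for (k = 0; k < len; k++) {
            out[k] = in[k] ^ ks;          /* out[k] never reads in[k+1..] */
            ks = (unsigned char)(ks * 5 + 1);
        }
    }

    int main(void)
    {
        unsigned char a[4] = { 1, 2, 3, 4 }, b[4] = { 1, 2, 3, 4 };

        toy_encrypt(a, 4, a);             /* in place, like the call below */
        toy_encrypt(b, 4, b);
        toy_encrypt(b, 4, b);             /* decrypt = encrypt again       */
        printf("in-place ct: %02x %02x %02x %02x\n", a[0], a[1], a[2], a[3]);
        printf("round trip:  %02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
        return 0;
    }
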
*/ -+ arc4_encrypt(state, wptr, complen, wptr); -+ olen = complen + PPP_HDRLEN + (MPPE_OVHD / 2); -+ (state->stats).comp_bytes += olen; -+ (state->stats).comp_packets++; -+ } -+ } -+ } -+ -+ /* write status bits and coherency counter into the output buffer */ -+ wptr = obuf + PPP_HDRLEN; -+ wptr[0] = MPPE_CTRLHI(state); -+ wptr[1] = MPPE_CTRLLO(state); -+ -+ state->bits = 0; -+ -+ (state->stats).unc_bytes += isize; -+ (state->stats).unc_packets++; -+ -+ return olen; -+} -+ -+/***************************/ -+/*** Decompression stuff ***/ -+/***************************/ -+static inline u32 getbits(const u8 *buf, const u32 n, u32 *i, u32 *l) -+{ -+ static const u32 m[] = {0x00, 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f, 0x7f, 0xff}; -+ u32 res, ol; -+ -+ ol = *l; -+ if (*l >= n) { -+ *l = (*l) - n; -+ res = (buf[*i] & m[ol]) >> (*l); -+ if (*l == 0) { -+ *l = 8; -+ (*i)++; -+ } -+ } else { -+ *l = 8 - n + (*l); -+ res = (buf[(*i)++] & m[ol]) << 8; -+ res = (res | buf[*i]) >> (*l); -+ } -+ -+ return res; -+} -+ -+static inline u32 getbyte(const u8 *buf, const u32 i, const u32 l) -+{ -+ if (l == 8) { -+ return buf[i]; -+ } else { -+ return (((buf[i] << 8) | buf[i+1]) >> l) & 0xff; -+ } -+} -+ -+static inline void lamecopy(u8 *dst, u8 *src, u32 len) -+{ -+ while (len--) -+ *dst++ = *src++; -+} -+ -+static int -+mppc_decompress(struct ppp_mppe_state *state, unsigned char *ibuf, -+ unsigned char *obuf, int isize, int osize) -+{ -+ u32 olen, off, len, bits, val, sig, i, l; -+ u8 *history, *s; -+ -+ history = state->hist + state->histptr; -+ olen = len = i = 0; -+ l = 8; -+ bits = isize * 8; -+ while (bits >= 8) { -+ val = getbyte(ibuf, i++, l); -+ if (val < 0x80) { /* literal byte < 0x80 */ -+ if (state->histptr < 2*MPPE_HIST_LEN) { -+ /* copy uncompressed byte to the history */ -+ (state->hist)[(state->histptr)++] = (u8) val; -+ } else { -+ /* buffer overflow; drop packet */ -+ if (state->debug) -+ printk(KERN_ERR "%s%d: trying to write outside history " -+ "buffer\n", __FUNCTION__, state->unit); -+ return DECOMP_ERROR; -+ } -+ olen++; -+ bits -= 8; -+ continue; -+ } -+ -+ sig = val & 0xc0; -+ if (sig == 0x80) { /* literal byte >= 0x80 */ -+ if (state->histptr < 2*MPPE_HIST_LEN) { -+ /* copy uncompressed byte to the history */ -+ (state->hist)[(state->histptr)++] = -+ (u8) (0x80|((val&0x3f)<<1)|getbits(ibuf, 1 , &i ,&l)); -+ } else { -+ /* buffer overflow; drop packet */ -+ if (state->debug) -+ printk(KERN_ERR "%s%d: trying to write outside history " -+ "buffer\n", __FUNCTION__, state->unit); -+ return DECOMP_ERROR; -+ } -+ olen++; -+ bits -= 9; -+ continue; -+ } -+ -+ /* Not a literal byte so it must be an (offset,length) pair */ -+ /* decode offset */ -+ sig = val & 0xf0; -+ if (sig == 0xf0) { /* 10-bit offset; 0 <= offset < 64 */ -+ off = (((val&0x0f)<<2)|getbits(ibuf, 2 , &i ,&l)); -+ bits -= 10; -+ } else { -+ if (sig == 0xe0) { /* 12-bit offset; 64 <= offset < 320 */ -+ off = ((((val&0x0f)<<4)|getbits(ibuf, 4 , &i ,&l))+64); -+ bits -= 12; -+ } else { -+ if ((sig&0xe0) == 0xc0) {/* 16-bit offset; 320 <= offset < 8192 */ -+ off = ((((val&0x1f)<<8)|getbyte(ibuf, i++, l))+320); -+ bits -= 16; -+ if (off > MPPE_HIST_LEN - 1) { -+ if (state->debug) -+ printk(KERN_DEBUG "%s%d: too big offset value: %d\n", -+ __FUNCTION__, state->unit, off); -+ return DECOMP_ERROR; -+ } -+ } else { /* this shouldn't happen */ -+ if (state->debug) -+ printk(KERN_DEBUG "%s%d: cannot decode offset value\n", -+ __FUNCTION__, state->unit); -+ return DECOMP_ERROR; -+ } -+ } -+ } -+ /* decode length of match */ -+ val = 
getbyte(ibuf, i, l); -+ if ((val & 0x80) == 0x00) { /* len = 3 */ -+ len = 3; -+ bits--; -+ getbits(ibuf, 1 , &i ,&l); -+ } else if ((val & 0xc0) == 0x80) { /* 4 <= len < 8 */ -+ len = 0x04 | ((val>>4) & 0x03); -+ bits -= 4; -+ getbits(ibuf, 4 , &i ,&l); -+ } else if ((val & 0xe0) == 0xc0) { /* 8 <= len < 16 */ -+ len = 0x08 | ((val>>2) & 0x07); -+ bits -= 6; -+ getbits(ibuf, 6 , &i ,&l); -+ } else if ((val & 0xf0) == 0xe0) { /* 16 <= len < 32 */ -+ len = 0x10 | (val & 0x0f); -+ bits -= 8; -+ i++; -+ } else { -+ bits -= 8; -+ val = (val << 8) | getbyte(ibuf, ++i, l); -+ if ((val & 0xf800) == 0xf000) { /* 32 <= len < 64 */ -+ len = 0x0020 | ((val >> 6) & 0x001f); -+ bits -= 2; -+ getbits(ibuf, 2 , &i ,&l); -+ } else if ((val & 0xfc00) == 0xf800) { /* 64 <= len < 128 */ -+ len = 0x0040 | ((val >> 4) & 0x003f); -+ bits -= 4; -+ getbits(ibuf, 4 , &i ,&l); -+ } else if ((val & 0xfe00) == 0xfc00) { /* 128 <= len < 256 */ -+ len = 0x0080 | ((val >> 2) & 0x007f); -+ bits -= 6; -+ getbits(ibuf, 6 , &i ,&l); -+ } else if ((val & 0xff00) == 0xfe00) { /* 256 <= len < 512 */ -+ len = 0x0100 | (val & 0x00ff); -+ bits -= 8; -+ i++; -+ } else { -+ bits -= 8; -+ val = (val << 8) | getbyte(ibuf, ++i, l); -+ if ((val & 0xff8000) == 0xff0000) { /* 512 <= len < 1024 */ -+ len = 0x000200 | ((val >> 6) & 0x0001ff); -+ bits -= 2; -+ getbits(ibuf, 2 , &i ,&l); -+ } else if ((val & 0xffc000) == 0xff8000) {/* 1024 <= len < 2048 */ -+ len = 0x000400 | ((val >> 4) & 0x0003ff); -+ bits -= 4; -+ getbits(ibuf, 4 , &i ,&l); -+ } else if ((val & 0xffe000) == 0xffc000) {/* 2048 <= len < 4096 */ -+ len = 0x000800 | ((val >> 2) & 0x0007ff); -+ bits -= 6; -+ getbits(ibuf, 6 , &i ,&l); -+ } else if ((val & 0xfff000) == 0xffe000) {/* 4096 <= len < 8192 */ -+ len = 0x001000 | (val & 0x000fff); -+ bits -= 8; -+ i++; -+ } else { /* this shouldn't happen */ -+ if (state->debug) -+ printk(KERN_DEBUG "%s%d: wrong length code: 0x%X\n", -+ __FUNCTION__, state->unit, val); -+ return DECOMP_ERROR; -+ } -+ } -+ } -+ s = state->hist + state->histptr; -+ state->histptr += len; -+ olen += len; -+ if (state->histptr < 2*MPPE_HIST_LEN) { -+ /* copy uncompressed bytes to the history */ -+ -+ /* In some cases len may be greater than off. It means that memory -+ * areas pointed by s and s-off overlap. I had used memmove() here -+ * because I thought that it acts as libc's version. Unfortunately, -+ * I was wrong. :-) I got strange errors sometimes. Wilfried suggested -+ * using of byte by byte copying here and strange errors disappeared. 
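
The comment above describes exactly the LZ77 overlap case: when the match length exceeds the offset, the copy must re-read bytes it has just written so that a short pattern replicates forward; memmove(), which behaves as if it copied through a temporary buffer, does not do that. A user-space demonstration:

    #include <stdio.h>
    #include <string.h>

    static void lamecopy(unsigned char *dst, unsigned char *src, unsigned len)
    {
        while (len--)
            *dst++ = *src++;
    }

    int main(void)
    {
        unsigned char a[16] = "abc", b[16] = "abc";

        /* off = 1, len = 5: the single 'c' must replicate forward */
        lamecopy(a + 3, a + 2, 5);
        memmove(b + 3, b + 2, 5);

        printf("lamecopy: %s\n", (char *)a);   /* abcccccc               */
        printf("memmove:  %s\n", (char *)b);   /* abcc - the run is lost */
        return 0;
    }
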
-+ */ -+ lamecopy(s, s - off, len); -+ } else { -+ /* buffer overflow; drop packet */ -+ if (state->debug) -+ printk(KERN_ERR "%s%d: trying to write outside history " -+ "buffer\n", __FUNCTION__, state->unit); -+ return DECOMP_ERROR; -+ } -+ } -+ -+ /* Do PFC decompression */ -+ len = olen; -+ if ((history[0] & 0x01) != 0) { -+ obuf[0] = 0; -+ obuf++; -+ len++; -+ } -+ -+ if (len <= osize) { -+ /* copy uncompressed packet to the output buffer */ -+ memcpy(obuf, history, olen); -+ } else { -+ /* buffer overflow; drop packet */ -+ if (state->debug) -+ printk(KERN_ERR "%s%d: too big uncompressed packet: %d\n", -+ __FUNCTION__, state->unit, len + (PPP_HDRLEN / 2)); -+ return DECOMP_ERROR; -+ } -+ -+ return (int) len; -+} -+ -+int -+mppe_decompress(void *arg, unsigned char *ibuf, int isize, -+ unsigned char *obuf, int osize) -+{ -+ struct ppp_mppe_state *state = (struct ppp_mppe_state *)arg; -+ int seq, bits, uncomplen; -+ -+ if (isize <= PPP_HDRLEN + MPPE_OVHD) { -+ if (state->debug) { -+ printk(KERN_DEBUG "%s%d: short packet (len=%d)\n", __FUNCTION__, -+ state->unit, isize); -+ } -+ return DECOMP_ERROR; -+ } -+ -+ /* Get coherency counter and control bits from input buffer */ -+ seq = MPPE_CCOUNT(ibuf); -+ bits = MPPE_BITS(ibuf); -+ -+ if (state->stateless) { -+ /* RFC 3078, sec 8.1. */ -+ mppe_increase_ccount(state); -+ if ((seq != state->ccount) && state->debug) -+ printk(KERN_DEBUG "%s%d: bad sequence number: %d, expected: %d\n", -+ __FUNCTION__, state->unit, seq, state->ccount); -+ while (seq != state->ccount) -+ mppe_increase_ccount(state); -+ } else { -+ /* RFC 3078, sec 8.2. */ -+ if (state->flushexpected) { /* discard state */ -+ if ((bits & MPPE_BIT_FLUSHED)) { /* we received expected FLUSH bit */ -+ while (seq != state->ccount) -+ mppe_increase_ccount(state); -+ state->flushexpected = 0; -+ } else /* drop packet*/ -+ return DECOMP_ERROR; -+ } else { /* normal state */ -+ mppe_increase_ccount(state); -+ if (seq != state->ccount) { -+ /* Packet loss detected, enter the discard state. */ -+ if (state->debug) -+ printk(KERN_DEBUG "%s%d: bad sequence number: %d, expected: %d\n", -+ __FUNCTION__, state->unit, seq, state->ccount); -+ state->flushexpected = 1; -+ return DECOMP_ERROR; -+ } -+ } -+ if (state->mppe && (bits & MPPE_BIT_FLUSHED)) { -+ arc4_setkey(state, state->session_key, state->keylen); -+ } -+ } -+ -+ if (state->mppc && (bits & (MPPE_BIT_FLUSHED | MPPE_BIT_RESET))) { -+ state->histptr = MPPE_HIST_LEN; -+ if ((bits & MPPE_BIT_FLUSHED)) { -+ memset(state->hist + MPPE_HIST_LEN, 0, MPPE_HIST_LEN*sizeof(u8)); -+ } else -+ if ((bits & MPPE_BIT_RESET)) { -+ memcpy(state->hist, state->hist + MPPE_HIST_LEN, MPPE_HIST_LEN); -+ } -+ } -+ -+ /* Fill in the first part of the PPP header. The protocol field -+ comes from the decompressed data. */ -+ obuf[0] = PPP_ADDRESS(ibuf); -+ obuf[1] = PPP_CONTROL(ibuf); -+ obuf += PPP_HDRLEN / 2; -+ -+ if (state->mppe) { /* process encrypted packet */ -+ if ((bits & MPPE_BIT_ENCRYPTED)) { -+ /* OK, packet encrypted, so decrypt it */ -+ if (state->mppc && (bits & MPPE_BIT_COMP)) { -+ /* Hack warning !!! RC4 implementation which we use does -+ decryption "in place" - it means that input and output -+ buffers can be *the same* memory area. Therefore we don't -+ need to use a temporary buffer. But be careful - other -+ implementations don't have to be so nice. 
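
The resynchronization logic above follows RFC 3078: in stateless mode (sec 8.1) the decompressor spins its local counter, rekeying at every step, until it matches the received sequence number; in stateful mode (sec 8.2) a mismatch merely arms the discard state, and packets are dropped until one arrives with the FLUSHED bit set. A minimal model of the two paths, using only the counter arithmetic:

    #include <stdio.h>

    #define MAX_CCOUNT 0x0fff

    int main(void)
    {
        unsigned ccount = 10, seq = 14, steps = 0;

        /* stateless: catch up to seq, one rekey per increment */
        ccount = (ccount + 1) & MAX_CCOUNT;
        while (seq != ccount) {
            ccount = (ccount + 1) & MAX_CCOUNT;
            steps++;
        }
        printf("stateless: resynced after %u extra rekeys\n", steps);

        /* stateful: a mismatch just enters the discard state */
        ccount = 10;
        ccount = (ccount + 1) & MAX_CCOUNT;
        if (seq != ccount)
            printf("stateful: drop until a FLUSHED packet arrives\n");
        return 0;
    }
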
*/ -+ arc4_decrypt(state, ibuf + PPP_HDRLEN + (MPPE_OVHD / 2), isize - -+ PPP_HDRLEN - (MPPE_OVHD / 2), ibuf + PPP_HDRLEN + -+ (MPPE_OVHD / 2)); -+ uncomplen = mppc_decompress(state, ibuf + PPP_HDRLEN + -+ (MPPE_OVHD / 2), obuf, isize - -+ PPP_HDRLEN - (MPPE_OVHD / 2), -+ osize - (PPP_HDRLEN / 2)); -+ if (uncomplen == DECOMP_ERROR) { -+ state->flushexpected = 1; -+ return DECOMP_ERROR; -+ } -+ uncomplen += PPP_HDRLEN / 2; -+ (state->stats).comp_bytes += isize; -+ (state->stats).comp_packets++; -+ } else { -+ uncomplen = isize - MPPE_OVHD; -+ /* Decrypt the first byte in order to check if it is -+ compressed or uncompressed protocol field */ -+ arc4_decrypt(state, ibuf + PPP_HDRLEN + (MPPE_OVHD / 2), 1, obuf); -+ /* Do PFC decompression */ -+ if ((obuf[0] & 0x01) != 0) { -+ obuf[1] = obuf[0]; -+ obuf[0] = 0; -+ obuf++; -+ uncomplen++; -+ } -+ /* And finally, decrypt the rest of the frame. */ -+ arc4_decrypt(state, ibuf + PPP_HDRLEN + (MPPE_OVHD / 2) + 1, -+ isize - PPP_HDRLEN - (MPPE_OVHD / 2) - 1, obuf + 1); -+ (state->stats).inc_bytes += isize; -+ (state->stats).inc_packets++; -+ } -+ } else { /* this shouldn't happen */ -+ if (state->debug) -+ printk(KERN_ERR "%s%d: encryption negotiated but not an " -+ "encrypted packet received\n", __FUNCTION__, state->unit); -+ mppe_change_key(state, 0); -+ state->flushexpected = 1; -+ return DECOMP_ERROR; -+ } -+ } else { -+ if (state->mppc) { /* no MPPE, only MPPC */ -+ if ((bits & MPPE_BIT_COMP)) { -+ uncomplen = mppc_decompress(state, ibuf + PPP_HDRLEN + -+ (MPPE_OVHD / 2), obuf, isize - -+ PPP_HDRLEN - (MPPE_OVHD / 2), -+ osize - (PPP_HDRLEN / 2)); -+ if (uncomplen == DECOMP_ERROR) { -+ state->flushexpected = 1; -+ return DECOMP_ERROR; -+ } -+ uncomplen += PPP_HDRLEN / 2; -+ (state->stats).comp_bytes += isize; -+ (state->stats).comp_packets++; -+ } else { -+ memcpy(obuf, ibuf + PPP_HDRLEN + (MPPE_OVHD / 2), isize - -+ PPP_HDRLEN - (MPPE_OVHD / 2)); -+ uncomplen = isize - MPPE_OVHD; -+ (state->stats).inc_bytes += isize; -+ (state->stats).inc_packets++; -+ } -+ } else { /* this shouldn't happen */ -+ if (state->debug) -+ printk(KERN_ERR "%s%d: error - not an MPPC or MPPE frame " -+ "received\n", __FUNCTION__, state->unit); -+ state->flushexpected = 1; -+ return DECOMP_ERROR; -+ } -+ } -+ -+ (state->stats).unc_bytes += uncomplen; -+ (state->stats).unc_packets++; -+ -+ return uncomplen; -+} -+ -+ -+/************************************************************ -+ * Module interface table -+ ************************************************************/ -+ -+/* These are in ppp_generic.c */ -+extern int ppp_register_compressor (struct compressor *cp); -+extern void ppp_unregister_compressor (struct compressor *cp); -+ -+/* -+ * Functions exported to ppp_generic.c. -+ * -+ * In case of MPPC/MPPE there is no need to process incompressible data -+ * because such a data is sent in MPPC/MPPE frame. Therefore the (*incomp) -+ * callback function isn't needed. 
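
The first-byte test in the decrypt path above works because PPP protocol numbers have an odd low octet and an even high octet; if protocol-field compression (PFC) stripped the leading 0x00, bit 0 of the first decrypted byte is set and the byte must be shifted down to make room for the restored zero. The same check in isolation, with an illustrative payload:

    #include <stdio.h>

    int main(void)
    {
        /* 0x21 = IP with PFC applied; uncompressed would be 0x00 0x21 */
        unsigned char pfc[4] = { 0x21, 0xde, 0xad, 0x00 };
        unsigned char buf[5] = { 0 };
        unsigned len = 3, k;

        if (pfc[0] & 0x01) {            /* same test as the hunk above */
            buf[0] = 0;
            buf[1] = pfc[0];
            len++;
        }
        for (k = 1; k < 3; k++)
            buf[k + 1] = pfc[k];

        for (k = 0; k < len; k++)
            printf("%02x ", buf[k]);    /* 00 21 de ad */
        printf("\n");
        return 0;
    }
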
-+ */ -+struct compressor ppp_mppe = { -+ .compress_proto = CI_MPPE, -+ .comp_alloc = mppe_comp_alloc, -+ .comp_free = mppe_comp_free, -+ .comp_init = mppe_comp_init, -+ .comp_reset = mppe_comp_reset, -+ .compress = mppe_compress, -+ .comp_stat = mppe_stats, -+ .decomp_alloc = mppe_decomp_alloc, -+ .decomp_free = mppe_comp_free, -+ .decomp_init = mppe_decomp_init, -+ .decomp_reset = mppe_decomp_reset, -+ .decompress = mppe_decompress, -+ .incomp = NULL, -+ .decomp_stat = mppe_stats, -+ .owner = THIS_MODULE -+}; -+ -+/************************************************************ -+ * Module support routines -+ ************************************************************/ -+ -+int __init mppe_module_init(void) -+{ -+ int answer; -+ -+ if (!(crypto_alg_available("arc4", 0) && crypto_alg_available("sha1", 0))) { -+ printk(KERN_ERR "Kernel doesn't provide ARC4 and/or SHA1 algorithms " -+ "required by MPPE/MPPC. Check CryptoAPI configuration.\n"); -+ return -ENODEV; -+ } -+ -+ /* Allocate space for SHAPad1, SHAPad2 and ... */ -+ sha_pad = kmalloc(sizeof(struct sha_pad), GFP_KERNEL); -+ if (sha_pad == NULL) -+ return -ENOMEM; -+ /* ... initialize them */ -+ memset(sha_pad->sha_pad1, 0x00, sizeof(sha_pad->sha_pad1)); -+ memset(sha_pad->sha_pad2, 0xf2, sizeof(sha_pad->sha_pad2)); -+ -+ answer = ppp_register_compressor(&ppp_mppe); -+ if (answer == 0) { -+ printk(KERN_INFO "MPPE/MPPC encryption/compression module registered\n"); -+ } -+ return answer; -+} -+ -+void __exit mppe_module_cleanup(void) -+{ -+ kfree(sha_pad); -+ ppp_unregister_compressor(&ppp_mppe); -+ printk(KERN_INFO "MPPE/MPPC encryption/compression module unregistered\n"); -+} -+ -+module_init(mppe_module_init); -+module_exit(mppe_module_cleanup); -+ -+MODULE_AUTHOR("Jan Dubiec <jdx@slackware.pl>"); -+MODULE_DESCRIPTION("MPPE/MPPC encryption/compression module for Linux"); -+MODULE_VERSION("1.2"); -+MODULE_LICENSE("Dual BSD/GPL"); -+MODULE_ALIAS("ppp-compress-" __stringify(CI_MPPE)); -diff -ruN linux-2.6.12.orig/include/linux/ppp-comp.h linux-2.6.12/include/linux/ppp-comp.h ---- linux-2.6.12.orig/include/linux/ppp-comp.h 2004-12-24 22:33:47.000000000 +0100 -+++ linux-2.6.12/include/linux/ppp-comp.h 2005-06-28 20:07:01.000000000 +0200 -@@ -28,7 +28,7 @@ - */ - - /* -- * ==FILEVERSION 980319== -+ * ==FILEVERSION 20040509== - * - * NOTE TO MAINTAINERS: - * If you modify this file at all, please set the above date. -@@ -80,7 +80,7 @@ - - /* Compress a packet */ - int (*compress) (void *state, unsigned char *rptr, -- unsigned char *obuf, int isize, int osize); -+ unsigned char *obuf, int isize, int osize); - - /* Return compression statistics */ - void (*comp_stat) (void *state, struct compstat *stats); -@@ -101,7 +101,7 @@ - - /* Decompress a packet. */ - int (*decompress) (void *state, unsigned char *ibuf, int isize, -- unsigned char *obuf, int osize); -+ unsigned char *obuf, int osize); - - /* Update state for an incompressible packet received */ - void (*incomp) (void *state, unsigned char *ibuf, int icnt); -@@ -191,6 +191,42 @@ - #define DEFLATE_CHK_SEQUENCE 0 - - /* -+ * Definitions for MPPE/MPPC. 
-+ */ -+ -+#define CI_MPPE 18 /* config option for MPPE */ -+#define CILEN_MPPE 6 /* length of config option */ -+ -+#define MPPE_OVHD 4 /* MPPE overhead */ -+#define MPPE_MAX_KEY_LEN 16 /* largest key length (128-bit) */ -+ -+#define MPPE_STATELESS 0x01 /* configuration bit H */ -+#define MPPE_40BIT 0x20 /* configuration bit L */ -+#define MPPE_56BIT 0x80 /* configuration bit M */ -+#define MPPE_128BIT 0x40 /* configuration bit S */ -+#define MPPE_MPPC 0x01 /* configuration bit C */ -+ -+/* -+ * Definitions for Stac LZS. -+ */ -+ -+#define CI_LZS 17 /* config option for Stac LZS */ -+#define CILEN_LZS 5 /* length of config option */ -+ -+#define LZS_OVHD 4 /* max. LZS overhead */ -+#define LZS_HIST_LEN 2048 /* LZS history size */ -+#define LZS_MAX_CCOUNT 0x0FFF /* max. coherency counter value */ -+ -+#define LZS_MODE_NONE 0 -+#define LZS_MODE_LCB 1 -+#define LZS_MODE_CRC 2 -+#define LZS_MODE_SEQ 3 -+#define LZS_MODE_EXT 4 -+ -+#define LZS_EXT_BIT_FLUSHED 0x80 /* bit A */ -+#define LZS_EXT_BIT_COMP 0x20 /* bit C */ -+ -+/* - * Definitions for other, as yet unsupported, compression methods. - */ - diff --git a/openwrt/target/linux/linux-2.6/patches/generic/104-pf_ring.patch b/openwrt/target/linux/linux-2.6/patches/generic/104-pf_ring.patch deleted file mode 100644 index 759fb2cc92..0000000000 --- a/openwrt/target/linux/linux-2.6/patches/generic/104-pf_ring.patch +++ /dev/null @@ -1,5299 +0,0 @@ -diff --unified --recursive --new-file linux-2.6.12.5/include/linux/ring.h linux-2.6.12.5-1-686-smp-ring3/include/linux/ring.h ---- linux-2.6.12.5/include/linux/ring.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-2.6.12.5-1-686-smp-ring3/include/linux/ring.h 2005-10-22 23:50:44.951445250 +0200 -@@ -0,0 +1,108 @@ -+/* -+ * Definitions for packet ring -+ * -+ * 2004 - Luca Deri <deri@ntop.org> -+ */ -+#ifndef __RING_H -+#define __RING_H -+ -+ -+#define INCLUDE_MAC_INFO -+ -+#ifdef INCLUDE_MAC_INFO -+#define SKB_DISPLACEMENT 14 /* Include MAC address information */ -+#else -+#define SKB_DISPLACEMENT 0 /* Do NOT include MAC address information */ -+#endif -+ -+#define RING_MAGIC -+#define RING_MAGIC_VALUE 0x88 -+#define RING_FLOWSLOT_VERSION 5 -+#define RING_VERSION "3.0" -+ -+#define SO_ADD_TO_CLUSTER 99 -+#define SO_REMOVE_FROM_CLUSTER 100 -+#define SO_SET_REFLECTOR 101 -+ -+/* *********************************** */ -+ -+#ifndef HAVE_PCAP -+struct pcap_pkthdr { -+ struct timeval ts; /* time stamp */ -+ u_int32_t caplen; /* length of portion present */ -+ u_int32_t len; /* length this packet (off wire) */ -+}; -+#endif -+ -+/* *********************************** */ -+ -+enum cluster_type { -+ cluster_per_flow = 0, -+ cluster_round_robin -+}; -+ -+/* *********************************** */ -+ -+#define RING_MIN_SLOT_SIZE (60+sizeof(struct pcap_pkthdr)) -+#define RING_MAX_SLOT_SIZE (1514+sizeof(struct pcap_pkthdr)) -+ -+/* *********************************** */ -+ -+typedef struct flowSlotInfo { -+ u_int16_t version, sample_rate; -+ u_int32_t tot_slots, slot_len, tot_mem; -+ -+ u_int64_t tot_pkts, tot_lost; -+ u_int64_t tot_insert, tot_read; -+ u_int16_t insert_idx; -+ u_int16_t remove_idx; -+} FlowSlotInfo; -+ -+/* *********************************** */ -+ -+typedef struct flowSlot { -+#ifdef RING_MAGIC -+ u_char magic; /* It must alwasy be zero */ -+#endif -+ u_char slot_state; /* 0=empty, 1=full */ -+ u_char bucket; /* bucket[bucketLen] */ -+} FlowSlot; -+ -+/* *********************************** */ -+ -+#ifdef __KERNEL__ -+ -+FlowSlotInfo* getRingPtr(void); -+int allocateRing(char *deviceName, u_int 
numSlots, -+ u_int bucketLen, u_int sampleRate); -+unsigned int pollRing(struct file *fp, struct poll_table_struct * wait); -+void deallocateRing(void); -+ -+/* ************************* */ -+ -+typedef int (*handle_ring_skb)(struct sk_buff *skb, -+ u_char recv_packet, u_char real_skb); -+extern handle_ring_skb get_skb_ring_handler(void); -+extern void set_skb_ring_handler(handle_ring_skb the_handler); -+extern void do_skb_ring_handler(struct sk_buff *skb, -+ u_char recv_packet, u_char real_skb); -+ -+typedef int (*handle_ring_buffer)(struct net_device *dev, -+ char *data, int len); -+extern handle_ring_buffer get_buffer_ring_handler(void); -+extern void set_buffer_ring_handler(handle_ring_buffer the_handler); -+extern int do_buffer_ring_handler(struct net_device *dev, -+ char *data, int len); -+#endif /* __KERNEL__ */ -+ -+/* *********************************** */ -+ -+#define PF_RING 27 /* Packet Ring */ -+#define SOCK_RING PF_RING -+ -+/* ioctl() */ -+#define SIORINGPOLL 0x8888 -+ -+/* *********************************** */ -+ -+#endif /* __RING_H */ -diff --unified --recursive --new-file linux-2.6.12.5/net/Kconfig linux-2.6.12.5-1-686-smp-ring3/net/Kconfig ---- linux-2.6.12.5/net/Kconfig 2005-08-15 02:20:18.000000000 +0200 -+++ linux-2.6.12.5-1-686-smp-ring3/net/Kconfig 2005-10-22 23:50:45.535481750 +0200 -@@ -72,6 +72,7 @@ - - Say Y unless you know what you are doing. - -+source "net/ring/Kconfig" - config INET - bool "TCP/IP networking" - ---help--- -diff --unified --recursive --new-file linux-2.6.12.5/net/Makefile linux-2.6.12.5-1-686-smp-ring3/net/Makefile ---- linux-2.6.12.5/net/Makefile 2005-08-15 02:20:18.000000000 +0200 -+++ linux-2.6.12.5-1-686-smp-ring3/net/Makefile 2005-10-22 23:50:45.491479000 +0200 -@@ -41,6 +41,7 @@ - obj-$(CONFIG_DECNET) += decnet/ - obj-$(CONFIG_ECONET) += econet/ - obj-$(CONFIG_VLAN_8021Q) += 8021q/ -+obj-$(CONFIG_RING) += ring/ - obj-$(CONFIG_IP_SCTP) += sctp/ - - ifeq ($(CONFIG_NET),y) -diff --unified --recursive --new-file linux-2.6.12.5/net/Makefile.ORG linux-2.6.12.5-1-686-smp-ring3/net/Makefile.ORG ---- linux-2.6.12.5/net/Makefile.ORG 1970-01-01 01:00:00.000000000 +0100 -+++ linux-2.6.12.5-1-686-smp-ring3/net/Makefile.ORG 2005-10-22 23:50:45.483478500 +0200 -@@ -0,0 +1,48 @@ -+# -+# Makefile for the linux networking. -+# -+# 2 Sep 2000, Christoph Hellwig <hch@infradead.org> -+# Rewritten to use lists instead of if-statements. 
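
ring.h above is the whole user-visible contract of the PF_RING patch removed in this hunk: a socket family number, three setsockopt codes, and the FlowSlotInfo/FlowSlot layout of a shared slot ring. How a capture tool might have consumed it - a hedged sketch only, since the mmap usage and exact structure packing are implied by the header rather than spelled out:

    #include <stdio.h>
    #include <stdint.h>
    #include <sys/socket.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #define PF_RING 27                  /* from ring.h above */

    /* reduced copy of the FlowSlotInfo header that starts the shared ring;
       field packing may differ from the kernel's - illustrative only */
    typedef struct {
        uint16_t version, sample_rate;
        uint32_t tot_slots, slot_len, tot_mem;
        uint64_t tot_pkts, tot_lost, tot_insert, tot_read;
        uint16_t insert_idx, remove_idx;
    } FlowSlotInfo;

    int main(void)
    {
        int fd = socket(PF_RING, SOCK_RAW, 0);   /* assumed family/type */

        if (fd < 0) {
            perror("socket(PF_RING)");           /* expected on stock kernels */
            return 1;
        }

        /* assumption: the ring is exported by mmap()ing the socket; only
           the first page is mapped here - tot_mem gives the real size */
        FlowSlotInfo *info = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                                  MAP_SHARED, fd, 0);
        if (info != MAP_FAILED) {
            printf("%u slots x %u bytes, %llu pkts, %llu lost\n",
                   info->tot_slots, info->slot_len,
                   (unsigned long long)info->tot_pkts,
                   (unsigned long long)info->tot_lost);
            munmap(info, 4096);
        }
        close(fd);
        return 0;
    }
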
-+# -+ -+obj-y := nonet.o -+ -+obj-$(CONFIG_NET) := socket.o core/ -+ -+tmp-$(CONFIG_COMPAT) := compat.o -+obj-$(CONFIG_NET) += $(tmp-y) -+ -+# LLC has to be linked before the files in net/802/ -+obj-$(CONFIG_LLC) += llc/ -+obj-$(CONFIG_NET) += ethernet/ 802/ sched/ netlink/ -+obj-$(CONFIG_INET) += ipv4/ -+obj-$(CONFIG_XFRM) += xfrm/ -+obj-$(CONFIG_UNIX) += unix/ -+ifneq ($(CONFIG_IPV6),) -+obj-y += ipv6/ -+endif -+obj-$(CONFIG_PACKET) += packet/ -+obj-$(CONFIG_NET_KEY) += key/ -+obj-$(CONFIG_NET_SCHED) += sched/ -+obj-$(CONFIG_BRIDGE) += bridge/ -+obj-$(CONFIG_IPX) += ipx/ -+obj-$(CONFIG_ATALK) += appletalk/ -+obj-$(CONFIG_WAN_ROUTER) += wanrouter/ -+obj-$(CONFIG_X25) += x25/ -+obj-$(CONFIG_LAPB) += lapb/ -+obj-$(CONFIG_NETROM) += netrom/ -+obj-$(CONFIG_ROSE) += rose/ -+obj-$(CONFIG_AX25) += ax25/ -+obj-$(CONFIG_IRDA) += irda/ -+obj-$(CONFIG_BT) += bluetooth/ -+obj-$(CONFIG_SUNRPC) += sunrpc/ -+obj-$(CONFIG_RXRPC) += rxrpc/ -+obj-$(CONFIG_ATM) += atm/ -+obj-$(CONFIG_DECNET) += decnet/ -+obj-$(CONFIG_ECONET) += econet/ -+obj-$(CONFIG_VLAN_8021Q) += 8021q/ -+obj-$(CONFIG_IP_SCTP) += sctp/ -+ -+ifeq ($(CONFIG_NET),y) -+obj-$(CONFIG_SYSCTL) += sysctl_net.o -+endif -diff --unified --recursive --new-file linux-2.6.12.5/net/core/dev.c linux-2.6.12.5-1-686-smp-ring3/net/core/dev.c ---- linux-2.6.12.5/net/core/dev.c 2005-08-15 02:20:18.000000000 +0200 -+++ linux-2.6.12.5-1-686-smp-ring3/net/core/dev.c 2005-10-22 23:50:45.479478250 +0200 -@@ -115,6 +115,56 @@ - #endif /* CONFIG_NET_RADIO */ - #include <asm/current.h> - -+#if defined (CONFIG_RING) || defined(CONFIG_RING_MODULE) -+ -+/* #define RING_DEBUG */ -+ -+#include <linux/ring.h> -+#include <linux/version.h> -+ -+static handle_ring_skb ring_handler = NULL; -+ -+handle_ring_skb get_skb_ring_handler() { return(ring_handler); } -+ -+void set_skb_ring_handler(handle_ring_skb the_handler) { -+ ring_handler = the_handler; -+} -+ -+void do_skb_ring_handler(struct sk_buff *skb, -+ u_char recv_packet, u_char real_skb) { -+ if(ring_handler) -+ ring_handler(skb, recv_packet, real_skb); -+} -+ -+/* ******************* */ -+ -+static handle_ring_buffer buffer_ring_handler = NULL; -+ -+handle_ring_buffer get_buffer_ring_handler() { return(buffer_ring_handler); } -+ -+void set_buffer_ring_handler(handle_ring_buffer the_handler) { -+ buffer_ring_handler = the_handler; -+} -+ -+int do_buffer_ring_handler(struct net_device *dev, char *data, int len) { -+ if(buffer_ring_handler) { -+ buffer_ring_handler(dev, data, len); -+ return(1); -+ } else -+ return(0); -+} -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)) -+EXPORT_SYMBOL(get_skb_ring_handler); -+EXPORT_SYMBOL(set_skb_ring_handler); -+EXPORT_SYMBOL(do_skb_ring_handler); -+ -+EXPORT_SYMBOL(get_buffer_ring_handler); -+EXPORT_SYMBOL(set_buffer_ring_handler); -+EXPORT_SYMBOL(do_buffer_ring_handler); -+#endif -+ -+#endif - /* This define, if set, will randomly drop a packet when congestion - * is more than moderate. 
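
The dev.c hunk of the removed patch above exposes a single global hook: a module installs a handle_ring_skb callback through set_skb_ring_handler(), and every sk_buff on the transmit and receive paths is offered to it; on receive, a nonzero return tells netif_rx()/netif_receive_skb() (see the hunks that follow) that the ring consumed the packet. A minimal client-module sketch, modeled only on the typedef and setter above (disarming the hook with NULL is an assumption):

    #include <linux/module.h>
    #include <linux/kernel.h>
    #include <linux/skbuff.h>
    #include <linux/ring.h>

    static unsigned long seen;

    /* offered every skb; returning 0 lets normal delivery continue */
    static int count_skb(struct sk_buff *skb, u_char recv_packet,
                         u_char real_skb)
    {
        seen++;
        return 0;
    }

    static int __init skbcount_init(void)
    {
        set_skb_ring_handler(count_skb);
        return 0;
    }

    static void __exit skbcount_exit(void)
    {
        set_skb_ring_handler(NULL);   /* assumption: NULL disarms the hook */
        printk(KERN_INFO "skbcount: saw %lu skbs\n", seen);
    }

    module_init(skbcount_init);
    module_exit(skbcount_exit);
    MODULE_LICENSE("GPL");
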
It helps fairness in the multi-interface - * case when one of them is a hog, but it kills performance for the -@@ -1293,6 +1343,10 @@ - skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS); - #endif - if (q->enqueue) { -+#if defined (CONFIG_RING) || defined(CONFIG_RING_MODULE) -+ if(ring_handler) ring_handler(skb, 0, 1); -+#endif /* CONFIG_RING */ -+ - /* Grab device queue */ - spin_lock(&dev->queue_lock); - -@@ -1509,6 +1563,13 @@ - - preempt_disable(); - err = netif_rx(skb); -+#if defined (CONFIG_RING) || defined(CONFIG_RING_MODULE) -+ if(ring_handler && ring_handler(skb, 1, 1)) { -+ /* The packet has been copied into a ring */ -+ return(NET_RX_SUCCESS); -+ } -+#endif /* CONFIG_RING */ -+ - if (local_softirq_pending()) - do_softirq(); - preempt_enable(); -@@ -1655,6 +1716,13 @@ - int ret = NET_RX_DROP; - unsigned short type; - -+#if defined (CONFIG_RING) || defined(CONFIG_RING_MODULE) -+ if(ring_handler && ring_handler(skb, 1, 1)) { -+ /* The packet has been copied into a ring */ -+ return(NET_RX_SUCCESS); -+ } -+#endif /* CONFIG_RING */ -+ - /* if we've gotten here through NAPI, check netpoll */ - if (skb->dev->poll && netpoll_rx(skb)) - return NET_RX_DROP; -diff --unified --recursive --new-file linux-2.6.12.5/net/core/dev.c.ORG linux-2.6.12.5-1-686-smp-ring3/net/core/dev.c.ORG ---- linux-2.6.12.5/net/core/dev.c.ORG 1970-01-01 01:00:00.000000000 +0100 -+++ linux-2.6.12.5-1-686-smp-ring3/net/core/dev.c.ORG 2005-10-22 23:50:45.203461000 +0200 -@@ -0,0 +1,3385 @@ -+/* -+ * NET3 Protocol independent device support routines. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version -+ * 2 of the License, or (at your option) any later version. -+ * -+ * Derived from the non IP parts of dev.c 1.0.19 -+ * Authors: Ross Biro -+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> -+ * Mark Evans, <evansmp@uhura.aston.ac.uk> -+ * -+ * Additional Authors: -+ * Florian la Roche <rzsfl@rz.uni-sb.de> -+ * Alan Cox <gw4pts@gw4pts.ampr.org> -+ * David Hinds <dahinds@users.sourceforge.net> -+ * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> -+ * Adam Sulmicki <adam@cfar.umd.edu> -+ * Pekka Riikonen <priikone@poesidon.pspt.fi> -+ * -+ * Changes: -+ * D.J. Barrow : Fixed bug where dev->refcnt gets set -+ * to 2 if register_netdev gets called -+ * before net_dev_init & also removed a -+ * few lines of code in the process. -+ * Alan Cox : device private ioctl copies fields back. -+ * Alan Cox : Transmit queue code does relevant -+ * stunts to keep the queue safe. -+ * Alan Cox : Fixed double lock. -+ * Alan Cox : Fixed promisc NULL pointer trap -+ * ???????? : Support the full private ioctl range -+ * Alan Cox : Moved ioctl permission check into -+ * drivers -+ * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI -+ * Alan Cox : 100 backlog just doesn't cut it when -+ * you start doing multicast video 8) -+ * Alan Cox : Rewrote net_bh and list manager. -+ * Alan Cox : Fix ETH_P_ALL echoback lengths. -+ * Alan Cox : Took out transmit every packet pass -+ * Saved a few bytes in the ioctl handler -+ * Alan Cox : Network driver sets packet type before -+ * calling netif_rx. Saves a function -+ * call a packet. -+ * Alan Cox : Hashed net_bh() -+ * Richard Kooijman: Timestamp fixes. -+ * Alan Cox : Wrong field in SIOCGIFDSTADDR -+ * Alan Cox : Device lock protection. -+ * Alan Cox : Fixed nasty side effect of device close -+ * changes. 
-+ * Rudi Cilibrasi : Pass the right thing to -+ * set_mac_address() -+ * Dave Miller : 32bit quantity for the device lock to -+ * make it work out on a Sparc. -+ * Bjorn Ekwall : Added KERNELD hack. -+ * Alan Cox : Cleaned up the backlog initialise. -+ * Craig Metz : SIOCGIFCONF fix if space for under -+ * 1 device. -+ * Thomas Bogendoerfer : Return ENODEV for dev_open, if there -+ * is no device open function. -+ * Andi Kleen : Fix error reporting for SIOCGIFCONF -+ * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF -+ * Cyrus Durgin : Cleaned for KMOD -+ * Adam Sulmicki : Bug Fix : Network Device Unload -+ * A network device unload needs to purge -+ * the backlog queue. -+ * Paul Rusty Russell : SIOCSIFNAME -+ * Pekka Riikonen : Netdev boot-time settings code -+ * Andrew Morton : Make unregister_netdevice wait -+ * indefinitely on dev->refcnt -+ * J Hadi Salim : - Backlog queue sampling -+ * - netif_rx() feedback -+ */ -+ -+#include <asm/uaccess.h> -+#include <asm/system.h> -+#include <linux/bitops.h> -+#include <linux/config.h> -+#include <linux/cpu.h> -+#include <linux/types.h> -+#include <linux/kernel.h> -+#include <linux/sched.h> -+#include <linux/string.h> -+#include <linux/mm.h> -+#include <linux/socket.h> -+#include <linux/sockios.h> -+#include <linux/errno.h> -+#include <linux/interrupt.h> -+#include <linux/if_ether.h> -+#include <linux/netdevice.h> -+#include <linux/etherdevice.h> -+#include <linux/notifier.h> -+#include <linux/skbuff.h> -+#include <net/sock.h> -+#include <linux/rtnetlink.h> -+#include <linux/proc_fs.h> -+#include <linux/seq_file.h> -+#include <linux/stat.h> -+#include <linux/if_bridge.h> -+#include <linux/divert.h> -+#include <net/dst.h> -+#include <net/pkt_sched.h> -+#include <net/checksum.h> -+#include <linux/highmem.h> -+#include <linux/init.h> -+#include <linux/kmod.h> -+#include <linux/module.h> -+#include <linux/kallsyms.h> -+#include <linux/netpoll.h> -+#include <linux/rcupdate.h> -+#include <linux/delay.h> -+#ifdef CONFIG_NET_RADIO -+#include <linux/wireless.h> /* Note : will define WIRELESS_EXT */ -+#include <net/iw_handler.h> -+#endif /* CONFIG_NET_RADIO */ -+#include <asm/current.h> -+ -+/* This define, if set, will randomly drop a packet when congestion -+ * is more than moderate. It helps fairness in the multi-interface -+ * case when one of them is a hog, but it kills performance for the -+ * single interface case so it is off now by default. -+ */ -+#undef RAND_LIE -+ -+/* Setting this will sample the queue lengths and thus congestion -+ * via a timer instead of as each packet is received. -+ */ -+#undef OFFLINE_SAMPLE -+ -+/* -+ * The list of packet types we will receive (as opposed to discard) -+ * and the routines to invoke. -+ * -+ * Why 16. Because with 16 the only overlap we get on a hash of the -+ * low nibble of the protocol value is RARP/SNAP/X.25. -+ * -+ * NOTE: That is no longer true with the addition of VLAN tags. Not -+ * sure which should go first, but I bet it won't make much -+ * difference if we are running VLANs. The good news is that -+ * this protocol won't be in the list unless compiled in, so -+ * the average user (w/out VLANs) will not be adversly affected. 
-+ * --BLG -+ * -+ * 0800 IP -+ * 8100 802.1Q VLAN -+ * 0001 802.3 -+ * 0002 AX.25 -+ * 0004 802.2 -+ * 8035 RARP -+ * 0005 SNAP -+ * 0805 X.25 -+ * 0806 ARP -+ * 8137 IPX -+ * 0009 Localtalk -+ * 86DD IPv6 -+ */ -+ -+static DEFINE_SPINLOCK(ptype_lock); -+static struct list_head ptype_base[16]; /* 16 way hashed list */ -+static struct list_head ptype_all; /* Taps */ -+ -+#ifdef OFFLINE_SAMPLE -+static void sample_queue(unsigned long dummy); -+static struct timer_list samp_timer = TIMER_INITIALIZER(sample_queue, 0, 0); -+#endif -+ -+/* -+ * The @dev_base list is protected by @dev_base_lock and the rtln -+ * semaphore. -+ * -+ * Pure readers hold dev_base_lock for reading. -+ * -+ * Writers must hold the rtnl semaphore while they loop through the -+ * dev_base list, and hold dev_base_lock for writing when they do the -+ * actual updates. This allows pure readers to access the list even -+ * while a writer is preparing to update it. -+ * -+ * To put it another way, dev_base_lock is held for writing only to -+ * protect against pure readers; the rtnl semaphore provides the -+ * protection against other writers. -+ * -+ * See, for example usages, register_netdevice() and -+ * unregister_netdevice(), which must be called with the rtnl -+ * semaphore held. -+ */ -+struct net_device *dev_base; -+static struct net_device **dev_tail = &dev_base; -+DEFINE_RWLOCK(dev_base_lock); -+ -+EXPORT_SYMBOL(dev_base); -+EXPORT_SYMBOL(dev_base_lock); -+ -+#define NETDEV_HASHBITS 8 -+static struct hlist_head dev_name_head[1<<NETDEV_HASHBITS]; -+static struct hlist_head dev_index_head[1<<NETDEV_HASHBITS]; -+ -+static inline struct hlist_head *dev_name_hash(const char *name) -+{ -+ unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ)); -+ return &dev_name_head[hash & ((1<<NETDEV_HASHBITS)-1)]; -+} -+ -+static inline struct hlist_head *dev_index_hash(int ifindex) -+{ -+ return &dev_index_head[ifindex & ((1<<NETDEV_HASHBITS)-1)]; -+} -+ -+/* -+ * Our notifier list -+ */ -+ -+static struct notifier_block *netdev_chain; -+ -+/* -+ * Device drivers call our routines to queue packets here. We empty the -+ * queue in the local softnet handler. -+ */ -+DEFINE_PER_CPU(struct softnet_data, softnet_data) = { 0, }; -+ -+#ifdef CONFIG_SYSFS -+extern int netdev_sysfs_init(void); -+extern int netdev_register_sysfs(struct net_device *); -+extern void netdev_unregister_sysfs(struct net_device *); -+#else -+#define netdev_sysfs_init() (0) -+#define netdev_register_sysfs(dev) (0) -+#define netdev_unregister_sysfs(dev) do { } while(0) -+#endif -+ -+ -+/******************************************************************************* -+ -+ Protocol management and registration routines -+ -+*******************************************************************************/ -+ -+/* -+ * For efficiency -+ */ -+ -+int netdev_nit; -+ -+/* -+ * Add a protocol ID to the list. Now that the input handler is -+ * smarter we can dispense with all the messy stuff that used to be -+ * here. -+ * -+ * BEWARE!!! Protocol handlers, mangling input packets, -+ * MUST BE last in hash buckets and checking protocol handlers -+ * MUST start from promiscuous ptype_all chain in net_bh. -+ * It is true now, do not change it. -+ * Explanation follows: if protocol handler, mangling packet, will -+ * be the first on list, it is not able to sense, that packet -+ * is cloned and should be copied-on-write, so that it will -+ * change it and subsequent readers will get broken packet. 
-+ * --ANK (980803) -+ */ -+ -+/** -+ * dev_add_pack - add packet handler -+ * @pt: packet type declaration -+ * -+ * Add a protocol handler to the networking stack. The passed &packet_type -+ * is linked into kernel lists and may not be freed until it has been -+ * removed from the kernel lists. -+ * -+ * This call does not sleep therefore it can not -+ * guarantee all CPU's that are in middle of receiving packets -+ * will see the new packet type (until the next received packet). -+ */ -+ -+void dev_add_pack(struct packet_type *pt) -+{ -+ int hash; -+ -+ spin_lock_bh(&ptype_lock); -+ if (pt->type == htons(ETH_P_ALL)) { -+ netdev_nit++; -+ list_add_rcu(&pt->list, &ptype_all); -+ } else { -+ hash = ntohs(pt->type) & 15; -+ list_add_rcu(&pt->list, &ptype_base[hash]); -+ } -+ spin_unlock_bh(&ptype_lock); -+} -+ -+extern void linkwatch_run_queue(void); -+ -+ -+ -+/** -+ * __dev_remove_pack - remove packet handler -+ * @pt: packet type declaration -+ * -+ * Remove a protocol handler that was previously added to the kernel -+ * protocol handlers by dev_add_pack(). The passed &packet_type is removed -+ * from the kernel lists and can be freed or reused once this function -+ * returns. -+ * -+ * The packet type might still be in use by receivers -+ * and must not be freed until after all the CPU's have gone -+ * through a quiescent state. -+ */ -+void __dev_remove_pack(struct packet_type *pt) -+{ -+ struct list_head *head; -+ struct packet_type *pt1; -+ -+ spin_lock_bh(&ptype_lock); -+ -+ if (pt->type == htons(ETH_P_ALL)) { -+ netdev_nit--; -+ head = &ptype_all; -+ } else -+ head = &ptype_base[ntohs(pt->type) & 15]; -+ -+ list_for_each_entry(pt1, head, list) { -+ if (pt == pt1) { -+ list_del_rcu(&pt->list); -+ goto out; -+ } -+ } -+ -+ printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt); -+out: -+ spin_unlock_bh(&ptype_lock); -+} -+/** -+ * dev_remove_pack - remove packet handler -+ * @pt: packet type declaration -+ * -+ * Remove a protocol handler that was previously added to the kernel -+ * protocol handlers by dev_add_pack(). The passed &packet_type is removed -+ * from the kernel lists and can be freed or reused once this function -+ * returns. -+ * -+ * This call sleeps to guarantee that no CPU is looking at the packet -+ * type after return. -+ */ -+void dev_remove_pack(struct packet_type *pt) -+{ -+ __dev_remove_pack(pt); -+ -+ synchronize_net(); -+} -+ -+/****************************************************************************** -+ -+ Device Boot-time Settings Routines -+ -+*******************************************************************************/ -+ -+/* Boot time configuration table */ -+static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX]; -+ -+/** -+ * netdev_boot_setup_add - add new setup entry -+ * @name: name of the device -+ * @map: configured settings for the device -+ * -+ * Adds new setup entry to the dev_boot_setup list. The function -+ * returns 0 on error and 1 on success. This is a generic routine to -+ * all netdevices. -+ */ -+static int netdev_boot_setup_add(char *name, struct ifmap *map) -+{ -+ struct netdev_boot_setup *s; -+ int i; -+ -+ s = dev_boot_setup; -+ for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) { -+ if (s[i].name[0] == '\0' || s[i].name[0] == ' ') { -+ memset(s[i].name, 0, sizeof(s[i].name)); -+ strcpy(s[i].name, name); -+ memcpy(&s[i].map, map, sizeof(s[i].map)); -+ break; -+ } -+ } -+ -+ return i >= NETDEV_BOOT_SETUP_MAX ? 
0 : 1; -+} -+ -+/** -+ * netdev_boot_setup_check - check boot time settings -+ * @dev: the netdevice -+ * -+ * Check boot time settings for the device. -+ * The found settings are set for the device to be used -+ * later in the device probing. -+ * Returns 0 if no settings found, 1 if they are. -+ */ -+int netdev_boot_setup_check(struct net_device *dev) -+{ -+ struct netdev_boot_setup *s = dev_boot_setup; -+ int i; -+ -+ for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) { -+ if (s[i].name[0] != '\0' && s[i].name[0] != ' ' && -+ !strncmp(dev->name, s[i].name, strlen(s[i].name))) { -+ dev->irq = s[i].map.irq; -+ dev->base_addr = s[i].map.base_addr; -+ dev->mem_start = s[i].map.mem_start; -+ dev->mem_end = s[i].map.mem_end; -+ return 1; -+ } -+ } -+ return 0; -+} -+ -+ -+/** -+ * netdev_boot_base - get address from boot time settings -+ * @prefix: prefix for network device -+ * @unit: id for network device -+ * -+ * Check boot time settings for the base address of device. -+ * The found settings are set for the device to be used -+ * later in the device probing. -+ * Returns 0 if no settings found. -+ */ -+unsigned long netdev_boot_base(const char *prefix, int unit) -+{ -+ const struct netdev_boot_setup *s = dev_boot_setup; -+ char name[IFNAMSIZ]; -+ int i; -+ -+ sprintf(name, "%s%d", prefix, unit); -+ -+ /* -+ * If device already registered then return base of 1 -+ * to indicate not to probe for this interface -+ */ -+ if (__dev_get_by_name(name)) -+ return 1; -+ -+ for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) -+ if (!strcmp(name, s[i].name)) -+ return s[i].map.base_addr; -+ return 0; -+} -+ -+/* -+ * Saves at boot time configured settings for any netdevice. -+ */ -+int __init netdev_boot_setup(char *str) -+{ -+ int ints[5]; -+ struct ifmap map; -+ -+ str = get_options(str, ARRAY_SIZE(ints), ints); -+ if (!str || !*str) -+ return 0; -+ -+ /* Save settings */ -+ memset(&map, 0, sizeof(map)); -+ if (ints[0] > 0) -+ map.irq = ints[1]; -+ if (ints[0] > 1) -+ map.base_addr = ints[2]; -+ if (ints[0] > 2) -+ map.mem_start = ints[3]; -+ if (ints[0] > 3) -+ map.mem_end = ints[4]; -+ -+ /* Add new entry to the list */ -+ return netdev_boot_setup_add(str, &map); -+} -+ -+__setup("netdev=", netdev_boot_setup); -+ -+/******************************************************************************* -+ -+ Device Interface Subroutines -+ -+*******************************************************************************/ -+ -+/** -+ * __dev_get_by_name - find a device by its name -+ * @name: name to find -+ * -+ * Find an interface by name. Must be called under RTNL semaphore -+ * or @dev_base_lock. If the name is found a pointer to the device -+ * is returned. If the name is not found then %NULL is returned. The -+ * reference counters are not incremented so the caller must be -+ * careful with locks. -+ */ -+ -+struct net_device *__dev_get_by_name(const char *name) -+{ -+ struct hlist_node *p; -+ -+ hlist_for_each(p, dev_name_hash(name)) { -+ struct net_device *dev -+ = hlist_entry(p, struct net_device, name_hlist); -+ if (!strncmp(dev->name, name, IFNAMSIZ)) -+ return dev; -+ } -+ return NULL; -+} -+ -+/** -+ * dev_get_by_name - find a device by its name -+ * @name: name to find -+ * -+ * Find an interface by name. This can be called from any -+ * context and does its own locking. The returned handle has -+ * the usage count incremented and the caller must use dev_put() to -+ * release it when it is no longer needed. %NULL is returned if no -+ * matching device is found. 
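
Worth pinning down from the boot-time code above: netdev_boot_setup() relies on get_options() to peel up to four leading integers off the "netdev=" string, and whatever remains is taken as the interface name. A rough stand-in for that parse in standalone C (the option string, struct, and field handling are invented for this sketch):

#include <stdio.h>
#include <stdlib.h>

struct ifmap_model { int irq; long base_addr, mem_start, mem_end; };

int main(void)
{
	char arg[] = "5,0x300,eth0";	/* hypothetical "netdev=" tail */
	struct ifmap_model map = { 0, 0, 0, 0 };
	long vals[4];
	char *s = arg, *end;
	int n = 0;

	/* Consume up to four leading integers, comma separated, as
	 * get_options() does; whatever is left is the device name. */
	while (n < 4) {
		long v = strtol(s, &end, 0);
		if (end == s)
			break;
		vals[n++] = v;
		s = (*end == ',') ? end + 1 : end;
	}
	if (n > 0) map.irq = (int)vals[0];
	if (n > 1) map.base_addr = vals[1];
	if (n > 2) map.mem_start = vals[2];
	if (n > 3) map.mem_end = vals[3];

	printf("name=%s irq=%d base=0x%lx\n", s, map.irq, map.base_addr);
	return 0;
}

With the input "5,0x300,eth0" this prints name=eth0 irq=5 base=0x300, mirroring how the kernel fills the ifmap before netdev_boot_setup_add().
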
-+ */ -+ -+struct net_device *dev_get_by_name(const char *name) -+{ -+ struct net_device *dev; -+ -+ read_lock(&dev_base_lock); -+ dev = __dev_get_by_name(name); -+ if (dev) -+ dev_hold(dev); -+ read_unlock(&dev_base_lock); -+ return dev; -+} -+ -+/** -+ * __dev_get_by_index - find a device by its ifindex -+ * @ifindex: index of device -+ * -+ * Search for an interface by index. Returns %NULL if the device -+ * is not found or a pointer to the device. The device has not -+ * had its reference counter increased so the caller must be careful -+ * about locking. The caller must hold either the RTNL semaphore -+ * or @dev_base_lock. -+ */ -+ -+struct net_device *__dev_get_by_index(int ifindex) -+{ -+ struct hlist_node *p; -+ -+ hlist_for_each(p, dev_index_hash(ifindex)) { -+ struct net_device *dev -+ = hlist_entry(p, struct net_device, index_hlist); -+ if (dev->ifindex == ifindex) -+ return dev; -+ } -+ return NULL; -+} -+ -+ -+/** -+ * dev_get_by_index - find a device by its ifindex -+ * @ifindex: index of device -+ * -+ * Search for an interface by index. Returns NULL if the device -+ * is not found or a pointer to the device. The device returned has -+ * had a reference added and the pointer is safe until the user calls -+ * dev_put to indicate they have finished with it. -+ */ -+ -+struct net_device *dev_get_by_index(int ifindex) -+{ -+ struct net_device *dev; -+ -+ read_lock(&dev_base_lock); -+ dev = __dev_get_by_index(ifindex); -+ if (dev) -+ dev_hold(dev); -+ read_unlock(&dev_base_lock); -+ return dev; -+} -+ -+/** -+ * dev_getbyhwaddr - find a device by its hardware address -+ * @type: media type of device -+ * @ha: hardware address -+ * -+ * Search for an interface by MAC address. Returns NULL if the device -+ * is not found or a pointer to the device. The caller must hold the -+ * rtnl semaphore. The returned device has not had its ref count increased -+ * and the caller must therefore be careful about locking -+ * -+ * BUGS: -+ * If the API was consistent this would be __dev_get_by_hwaddr -+ */ -+ -+struct net_device *dev_getbyhwaddr(unsigned short type, char *ha) -+{ -+ struct net_device *dev; -+ -+ ASSERT_RTNL(); -+ -+ for (dev = dev_base; dev; dev = dev->next) -+ if (dev->type == type && -+ !memcmp(dev->dev_addr, ha, dev->addr_len)) -+ break; -+ return dev; -+} -+ -+struct net_device *dev_getfirstbyhwtype(unsigned short type) -+{ -+ struct net_device *dev; -+ -+ rtnl_lock(); -+ for (dev = dev_base; dev; dev = dev->next) { -+ if (dev->type == type) { -+ dev_hold(dev); -+ break; -+ } -+ } -+ rtnl_unlock(); -+ return dev; -+} -+ -+EXPORT_SYMBOL(dev_getfirstbyhwtype); -+ -+/** -+ * dev_get_by_flags - find any device with given flags -+ * @if_flags: IFF_* values -+ * @mask: bitmask of bits in if_flags to check -+ * -+ * Search for any interface with the given flags. Returns NULL if a device -+ * is not found or a pointer to the device. The device returned has -+ * had a reference added and the pointer is safe until the user calls -+ * dev_put to indicate they have finished with it. 
-+ */
-+
-+struct net_device * dev_get_by_flags(unsigned short if_flags, unsigned short mask)
-+{
-+ struct net_device *dev;
-+
-+ read_lock(&dev_base_lock);
-+ for (dev = dev_base; dev != NULL; dev = dev->next) {
-+ if (((dev->flags ^ if_flags) & mask) == 0) {
-+ dev_hold(dev);
-+ break;
-+ }
-+ }
-+ read_unlock(&dev_base_lock);
-+ return dev;
-+}
-+
-+/**
-+ * dev_valid_name - check if name is okay for network device
-+ * @name: name string
-+ *
-+ * Network device names need to be valid file names to
-+ * allow sysfs to work.
-+ */
-+static int dev_valid_name(const char *name)
-+{
-+ return !(*name == '\0'
-+ || !strcmp(name, ".")
-+ || !strcmp(name, "..")
-+ || strchr(name, '/'));
-+}
-+
-+/**
-+ * dev_alloc_name - allocate a name for a device
-+ * @dev: device
-+ * @name: name format string
-+ *
-+ * Passed a format string - eg "lt%d" - it will try and find a suitable
-+ * id. Not efficient for many devices, not called a lot. The caller
-+ * must hold the dev_base or rtnl lock while allocating the name and
-+ * adding the device in order to avoid duplicates. Returns the number
-+ * of the unit assigned or a negative errno code.
-+ */
-+
-+int dev_alloc_name(struct net_device *dev, const char *name)
-+{
-+ int i = 0;
-+ char buf[IFNAMSIZ];
-+ const char *p;
-+ const int max_netdevices = 8*PAGE_SIZE;
-+ long *inuse;
-+ struct net_device *d;
-+
-+ p = strnchr(name, IFNAMSIZ-1, '%');
-+ if (p) {
-+ /*
-+ * Verify the string as this thing may have come from
-+ * the user. There must be either one "%d" and no other "%"
-+ * characters.
-+ */
-+ if (p[1] != 'd' || strchr(p + 2, '%'))
-+ return -EINVAL;
-+
-+ /* Use one page as a bit array of possible slots */
-+ inuse = (long *) get_zeroed_page(GFP_ATOMIC);
-+ if (!inuse)
-+ return -ENOMEM;
-+
-+ for (d = dev_base; d; d = d->next) {
-+ if (!sscanf(d->name, name, &i))
-+ continue;
-+ if (i < 0 || i >= max_netdevices)
-+ continue;
-+
-+ /* avoid cases where sscanf is not exact inverse of printf */
-+ snprintf(buf, sizeof(buf), name, i);
-+ if (!strncmp(buf, d->name, IFNAMSIZ))
-+ set_bit(i, inuse);
-+ }
-+
-+ i = find_first_zero_bit(inuse, max_netdevices);
-+ free_page((unsigned long) inuse);
-+ }
-+
-+ snprintf(buf, sizeof(buf), name, i);
-+ if (!__dev_get_by_name(buf)) {
-+ strlcpy(dev->name, buf, IFNAMSIZ);
-+ return i;
-+ }
-+
-+ /* It is possible to run out of possible slots
-+ * when the name is long and there isn't enough space left
-+ * for the digits, or if all bits are used.
-+ */
-+ return -ENFILE;
-+}
-+
-+
-+/**
-+ * dev_change_name - change name of a device
-+ * @dev: device
-+ * @newname: name (or format string) must be at least IFNAMSIZ
-+ *
-+ * Change name of a device, can pass format strings "eth%d"
-+ * for wildcarding.
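
The slot search in dev_alloc_name() above is easy to misread: the snprintf() round-trip check is what stops a name like "eth01" from claiming slot 1, since sscanf() alone would accept it. The same idea in user-space C, with a fixed 256-slot bit array standing in for the kernel's zeroed page:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *existing[] = { "eth0", "eth1", "eth3", "lo" };
	unsigned char inuse[32] = {0};		/* 256 possible slots, sketch-sized */
	int i;

	for (i = 0; i < 4; i++) {
		int n;
		char buf[16];
		if (sscanf(existing[i], "eth%d", &n) != 1 || n < 0 || n >= 256)
			continue;
		snprintf(buf, sizeof(buf), "eth%d", n);
		if (strcmp(buf, existing[i]) == 0)	/* exact round-trip only */
			inuse[n / 8] |= 1u << (n % 8);
	}
	for (i = 0; i < 256; i++)
		if (!(inuse[i / 8] & (1u << (i % 8))))
			break;
	printf("first free name: eth%d\n", i);	/* expect eth2 */
	return 0;
}
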
-+ */
-+int dev_change_name(struct net_device *dev, char *newname)
-+{
-+ int err = 0;
-+
-+ ASSERT_RTNL();
-+
-+ if (dev->flags & IFF_UP)
-+ return -EBUSY;
-+
-+ if (!dev_valid_name(newname))
-+ return -EINVAL;
-+
-+ if (strchr(newname, '%')) {
-+ err = dev_alloc_name(dev, newname);
-+ if (err < 0)
-+ return err;
-+ strcpy(newname, dev->name);
-+ }
-+ else if (__dev_get_by_name(newname))
-+ return -EEXIST;
-+ else
-+ strlcpy(dev->name, newname, IFNAMSIZ);
-+
-+ err = class_device_rename(&dev->class_dev, dev->name);
-+ if (!err) {
-+ hlist_del(&dev->name_hlist);
-+ hlist_add_head(&dev->name_hlist, dev_name_hash(dev->name));
-+ notifier_call_chain(&netdev_chain, NETDEV_CHANGENAME, dev);
-+ }
-+
-+ return err;
-+}
-+
-+/**
-+ * netdev_features_change - device changes features
-+ * @dev: device to cause notification
-+ *
-+ * Called to indicate a device has changed features.
-+ */
-+void netdev_features_change(struct net_device *dev)
-+{
-+ notifier_call_chain(&netdev_chain, NETDEV_FEAT_CHANGE, dev);
-+}
-+EXPORT_SYMBOL(netdev_features_change);
-+
-+/**
-+ * netdev_state_change - device changes state
-+ * @dev: device to cause notification
-+ *
-+ * Called to indicate a device has changed state. This function calls
-+ * the notifier chains for netdev_chain and sends a NEWLINK message
-+ * to the routing socket.
-+ */
-+void netdev_state_change(struct net_device *dev)
-+{
-+ if (dev->flags & IFF_UP) {
-+ notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
-+ rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
-+ }
-+}
-+
-+/**
-+ * dev_load - load a network module
-+ * @name: name of interface
-+ *
-+ * If a network interface is not present and the process has suitable
-+ * privileges this function loads the module. If module loading is not
-+ * available in this kernel then it becomes a nop.
-+ */
-+
-+void dev_load(const char *name)
-+{
-+ struct net_device *dev;
-+
-+ read_lock(&dev_base_lock);
-+ dev = __dev_get_by_name(name);
-+ read_unlock(&dev_base_lock);
-+
-+ if (!dev && capable(CAP_SYS_MODULE))
-+ request_module("%s", name);
-+}
-+
-+static int default_rebuild_header(struct sk_buff *skb)
-+{
-+ printk(KERN_DEBUG "%s: default_rebuild_header called -- BUG!\n",
-+ skb->dev ? skb->dev->name : "NULL!!!");
-+ kfree_skb(skb);
-+ return 1;
-+}
-+
-+
-+/**
-+ * dev_open - prepare an interface for use.
-+ * @dev: device to open
-+ *
-+ * Takes a device from down to up state. The device's private open
-+ * function is invoked and then the multicast lists are loaded. Finally
-+ * the device is moved into the up state and a %NETDEV_UP message is
-+ * sent to the netdev notifier chain.
-+ *
-+ * Calling this function on an active interface is a nop. On a failure
-+ * a negative errno code is returned.
-+ */
-+int dev_open(struct net_device *dev)
-+{
-+ int ret = 0;
-+
-+ /*
-+ * Is it already up?
-+ */
-+
-+ if (dev->flags & IFF_UP)
-+ return 0;
-+
-+ /*
-+ * Is it even present?
-+ */
-+ if (!netif_device_present(dev))
-+ return -ENODEV;
-+
-+ /*
-+ * Call device private open method
-+ */
-+ set_bit(__LINK_STATE_START, &dev->state);
-+ if (dev->open) {
-+ ret = dev->open(dev);
-+ if (ret)
-+ clear_bit(__LINK_STATE_START, &dev->state);
-+ }
-+
-+ /*
-+ * If it went open OK then:
-+ */
-+
-+ if (!ret) {
-+ /*
-+ * Set the flags.
-+ */
-+ dev->flags |= IFF_UP;
-+
-+ /*
-+ * Initialize multicasting status
-+ */
-+ dev_mc_upload(dev);
-+
-+ /*
-+ * Wakeup transmit queue engine
-+ */
-+ dev_activate(dev);
-+
-+ /*
-+ * ... and announce new interface.
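
Condensing the dev_open() path above into a toy state machine makes the ordering visible: the start bit is set and the driver's open runs first, and IFF_UP is flipped only after the driver succeeds. All flag values and helpers below are invented for the sketch:

#include <stdio.h>

#define IFF_UP_M	0x1
#define PRESENT_M	0x2
#define STARTED_M	0x4

static int driver_open_ok(void) { return 0; }	/* pretend driver: 0 = success */

static int dev_open_model(unsigned *state)
{
	if (*state & IFF_UP_M)		return 0;	/* already up: nop */
	if (!(*state & PRESENT_M))	return -1;	/* -ENODEV */
	*state |= STARTED_M;				/* __LINK_STATE_START */
	if (driver_open_ok() != 0) {
		*state &= ~STARTED_M;
		return -2;				/* driver open failed */
	}
	*state |= IFF_UP_M;				/* only now visible as up */
	return 0;					/* NETDEV_UP would fire here */
}

int main(void)
{
	unsigned state = PRESENT_M;
	printf("open -> %d, up=%d\n", dev_open_model(&state), !!(state & IFF_UP_M));
	return 0;
}
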
-+ */ -+ notifier_call_chain(&netdev_chain, NETDEV_UP, dev); -+ } -+ return ret; -+} -+ -+/** -+ * dev_close - shutdown an interface. -+ * @dev: device to shutdown -+ * -+ * This function moves an active device into down state. A -+ * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device -+ * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier -+ * chain. -+ */ -+int dev_close(struct net_device *dev) -+{ -+ if (!(dev->flags & IFF_UP)) -+ return 0; -+ -+ /* -+ * Tell people we are going down, so that they can -+ * prepare to death, when device is still operating. -+ */ -+ notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev); -+ -+ dev_deactivate(dev); -+ -+ clear_bit(__LINK_STATE_START, &dev->state); -+ -+ /* Synchronize to scheduled poll. We cannot touch poll list, -+ * it can be even on different cpu. So just clear netif_running(), -+ * and wait when poll really will happen. Actually, the best place -+ * for this is inside dev->stop() after device stopped its irq -+ * engine, but this requires more changes in devices. */ -+ -+ smp_mb__after_clear_bit(); /* Commit netif_running(). */ -+ while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) { -+ /* No hurry. */ -+ current->state = TASK_INTERRUPTIBLE; -+ schedule_timeout(1); -+ } -+ -+ /* -+ * Call the device specific close. This cannot fail. -+ * Only if device is UP -+ * -+ * We allow it to be called even after a DETACH hot-plug -+ * event. -+ */ -+ if (dev->stop) -+ dev->stop(dev); -+ -+ /* -+ * Device is now down. -+ */ -+ -+ dev->flags &= ~IFF_UP; -+ -+ /* -+ * Tell people we are down -+ */ -+ notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev); -+ -+ return 0; -+} -+ -+ -+/* -+ * Device change register/unregister. These are not inline or static -+ * as we export them to the world. -+ */ -+ -+/** -+ * register_netdevice_notifier - register a network notifier block -+ * @nb: notifier -+ * -+ * Register a notifier to be called when network device events occur. -+ * The notifier passed is linked into the kernel structures and must -+ * not be reused until it has been unregistered. A negative errno code -+ * is returned on a failure. -+ * -+ * When registered all registration and up events are replayed -+ * to the new notifier to allow device to have a race free -+ * view of the network device list. -+ */ -+ -+int register_netdevice_notifier(struct notifier_block *nb) -+{ -+ struct net_device *dev; -+ int err; -+ -+ rtnl_lock(); -+ err = notifier_chain_register(&netdev_chain, nb); -+ if (!err) { -+ for (dev = dev_base; dev; dev = dev->next) { -+ nb->notifier_call(nb, NETDEV_REGISTER, dev); -+ -+ if (dev->flags & IFF_UP) -+ nb->notifier_call(nb, NETDEV_UP, dev); -+ } -+ } -+ rtnl_unlock(); -+ return err; -+} -+ -+/** -+ * unregister_netdevice_notifier - unregister a network notifier block -+ * @nb: notifier -+ * -+ * Unregister a notifier previously registered by -+ * register_netdevice_notifier(). The notifier is unlinked into the -+ * kernel structures and may then be reused. A negative errno code -+ * is returned on a failure. -+ */ -+ -+int unregister_netdevice_notifier(struct notifier_block *nb) -+{ -+ return notifier_chain_unregister(&netdev_chain, nb); -+} -+ -+/** -+ * call_netdevice_notifiers - call all network notifier blocks -+ * @val: value passed unmodified to notifier function -+ * @v: pointer passed unmodified to notifier function -+ * -+ * Call all network notifier blocks. Parameters and return value -+ * are as for notifier_call_chain(). 
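
A detail of register_netdevice_notifier() above that is easy to miss: a late subscriber has NETDEV_REGISTER (and NETDEV_UP for running interfaces) replayed for every existing device, so it gets a race-free view of the device list. Modelled in plain C (event names and the device table are stand-ins):

#include <stdio.h>

enum { NETDEV_REGISTER_EV, NETDEV_UP_EV };

static int log_notifier(int event, const char *dev)
{
	printf("event %s for %s\n",
	       event == NETDEV_REGISTER_EV ? "REGISTER" : "UP", dev);
	return 0;
}

int main(void)
{
	struct { const char *name; int up; } devs[] = { {"lo", 1}, {"eth0", 0} };
	/* rtnl_lock(); chain insertion elided */
	for (int i = 0; i < 2; i++) {
		log_notifier(NETDEV_REGISTER_EV, devs[i].name);
		if (devs[i].up)
			log_notifier(NETDEV_UP_EV, devs[i].name);
	}
	/* rtnl_unlock(); */
	return 0;
}
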
-+ */ -+ -+int call_netdevice_notifiers(unsigned long val, void *v) -+{ -+ return notifier_call_chain(&netdev_chain, val, v); -+} -+ -+/* When > 0 there are consumers of rx skb time stamps */ -+static atomic_t netstamp_needed = ATOMIC_INIT(0); -+ -+void net_enable_timestamp(void) -+{ -+ atomic_inc(&netstamp_needed); -+} -+ -+void net_disable_timestamp(void) -+{ -+ atomic_dec(&netstamp_needed); -+} -+ -+static inline void net_timestamp(struct timeval *stamp) -+{ -+ if (atomic_read(&netstamp_needed)) -+ do_gettimeofday(stamp); -+ else { -+ stamp->tv_sec = 0; -+ stamp->tv_usec = 0; -+ } -+} -+ -+/* -+ * Support routine. Sends outgoing frames to any network -+ * taps currently in use. -+ */ -+ -+void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) -+{ -+ struct packet_type *ptype; -+ net_timestamp(&skb->stamp); -+ -+ rcu_read_lock(); -+ list_for_each_entry_rcu(ptype, &ptype_all, list) { -+ /* Never send packets back to the socket -+ * they originated from - MvS (miquels@drinkel.ow.org) -+ */ -+ if ((ptype->dev == dev || !ptype->dev) && -+ (ptype->af_packet_priv == NULL || -+ (struct sock *)ptype->af_packet_priv != skb->sk)) { -+ struct sk_buff *skb2= skb_clone(skb, GFP_ATOMIC); -+ if (!skb2) -+ break; -+ -+ /* skb->nh should be correctly -+ set by sender, so that the second statement is -+ just protection against buggy protocols. -+ */ -+ skb2->mac.raw = skb2->data; -+ -+ if (skb2->nh.raw < skb2->data || -+ skb2->nh.raw > skb2->tail) { -+ if (net_ratelimit()) -+ printk(KERN_CRIT "protocol %04x is " -+ "buggy, dev %s\n", -+ skb2->protocol, dev->name); -+ skb2->nh.raw = skb2->data; -+ } -+ -+ skb2->h.raw = skb2->nh.raw; -+ skb2->pkt_type = PACKET_OUTGOING; -+ ptype->func(skb2, skb->dev, ptype); -+ } -+ } -+ rcu_read_unlock(); -+} -+ -+/* -+ * Invalidate hardware checksum when packet is to be mangled, and -+ * complete checksum manually on outgoing path. -+ */ -+int skb_checksum_help(struct sk_buff *skb, int inward) -+{ -+ unsigned int csum; -+ int ret = 0, offset = skb->h.raw - skb->data; -+ -+ if (inward) { -+ skb->ip_summed = CHECKSUM_NONE; -+ goto out; -+ } -+ -+ if (skb_cloned(skb)) { -+ ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); -+ if (ret) -+ goto out; -+ } -+ -+ if (offset > (int)skb->len) -+ BUG(); -+ csum = skb_checksum(skb, offset, skb->len-offset, 0); -+ -+ offset = skb->tail - skb->h.raw; -+ if (offset <= 0) -+ BUG(); -+ if (skb->csum + 2 > offset) -+ BUG(); -+ -+ *(u16*)(skb->h.raw + skb->csum) = csum_fold(csum); -+ skb->ip_summed = CHECKSUM_NONE; -+out: -+ return ret; -+} -+ -+#ifdef CONFIG_HIGHMEM -+/* Actually, we should eliminate this check as soon as we know, that: -+ * 1. IOMMU is present and allows to map all the memory. -+ * 2. No high memory really exists on this machine. 
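
The manual completion in skb_checksum_help() above is the classic Internet checksum: a one's-complement sum over the data, folded to 16 bits and inverted, stored back at h.raw + skb->csum. A self-contained version over a plain byte buffer (byte order handled naively; a simplification of the kernel's csum helpers):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* One's-complement checksum: accumulate 16-bit words, fold the carries
 * back in, invert. This is the arithmetic behind skb_checksum() plus
 * csum_fold() in the removed code. */
static uint16_t csum_fold_model(const uint8_t *p, size_t len)
{
	uint32_t sum = 0;

	while (len > 1) {
		sum += ((uint32_t)p[0] << 8) | p[1];
		p += 2;
		len -= 2;
	}
	if (len)
		sum += (uint32_t)p[0] << 8;	/* odd trailing byte */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	const uint8_t payload[] = { 0x45, 0x00, 0x00, 0x1c, 0x00, 0x01 };

	printf("folded csum = 0x%04x\n",
	       (unsigned)csum_fold_model(payload, sizeof(payload)));
	return 0;
}
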
-+ */ -+ -+static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb) -+{ -+ int i; -+ -+ if (dev->features & NETIF_F_HIGHDMA) -+ return 0; -+ -+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) -+ if (PageHighMem(skb_shinfo(skb)->frags[i].page)) -+ return 1; -+ -+ return 0; -+} -+#else -+#define illegal_highdma(dev, skb) (0) -+#endif -+ -+extern void skb_release_data(struct sk_buff *); -+ -+/* Keep head the same: replace data */ -+int __skb_linearize(struct sk_buff *skb, int gfp_mask) -+{ -+ unsigned int size; -+ u8 *data; -+ long offset; -+ struct skb_shared_info *ninfo; -+ int headerlen = skb->data - skb->head; -+ int expand = (skb->tail + skb->data_len) - skb->end; -+ -+ if (skb_shared(skb)) -+ BUG(); -+ -+ if (expand <= 0) -+ expand = 0; -+ -+ size = skb->end - skb->head + expand; -+ size = SKB_DATA_ALIGN(size); -+ data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask); -+ if (!data) -+ return -ENOMEM; -+ -+ /* Copy entire thing */ -+ if (skb_copy_bits(skb, -headerlen, data, headerlen + skb->len)) -+ BUG(); -+ -+ /* Set up shinfo */ -+ ninfo = (struct skb_shared_info*)(data + size); -+ atomic_set(&ninfo->dataref, 1); -+ ninfo->tso_size = skb_shinfo(skb)->tso_size; -+ ninfo->tso_segs = skb_shinfo(skb)->tso_segs; -+ ninfo->nr_frags = 0; -+ ninfo->frag_list = NULL; -+ -+ /* Offset between the two in bytes */ -+ offset = data - skb->head; -+ -+ /* Free old data. */ -+ skb_release_data(skb); -+ -+ skb->head = data; -+ skb->end = data + size; -+ -+ /* Set up new pointers */ -+ skb->h.raw += offset; -+ skb->nh.raw += offset; -+ skb->mac.raw += offset; -+ skb->tail += offset; -+ skb->data += offset; -+ -+ /* We are no longer a clone, even if we were. */ -+ skb->cloned = 0; -+ -+ skb->tail += skb->data_len; -+ skb->data_len = 0; -+ return 0; -+} -+ -+#define HARD_TX_LOCK(dev, cpu) { \ -+ if ((dev->features & NETIF_F_LLTX) == 0) { \ -+ spin_lock(&dev->xmit_lock); \ -+ dev->xmit_lock_owner = cpu; \ -+ } \ -+} -+ -+#define HARD_TX_UNLOCK(dev) { \ -+ if ((dev->features & NETIF_F_LLTX) == 0) { \ -+ dev->xmit_lock_owner = -1; \ -+ spin_unlock(&dev->xmit_lock); \ -+ } \ -+} -+ -+/** -+ * dev_queue_xmit - transmit a buffer -+ * @skb: buffer to transmit -+ * -+ * Queue a buffer for transmission to a network device. The caller must -+ * have set the device and priority and built the buffer before calling -+ * this function. The function can be called from an interrupt. -+ * -+ * A negative errno code is returned on a failure. A success does not -+ * guarantee the frame will be transmitted as it may be dropped due -+ * to congestion or traffic shaping. -+ * -+ * ----------------------------------------------------------------------------------- -+ * I notice this method can also return errors from the queue disciplines, -+ * including NET_XMIT_DROP, which is a positive value. So, errors can also -+ * be positive. -+ * -+ * Regardless of the return value, the skb is consumed, so it is currently -+ * difficult to retry a send to this method. (You can bump the ref count -+ * before sending to hold a reference for retry if you are careful.) -+ * -+ * When calling this method, interrupts MUST be enabled. This is because -+ * the BH enable code must have IRQs enabled so that it will not deadlock. 
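
What __skb_linearize() above buys the transmit path: scattered fragments are gathered into one freshly allocated contiguous buffer, so a device without scatter/gather support (or without reach into highmem, per illegal_highdma()) can still send the packet. The user-space analogue is a plain gather copy:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	/* Three "fragments" standing in for skb frags. */
	const char *frags[] = { "hdr|", "payload-a|", "payload-b" };
	size_t total = 0, off = 0;
	int i;

	for (i = 0; i < 3; i++)
		total += strlen(frags[i]);

	char *linear = malloc(total + 1);
	if (!linear)
		return 1;			/* -ENOMEM in the kernel path */
	for (i = 0; i < 3; i++) {
		memcpy(linear + off, frags[i], strlen(frags[i]));
		off += strlen(frags[i]);
	}
	linear[total] = '\0';
	printf("linearized: %s\n", linear);
	free(linear);
	return 0;
}
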
-+ * --BLG -+ */ -+ -+int dev_queue_xmit(struct sk_buff *skb) -+{ -+ struct net_device *dev = skb->dev; -+ struct Qdisc *q; -+ int rc = -ENOMEM; -+ -+ if (skb_shinfo(skb)->frag_list && -+ !(dev->features & NETIF_F_FRAGLIST) && -+ __skb_linearize(skb, GFP_ATOMIC)) -+ goto out_kfree_skb; -+ -+ /* Fragmented skb is linearized if device does not support SG, -+ * or if at least one of fragments is in highmem and device -+ * does not support DMA from it. -+ */ -+ if (skb_shinfo(skb)->nr_frags && -+ (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) && -+ __skb_linearize(skb, GFP_ATOMIC)) -+ goto out_kfree_skb; -+ -+ /* If packet is not checksummed and device does not support -+ * checksumming for this protocol, complete checksumming here. -+ */ -+ if (skb->ip_summed == CHECKSUM_HW && -+ (!(dev->features & (NETIF_F_HW_CSUM | NETIF_F_NO_CSUM)) && -+ (!(dev->features & NETIF_F_IP_CSUM) || -+ skb->protocol != htons(ETH_P_IP)))) -+ if (skb_checksum_help(skb, 0)) -+ goto out_kfree_skb; -+ -+ /* Disable soft irqs for various locks below. Also -+ * stops preemption for RCU. -+ */ -+ local_bh_disable(); -+ -+ /* Updates of qdisc are serialized by queue_lock. -+ * The struct Qdisc which is pointed to by qdisc is now a -+ * rcu structure - it may be accessed without acquiring -+ * a lock (but the structure may be stale.) The freeing of the -+ * qdisc will be deferred until it's known that there are no -+ * more references to it. -+ * -+ * If the qdisc has an enqueue function, we still need to -+ * hold the queue_lock before calling it, since queue_lock -+ * also serializes access to the device queue. -+ */ -+ -+ q = rcu_dereference(dev->qdisc); -+#ifdef CONFIG_NET_CLS_ACT -+ skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS); -+#endif -+ if (q->enqueue) { -+ /* Grab device queue */ -+ spin_lock(&dev->queue_lock); -+ -+ rc = q->enqueue(skb, q); -+ -+ qdisc_run(dev); -+ -+ spin_unlock(&dev->queue_lock); -+ rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc; -+ goto out; -+ } -+ -+ /* The device has no queue. Common case for software devices: -+ loopback, all the sorts of tunnels... -+ -+ Really, it is unlikely that xmit_lock protection is necessary here. -+ (f.e. loopback and IP tunnels are clean ignoring statistics -+ counters.) -+ However, it is possible, that they rely on protection -+ made by us here. -+ -+ Check this and shot the lock. It is not prone from deadlocks. -+ Either shot noqueue qdisc, it is even simpler 8) -+ */ -+ if (dev->flags & IFF_UP) { -+ int cpu = smp_processor_id(); /* ok because BHs are off */ -+ -+ if (dev->xmit_lock_owner != cpu) { -+ -+ HARD_TX_LOCK(dev, cpu); -+ -+ if (!netif_queue_stopped(dev)) { -+ if (netdev_nit) -+ dev_queue_xmit_nit(skb, dev); -+ -+ rc = 0; -+ if (!dev->hard_start_xmit(skb, dev)) { -+ HARD_TX_UNLOCK(dev); -+ goto out; -+ } -+ } -+ HARD_TX_UNLOCK(dev); -+ if (net_ratelimit()) -+ printk(KERN_CRIT "Virtual device %s asks to " -+ "queue packet!\n", dev->name); -+ } else { -+ /* Recursion is detected! 
It is possible,
-+ * unfortunately */
-+ if (net_ratelimit())
-+ printk(KERN_CRIT "Dead loop on virtual device "
-+ "%s, fix it urgently!\n", dev->name);
-+ }
-+ }
-+
-+ rc = -ENETDOWN;
-+ local_bh_enable();
-+
-+out_kfree_skb:
-+ kfree_skb(skb);
-+ return rc;
-+out:
-+ local_bh_enable();
-+ return rc;
-+}
-+
-+
-+/*=======================================================================
-+ Receiver routines
-+ =======================================================================*/
-+
-+int netdev_max_backlog = 300;
-+int weight_p = 64; /* old backlog weight */
-+/* These numbers are selected based on intuition and some
-+ * experimentation; if you have a more scientific way of doing this
-+ * please go ahead and fix things.
-+ */
-+int no_cong_thresh = 10;
-+int no_cong = 20;
-+int lo_cong = 100;
-+int mod_cong = 290;
-+
-+DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
-+
-+
-+static void get_sample_stats(int cpu)
-+{
-+#ifdef RAND_LIE
-+ unsigned long rd;
-+ int rq;
-+#endif
-+ struct softnet_data *sd = &per_cpu(softnet_data, cpu);
-+ int blog = sd->input_pkt_queue.qlen;
-+ int avg_blog = sd->avg_blog;
-+
-+ avg_blog = (avg_blog >> 1) + (blog >> 1);
-+
-+ if (avg_blog > mod_cong) {
-+ /* Above moderate congestion levels. */
-+ sd->cng_level = NET_RX_CN_HIGH;
-+#ifdef RAND_LIE
-+ rd = net_random();
-+ rq = rd % netdev_max_backlog;
-+ if (rq < avg_blog) /* unlucky bastard */
-+ sd->cng_level = NET_RX_DROP;
-+#endif
-+ } else if (avg_blog > lo_cong) {
-+ sd->cng_level = NET_RX_CN_MOD;
-+#ifdef RAND_LIE
-+ rd = net_random();
-+ rq = rd % netdev_max_backlog;
-+ if (rq < avg_blog) /* unlucky bastard */
-+ sd->cng_level = NET_RX_CN_HIGH;
-+#endif
-+ } else if (avg_blog > no_cong)
-+ sd->cng_level = NET_RX_CN_LOW;
-+ else /* no congestion */
-+ sd->cng_level = NET_RX_SUCCESS;
-+
-+ sd->avg_blog = avg_blog;
-+}
-+
-+#ifdef OFFLINE_SAMPLE
-+static void sample_queue(unsigned long dummy)
-+{
-+/* 10 ms or 1 ms -- i don't care -- JHS */
-+ int next_tick = 1;
-+ int cpu = smp_processor_id();
-+
-+ get_sample_stats(cpu);
-+ next_tick += jiffies;
-+ mod_timer(&samp_timer, next_tick);
-+}
-+#endif
-+
-+
-+/**
-+ * netif_rx - post buffer to the network code
-+ * @skb: buffer to post
-+ *
-+ * This function receives a packet from a device driver and queues it for
-+ * the upper (protocol) levels to process. It always succeeds. The buffer
-+ * may be dropped during processing for congestion control or by the
-+ * protocol layers.
-+ *
-+ * return values:
-+ * NET_RX_SUCCESS (no congestion)
-+ * NET_RX_CN_LOW (low congestion)
-+ * NET_RX_CN_MOD (moderate congestion)
-+ * NET_RX_CN_HIGH (high congestion)
-+ * NET_RX_DROP (packet was dropped)
-+ *
-+ */
-+
-+int netif_rx(struct sk_buff *skb)
-+{
-+ int this_cpu;
-+ struct softnet_data *queue;
-+ unsigned long flags;
-+
-+ /* if netpoll wants it, pretend we never saw it */
-+ if (netpoll_rx(skb))
-+ return NET_RX_DROP;
-+
-+ if (!skb->stamp.tv_sec)
-+ net_timestamp(&skb->stamp);
-+
-+ /*
-+ * The code is rearranged so that the path is shortest
-+ * when the CPU is congested but still operating.
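
The congestion grades that netif_rx() reports come from get_sample_stats() above: a cheap moving average (half old value, half current backlog) compared against the no/lo/mod thresholds. Traced in standalone C with the same default thresholds as the removed code:

#include <stdio.h>

enum { NO_CONG = 20, LO_CONG = 100, MOD_CONG = 290 };

static const char *grade(int avg)
{
	if (avg > MOD_CONG) return "NET_RX_CN_HIGH";
	if (avg > LO_CONG)  return "NET_RX_CN_MOD";
	if (avg > NO_CONG)  return "NET_RX_CN_LOW";
	return "NET_RX_SUCCESS";
}

int main(void)
{
	int avg = 0, samples[] = { 10, 50, 200, 400, 400 };

	for (int i = 0; i < 5; i++) {
		avg = (avg >> 1) + (samples[i] >> 1);	/* half old, half new */
		printf("qlen=%3d avg=%3d -> %s\n", samples[i], avg, grade(avg));
	}
	return 0;
}

Note how a single busy sample moves the average only half way toward the new backlog, so the grade both rises and decays gradually.
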
-+ */ -+ local_irq_save(flags); -+ this_cpu = smp_processor_id(); -+ queue = &__get_cpu_var(softnet_data); -+ -+ __get_cpu_var(netdev_rx_stat).total++; -+ if (queue->input_pkt_queue.qlen <= netdev_max_backlog) { -+ if (queue->input_pkt_queue.qlen) { -+ if (queue->throttle) -+ goto drop; -+ -+enqueue: -+ dev_hold(skb->dev); -+ __skb_queue_tail(&queue->input_pkt_queue, skb); -+#ifndef OFFLINE_SAMPLE -+ get_sample_stats(this_cpu); -+#endif -+ local_irq_restore(flags); -+ return queue->cng_level; -+ } -+ -+ if (queue->throttle) -+ queue->throttle = 0; -+ -+ netif_rx_schedule(&queue->backlog_dev); -+ goto enqueue; -+ } -+ -+ if (!queue->throttle) { -+ queue->throttle = 1; -+ __get_cpu_var(netdev_rx_stat).throttled++; -+ } -+ -+drop: -+ __get_cpu_var(netdev_rx_stat).dropped++; -+ local_irq_restore(flags); -+ -+ kfree_skb(skb); -+ return NET_RX_DROP; -+} -+ -+int netif_rx_ni(struct sk_buff *skb) -+{ -+ int err; -+ -+ preempt_disable(); -+ err = netif_rx(skb); -+ if (local_softirq_pending()) -+ do_softirq(); -+ preempt_enable(); -+ -+ return err; -+} -+ -+EXPORT_SYMBOL(netif_rx_ni); -+ -+static __inline__ void skb_bond(struct sk_buff *skb) -+{ -+ struct net_device *dev = skb->dev; -+ -+ if (dev->master) { -+ skb->real_dev = skb->dev; -+ skb->dev = dev->master; -+ } -+} -+ -+static void net_tx_action(struct softirq_action *h) -+{ -+ struct softnet_data *sd = &__get_cpu_var(softnet_data); -+ -+ if (sd->completion_queue) { -+ struct sk_buff *clist; -+ -+ local_irq_disable(); -+ clist = sd->completion_queue; -+ sd->completion_queue = NULL; -+ local_irq_enable(); -+ -+ while (clist) { -+ struct sk_buff *skb = clist; -+ clist = clist->next; -+ -+ BUG_TRAP(!atomic_read(&skb->users)); -+ __kfree_skb(skb); -+ } -+ } -+ -+ if (sd->output_queue) { -+ struct net_device *head; -+ -+ local_irq_disable(); -+ head = sd->output_queue; -+ sd->output_queue = NULL; -+ local_irq_enable(); -+ -+ while (head) { -+ struct net_device *dev = head; -+ head = head->next_sched; -+ -+ smp_mb__before_clear_bit(); -+ clear_bit(__LINK_STATE_SCHED, &dev->state); -+ -+ if (spin_trylock(&dev->queue_lock)) { -+ qdisc_run(dev); -+ spin_unlock(&dev->queue_lock); -+ } else { -+ netif_schedule(dev); -+ } -+ } -+ } -+} -+ -+static __inline__ int deliver_skb(struct sk_buff *skb, -+ struct packet_type *pt_prev) -+{ -+ atomic_inc(&skb->users); -+ return pt_prev->func(skb, skb->dev, pt_prev); -+} -+ -+#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE) -+int (*br_handle_frame_hook)(struct net_bridge_port *p, struct sk_buff **pskb); -+struct net_bridge; -+struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br, -+ unsigned char *addr); -+void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent); -+ -+static __inline__ int handle_bridge(struct sk_buff **pskb, -+ struct packet_type **pt_prev, int *ret) -+{ -+ struct net_bridge_port *port; -+ -+ if ((*pskb)->pkt_type == PACKET_LOOPBACK || -+ (port = rcu_dereference((*pskb)->dev->br_port)) == NULL) -+ return 0; -+ -+ if (*pt_prev) { -+ *ret = deliver_skb(*pskb, *pt_prev); -+ *pt_prev = NULL; -+ } -+ -+ return br_handle_frame_hook(port, pskb); -+} -+#else -+#define handle_bridge(skb, pt_prev, ret) (0) -+#endif -+ -+#ifdef CONFIG_NET_CLS_ACT -+/* TODO: Maybe we should just force sch_ingress to be compiled in -+ * when CONFIG_NET_CLS_ACT is? 
otherwise some useless instructions -+ * a compare and 2 stores extra right now if we dont have it on -+ * but have CONFIG_NET_CLS_ACT -+ * NOTE: This doesnt stop any functionality; if you dont have -+ * the ingress scheduler, you just cant add policies on ingress. -+ * -+ */ -+static int ing_filter(struct sk_buff *skb) -+{ -+ struct Qdisc *q; -+ struct net_device *dev = skb->dev; -+ int result = TC_ACT_OK; -+ -+ if (dev->qdisc_ingress) { -+ __u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd); -+ if (MAX_RED_LOOP < ttl++) { -+ printk("Redir loop detected Dropping packet (%s->%s)\n", -+ skb->input_dev?skb->input_dev->name:"??",skb->dev->name); -+ return TC_ACT_SHOT; -+ } -+ -+ skb->tc_verd = SET_TC_RTTL(skb->tc_verd,ttl); -+ -+ skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_INGRESS); -+ if (NULL == skb->input_dev) { -+ skb->input_dev = skb->dev; -+ printk("ing_filter: fixed %s out %s\n",skb->input_dev->name,skb->dev->name); -+ } -+ spin_lock(&dev->ingress_lock); -+ if ((q = dev->qdisc_ingress) != NULL) -+ result = q->enqueue(skb, q); -+ spin_unlock(&dev->ingress_lock); -+ -+ } -+ -+ return result; -+} -+#endif -+ -+int netif_receive_skb(struct sk_buff *skb) -+{ -+ struct packet_type *ptype, *pt_prev; -+ int ret = NET_RX_DROP; -+ unsigned short type; -+ -+ /* if we've gotten here through NAPI, check netpoll */ -+ if (skb->dev->poll && netpoll_rx(skb)) -+ return NET_RX_DROP; -+ -+ if (!skb->stamp.tv_sec) -+ net_timestamp(&skb->stamp); -+ -+ skb_bond(skb); -+ -+ __get_cpu_var(netdev_rx_stat).total++; -+ -+ skb->h.raw = skb->nh.raw = skb->data; -+ skb->mac_len = skb->nh.raw - skb->mac.raw; -+ -+ pt_prev = NULL; -+ -+ rcu_read_lock(); -+ -+#ifdef CONFIG_NET_CLS_ACT -+ if (skb->tc_verd & TC_NCLS) { -+ skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); -+ goto ncls; -+ } -+#endif -+ -+ list_for_each_entry_rcu(ptype, &ptype_all, list) { -+ if (!ptype->dev || ptype->dev == skb->dev) { -+ if (pt_prev) -+ ret = deliver_skb(skb, pt_prev); -+ pt_prev = ptype; -+ } -+ } -+ -+#ifdef CONFIG_NET_CLS_ACT -+ if (pt_prev) { -+ ret = deliver_skb(skb, pt_prev); -+ pt_prev = NULL; /* noone else should process this after*/ -+ } else { -+ skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd); -+ } -+ -+ ret = ing_filter(skb); -+ -+ if (ret == TC_ACT_SHOT || (ret == TC_ACT_STOLEN)) { -+ kfree_skb(skb); -+ goto out; -+ } -+ -+ skb->tc_verd = 0; -+ncls: -+#endif -+ -+ handle_diverter(skb); -+ -+ if (handle_bridge(&skb, &pt_prev, &ret)) -+ goto out; -+ -+ type = skb->protocol; -+ list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type)&15], list) { -+ if (ptype->type == type && -+ (!ptype->dev || ptype->dev == skb->dev)) { -+ if (pt_prev) -+ ret = deliver_skb(skb, pt_prev); -+ pt_prev = ptype; -+ } -+ } -+ -+ if (pt_prev) { -+ ret = pt_prev->func(skb, skb->dev, pt_prev); -+ } else { -+ kfree_skb(skb); -+ /* Jamal, now you will not able to escape explaining -+ * me how you were going to use this. 
:-) -+ */ -+ ret = NET_RX_DROP; -+ } -+ -+out: -+ rcu_read_unlock(); -+ return ret; -+} -+ -+static int process_backlog(struct net_device *backlog_dev, int *budget) -+{ -+ int work = 0; -+ int quota = min(backlog_dev->quota, *budget); -+ struct softnet_data *queue = &__get_cpu_var(softnet_data); -+ unsigned long start_time = jiffies; -+ -+ backlog_dev->weight = weight_p; -+ for (;;) { -+ struct sk_buff *skb; -+ struct net_device *dev; -+ -+ local_irq_disable(); -+ skb = __skb_dequeue(&queue->input_pkt_queue); -+ if (!skb) -+ goto job_done; -+ local_irq_enable(); -+ -+ dev = skb->dev; -+ -+ netif_receive_skb(skb); -+ -+ dev_put(dev); -+ -+ work++; -+ -+ if (work >= quota || jiffies - start_time > 1) -+ break; -+ -+ } -+ -+ backlog_dev->quota -= work; -+ *budget -= work; -+ return -1; -+ -+job_done: -+ backlog_dev->quota -= work; -+ *budget -= work; -+ -+ list_del(&backlog_dev->poll_list); -+ smp_mb__before_clear_bit(); -+ netif_poll_enable(backlog_dev); -+ -+ if (queue->throttle) -+ queue->throttle = 0; -+ local_irq_enable(); -+ return 0; -+} -+ -+static void net_rx_action(struct softirq_action *h) -+{ -+ struct softnet_data *queue = &__get_cpu_var(softnet_data); -+ unsigned long start_time = jiffies; -+ int budget = netdev_max_backlog; -+ -+ -+ local_irq_disable(); -+ -+ while (!list_empty(&queue->poll_list)) { -+ struct net_device *dev; -+ -+ if (budget <= 0 || jiffies - start_time > 1) -+ goto softnet_break; -+ -+ local_irq_enable(); -+ -+ dev = list_entry(queue->poll_list.next, -+ struct net_device, poll_list); -+ netpoll_poll_lock(dev); -+ -+ if (dev->quota <= 0 || dev->poll(dev, &budget)) { -+ netpoll_poll_unlock(dev); -+ local_irq_disable(); -+ list_del(&dev->poll_list); -+ list_add_tail(&dev->poll_list, &queue->poll_list); -+ if (dev->quota < 0) -+ dev->quota += dev->weight; -+ else -+ dev->quota = dev->weight; -+ } else { -+ netpoll_poll_unlock(dev); -+ dev_put(dev); -+ local_irq_disable(); -+ } -+ } -+out: -+ local_irq_enable(); -+ return; -+ -+softnet_break: -+ __get_cpu_var(netdev_rx_stat).time_squeeze++; -+ __raise_softirq_irqoff(NET_RX_SOFTIRQ); -+ goto out; -+} -+ -+static gifconf_func_t * gifconf_list [NPROTO]; -+ -+/** -+ * register_gifconf - register a SIOCGIF handler -+ * @family: Address family -+ * @gifconf: Function handler -+ * -+ * Register protocol dependent address dumping routines. The handler -+ * that is passed must not be freed or reused until it has been replaced -+ * by another handler. -+ */ -+int register_gifconf(unsigned int family, gifconf_func_t * gifconf) -+{ -+ if (family >= NPROTO) -+ return -EINVAL; -+ gifconf_list[family] = gifconf; -+ return 0; -+} -+ -+ -+/* -+ * Map an interface index to its name (SIOCGIFNAME) -+ */ -+ -+/* -+ * We need this ioctl for efficient implementation of the -+ * if_indextoname() function required by the IPv6 API. Without -+ * it, we would have to search all the interfaces to find a -+ * match. --pb -+ */ -+ -+static int dev_ifname(struct ifreq __user *arg) -+{ -+ struct net_device *dev; -+ struct ifreq ifr; -+ -+ /* -+ * Fetch the caller's info block. -+ */ -+ -+ if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) -+ return -EFAULT; -+ -+ read_lock(&dev_base_lock); -+ dev = __dev_get_by_index(ifr.ifr_ifindex); -+ if (!dev) { -+ read_unlock(&dev_base_lock); -+ return -ENODEV; -+ } -+ -+ strcpy(ifr.ifr_name, dev->name); -+ read_unlock(&dev_base_lock); -+ -+ if (copy_to_user(arg, &ifr, sizeof(struct ifreq))) -+ return -EFAULT; -+ return 0; -+} -+ -+/* -+ * Perform a SIOCGIFCONF call. 
This structure will change -+ * size eventually, and there is nothing I can do about it. -+ * Thus we will need a 'compatibility mode'. -+ */ -+ -+static int dev_ifconf(char __user *arg) -+{ -+ struct ifconf ifc; -+ struct net_device *dev; -+ char __user *pos; -+ int len; -+ int total; -+ int i; -+ -+ /* -+ * Fetch the caller's info block. -+ */ -+ -+ if (copy_from_user(&ifc, arg, sizeof(struct ifconf))) -+ return -EFAULT; -+ -+ pos = ifc.ifc_buf; -+ len = ifc.ifc_len; -+ -+ /* -+ * Loop over the interfaces, and write an info block for each. -+ */ -+ -+ total = 0; -+ for (dev = dev_base; dev; dev = dev->next) { -+ for (i = 0; i < NPROTO; i++) { -+ if (gifconf_list[i]) { -+ int done; -+ if (!pos) -+ done = gifconf_list[i](dev, NULL, 0); -+ else -+ done = gifconf_list[i](dev, pos + total, -+ len - total); -+ if (done < 0) -+ return -EFAULT; -+ total += done; -+ } -+ } -+ } -+ -+ /* -+ * All done. Write the updated control block back to the caller. -+ */ -+ ifc.ifc_len = total; -+ -+ /* -+ * Both BSD and Solaris return 0 here, so we do too. -+ */ -+ return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0; -+} -+ -+#ifdef CONFIG_PROC_FS -+/* -+ * This is invoked by the /proc filesystem handler to display a device -+ * in detail. -+ */ -+static __inline__ struct net_device *dev_get_idx(loff_t pos) -+{ -+ struct net_device *dev; -+ loff_t i; -+ -+ for (i = 0, dev = dev_base; dev && i < pos; ++i, dev = dev->next); -+ -+ return i == pos ? dev : NULL; -+} -+ -+void *dev_seq_start(struct seq_file *seq, loff_t *pos) -+{ -+ read_lock(&dev_base_lock); -+ return *pos ? dev_get_idx(*pos - 1) : SEQ_START_TOKEN; -+} -+ -+void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) -+{ -+ ++*pos; -+ return v == SEQ_START_TOKEN ? dev_base : ((struct net_device *)v)->next; -+} -+ -+void dev_seq_stop(struct seq_file *seq, void *v) -+{ -+ read_unlock(&dev_base_lock); -+} -+ -+static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev) -+{ -+ if (dev->get_stats) { -+ struct net_device_stats *stats = dev->get_stats(dev); -+ -+ seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu " -+ "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n", -+ dev->name, stats->rx_bytes, stats->rx_packets, -+ stats->rx_errors, -+ stats->rx_dropped + stats->rx_missed_errors, -+ stats->rx_fifo_errors, -+ stats->rx_length_errors + stats->rx_over_errors + -+ stats->rx_crc_errors + stats->rx_frame_errors, -+ stats->rx_compressed, stats->multicast, -+ stats->tx_bytes, stats->tx_packets, -+ stats->tx_errors, stats->tx_dropped, -+ stats->tx_fifo_errors, stats->collisions, -+ stats->tx_carrier_errors + -+ stats->tx_aborted_errors + -+ stats->tx_window_errors + -+ stats->tx_heartbeat_errors, -+ stats->tx_compressed); -+ } else -+ seq_printf(seq, "%6s: No statistics available.\n", dev->name); -+} -+ -+/* -+ * Called from the PROCfs module. 
This now uses the new arbitrary sized -+ * /proc/net interface to create /proc/net/dev -+ */ -+static int dev_seq_show(struct seq_file *seq, void *v) -+{ -+ if (v == SEQ_START_TOKEN) -+ seq_puts(seq, "Inter-| Receive " -+ " | Transmit\n" -+ " face |bytes packets errs drop fifo frame " -+ "compressed multicast|bytes packets errs " -+ "drop fifo colls carrier compressed\n"); -+ else -+ dev_seq_printf_stats(seq, v); -+ return 0; -+} -+ -+static struct netif_rx_stats *softnet_get_online(loff_t *pos) -+{ -+ struct netif_rx_stats *rc = NULL; -+ -+ while (*pos < NR_CPUS) -+ if (cpu_online(*pos)) { -+ rc = &per_cpu(netdev_rx_stat, *pos); -+ break; -+ } else -+ ++*pos; -+ return rc; -+} -+ -+static void *softnet_seq_start(struct seq_file *seq, loff_t *pos) -+{ -+ return softnet_get_online(pos); -+} -+ -+static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos) -+{ -+ ++*pos; -+ return softnet_get_online(pos); -+} -+ -+static void softnet_seq_stop(struct seq_file *seq, void *v) -+{ -+} -+ -+static int softnet_seq_show(struct seq_file *seq, void *v) -+{ -+ struct netif_rx_stats *s = v; -+ -+ seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n", -+ s->total, s->dropped, s->time_squeeze, s->throttled, -+ s->fastroute_hit, s->fastroute_success, s->fastroute_defer, -+ s->fastroute_deferred_out, -+#if 0 -+ s->fastroute_latency_reduction -+#else -+ s->cpu_collision -+#endif -+ ); -+ return 0; -+} -+ -+static struct seq_operations dev_seq_ops = { -+ .start = dev_seq_start, -+ .next = dev_seq_next, -+ .stop = dev_seq_stop, -+ .show = dev_seq_show, -+}; -+ -+static int dev_seq_open(struct inode *inode, struct file *file) -+{ -+ return seq_open(file, &dev_seq_ops); -+} -+ -+static struct file_operations dev_seq_fops = { -+ .owner = THIS_MODULE, -+ .open = dev_seq_open, -+ .read = seq_read, -+ .llseek = seq_lseek, -+ .release = seq_release, -+}; -+ -+static struct seq_operations softnet_seq_ops = { -+ .start = softnet_seq_start, -+ .next = softnet_seq_next, -+ .stop = softnet_seq_stop, -+ .show = softnet_seq_show, -+}; -+ -+static int softnet_seq_open(struct inode *inode, struct file *file) -+{ -+ return seq_open(file, &softnet_seq_ops); -+} -+ -+static struct file_operations softnet_seq_fops = { -+ .owner = THIS_MODULE, -+ .open = softnet_seq_open, -+ .read = seq_read, -+ .llseek = seq_lseek, -+ .release = seq_release, -+}; -+ -+#ifdef WIRELESS_EXT -+extern int wireless_proc_init(void); -+#else -+#define wireless_proc_init() 0 -+#endif -+ -+static int __init dev_proc_init(void) -+{ -+ int rc = -ENOMEM; -+ -+ if (!proc_net_fops_create("dev", S_IRUGO, &dev_seq_fops)) -+ goto out; -+ if (!proc_net_fops_create("softnet_stat", S_IRUGO, &softnet_seq_fops)) -+ goto out_dev; -+ if (wireless_proc_init()) -+ goto out_softnet; -+ rc = 0; -+out: -+ return rc; -+out_softnet: -+ proc_net_remove("softnet_stat"); -+out_dev: -+ proc_net_remove("dev"); -+ goto out; -+} -+#else -+#define dev_proc_init() 0 -+#endif /* CONFIG_PROC_FS */ -+ -+ -+/** -+ * netdev_set_master - set up master/slave pair -+ * @slave: slave device -+ * @master: new master device -+ * -+ * Changes the master device of the slave. Pass %NULL to break the -+ * bonding. The caller must hold the RTNL semaphore. On a failure -+ * a negative errno code is returned. On success the reference counts -+ * are adjusted, %RTM_NEWLINK is sent to the routing socket and the -+ * function returns zero. 
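
For reference against dev_seq_show() and dev_seq_printf_stats() above, this is the shape of the /proc/net/dev output they generate: two header lines, then one fixed-width row per interface using the same format string. The counter values below are made up:

#include <stdio.h>

int main(void)
{
	printf("Inter-|   Receive                            "
	       "                    |  Transmit\n");
	printf(" face |bytes    packets errs drop fifo frame "
	       "compressed multicast|bytes    packets errs "
	       "drop fifo colls carrier compressed\n");
	printf("%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
	       "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
	       "eth0", 123456UL, 789UL, 0UL, 0UL, 0UL, 0UL, 0UL, 0UL,
	       654321UL, 987UL, 0UL, 0UL, 0UL, 0UL, 0UL, 0UL);
	return 0;
}
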
-+ */
-+int netdev_set_master(struct net_device *slave, struct net_device *master)
-+{
-+ struct net_device *old = slave->master;
-+
-+ ASSERT_RTNL();
-+
-+ if (master) {
-+ if (old)
-+ return -EBUSY;
-+ dev_hold(master);
-+ }
-+
-+ slave->master = master;
-+
-+ synchronize_net();
-+
-+ if (old)
-+ dev_put(old);
-+
-+ if (master)
-+ slave->flags |= IFF_SLAVE;
-+ else
-+ slave->flags &= ~IFF_SLAVE;
-+
-+ rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
-+ return 0;
-+}
-+
-+/**
-+ * dev_set_promiscuity - update promiscuity count on a device
-+ * @dev: device
-+ * @inc: modifier
-+ *
-+ * Add or remove promiscuity from a device. While the count in the device
-+ * remains above zero the interface remains promiscuous. Once it hits zero
-+ * the device reverts back to normal filtering operation. A negative inc
-+ * value is used to drop promiscuity on the device.
-+ */
-+void dev_set_promiscuity(struct net_device *dev, int inc)
-+{
-+ unsigned short old_flags = dev->flags;
-+
-+ dev->flags |= IFF_PROMISC;
-+ if ((dev->promiscuity += inc) == 0)
-+ dev->flags &= ~IFF_PROMISC;
-+ if (dev->flags ^ old_flags) {
-+ dev_mc_upload(dev);
-+ printk(KERN_INFO "device %s %s promiscuous mode\n",
-+ dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
-+ "left");
-+ }
-+}
-+
-+/**
-+ * dev_set_allmulti - update allmulti count on a device
-+ * @dev: device
-+ * @inc: modifier
-+ *
-+ * Add or remove reception of all multicast frames to a device. While the
-+ * count in the device remains above zero the interface remains listening
-+ * for all multicast frames. Once it hits zero the device reverts back to normal
-+ * filtering operation. A negative @inc value is used to drop the counter
-+ * when releasing a resource needing all multicasts.
-+ */
-+
-+void dev_set_allmulti(struct net_device *dev, int inc)
-+{
-+ unsigned short old_flags = dev->flags;
-+
-+ dev->flags |= IFF_ALLMULTI;
-+ if ((dev->allmulti += inc) == 0)
-+ dev->flags &= ~IFF_ALLMULTI;
-+ if (dev->flags ^ old_flags)
-+ dev_mc_upload(dev);
-+}
-+
-+unsigned dev_get_flags(const struct net_device *dev)
-+{
-+ unsigned flags;
-+
-+ flags = (dev->flags & ~(IFF_PROMISC |
-+ IFF_ALLMULTI |
-+ IFF_RUNNING)) |
-+ (dev->gflags & (IFF_PROMISC |
-+ IFF_ALLMULTI));
-+
-+ if (netif_running(dev) && netif_carrier_ok(dev))
-+ flags |= IFF_RUNNING;
-+
-+ return flags;
-+}
-+
-+int dev_change_flags(struct net_device *dev, unsigned flags)
-+{
-+ int ret;
-+ int old_flags = dev->flags;
-+
-+ /*
-+ * Set the flags on our device.
-+ */
-+
-+ dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
-+ IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
-+ IFF_AUTOMEDIA)) |
-+ (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
-+ IFF_ALLMULTI));
-+
-+ /*
-+ * Load in the correct multicast list now the flags have changed.
-+ */
-+
-+ dev_mc_upload(dev);
-+
-+ /*
-+ * Have we downed the interface? We handle IFF_UP ourselves
-+ * according to user attempts to set it, rather than blindly
-+ * setting it.
-+ */
-+
-+ ret = 0;
-+ if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
-+ ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
-+
-+ if (!ret)
-+ dev_mc_upload(dev);
-+ }
-+
-+ if (dev->flags & IFF_UP &&
-+ ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
-+ IFF_VOLATILE)))
-+ notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
-+
-+ if ((flags ^ dev->gflags) & IFF_PROMISC) {
-+ int inc = (flags & IFF_PROMISC) ?
+1 : -1; -+ dev->gflags ^= IFF_PROMISC; -+ dev_set_promiscuity(dev, inc); -+ } -+ -+ /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI -+ is important. Some (broken) drivers set IFF_PROMISC, when -+ IFF_ALLMULTI is requested not asking us and not reporting. -+ */ -+ if ((flags ^ dev->gflags) & IFF_ALLMULTI) { -+ int inc = (flags & IFF_ALLMULTI) ? +1 : -1; -+ dev->gflags ^= IFF_ALLMULTI; -+ dev_set_allmulti(dev, inc); -+ } -+ -+ if (old_flags ^ dev->flags) -+ rtmsg_ifinfo(RTM_NEWLINK, dev, old_flags ^ dev->flags); -+ -+ return ret; -+} -+ -+int dev_set_mtu(struct net_device *dev, int new_mtu) -+{ -+ int err; -+ -+ if (new_mtu == dev->mtu) -+ return 0; -+ -+ /* MTU must be positive. */ -+ if (new_mtu < 0) -+ return -EINVAL; -+ -+ if (!netif_device_present(dev)) -+ return -ENODEV; -+ -+ err = 0; -+ if (dev->change_mtu) -+ err = dev->change_mtu(dev, new_mtu); -+ else -+ dev->mtu = new_mtu; -+ if (!err && dev->flags & IFF_UP) -+ notifier_call_chain(&netdev_chain, -+ NETDEV_CHANGEMTU, dev); -+ return err; -+} -+ -+int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa) -+{ -+ int err; -+ -+ if (!dev->set_mac_address) -+ return -EOPNOTSUPP; -+ if (sa->sa_family != dev->type) -+ return -EINVAL; -+ if (!netif_device_present(dev)) -+ return -ENODEV; -+ err = dev->set_mac_address(dev, sa); -+ if (!err) -+ notifier_call_chain(&netdev_chain, NETDEV_CHANGEADDR, dev); -+ return err; -+} -+ -+/* -+ * Perform the SIOCxIFxxx calls. -+ */ -+static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd) -+{ -+ int err; -+ struct net_device *dev = __dev_get_by_name(ifr->ifr_name); -+ -+ if (!dev) -+ return -ENODEV; -+ -+ switch (cmd) { -+ case SIOCGIFFLAGS: /* Get interface flags */ -+ ifr->ifr_flags = dev_get_flags(dev); -+ return 0; -+ -+ case SIOCSIFFLAGS: /* Set interface flags */ -+ return dev_change_flags(dev, ifr->ifr_flags); -+ -+ case SIOCGIFMETRIC: /* Get the metric on the interface -+ (currently unused) */ -+ ifr->ifr_metric = 0; -+ return 0; -+ -+ case SIOCSIFMETRIC: /* Set the metric on the interface -+ (currently unused) */ -+ return -EOPNOTSUPP; -+ -+ case SIOCGIFMTU: /* Get the MTU of a device */ -+ ifr->ifr_mtu = dev->mtu; -+ return 0; -+ -+ case SIOCSIFMTU: /* Set the MTU of a device */ -+ return dev_set_mtu(dev, ifr->ifr_mtu); -+ -+ case SIOCGIFHWADDR: -+ if (!dev->addr_len) -+ memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data); -+ else -+ memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr, -+ min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); -+ ifr->ifr_hwaddr.sa_family = dev->type; -+ return 0; -+ -+ case SIOCSIFHWADDR: -+ return dev_set_mac_address(dev, &ifr->ifr_hwaddr); -+ -+ case SIOCSIFHWBROADCAST: -+ if (ifr->ifr_hwaddr.sa_family != dev->type) -+ return -EINVAL; -+ memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data, -+ min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); -+ notifier_call_chain(&netdev_chain, -+ NETDEV_CHANGEADDR, dev); -+ return 0; -+ -+ case SIOCGIFMAP: -+ ifr->ifr_map.mem_start = dev->mem_start; -+ ifr->ifr_map.mem_end = dev->mem_end; -+ ifr->ifr_map.base_addr = dev->base_addr; -+ ifr->ifr_map.irq = dev->irq; -+ ifr->ifr_map.dma = dev->dma; -+ ifr->ifr_map.port = dev->if_port; -+ return 0; -+ -+ case SIOCSIFMAP: -+ if (dev->set_config) { -+ if (!netif_device_present(dev)) -+ return -ENODEV; -+ return dev->set_config(dev, &ifr->ifr_map); -+ } -+ return -EOPNOTSUPP; -+ -+ case SIOCADDMULTI: -+ if (!dev->set_multicast_list || -+ ifr->ifr_hwaddr.sa_family != AF_UNSPEC) -+ return -EINVAL; -+ if 
(!netif_device_present(dev)) -+ return -ENODEV; -+ return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data, -+ dev->addr_len, 1); -+ -+ case SIOCDELMULTI: -+ if (!dev->set_multicast_list || -+ ifr->ifr_hwaddr.sa_family != AF_UNSPEC) -+ return -EINVAL; -+ if (!netif_device_present(dev)) -+ return -ENODEV; -+ return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data, -+ dev->addr_len, 1); -+ -+ case SIOCGIFINDEX: -+ ifr->ifr_ifindex = dev->ifindex; -+ return 0; -+ -+ case SIOCGIFTXQLEN: -+ ifr->ifr_qlen = dev->tx_queue_len; -+ return 0; -+ -+ case SIOCSIFTXQLEN: -+ if (ifr->ifr_qlen < 0) -+ return -EINVAL; -+ dev->tx_queue_len = ifr->ifr_qlen; -+ return 0; -+ -+ case SIOCSIFNAME: -+ ifr->ifr_newname[IFNAMSIZ-1] = '\0'; -+ return dev_change_name(dev, ifr->ifr_newname); -+ -+ /* -+ * Unknown or private ioctl -+ */ -+ -+ default: -+ if ((cmd >= SIOCDEVPRIVATE && -+ cmd <= SIOCDEVPRIVATE + 15) || -+ cmd == SIOCBONDENSLAVE || -+ cmd == SIOCBONDRELEASE || -+ cmd == SIOCBONDSETHWADDR || -+ cmd == SIOCBONDSLAVEINFOQUERY || -+ cmd == SIOCBONDINFOQUERY || -+ cmd == SIOCBONDCHANGEACTIVE || -+ cmd == SIOCGMIIPHY || -+ cmd == SIOCGMIIREG || -+ cmd == SIOCSMIIREG || -+ cmd == SIOCBRADDIF || -+ cmd == SIOCBRDELIF || -+ cmd == SIOCWANDEV) { -+ err = -EOPNOTSUPP; -+ if (dev->do_ioctl) { -+ if (netif_device_present(dev)) -+ err = dev->do_ioctl(dev, ifr, -+ cmd); -+ else -+ err = -ENODEV; -+ } -+ } else -+ err = -EINVAL; -+ -+ } -+ return err; -+} -+ -+/* -+ * This function handles all "interface"-type I/O control requests. The actual -+ * 'doing' part of this is dev_ifsioc above. -+ */ -+ -+/** -+ * dev_ioctl - network device ioctl -+ * @cmd: command to issue -+ * @arg: pointer to a struct ifreq in user space -+ * -+ * Issue ioctl functions to devices. This is normally called by the -+ * user space syscall interfaces but can sometimes be useful for -+ * other purposes. The return value is the return from the syscall if -+ * positive or a negative errno code on error. -+ */ -+ -+int dev_ioctl(unsigned int cmd, void __user *arg) -+{ -+ struct ifreq ifr; -+ int ret; -+ char *colon; -+ -+ /* One special case: SIOCGIFCONF takes ifconf argument -+ and requires shared lock, because it sleeps writing -+ to user space. -+ */ -+ -+ if (cmd == SIOCGIFCONF) { -+ rtnl_shlock(); -+ ret = dev_ifconf((char __user *) arg); -+ rtnl_shunlock(); -+ return ret; -+ } -+ if (cmd == SIOCGIFNAME) -+ return dev_ifname((struct ifreq __user *)arg); -+ -+ if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) -+ return -EFAULT; -+ -+ ifr.ifr_name[IFNAMSIZ-1] = 0; -+ -+ colon = strchr(ifr.ifr_name, ':'); -+ if (colon) -+ *colon = 0; -+ -+ /* -+ * See which interface the caller is talking about. -+ */ -+ -+ switch (cmd) { -+ /* -+ * These ioctl calls: -+ * - can be done by all. -+ * - atomic and do not require locking. 
-+ * - return a value -+ */ -+ case SIOCGIFFLAGS: -+ case SIOCGIFMETRIC: -+ case SIOCGIFMTU: -+ case SIOCGIFHWADDR: -+ case SIOCGIFSLAVE: -+ case SIOCGIFMAP: -+ case SIOCGIFINDEX: -+ case SIOCGIFTXQLEN: -+ dev_load(ifr.ifr_name); -+ read_lock(&dev_base_lock); -+ ret = dev_ifsioc(&ifr, cmd); -+ read_unlock(&dev_base_lock); -+ if (!ret) { -+ if (colon) -+ *colon = ':'; -+ if (copy_to_user(arg, &ifr, -+ sizeof(struct ifreq))) -+ ret = -EFAULT; -+ } -+ return ret; -+ -+ case SIOCETHTOOL: -+ dev_load(ifr.ifr_name); -+ rtnl_lock(); -+ ret = dev_ethtool(&ifr); -+ rtnl_unlock(); -+ if (!ret) { -+ if (colon) -+ *colon = ':'; -+ if (copy_to_user(arg, &ifr, -+ sizeof(struct ifreq))) -+ ret = -EFAULT; -+ } -+ return ret; -+ -+ /* -+ * These ioctl calls: -+ * - require superuser power. -+ * - require strict serialization. -+ * - return a value -+ */ -+ case SIOCGMIIPHY: -+ case SIOCGMIIREG: -+ case SIOCSIFNAME: -+ if (!capable(CAP_NET_ADMIN)) -+ return -EPERM; -+ dev_load(ifr.ifr_name); -+ rtnl_lock(); -+ ret = dev_ifsioc(&ifr, cmd); -+ rtnl_unlock(); -+ if (!ret) { -+ if (colon) -+ *colon = ':'; -+ if (copy_to_user(arg, &ifr, -+ sizeof(struct ifreq))) -+ ret = -EFAULT; -+ } -+ return ret; -+ -+ /* -+ * These ioctl calls: -+ * - require superuser power. -+ * - require strict serialization. -+ * - do not return a value -+ */ -+ case SIOCSIFFLAGS: -+ case SIOCSIFMETRIC: -+ case SIOCSIFMTU: -+ case SIOCSIFMAP: -+ case SIOCSIFHWADDR: -+ case SIOCSIFSLAVE: -+ case SIOCADDMULTI: -+ case SIOCDELMULTI: -+ case SIOCSIFHWBROADCAST: -+ case SIOCSIFTXQLEN: -+ case SIOCSMIIREG: -+ case SIOCBONDENSLAVE: -+ case SIOCBONDRELEASE: -+ case SIOCBONDSETHWADDR: -+ case SIOCBONDSLAVEINFOQUERY: -+ case SIOCBONDINFOQUERY: -+ case SIOCBONDCHANGEACTIVE: -+ case SIOCBRADDIF: -+ case SIOCBRDELIF: -+ if (!capable(CAP_NET_ADMIN)) -+ return -EPERM; -+ dev_load(ifr.ifr_name); -+ rtnl_lock(); -+ ret = dev_ifsioc(&ifr, cmd); -+ rtnl_unlock(); -+ return ret; -+ -+ case SIOCGIFMEM: -+ /* Get the per device memory space. We can add this but -+ * currently do not support it */ -+ case SIOCSIFMEM: -+ /* Set the per device memory buffer space. -+ * Not applicable in our case */ -+ case SIOCSIFLINK: -+ return -EINVAL; -+ -+ /* -+ * Unknown or private ioctl. -+ */ -+ default: -+ if (cmd == SIOCWANDEV || -+ (cmd >= SIOCDEVPRIVATE && -+ cmd <= SIOCDEVPRIVATE + 15)) { -+ dev_load(ifr.ifr_name); -+ rtnl_lock(); -+ ret = dev_ifsioc(&ifr, cmd); -+ rtnl_unlock(); -+ if (!ret && copy_to_user(arg, &ifr, -+ sizeof(struct ifreq))) -+ ret = -EFAULT; -+ return ret; -+ } -+#ifdef WIRELESS_EXT -+ /* Take care of Wireless Extensions */ -+ if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) { -+ /* If command is `set a parameter', or -+ * `get the encoding parameters', check if -+ * the user has the right to do it */ -+ if (IW_IS_SET(cmd) || cmd == SIOCGIWENCODE) { -+ if (!capable(CAP_NET_ADMIN)) -+ return -EPERM; -+ } -+ dev_load(ifr.ifr_name); -+ rtnl_lock(); -+ /* Follow me in net/core/wireless.c */ -+ ret = wireless_process_ioctl(&ifr, cmd); -+ rtnl_unlock(); -+ if (IW_IS_GET(cmd) && -+ copy_to_user(arg, &ifr, -+ sizeof(struct ifreq))) -+ ret = -EFAULT; -+ return ret; -+ } -+#endif /* WIRELESS_EXT */ -+ return -EINVAL; -+ } -+} -+ -+ -+/** -+ * dev_new_index - allocate an ifindex -+ * -+ * Returns a suitable unique value for a new device interface -+ * number. The caller must hold the rtnl semaphore or the -+ * dev_base_lock to be sure it remains unique. 
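
The long switch in dev_ioctl() above reduces to a capability table: SIOCG* reads are open to everyone (wireless extensions get a special case), while SIOCS* writes and the bonding/MII/bridge commands demand CAP_NET_ADMIN before dev_ifsioc() is even called. A toy dispatcher showing the split (command names are stand-ins, not real ioctl numbers):

#include <stdio.h>

struct cmd { const char *name; int needs_admin; };

static int dispatch(const struct cmd *c, int has_admin)
{
	if (c->needs_admin && !has_admin)
		return -1;			/* -EPERM */
	printf("%s: ok\n", c->name);
	return 0;
}

int main(void)
{
	struct cmd cmds[] = { {"SIOCGIFMTU", 0}, {"SIOCSIFMTU", 1} };

	for (int i = 0; i < 2; i++)
		if (dispatch(&cmds[i], /*has_admin=*/0) < 0)
			printf("%s: EPERM without CAP_NET_ADMIN\n", cmds[i].name);
	return 0;
}
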
-+ */ -+static int dev_new_index(void) -+{ -+ static int ifindex; -+ for (;;) { -+ if (++ifindex <= 0) -+ ifindex = 1; -+ if (!__dev_get_by_index(ifindex)) -+ return ifindex; -+ } -+} -+ -+static int dev_boot_phase = 1; -+ -+/* Delayed registration/unregistration */ -+static DEFINE_SPINLOCK(net_todo_list_lock); -+static struct list_head net_todo_list = LIST_HEAD_INIT(net_todo_list); -+ -+static inline void net_set_todo(struct net_device *dev) -+{ -+ spin_lock(&net_todo_list_lock); -+ list_add_tail(&dev->todo_list, &net_todo_list); -+ spin_unlock(&net_todo_list_lock); -+} -+ -+/** -+ * register_netdevice - register a network device -+ * @dev: device to register -+ * -+ * Take a completed network device structure and add it to the kernel -+ * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier -+ * chain. 0 is returned on success. A negative errno code is returned -+ * on a failure to set up the device, or if the name is a duplicate. -+ * -+ * Callers must hold the rtnl semaphore. You may want -+ * register_netdev() instead of this. -+ * -+ * BUGS: -+ * The locking appears insufficient to guarantee two parallel registers -+ * will not get the same name. -+ */ -+ -+int register_netdevice(struct net_device *dev) -+{ -+ struct hlist_head *head; -+ struct hlist_node *p; -+ int ret; -+ -+ BUG_ON(dev_boot_phase); -+ ASSERT_RTNL(); -+ -+ /* When net_device's are persistent, this will be fatal. */ -+ BUG_ON(dev->reg_state != NETREG_UNINITIALIZED); -+ -+ spin_lock_init(&dev->queue_lock); -+ spin_lock_init(&dev->xmit_lock); -+ dev->xmit_lock_owner = -1; -+#ifdef CONFIG_NET_CLS_ACT -+ spin_lock_init(&dev->ingress_lock); -+#endif -+ -+ ret = alloc_divert_blk(dev); -+ if (ret) -+ goto out; -+ -+ dev->iflink = -1; -+ -+ /* Init, if this function is available */ -+ if (dev->init) { -+ ret = dev->init(dev); -+ if (ret) { -+ if (ret > 0) -+ ret = -EIO; -+ goto out_err; -+ } -+ } -+ -+ if (!dev_valid_name(dev->name)) { -+ ret = -EINVAL; -+ goto out_err; -+ } -+ -+ dev->ifindex = dev_new_index(); -+ if (dev->iflink == -1) -+ dev->iflink = dev->ifindex; -+ -+ /* Check for existence of name */ -+ head = dev_name_hash(dev->name); -+ hlist_for_each(p, head) { -+ struct net_device *d -+ = hlist_entry(p, struct net_device, name_hlist); -+ if (!strncmp(d->name, dev->name, IFNAMSIZ)) { -+ ret = -EEXIST; -+ goto out_err; -+ } -+ } -+ -+ /* Fix illegal SG+CSUM combinations. */ -+ if ((dev->features & NETIF_F_SG) && -+ !(dev->features & (NETIF_F_IP_CSUM | -+ NETIF_F_NO_CSUM | -+ NETIF_F_HW_CSUM))) { -+ printk("%s: Dropping NETIF_F_SG since no checksum feature.\n", -+ dev->name); -+ dev->features &= ~NETIF_F_SG; -+ } -+ -+ /* TSO requires that SG is present as well. */ -+ if ((dev->features & NETIF_F_TSO) && -+ !(dev->features & NETIF_F_SG)) { -+ printk("%s: Dropping NETIF_F_TSO since no SG feature.\n", -+ dev->name); -+ dev->features &= ~NETIF_F_TSO; -+ } -+ -+ /* -+ * nil rebuild_header routine -+ * that should never be called; it is used just as a bug trap. -+ */ -+ -+ if (!dev->rebuild_header) -+ dev->rebuild_header = default_rebuild_header; -+ -+ /* -+ * Default initial state at registry is that the -+ * device is present.
-+ */ -+ -+ set_bit(__LINK_STATE_PRESENT, &dev->state); -+ -+ dev->next = NULL; -+ dev_init_scheduler(dev); -+ write_lock_bh(&dev_base_lock); -+ *dev_tail = dev; -+ dev_tail = &dev->next; -+ hlist_add_head(&dev->name_hlist, head); -+ hlist_add_head(&dev->index_hlist, dev_index_hash(dev->ifindex)); -+ dev_hold(dev); -+ dev->reg_state = NETREG_REGISTERING; -+ write_unlock_bh(&dev_base_lock); -+ -+ /* Notify protocols that a new device appeared. */ -+ notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev); -+ -+ /* Finish registration after unlock */ -+ net_set_todo(dev); -+ ret = 0; -+ -+out: -+ return ret; -+out_err: -+ free_divert_blk(dev); -+ goto out; -+} -+ -+/** -+ * register_netdev - register a network device -+ * @dev: device to register -+ * -+ * Take a completed network device structure and add it to the kernel -+ * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier -+ * chain. 0 is returned on success. A negative errno code is returned -+ * on a failure to set up the device, or if the name is a duplicate. -+ * -+ * This is a wrapper around register_netdevice that takes the rtnl semaphore -+ * and expands the device name if you passed a format string to -+ * alloc_netdev. -+ */ -+int register_netdev(struct net_device *dev) -+{ -+ int err; -+ -+ rtnl_lock(); -+ -+ /* -+ * If the name is a format string the caller wants us to do a -+ * name allocation. -+ */ -+ if (strchr(dev->name, '%')) { -+ err = dev_alloc_name(dev, dev->name); -+ if (err < 0) -+ goto out; -+ } -+ -+ /* -+ * Back compatibility hook. Kill this one in 2.5 -+ */ -+ if (dev->name[0] == 0 || dev->name[0] == ' ') { -+ err = dev_alloc_name(dev, "eth%d"); -+ if (err < 0) -+ goto out; -+ } -+ -+ err = register_netdevice(dev); -+out: -+ rtnl_unlock(); -+ return err; -+} -+EXPORT_SYMBOL(register_netdev); -+ -+/* -+ * netdev_wait_allrefs - wait until all references are gone. -+ * -+ * This is called when unregistering network devices. -+ * -+ * Any protocol or device that holds a reference should register -+ * for netdevice notification, and cleanup and put back the -+ * reference if they receive an UNREGISTER event. -+ * We can get stuck here if buggy protocols don't correctly -+ * call dev_put. -+ */ -+static void netdev_wait_allrefs(struct net_device *dev) -+{ -+ unsigned long rebroadcast_time, warning_time; -+ -+ rebroadcast_time = warning_time = jiffies; -+ while (atomic_read(&dev->refcnt) != 0) { -+ if (time_after(jiffies, rebroadcast_time + 1 * HZ)) { -+ rtnl_shlock(); -+ -+ /* Rebroadcast unregister notification */ -+ notifier_call_chain(&netdev_chain, -+ NETDEV_UNREGISTER, dev); -+ -+ if (test_bit(__LINK_STATE_LINKWATCH_PENDING, -+ &dev->state)) { -+ /* We must not have linkwatch events -+ * pending on unregister. If this -+ * happens, we simply run the queue -+ * unscheduled, resulting in a noop -+ * for this device. -+ */ -+ linkwatch_run_queue(); -+ } -+ -+ rtnl_shunlock(); -+ -+ rebroadcast_time = jiffies; -+ } -+ -+ msleep(250); -+ -+ if (time_after(jiffies, warning_time + 10 * HZ)) { -+ printk(KERN_EMERG "unregister_netdevice: " -+ "waiting for %s to become free. Usage " -+ "count = %d\n", -+ dev->name, atomic_read(&dev->refcnt)); -+ warning_time = jiffies; -+ } -+ } -+} -+ -+/* The sequence is: -+ * -+ * rtnl_lock(); -+ * ... -+ * register_netdevice(x1); -+ * register_netdevice(x2); -+ * ... -+ * unregister_netdevice(y1); -+ * unregister_netdevice(y2); -+ * ...
-+ * rtnl_unlock(); -+ * free_netdev(y1); -+ * free_netdev(y2); -+ * -+ * We are invoked by rtnl_unlock() after it drops the semaphore. -+ * This allows us to deal with problems: -+ * 1) We can create/delete sysfs objects which invoke hotplug -+ * without deadlocking with linkwatch via keventd. -+ * 2) Since we run with the RTNL semaphore not held, we can sleep -+ * safely in order to wait for the netdev refcnt to drop to zero. -+ */ -+static DECLARE_MUTEX(net_todo_run_mutex); -+void netdev_run_todo(void) -+{ -+ struct list_head list = LIST_HEAD_INIT(list); -+ int err; -+ -+ -+ /* Need to guard against multiple cpu's getting out of order. */ -+ down(&net_todo_run_mutex); -+ -+ /* Not safe to do outside the semaphore. We must not return -+ * until all unregister events invoked by the local processor -+ * have been completed (either by this todo run, or one on -+ * another cpu). -+ */ -+ if (list_empty(&net_todo_list)) -+ goto out; -+ -+ /* Snapshot list, allow later requests */ -+ spin_lock(&net_todo_list_lock); -+ list_splice_init(&net_todo_list, &list); -+ spin_unlock(&net_todo_list_lock); -+ -+ while (!list_empty(&list)) { -+ struct net_device *dev -+ = list_entry(list.next, struct net_device, todo_list); -+ list_del(&dev->todo_list); -+ -+ switch(dev->reg_state) { -+ case NETREG_REGISTERING: -+ err = netdev_register_sysfs(dev); -+ if (err) -+ printk(KERN_ERR "%s: failed sysfs registration (%d)\n", -+ dev->name, err); -+ dev->reg_state = NETREG_REGISTERED; -+ break; -+ -+ case NETREG_UNREGISTERING: -+ netdev_unregister_sysfs(dev); -+ dev->reg_state = NETREG_UNREGISTERED; -+ -+ netdev_wait_allrefs(dev); -+ -+ /* paranoia */ -+ BUG_ON(atomic_read(&dev->refcnt)); -+ BUG_TRAP(!dev->ip_ptr); -+ BUG_TRAP(!dev->ip6_ptr); -+ BUG_TRAP(!dev->dn_ptr); -+ -+ -+ /* It must be the very last action, -+ * after this 'dev' may point to freed up memory. -+ */ -+ if (dev->destructor) -+ dev->destructor(dev); -+ break; -+ -+ default: -+ printk(KERN_ERR "network todo '%s' but state %d\n", -+ dev->name, dev->reg_state); -+ break; -+ } -+ } -+ -+out: -+ up(&net_todo_run_mutex); -+} -+ -+/** -+ * alloc_netdev - allocate network device -+ * @sizeof_priv: size of private data to allocate space for -+ * @name: device name format string -+ * @setup: callback to initialize device -+ * -+ * Allocates a struct net_device with private data area for driver use -+ * and performs basic initialization. -+ */ -+struct net_device *alloc_netdev(int sizeof_priv, const char *name, -+ void (*setup)(struct net_device *)) -+{ -+ void *p; -+ struct net_device *dev; -+ int alloc_size; -+ -+ /* ensure 32-byte alignment of both the device and private area */ -+ alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST; -+ alloc_size += sizeof_priv + NETDEV_ALIGN_CONST; -+ -+ p = kmalloc(alloc_size, GFP_KERNEL); -+ if (!p) { -+ printk(KERN_ERR "alloc_dev: Unable to allocate device.\n"); -+ return NULL; -+ } -+ memset(p, 0, alloc_size); -+ -+ dev = (struct net_device *) -+ (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST); -+ dev->padded = (char *)dev - (char *)p; -+ -+ if (sizeof_priv) -+ dev->priv = netdev_priv(dev); -+ -+ setup(dev); -+ strcpy(dev->name, name); -+ return dev; -+} -+EXPORT_SYMBOL(alloc_netdev); -+ -+/** -+ * free_netdev - free network device -+ * @dev: device -+ * -+ * This function does the last stage of destroying an allocated device -+ * interface. The reference to the device object is released. -+ * If this is the last reference then it will be freed. 
-+ */ -+void free_netdev(struct net_device *dev) -+{ -+#ifdef CONFIG_SYSFS -+ /* Compatibility with error handling in drivers */ -+ if (dev->reg_state == NETREG_UNINITIALIZED) { -+ kfree((char *)dev - dev->padded); -+ return; -+ } -+ -+ BUG_ON(dev->reg_state != NETREG_UNREGISTERED); -+ dev->reg_state = NETREG_RELEASED; -+ -+ /* will free via class release */ -+ class_device_put(&dev->class_dev); -+#else -+ kfree((char *)dev - dev->padded); -+#endif -+} -+ -+/* Synchronize with packet receive processing. */ -+void synchronize_net(void) -+{ -+ might_sleep(); -+ synchronize_rcu(); -+} -+ -+/** -+ * unregister_netdevice - remove device from the kernel -+ * @dev: device -+ * -+ * This function shuts down a device interface and removes it -+ * from the kernel tables. On success 0 is returned, on a failure -+ * a negative errno code is returned. -+ * -+ * Callers must hold the rtnl semaphore. You may want -+ * unregister_netdev() instead of this. -+ */ -+ -+int unregister_netdevice(struct net_device *dev) -+{ -+ struct net_device *d, **dp; -+ -+ BUG_ON(dev_boot_phase); -+ ASSERT_RTNL(); -+ -+ /* Some devices call without registering for initialization unwind. */ -+ if (dev->reg_state == NETREG_UNINITIALIZED) { -+ printk(KERN_DEBUG "unregister_netdevice: device %s/%p never " -+ "was registered\n", dev->name, dev); -+ return -ENODEV; -+ } -+ -+ BUG_ON(dev->reg_state != NETREG_REGISTERED); -+ -+ /* If device is running, close it first. */ -+ if (dev->flags & IFF_UP) -+ dev_close(dev); -+ -+ /* And unlink it from device chain. */ -+ for (dp = &dev_base; (d = *dp) != NULL; dp = &d->next) { -+ if (d == dev) { -+ write_lock_bh(&dev_base_lock); -+ hlist_del(&dev->name_hlist); -+ hlist_del(&dev->index_hlist); -+ if (dev_tail == &dev->next) -+ dev_tail = dp; -+ *dp = d->next; -+ write_unlock_bh(&dev_base_lock); -+ break; -+ } -+ } -+ if (!d) { -+ printk(KERN_ERR "unregister net_device: '%s' not found\n", -+ dev->name); -+ return -ENODEV; -+ } -+ -+ dev->reg_state = NETREG_UNREGISTERING; -+ -+ synchronize_net(); -+ -+ /* Shutdown queueing discipline. */ -+ dev_shutdown(dev); -+ -+ -+ /* Notify protocols that we are about to destroy -+ this device. They should clean all the things. -+ */ -+ notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev); -+ -+ /* -+ * Flush the multicast chain -+ */ -+ dev_mc_discard(dev); -+ -+ if (dev->uninit) -+ dev->uninit(dev); -+ -+ /* Notifier chain MUST detach us from master device. */ -+ BUG_TRAP(!dev->master); -+ -+ free_divert_blk(dev); -+ -+ /* Finish processing unregister after unlock */ -+ net_set_todo(dev); -+ -+ synchronize_net(); -+ -+ dev_put(dev); -+ return 0; -+} -+ -+/** -+ * unregister_netdev - remove device from the kernel -+ * @dev: device -+ * -+ * This function shuts down a device interface and removes it -+ * from the kernel tables. On success 0 is returned, on a failure -+ * a negative errno code is returned. -+ * -+ * This is just a wrapper for unregister_netdevice that takes -+ * the rtnl semaphore. In general you want to use this and not -+ * unregister_netdevice.
-+ */ -+void unregister_netdev(struct net_device *dev) -+{ -+ rtnl_lock(); -+ unregister_netdevice(dev); -+ rtnl_unlock(); -+} -+ -+EXPORT_SYMBOL(unregister_netdev); -+ -+#ifdef CONFIG_HOTPLUG_CPU -+static int dev_cpu_callback(struct notifier_block *nfb, -+ unsigned long action, -+ void *ocpu) -+{ -+ struct sk_buff **list_skb; -+ struct net_device **list_net; -+ struct sk_buff *skb; -+ unsigned int cpu, oldcpu = (unsigned long)ocpu; -+ struct softnet_data *sd, *oldsd; -+ -+ if (action != CPU_DEAD) -+ return NOTIFY_OK; -+ -+ local_irq_disable(); -+ cpu = smp_processor_id(); -+ sd = &per_cpu(softnet_data, cpu); -+ oldsd = &per_cpu(softnet_data, oldcpu); -+ -+ /* Find end of our completion_queue. */ -+ list_skb = &sd->completion_queue; -+ while (*list_skb) -+ list_skb = &(*list_skb)->next; -+ /* Append completion queue from offline CPU. */ -+ *list_skb = oldsd->completion_queue; -+ oldsd->completion_queue = NULL; -+ -+ /* Find end of our output_queue. */ -+ list_net = &sd->output_queue; -+ while (*list_net) -+ list_net = &(*list_net)->next_sched; -+ /* Append output queue from offline CPU. */ -+ *list_net = oldsd->output_queue; -+ oldsd->output_queue = NULL; -+ -+ raise_softirq_irqoff(NET_TX_SOFTIRQ); -+ local_irq_enable(); -+ -+ /* Process offline CPU's input_pkt_queue */ -+ while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) -+ netif_rx(skb); -+ -+ return NOTIFY_OK; -+} -+#endif /* CONFIG_HOTPLUG_CPU */ -+ -+ -+/* -+ * Initialize the DEV module. At boot time this walks the device list and -+ * unhooks any devices that fail to initialise (normally hardware not -+ * present) and leaves us with a valid list of present and active devices. -+ * -+ */ -+ -+/* -+ * This is called single threaded during boot, so no need -+ * to take the rtnl semaphore. -+ */ -+static int __init net_dev_init(void) -+{ -+ int i, rc = -ENOMEM; -+ -+ BUG_ON(!dev_boot_phase); -+ -+ net_random_init(); -+ -+ if (dev_proc_init()) -+ goto out; -+ -+ if (netdev_sysfs_init()) -+ goto out; -+ -+ INIT_LIST_HEAD(&ptype_all); -+ for (i = 0; i < 16; i++) -+ INIT_LIST_HEAD(&ptype_base[i]); -+ -+ for (i = 0; i < ARRAY_SIZE(dev_name_head); i++) -+ INIT_HLIST_HEAD(&dev_name_head[i]); -+ -+ for (i = 0; i < ARRAY_SIZE(dev_index_head); i++) -+ INIT_HLIST_HEAD(&dev_index_head[i]); -+ -+ /* -+ * Initialise the packet receive queues. 
-+ */ -+ -+ for (i = 0; i < NR_CPUS; i++) { -+ struct softnet_data *queue; -+ -+ queue = &per_cpu(softnet_data, i); -+ skb_queue_head_init(&queue->input_pkt_queue); -+ queue->throttle = 0; -+ queue->cng_level = 0; -+ queue->avg_blog = 10; /* arbitrary non-zero */ -+ queue->completion_queue = NULL; -+ INIT_LIST_HEAD(&queue->poll_list); -+ set_bit(__LINK_STATE_START, &queue->backlog_dev.state); -+ queue->backlog_dev.weight = weight_p; -+ queue->backlog_dev.poll = process_backlog; -+ atomic_set(&queue->backlog_dev.refcnt, 1); -+ } -+ -+#ifdef OFFLINE_SAMPLE -+ samp_timer.expires = jiffies + (10 * HZ); -+ add_timer(&samp_timer); -+#endif -+ -+ dev_boot_phase = 0; -+ -+ open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL); -+ open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL); -+ -+ hotcpu_notifier(dev_cpu_callback, 0); -+ dst_init(); -+ dev_mcast_init(); -+ rc = 0; -+out: -+ return rc; -+} -+ -+subsys_initcall(net_dev_init); -+ -+EXPORT_SYMBOL(__dev_get_by_index); -+EXPORT_SYMBOL(__dev_get_by_name); -+EXPORT_SYMBOL(__dev_remove_pack); -+EXPORT_SYMBOL(__skb_linearize); -+EXPORT_SYMBOL(dev_add_pack); -+EXPORT_SYMBOL(dev_alloc_name); -+EXPORT_SYMBOL(dev_close); -+EXPORT_SYMBOL(dev_get_by_flags); -+EXPORT_SYMBOL(dev_get_by_index); -+EXPORT_SYMBOL(dev_get_by_name); -+EXPORT_SYMBOL(dev_ioctl); -+EXPORT_SYMBOL(dev_open); -+EXPORT_SYMBOL(dev_queue_xmit); -+EXPORT_SYMBOL(dev_remove_pack); -+EXPORT_SYMBOL(dev_set_allmulti); -+EXPORT_SYMBOL(dev_set_promiscuity); -+EXPORT_SYMBOL(dev_change_flags); -+EXPORT_SYMBOL(dev_set_mtu); -+EXPORT_SYMBOL(dev_set_mac_address); -+EXPORT_SYMBOL(free_netdev); -+EXPORT_SYMBOL(netdev_boot_setup_check); -+EXPORT_SYMBOL(netdev_set_master); -+EXPORT_SYMBOL(netdev_state_change); -+EXPORT_SYMBOL(netif_receive_skb); -+EXPORT_SYMBOL(netif_rx); -+EXPORT_SYMBOL(register_gifconf); -+EXPORT_SYMBOL(register_netdevice); -+EXPORT_SYMBOL(register_netdevice_notifier); -+EXPORT_SYMBOL(skb_checksum_help); -+EXPORT_SYMBOL(synchronize_net); -+EXPORT_SYMBOL(unregister_netdevice); -+EXPORT_SYMBOL(unregister_netdevice_notifier); -+EXPORT_SYMBOL(net_enable_timestamp); -+EXPORT_SYMBOL(net_disable_timestamp); -+EXPORT_SYMBOL(dev_get_flags); -+ -+#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) -+EXPORT_SYMBOL(br_handle_frame_hook); -+EXPORT_SYMBOL(br_fdb_get_hook); -+EXPORT_SYMBOL(br_fdb_put_hook); -+#endif -+ -+#ifdef CONFIG_KMOD -+EXPORT_SYMBOL(dev_load); -+#endif -+ -+EXPORT_PER_CPU_SYMBOL(softnet_data); -diff --unified --recursive --new-file linux-2.6.12.5/net/ring/Kconfig linux-2.6.12.5-1-686-smp-ring3/net/ring/Kconfig ---- linux-2.6.12.5/net/ring/Kconfig 1970-01-01 01:00:00.000000000 +0100 -+++ linux-2.6.12.5-1-686-smp-ring3/net/ring/Kconfig 2005-10-22 23:50:45.539482000 +0200 -@@ -0,0 +1,14 @@ -+config RING -+ tristate "PF_RING sockets (EXPERIMENTAL)" -+ depends on EXPERIMENTAL -+ ---help--- -+ PF_RING socket family, optimized for packet capture. -+ If a PF_RING socket is bound to an adapter (via the bind() system -+ call), the adapter will be used in read-only mode until the socket -+ is destroyed. Whenever an incoming packet is received from the adapter -+ it will not be passed to upper layers; instead it is copied to a ring -+ buffer, which in turn is exported to user space applications via mmap. -+ Please refer to http://luca.ntop.org/Ring.pdf for more. -+ -+ Say N unless you know what you are doing.
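The help text above maps directly onto the module code that follows: a capture application opens a PF_RING socket, bind()s it to one adapter, then mmap()s the ring into its address space. Below is a minimal user-space sketch of that sequence, mirroring the checks in ring_create(), ring_bind() and ring_mmap() further down; the PF_RING constant and the ring layout actually come from the patched linux/ring.h, which is not part of this hunk, so the value 27 used here is only a placeholder.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <linux/if_ether.h>   /* ETH_P_ALL */
#include <arpa/inet.h>        /* htons() */

#ifndef PF_RING
#define PF_RING 27            /* placeholder; the real value is defined in the patched linux/ring.h */
#endif

int main(void)
{
    /* ring_create() only accepts SOCK_RAW sockets with protocol ETH_P_ALL,
     * and requires CAP_NET_ADMIN. */
    int fd = socket(PF_RING, SOCK_RAW, htons(ETH_P_ALL));
    if (fd < 0) { perror("socket(PF_RING)"); return 1; }

    /* ring_bind() expects a plain struct sockaddr carrying the interface
     * name in sa_data. */
    struct sockaddr sa;
    memset(&sa, 0, sizeof(sa));
    sa.sa_family = PF_RING;
    strncpy(sa.sa_data, "eth0", sizeof(sa.sa_data) - 1);
    if (bind(fd, &sa, sizeof(sa)) < 0) { perror("bind"); close(fd); return 1; }

    /* ring_mmap() insists on a page-aligned length no larger than the ring
     * (FlowSlotInfo header plus slots); one page is enough to reach the
     * FlowSlotInfo header. */
    void *ring = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (ring == MAP_FAILED) { perror("mmap"); close(fd); return 1; }

    /* ... poll(fd, ...) for POLLIN and walk the slots here ... */

    munmap(ring, 4096);
    close(fd);
    return 0;
}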
-+ -diff --unified --recursive --new-file linux-2.6.12.5/net/ring/Makefile linux-2.6.12.5-1-686-smp-ring3/net/ring/Makefile ---- linux-2.6.12.5/net/ring/Makefile 1970-01-01 01:00:00.000000000 +0100 -+++ linux-2.6.12.5-1-686-smp-ring3/net/ring/Makefile 2005-10-22 23:50:45.051451500 +0200 -@@ -0,0 +1,7 @@ -+# -+# Makefile for the ring driver. -+# -+ -+obj-m += ring.o -+ -+ring-objs := ring_packet.o -diff --unified --recursive --new-file linux-2.6.12.5/net/ring/ring_packet.c linux-2.6.12.5-1-686-smp-ring3/net/ring/ring_packet.c ---- linux-2.6.12.5/net/ring/ring_packet.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-2.6.12.5-1-686-smp-ring3/net/ring/ring_packet.c 2005-10-22 23:50:45.159458250 +0200 -@@ -0,0 +1,1592 @@ -+/* -+ * -+ * (C) 2004-05 - Luca Deri <deri@ntop.org> -+ * -+ * This code includes patches courtesy of -+ * - Jeff Randall <jrandall@nexvu.com> -+ * - Helmut Manck <helmut.manck@secunet.com> -+ * - Brad Doctor <bdoctor@ps-ax.com> -+ * -+ */ -+ -+/* FIX: add an entry inside the /proc filesystem */ -+ -+#include <linux/version.h> -+#include <linux/config.h> -+#include <linux/module.h> -+#include <linux/kernel.h> -+#include <linux/socket.h> -+#include <linux/skbuff.h> -+#include <linux/rtnetlink.h> -+#include <linux/in.h> -+#include <linux/in6.h> -+#include <linux/init.h> -+#include <linux/filter.h> -+#include <linux/ring.h> -+#include <linux/ip.h> -+#include <linux/tcp.h> -+#include <linux/udp.h> -+#include <linux/list.h> -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)) -+#include <net/xfrm.h> -+#else -+#include <linux/poll.h> -+#endif -+#include <net/sock.h> -+#include <asm/io.h> /* needed for virt_to_phys() */ -+ -+/* #define RING_DEBUG */ -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)) -+static inline int remap_page_range(struct vm_area_struct *vma, -+ unsigned long uvaddr, -+ unsigned long paddr, -+ unsigned long size, -+ pgprot_t prot) { -+ return(remap_pfn_range(vma, uvaddr, paddr >> PAGE_SHIFT, -+ size, prot)); -+} -+#endif -+ -+/* ************************************************* */ -+ -+#define CLUSTER_LEN 8 -+ -+struct ring_cluster { -+ u_short cluster_id; /* 0 = no cluster */ -+ u_short num_cluster_elements; -+ enum cluster_type hashing_mode; -+ u_short hashing_id; -+ struct sock *sk[CLUSTER_LEN]; -+ struct ring_cluster *next; /* NULL = last element of the cluster */ -+}; -+ -+/* ************************************************* */ -+ -+struct ring_element { -+ struct list_head list; -+ struct sock *sk; -+}; -+ -+/* ************************************************* */ -+ -+struct ring_opt { -+ struct net_device *ring_netdev; -+ -+ /* Cluster */ -+ u_short cluster_id; /* 0 = no cluster */ -+ -+ /* Reflector */ -+ struct net_device *reflector_dev; -+ -+ /* Packet buffers */ -+ unsigned long order; -+ -+ /* Ring Slots */ -+ unsigned long ring_memory; -+ FlowSlotInfo *slots_info; /* Basically it points to ring_memory */ -+ char *ring_slots; /* Basically it points to ring_memory -+ +sizeof(FlowSlotInfo) */ -+ -+ /* Packet Sampling */ -+ u_int pktToSample, sample_rate; -+ -+ /* BPF Filter */ -+ struct sk_filter *bpfFilter; -+ -+ /* Locks */ -+ atomic_t num_ring_slots_waiters; -+ wait_queue_head_t ring_slots_waitqueue; -+ rwlock_t ring_index_lock; -+ -+ /* Indexes (Internal) */ -+ u_int insert_page_id, insert_slot_id; -+}; -+ -+/* ************************************************* */ -+ -+/* List of all ring sockets. 
*/ -+static struct list_head ring_table; -+ -+/* List of all clusters */ -+static struct ring_cluster *ring_cluster_list; -+ -+static rwlock_t ring_mgmt_lock = RW_LOCK_UNLOCKED; -+ -+/* ********************************** */ -+ -+/* Forward */ -+static struct proto_ops ring_ops; -+ -+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,11)) -+static struct proto ring_proto; -+#endif -+ -+static int skb_ring_handler(struct sk_buff *skb, u_char recv_packet, -+ u_char real_skb); -+static int buffer_ring_handler(struct net_device *dev, char *data, int len); -+static int remove_from_cluster(struct sock *sock, struct ring_opt *pfr); -+ -+/* Extern */ -+ -+/* ********************************** */ -+ -+/* Defaults */ -+static u_int bucket_len = 128, num_slots = 4096, sample_rate = 1, -+ transparent_mode = 0, enable_tx_capture = 0; -+ -+MODULE_PARM(bucket_len, "i"); -+MODULE_PARM_DESC(bucket_len, "Length of a ring bucket, in bytes"); -+MODULE_PARM(num_slots, "i"); -+MODULE_PARM_DESC(num_slots, "Number of ring slots"); -+MODULE_PARM(sample_rate, "i"); -+MODULE_PARM_DESC(sample_rate, "Ring packet sample rate"); -+MODULE_PARM(transparent_mode, "i"); -+MODULE_PARM_DESC(transparent_mode, -+ "Set to 1 to set transparent mode " -+ "(slower but backwards compatible)"); -+MODULE_PARM(enable_tx_capture, "i"); -+MODULE_PARM_DESC(enable_tx_capture, "Set to 1 to capture outgoing packets"); -+ -+/* ********************************** */ -+ -+#define MIN_QUEUED_PKTS 64 -+#define MAX_QUEUE_LOOPS 64 -+ -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)) -+#define ring_sk_datatype(__sk) ((struct ring_opt *)__sk) -+#define ring_sk(__sk) ((__sk)->sk_protinfo) -+#else -+#define ring_sk_datatype(a) (a) -+#define ring_sk(__sk) ((__sk)->protinfo.pf_ring) -+#endif -+ -+/* -+ int dev_queue_xmit(struct sk_buff *skb) -+ skb->dev; -+ struct net_device *dev_get_by_name(const char *name) -+*/ -+ -+/* ********************************** */ -+ -+static void ring_sock_destruct(struct sock *sk) { -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)) -+ skb_queue_purge(&sk->sk_receive_queue); -+ -+ if (!sock_flag(sk, SOCK_DEAD)) { -+#if defined(RING_DEBUG) -+ printk("Attempt to release alive ring socket: %p\n", sk); -+#endif -+ return; -+ } -+ -+ BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc)); -+ BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc)); -+#else -+ -+ BUG_TRAP(atomic_read(&sk->rmem_alloc)==0); -+ BUG_TRAP(atomic_read(&sk->wmem_alloc)==0); -+ -+ if (!sk->dead) { -+#if defined(RING_DEBUG) -+ printk("Attempt to release alive ring socket: %p\n", sk); -+#endif -+ return; -+ } -+#endif -+ -+ kfree(ring_sk(sk)); -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)) -+ MOD_DEC_USE_COUNT; -+#endif -+} -+ -+/* ********************************** */ -+/* -+ * ring_insert() -+ * -+ * store the sk in a new element and add it -+ * to the head of the list. -+ */ -+static inline void ring_insert(struct sock *sk) { -+ struct ring_element *next; -+ -+#if defined(RING_DEBUG) -+ printk("RING: ring_insert()\n"); -+#endif -+ -+ next = kmalloc(sizeof(struct ring_element), GFP_ATOMIC); -+ if(next != NULL) { -+ next->sk = sk; -+ write_lock_irq(&ring_mgmt_lock); -+ list_add(&next->list, &ring_table); -+ write_unlock_irq(&ring_mgmt_lock); -+ } else { -+ if (net_ratelimit()) -+ printk("RING: could not kmalloc slot!!\n"); -+ } -+} -+ -+/* ********************************** */ -+/* -+ * ring_remove() -+ * -+ * For each of the elements in the list: -+ * - check if this is the element we want to delete -+ * - if it is, remove it from the list, and free it.
-+ * -+ * stop when we find the one we're looking for (break), -+ * or when we reach the end of the list. -+ */ -+static inline void ring_remove(struct sock *sk) { -+ struct list_head *ptr; -+ struct ring_element *entry; -+ -+ -+ for(ptr = ring_table.next; ptr != &ring_table; ptr = ptr->next) { -+ entry = list_entry(ptr, struct ring_element, list); -+ -+ if(entry->sk == sk) { -+ write_lock_irq(&ring_mgmt_lock); -+ list_del(ptr); -+ kfree(ptr); -+ write_unlock_irq(&ring_mgmt_lock); -+ break; -+ } -+ } -+ -+} -+ -+/* ********************************** */ -+ -+static u_int32_t num_queued_pkts(struct ring_opt *pfr) { -+ -+ if(pfr->ring_slots != NULL) { -+ -+ u_int32_t tot_insert = pfr->slots_info->insert_idx, -+#if defined(RING_DEBUG) -+ tot_read = pfr->slots_info->tot_read, tot_pkts; -+#else -+ tot_read = pfr->slots_info->tot_read; -+#endif -+ -+ if(tot_insert >= tot_read) { -+#if defined(RING_DEBUG) -+ tot_pkts = tot_insert-tot_read; -+#endif -+ return(tot_insert-tot_read); -+ } else { -+#if defined(RING_DEBUG) -+ tot_pkts = ((u_int32_t)-1)+tot_insert-tot_read; -+#endif -+ return(((u_int32_t)-1)+tot_insert-tot_read); -+ } -+ -+#if defined(RING_DEBUG) -+ printk("-> num_queued_pkts=%d [tot_insert=%d][tot_read=%d]\n", -+ tot_pkts, tot_insert, tot_read); -+#endif -+ -+ } else -+ return(0); -+} -+ -+/* ********************************** */ -+ -+static inline FlowSlot* get_insert_slot(struct ring_opt *pfr) { -+#if defined(RING_DEBUG) -+ printk("get_insert_slot(%d)\n", pfr->slots_info->insert_idx); -+#endif -+ -+ if(pfr->ring_slots != NULL) { -+ FlowSlot *slot = (FlowSlot*)&(pfr->ring_slots[pfr->slots_info->insert_idx -+ *pfr->slots_info->slot_len]); -+ return(slot); -+ } else -+ return(NULL); -+} -+ -+/* ********************************** */ -+ -+static inline FlowSlot* get_remove_slot(struct ring_opt *pfr) { -+#if defined(RING_DEBUG) -+ printk("get_remove_slot(%d)\n", pfr->slots_info->remove_idx); -+#endif -+ -+ if(pfr->ring_slots != NULL) -+ return((FlowSlot*)&(pfr->ring_slots[pfr->slots_info->remove_idx* -+ pfr->slots_info->slot_len])); -+ else -+ return(NULL); -+} -+ -+/* ********************************** */ -+ -+static void add_skb_to_ring(struct sk_buff *skb, -+ struct ring_opt *pfr, -+ u_char recv_packet, -+ u_char real_skb /* 1=skb 0=faked skb */) { -+ FlowSlot *theSlot; -+ int idx, displ; -+ -+ if(recv_packet) { -+ /* Hack for identifying a packet received by the e1000 */ -+ if(real_skb) { -+ displ = SKB_DISPLACEMENT; -+ } else -+ displ = 0; /* Received by the e1000 wrapper */ -+ } else -+ displ = 0; -+ -+ write_lock(&pfr->ring_index_lock); -+ pfr->slots_info->tot_pkts++; -+ write_unlock(&pfr->ring_index_lock); -+ -+ /* BPF Filtering (from af_packet.c) */ -+ if(pfr->bpfFilter != NULL) { -+ unsigned res = 1, len; -+ -+ len = skb->len-skb->data_len; -+ -+ write_lock(&pfr->ring_index_lock); -+ skb->data -= displ; -+ res = sk_run_filter(skb, pfr->bpfFilter->insns, pfr->bpfFilter->len); -+ skb->data += displ; -+ write_unlock(&pfr->ring_index_lock); -+ -+ if(res == 0) { -+ /* Filter failed */ -+ -+#if defined(RING_DEBUG) -+ printk("add_skb_to_ring(skb): Filter failed [len=%d][tot=%llu]" -+ "[insertIdx=%d][pkt_type=%d][cloned=%d]\n", -+ (int)skb->len, pfr->slots_info->tot_pkts, -+ pfr->slots_info->insert_idx, -+ skb->pkt_type, skb->cloned); -+#endif -+ -+ return; -+ } -+ } -+ -+ /* ************************** */ -+ -+ if(pfr->sample_rate > 1) { -+ if(pfr->pktToSample == 0) { -+ write_lock(&pfr->ring_index_lock); -+ pfr->pktToSample = pfr->sample_rate; -+ write_unlock(&pfr->ring_index_lock); -+ } 
else { -+ write_lock(&pfr->ring_index_lock); -+ pfr->pktToSample--; -+ write_unlock(&pfr->ring_index_lock); -+ -+#if defined(RING_DEBUG) -+ printk("add_skb_to_ring(skb): sampled packet [len=%d]" -+ "[tot=%llu][insertIdx=%d][pkt_type=%d][cloned=%d]\n", -+ (int)skb->len, pfr->slots_info->tot_pkts, -+ pfr->slots_info->insert_idx, -+ skb->pkt_type, skb->cloned); -+#endif -+ return; -+ } -+ } -+ -+ /* ************************************* */ -+ -+ if((pfr->reflector_dev != NULL) -+ && (!netif_queue_stopped(pfr->reflector_dev))) { -+ int cpu = smp_processor_id(); -+ -+ /* increase reference counter so that this skb is not freed */ -+ atomic_inc(&skb->users); -+ -+ skb->data -= displ; -+ -+ /* send it */ -+ if (pfr->reflector_dev->xmit_lock_owner != cpu) { -+ spin_lock_bh(&pfr->reflector_dev->xmit_lock); -+ pfr->reflector_dev->xmit_lock_owner = cpu; -+ spin_unlock_bh(&pfr->reflector_dev->xmit_lock); -+ -+ if (pfr->reflector_dev->hard_start_xmit(skb, -+ pfr->reflector_dev) == 0) { -+ spin_lock_bh(&pfr->reflector_dev->xmit_lock); -+ pfr->reflector_dev->xmit_lock_owner = -1; -+ skb->data += displ; -+ spin_unlock_bh(&pfr->reflector_dev->xmit_lock); -+#if defined(RING_DEBUG) -+ printk("++ hard_start_xmit succeeded\n"); -+#endif -+ return; /* OK */ -+ } -+ -+ spin_lock_bh(&pfr->reflector_dev->xmit_lock); -+ pfr->reflector_dev->xmit_lock_owner = -1; -+ spin_unlock_bh(&pfr->reflector_dev->xmit_lock); -+ } -+ -+#if defined(RING_DEBUG) -+ printk("++ hard_start_xmit failed\n"); -+#endif -+ skb->data += displ; -+ return; /* -ENETDOWN */ -+ } -+ -+ /* ************************************* */ -+ -+#if defined(RING_DEBUG) -+ printk("add_skb_to_ring(skb) [len=%d][tot=%llu][insertIdx=%d]" -+ "[pkt_type=%d][cloned=%d]\n", -+ (int)skb->len, pfr->slots_info->tot_pkts, -+ pfr->slots_info->insert_idx, -+ skb->pkt_type, skb->cloned); -+#endif -+ -+ idx = pfr->slots_info->insert_idx; -+ theSlot = get_insert_slot(pfr); -+ -+ if((theSlot != NULL) && (theSlot->slot_state == 0)) { -+ struct pcap_pkthdr *hdr; -+ unsigned int bucketSpace; -+ char *bucket; -+ -+ /* Update Index */ -+ idx++; -+ -+ if(idx == pfr->slots_info->tot_slots) { -+ write_lock(&pfr->ring_index_lock); -+ pfr->slots_info->insert_idx = 0; -+ write_unlock(&pfr->ring_index_lock); -+ } else { -+ write_lock(&pfr->ring_index_lock); -+ pfr->slots_info->insert_idx = idx; -+ write_unlock(&pfr->ring_index_lock); -+ } -+ -+ bucketSpace = pfr->slots_info->slot_len -+#ifdef RING_MAGIC -+ - sizeof(u_char) -+#endif -+ - sizeof(u_char) /* flowSlot.slot_state */ -+ - sizeof(struct pcap_pkthdr) -+ - 1 /* 10 */ /* safe boundary */; -+ -+ bucket = &theSlot->bucket; -+ hdr = (struct pcap_pkthdr*)bucket; -+ -+ if(skb->stamp.tv_sec == 0) do_gettimeofday(&skb->stamp); -+ -+ hdr->ts.tv_sec = skb->stamp.tv_sec, hdr->ts.tv_usec = skb->stamp.tv_usec; -+ hdr->caplen = skb->len+displ; -+ -+ if(hdr->caplen > bucketSpace) -+ hdr->caplen = bucketSpace; -+ -+ hdr->len = skb->len+displ; -+ memcpy(&bucket[sizeof(struct pcap_pkthdr)], -+ skb->data-displ, hdr->caplen); -+ -+#if defined(RING_DEBUG) -+ { -+ static unsigned int lastLoss = 0; -+ -+ if(pfr->slots_info->tot_lost -+ && (lastLoss != pfr->slots_info->tot_lost)) { -+ printk("add_skb_to_ring(%d): [bucketSpace=%d]" -+ "[hdr.caplen=%d][skb->len=%d]" -+ "[pcap_pkthdr=%d][removeIdx=%d]" -+ "[loss=%lu][page=%u][slot=%u]\n", -+ idx-1, bucketSpace, hdr->caplen, skb->len, -+ sizeof(struct pcap_pkthdr), -+ pfr->slots_info->remove_idx, -+ (long unsigned int)pfr->slots_info->tot_lost, -+ pfr->insert_page_id, pfr->insert_slot_id); -+ -+ lastLoss = 
pfr->slots_info->tot_lost; -+ } -+ } -+#endif -+ -+ write_lock(&pfr->ring_index_lock); -+ pfr->slots_info->tot_insert++; -+ theSlot->slot_state = 1; -+ write_unlock(&pfr->ring_index_lock); -+ } else { -+ write_lock(&pfr->ring_index_lock); -+ pfr->slots_info->tot_lost++; -+ write_unlock(&pfr->ring_index_lock); -+ -+#if defined(RING_DEBUG) -+ printk("add_skb_to_ring(skb): packet lost [loss=%lu]" -+ "[removeIdx=%u][insertIdx=%u]\n", -+ (long unsigned int)pfr->slots_info->tot_lost, -+ pfr->slots_info->remove_idx, pfr->slots_info->insert_idx); -+#endif -+ } -+ -+ /* wakeup in case of poll() */ -+ if(waitqueue_active(&pfr->ring_slots_waitqueue)) -+ wake_up_interruptible(&pfr->ring_slots_waitqueue); -+} -+ -+/* ********************************** */ -+ -+static u_int hash_skb(struct ring_cluster *cluster_ptr, -+ struct sk_buff *skb, u_char recv_packet) { -+ u_int idx; -+ int displ; -+ struct iphdr *ip; -+ -+ if(cluster_ptr->hashing_mode == cluster_round_robin) { -+ idx = cluster_ptr->hashing_id++; -+ } else { -+ /* Per-flow clustering */ -+ if(skb->len > sizeof(struct iphdr)+sizeof(struct tcphdr)) { -+ if(recv_packet) -+ displ = 0; -+ else -+ displ = SKB_DISPLACEMENT; -+ -+ /* -+ skb->data+displ -+ -+ Always points to the IP part of the packet -+ */ -+ -+ ip = (struct iphdr*)(skb->data+displ); -+ -+ idx = ip->saddr+ip->daddr+ip->protocol; -+ -+ if(ip->protocol == IPPROTO_TCP) { -+ struct tcphdr *tcp = (struct tcphdr*)(skb->data+displ -+ +sizeof(struct iphdr)); -+ idx += tcp->source+tcp->dest; -+ } else if(ip->protocol == IPPROTO_UDP) { -+ struct udphdr *udp = (struct udphdr*)(skb->data+displ -+ +sizeof(struct iphdr)); -+ idx += udp->source+udp->dest; -+ } -+ } else -+ idx = skb->len; -+ } -+ -+ return(idx % cluster_ptr->num_cluster_elements); -+} -+ -+/* ********************************** */ -+ -+static int skb_ring_handler(struct sk_buff *skb, -+ u_char recv_packet, -+ u_char real_skb /* 1=skb 0=faked skb */) { -+ struct sock *skElement; -+ int rc = 0; -+ struct list_head *ptr; -+ struct ring_cluster *cluster_ptr; -+ -+ if((!skb) /* Invalid skb */ -+ || ((!enable_tx_capture) && (!recv_packet))) { -+ /* -+ An outgoing packet is about to be sent out -+ but we decided not to handle transmitted -+ packets. -+ */ -+ return(0); -+ } -+ -+#if defined(RING_DEBUG) -+ if(0) { -+ printk("skb_ring_handler() [len=%d][dev=%s]\n", skb->len, -+ skb->dev->name == NULL ?
"<NULL>" : skb->dev->name); -+ } -+#endif -+ -+ /* [1] Check unclustered sockets */ -+ for (ptr = ring_table.next; ptr != &ring_table; ptr = ptr->next) { -+ struct ring_opt *pfr; -+ struct ring_element *entry; -+ -+ entry = list_entry(ptr, struct ring_element, list); -+ -+ read_lock(&ring_mgmt_lock); -+ skElement = entry->sk; -+ pfr = ring_sk(skElement); -+ read_unlock(&ring_mgmt_lock); -+ -+ if((pfr != NULL) -+ && (pfr->cluster_id == 0 /* No cluster */) -+ && (pfr->ring_slots != NULL) -+ && (pfr->ring_netdev == skb->dev)) { -+ /* We've found the ring where the packet can be stored */ -+ read_lock(&ring_mgmt_lock); -+ add_skb_to_ring(skb, pfr, recv_packet, real_skb); -+ read_unlock(&ring_mgmt_lock); -+ -+ rc = 1; /* Ring found: we've done our job */ -+ } -+ } -+ -+ /* [2] Check socket clusters */ -+ cluster_ptr = ring_cluster_list; -+ -+ while(cluster_ptr != NULL) { -+ struct ring_opt *pfr; -+ -+ if(cluster_ptr->num_cluster_elements > 0) { -+ u_int skb_hash = hash_skb(cluster_ptr, skb, recv_packet); -+ -+ read_lock(&ring_mgmt_lock); -+ skElement = cluster_ptr->sk[skb_hash]; -+ read_unlock(&ring_mgmt_lock); -+ -+ if(skElement != NULL) { -+ pfr = ring_sk(skElement); -+ -+ if((pfr != NULL) -+ && (pfr->ring_slots != NULL) -+ && (pfr->ring_netdev == skb->dev)) { -+ /* We've found the ring where the packet can be stored */ -+ read_lock(&ring_mgmt_lock); -+ add_skb_to_ring(skb, pfr, recv_packet, real_skb); -+ read_unlock(&ring_mgmt_lock); -+ -+ rc = 1; /* Ring found: we've done our job */ -+ } -+ } -+ } -+ -+ cluster_ptr = cluster_ptr->next; -+ } -+ -+ if(transparent_mode) rc = 0; -+ -+ if((rc != 0) && real_skb) -+ dev_kfree_skb(skb); /* Free the skb */ -+ -+ return(rc); /* 0 = packet not handled */ -+} -+ -+/* ********************************** */ -+ -+struct sk_buff skb; -+ -+static int buffer_ring_handler(struct net_device *dev, -+ char *data, int len) { -+ -+#if defined(RING_DEBUG) -+ printk("buffer_ring_handler: [dev=%s][len=%d]\n", -+ dev->name == NULL ? "<NULL>" : dev->name, len); -+#endif -+ -+ skb.dev = dev, skb.len = len, skb.data = data, -+ skb.data_len = len, skb.stamp.tv_sec = 0; /* Calculate the time */ -+ -+ skb_ring_handler(&skb, 1, 0 /* fake skb */); -+ -+ return(0); -+} -+ -+/* ********************************** */ -+ -+static int ring_create(struct socket *sock, int protocol) { -+ struct sock *sk; -+ struct ring_opt *pfr; -+ int err; -+ -+#if defined(RING_DEBUG) -+ printk("RING: ring_create()\n"); -+#endif -+ -+ /* Are you root, superuser or so ? */ -+ if(!capable(CAP_NET_ADMIN)) -+ return -EPERM; -+ -+ if(sock->type != SOCK_RAW) -+ return -ESOCKTNOSUPPORT; -+ -+ if(protocol != htons(ETH_P_ALL)) -+ return -EPROTONOSUPPORT; -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)) -+ MOD_INC_USE_COUNT; -+#endif -+ -+ err = -ENOMEM; -+ -+ // BD: -- broke this out to keep it more simple and clear as to what the -+ // options are. 
-+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)) -+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)) -+ sk = sk_alloc(PF_RING, GFP_KERNEL, 1, NULL); -+#endif -+#endif -+ -+ // BD: API changed in 2.6.12, ref: -+ // http://svn.clkao.org/svnweb/linux/revision/?rev=28201 -+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,11)) -+ sk = sk_alloc(PF_RING, GFP_ATOMIC, &ring_proto, 1); -+#endif -+ -+ if (sk == NULL) -+ goto out; -+ -+ sock->ops = &ring_ops; -+ sock_init_data(sock, sk); -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)) -+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)) -+ sk_set_owner(sk, THIS_MODULE); -+#endif -+#endif -+ -+ err = -ENOMEM; -+ ring_sk(sk) = ring_sk_datatype(kmalloc(sizeof(*pfr), GFP_KERNEL)); -+ -+ if (!(pfr = ring_sk(sk))) { -+ sk_free(sk); -+ goto out; -+ } -+ memset(pfr, 0, sizeof(*pfr)); -+ init_waitqueue_head(&pfr->ring_slots_waitqueue); -+ pfr->ring_index_lock = RW_LOCK_UNLOCKED; -+ atomic_set(&pfr->num_ring_slots_waiters, 0); -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)) -+ sk->sk_family = PF_RING; -+ sk->sk_destruct = ring_sock_destruct; -+#else -+ sk->family = PF_RING; -+ sk->destruct = ring_sock_destruct; -+ sk->num = protocol; -+#endif -+ -+ ring_insert(sk); -+ -+#if defined(RING_DEBUG) -+ printk("RING: ring_create() - created\n"); -+#endif -+ -+ return(0); -+ out: -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)) -+ MOD_DEC_USE_COUNT; -+#endif -+ return err; -+} -+ -+/* *********************************************** */ -+ -+static int ring_release(struct socket *sock) -+{ -+ struct sock *sk = sock->sk; -+ struct ring_opt *pfr = ring_sk(sk); -+ -+ if(!sk) -+ return 0; -+ -+#if defined(RING_DEBUG) -+ printk("RING: called ring_release\n"); -+#endif -+ -+#if defined(RING_DEBUG) -+ printk("RING: ring_release entered\n"); -+#endif -+ -+ ring_remove(sk); -+ -+ sock_orphan(sk); -+ sock->sk = NULL; -+ -+ /* Free the ring buffer */ -+ if(pfr->ring_memory) { -+ struct page *page, *page_end; -+ -+ page_end = virt_to_page(pfr->ring_memory + (PAGE_SIZE << pfr->order) - 1); -+ for(page = virt_to_page(pfr->ring_memory); page <= page_end; page++) -+ ClearPageReserved(page); -+ -+ free_pages(pfr->ring_memory, pfr->order); -+ } -+ -+ kfree(pfr); -+ ring_sk(sk) = NULL; -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)) -+ skb_queue_purge(&sk->sk_write_queue); -+#endif -+ sock_put(sk); -+ -+#if defined(RING_DEBUG) -+ printk("RING: ring_release leaving\n"); -+#endif -+ -+ return 0; -+} -+ -+/* ********************************** */ -+/* -+ * We create a ring for this socket and bind it to the specified device -+ */ -+static int packet_ring_bind(struct sock *sk, struct net_device *dev) -+{ -+ u_int the_slot_len; -+ u_int32_t tot_mem; -+ struct ring_opt *pfr = ring_sk(sk); -+ struct page *page, *page_end; -+ -+ if(!dev) return(-1); -+ -+#if defined(RING_DEBUG) -+ printk("RING: packet_ring_bind(%s) called\n", dev->name); -+#endif -+ -+ /* ********************************************** -+ -+ ************************************* -+ * * -+ * FlowSlotInfo * -+ * * -+ ************************************* <-+ -+ * FlowSlot * | -+ ************************************* | -+ * FlowSlot * | -+ ************************************* +- num_slots -+ * FlowSlot * | -+ ************************************* | -+ * FlowSlot * | -+ ************************************* <-+ -+ -+ ********************************************** */ -+ -+ the_slot_len = sizeof(u_char) /* flowSlot.slot_state */ -+ + sizeof(u_short) /* flowSlot.slot_len */ -+ + bucket_len /* flowSlot.bucket */; -+ -+ 
tot_mem = sizeof(FlowSlotInfo) + num_slots*the_slot_len; -+ -+ /* -+ Calculate the value of the order parameter used later. -+ See http://www.linuxjournal.com/article.php?sid=1133 -+ */ -+ for(pfr->order = 0;(PAGE_SIZE << pfr->order) < tot_mem; pfr->order++) ; -+ -+ /* -+ We now try to allocate the memory as required. If we fail -+ we try to allocate a smaller amount of memory (hence a -+ smaller ring). -+ */ -+ while((pfr->ring_memory = __get_free_pages(GFP_ATOMIC, pfr->order)) == 0) -+ if(pfr->order-- == 0) -+ break; -+ -+ if(pfr->order == 0) { -+#if defined(RING_DEBUG) -+ printk("ERROR: not enough memory\n"); -+#endif -+ return(-1); -+ } else { -+#if defined(RING_DEBUG) -+ printk("RING: successfully allocated %lu KB [tot_mem=%d][order=%ld]\n", -+ PAGE_SIZE >> (10 - pfr->order), tot_mem, pfr->order); -+#endif -+ } -+ -+ tot_mem = PAGE_SIZE << pfr->order; -+ memset((char*)pfr->ring_memory, 0, tot_mem); -+ -+ /* Now we need to reserve the pages */ -+ page_end = virt_to_page(pfr->ring_memory + (PAGE_SIZE << pfr->order) - 1); -+ for(page = virt_to_page(pfr->ring_memory); page <= page_end; page++) -+ SetPageReserved(page); -+ -+ pfr->slots_info = (FlowSlotInfo*)pfr->ring_memory; -+ pfr->ring_slots = (char*)(pfr->ring_memory+sizeof(FlowSlotInfo)); -+ -+ pfr->slots_info->version = RING_FLOWSLOT_VERSION; -+ pfr->slots_info->slot_len = the_slot_len; -+ pfr->slots_info->tot_slots = (tot_mem-sizeof(FlowSlotInfo))/the_slot_len; -+ pfr->slots_info->tot_mem = tot_mem; -+ pfr->slots_info->sample_rate = sample_rate; -+ -+#if defined(RING_DEBUG) -+ printk("RING: allocated %d slots [slot_len=%d][tot_mem=%u]\n", -+ pfr->slots_info->tot_slots, pfr->slots_info->slot_len, -+ pfr->slots_info->tot_mem); -+#endif -+ -+#ifdef RING_MAGIC -+ { -+ int i; -+ -+ for(i=0; i<pfr->slots_info->tot_slots; i++) { -+ unsigned long idx = i*pfr->slots_info->slot_len; -+ FlowSlot *slot = (FlowSlot*)&pfr->ring_slots[idx]; -+ slot->magic = RING_MAGIC_VALUE; slot->slot_state = 0; -+ } -+ } -+#endif -+ -+ pfr->insert_page_id = 1, pfr->insert_slot_id = 0; -+ -+ /* -+ IMPORTANT -+ Leave this assignment last: once -+ ring_netdev != NULL the socket is ready to be used.
-+ */ -+ pfr->ring_netdev = dev; -+ -+ return(0); -+} -+ -+/* ************************************* */ -+ -+/* Bind to a device */ -+static int ring_bind(struct socket *sock, -+ struct sockaddr *sa, int addr_len) -+{ -+ struct sock *sk=sock->sk; -+ struct net_device *dev = NULL; -+ -+#if defined(RING_DEBUG) -+ printk("RING: ring_bind() called\n"); -+#endif -+ -+ /* -+ * Check legality -+ */ -+ if (addr_len != sizeof(struct sockaddr)) -+ return -EINVAL; -+ if (sa->sa_family != PF_RING) -+ return -EINVAL; -+ -+ /* Safety check: add trailing zero if missing */ -+ sa->sa_data[sizeof(sa->sa_data)-1] = '\0'; -+ -+#if defined(RING_DEBUG) -+ printk("RING: searching device %s\n", sa->sa_data); -+#endif -+ -+ if((dev = __dev_get_by_name(sa->sa_data)) == NULL) { -+#if defined(RING_DEBUG) -+ printk("RING: search failed\n"); -+#endif -+ return(-EINVAL); -+ } else -+ return(packet_ring_bind(sk, dev)); -+} -+ -+/* ************************************* */ -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)) -+ -+volatile void* virt_to_kseg(volatile void* address) { -+ pte_t *pte; -+ pud_t *pud; -+ unsigned long addr = (unsigned long)address; -+ -+ pud = pud_offset(pgd_offset_k((unsigned long) address), -+ (unsigned long) address); -+ -+ /* -+ High-memory support courtesy of -+ Brad Doctor <bdoctor@ps-ax.com> -+ */ -+#if defined(CONFIG_X86_PAE) && (!defined(CONFIG_NOHIGHMEM)) -+ pte = pte_offset_map(pmd_offset(pud, addr), addr); -+#else -+ pte = pmd_offset_map(pud, addr); -+#endif -+ -+ return((volatile void*)pte_page(*pte)); -+} -+ -+#else /* 2.4 */ -+ -+/* http://www.scs.ch/~frey/linux/memorymap.html */ -+volatile void *virt_to_kseg(volatile void *address) -+{ -+ pgd_t *pgd; pmd_t *pmd; pte_t *ptep, pte; -+ unsigned long va, ret = 0UL; -+ -+ va=VMALLOC_VMADDR((unsigned long)address); -+ -+ /* get the page directory. Use the kernel memory map. 
*/ -+ pgd = pgd_offset_k(va); -+ -+ /* check whether we found an entry */ -+ if (!pgd_none(*pgd)) -+ { -+ /* get the page middle directory */ -+ pmd = pmd_offset(pgd, va); -+ /* check whether we found an entry */ -+ if (!pmd_none(*pmd)) -+ { -+ /* get a pointer to the page table entry */ -+ ptep = pte_offset(pmd, va); -+ pte = *ptep; -+ /* check for a valid page */ -+ if (pte_present(pte)) -+ { -+ /* get the address the page is referring to */ -+ ret = (unsigned long)page_address(pte_page(pte)); -+ /* add the offset within the page to the page address */ -+ ret |= (va & (PAGE_SIZE -1)); -+ } -+ } -+ } -+ return((volatile void *)ret); -+} -+#endif -+ -+/* ************************************* */ -+ -+static int ring_mmap(struct file *file, -+ struct socket *sock, -+ struct vm_area_struct *vma) -+{ -+ struct sock *sk = sock->sk; -+ struct ring_opt *pfr = ring_sk(sk); -+ unsigned long size, start; -+ u_int pagesToMap; -+ char *ptr; -+ -+#if defined(RING_DEBUG) -+ printk("RING: ring_mmap() called\n"); -+#endif -+ -+ if(pfr->ring_memory == 0) { -+#if defined(RING_DEBUG) -+ printk("RING: ring_mmap() failed: mapping area to an unbound socket\n"); -+#endif -+ return -EINVAL; -+ } -+ -+ size = (unsigned long)(vma->vm_end-vma->vm_start); -+ -+ if(size % PAGE_SIZE) { -+#if defined(RING_DEBUG) -+ printk("RING: ring_mmap() failed: len is not multiple of PAGE_SIZE\n"); -+#endif -+ return(-EINVAL); -+ } -+ -+ /* if userspace tries to mmap beyond end of our buffer, fail */ -+ if(size > pfr->slots_info->tot_mem) { -+#if defined(RING_DEBUG) -+ printk("proc_mmap() failed: area too large [%ld > %d]\n", size, pfr->slots_info->tot_mem); -+#endif -+ return(-EINVAL); -+ } -+ -+ pagesToMap = size/PAGE_SIZE; -+ -+#if defined(RING_DEBUG) -+ printk("RING: ring_mmap() called. %d pages to map\n", pagesToMap); -+#endif -+ -+#if defined(RING_DEBUG) -+ printk("RING: mmap [slot_len=%d][tot_slots=%d] for ring on device %s\n", -+ pfr->slots_info->slot_len, pfr->slots_info->tot_slots, -+ pfr->ring_netdev->name); -+#endif -+ -+ /* we do not want to have this area swapped out, lock it */ -+ vma->vm_flags |= VM_LOCKED; -+ start = vma->vm_start; -+ -+ /* Ring slots start from page 1 (page 0 is reserved for FlowSlotInfo) */ -+ ptr = (char*)(start+PAGE_SIZE); -+ -+ if(remap_page_range( -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)) -+ vma, -+#endif -+ start, -+ __pa(pfr->ring_memory), -+ PAGE_SIZE*pagesToMap, vma->vm_page_prot)) { -+#if defined(RING_DEBUG) -+ printk("remap_page_range() failed\n"); -+#endif -+ return(-EAGAIN); -+ } -+ -+#if defined(RING_DEBUG) -+ printk("proc_mmap(pagesToMap=%d): success.\n", pagesToMap); -+#endif -+ -+ return 0; -+} -+ -+/* ************************************* */ -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)) -+static int ring_recvmsg(struct kiocb *iocb, struct socket *sock, -+ struct msghdr *msg, size_t len, int flags) -+#else -+ static int ring_recvmsg(struct socket *sock, struct msghdr *msg, int len, -+ int flags, struct scm_cookie *scm) -+#endif -+{ -+ FlowSlot* slot; -+ struct ring_opt *pfr = ring_sk(sock->sk); -+ u_int32_t queued_pkts, num_loops = 0; -+ -+#if defined(RING_DEBUG) -+ printk("ring_recvmsg called\n"); -+#endif -+ -+ slot = get_remove_slot(pfr); -+ -+ while((queued_pkts = num_queued_pkts(pfr)) < MIN_QUEUED_PKTS) { -+ wait_event_interruptible(pfr->ring_slots_waitqueue, 1); -+ -+#if defined(RING_DEBUG) -+ printk("-> ring_recvmsg returning %d [queued_pkts=%d][num_loops=%d]\n", -+ slot->slot_state, queued_pkts, num_loops); -+#endif -+ -+ if(queued_pkts > 0) { -+
if(num_loops++ > MAX_QUEUE_LOOPS) -+ break; -+ } -+ } -+ -+#if defined(RING_DEBUG) -+ if(slot != NULL) -+ printk("ring_recvmsg is returning [queued_pkts=%d][num_loops=%d]\n", -+ queued_pkts, num_loops); -+#endif -+ -+ return(queued_pkts); -+} -+ -+/* ************************************* */ -+ -+unsigned int ring_poll(struct file * file, -+ struct socket *sock, poll_table *wait) -+{ -+ FlowSlot* slot; -+ struct ring_opt *pfr = ring_sk(sock->sk); -+ -+#if defined(RING_DEBUG) -+ printk("poll called\n"); -+#endif -+ -+ slot = get_remove_slot(pfr); -+ -+ if((slot != NULL) && (slot->slot_state == 0)) -+ poll_wait(file, &pfr->ring_slots_waitqueue, wait); -+ -+#if defined(RING_DEBUG) -+ printk("poll returning %d\n", slot->slot_state); -+#endif -+ -+ if((slot != NULL) && (slot->slot_state == 1)) -+ return(POLLIN | POLLRDNORM); -+ else -+ return(0); -+} -+ -+/* ************************************* */ -+ -+int add_to_cluster_list(struct ring_cluster *el, -+ struct sock *sock) { -+ -+ if(el->num_cluster_elements == CLUSTER_LEN) -+ return(-1); /* Cluster full */ -+ -+ ring_sk_datatype(ring_sk(sock))->cluster_id = el->cluster_id; -+ el->sk[el->num_cluster_elements] = sock; -+ el->num_cluster_elements++; -+ return(0); -+} -+ -+/* ************************************* */ -+ -+int remove_from_cluster_list(struct ring_cluster *el, -+ struct sock *sock) { -+ int i, j; -+ -+ for(i=0; i<CLUSTER_LEN; i++) -+ if(el->sk[i] == sock) { -+ el->num_cluster_elements--; -+ -+ if(el->num_cluster_elements > 0) { -+ /* The cluster contains other elements */ -+ for(j=i; j<CLUSTER_LEN-1; j++) -+ el->sk[j] = el->sk[j+1]; -+ -+ el->sk[CLUSTER_LEN-1] = NULL; -+ } else { -+ /* Empty cluster */ -+ memset(el->sk, 0, sizeof(el->sk)); -+ } -+ -+ return(0); -+ } -+ -+ return(-1); /* Not found */ -+} -+ -+/* ************************************* */ -+ -+static int remove_from_cluster(struct sock *sock, -+ struct ring_opt *pfr) -+{ -+ struct ring_cluster *el; -+ -+#if defined(RING_DEBUG) -+ printk("--> remove_from_cluster(%d)\n", pfr->cluster_id); -+#endif -+ -+ if(pfr->cluster_id == 0 /* 0 = No Cluster */) -+ return(0); /* Nothing to do */ -+ -+ el = ring_cluster_list; -+ -+ while(el != NULL) { -+ if(el->cluster_id == pfr->cluster_id) { -+ return(remove_from_cluster_list(el, sock)); -+ } else -+ el = el->next; -+ } -+ -+ return(-EINVAL); /* Not found */ -+} -+ -+/* ************************************* */ -+ -+static int add_to_cluster(struct sock *sock, -+ struct ring_opt *pfr, -+ u_short cluster_id) -+{ -+ struct ring_cluster *el; -+ -+#if defined(RING_DEBUG) -+ printk("--> add_to_cluster(%d)\n", cluster_id); -+#endif -+ -+ if(cluster_id == 0 /* 0 = No Cluster */) return(-EINVAL); -+ -+ if(pfr->cluster_id != 0) -+ remove_from_cluster(sock, pfr); -+ -+ el = ring_cluster_list; -+ -+ while(el != NULL) { -+ if(el->cluster_id == cluster_id) { -+ return(add_to_cluster_list(el, sock)); -+ } else -+ el = el->next; -+ } -+ -+ /* There's no existing cluster.
We need to create one */ -+ if((el = kmalloc(sizeof(struct ring_cluster), GFP_KERNEL)) == NULL) -+ return(-ENOMEM); -+ -+ el->cluster_id = cluster_id; -+ el->num_cluster_elements = 1; -+ el->hashing_mode = cluster_per_flow; /* Default */ -+ el->hashing_id = 0; -+ -+ memset(el->sk, 0, sizeof(el->sk)); -+ el->sk[0] = sock; -+ el->next = ring_cluster_list; -+ ring_cluster_list = el; -+ pfr->cluster_id = cluster_id; -+ -+ return(0); /* 0 = OK */ -+} -+ -+/* ************************************* */ -+ -+/* Code taken/inspired from core/sock.c */ -+static int ring_setsockopt(struct socket *sock, -+ int level, int optname, -+ char *optval, int optlen) -+{ -+ struct ring_opt *pfr = ring_sk(sock->sk); -+ int val, found, ret = 0; -+ u_int cluster_id; -+ char devName[8]; -+ -+ if((optlen<sizeof(int)) || (pfr == NULL)) -+ return(-EINVAL); -+ -+ if (get_user(val, (int *)optval)) -+ return -EFAULT; -+ -+ found = 1; -+ -+ switch(optname) -+ { -+ case SO_ATTACH_FILTER: -+ ret = -EINVAL; -+ if (optlen == sizeof(struct sock_fprog)) { -+ unsigned int fsize; -+ struct sock_fprog fprog; -+ struct sk_filter *filter; -+ -+ ret = -EFAULT; -+ -+ /* -+ NOTE -+ -+ Do not call copy_from_user within a held -+ spinlock (e.g. ring_mgmt_lock) as this caused -+ problems when certain debugging was enabled under -+ 2.6.5 -- including hard lockups of the machine. -+ */ -+ if(copy_from_user(&fprog, optval, sizeof(fprog))) -+ break; -+ -+ fsize = sizeof(struct sock_filter) * fprog.len; -+ filter = kmalloc(fsize, GFP_KERNEL); -+ -+ if(filter == NULL) { -+ ret = -ENOMEM; -+ break; -+ } -+ -+ if(copy_from_user(filter->insns, fprog.filter, fsize)) -+ break; -+ -+ filter->len = fprog.len; -+ -+ if(sk_chk_filter(filter->insns, filter->len) != 0) { -+ /* Bad filter specified */ -+ kfree(filter); -+ pfr->bpfFilter = NULL; -+ break; -+ } -+ -+ /* get the lock, set the filter, release the lock */ -+ write_lock(&ring_mgmt_lock); -+ pfr->bpfFilter = filter; -+ write_unlock(&ring_mgmt_lock); -+ } -+ ret = 0; -+ break; -+ -+ case SO_DETACH_FILTER: -+ write_lock(&ring_mgmt_lock); -+ found = 1; -+ if(pfr->bpfFilter != NULL) { -+ kfree(pfr->bpfFilter); -+ pfr->bpfFilter = NULL; -+ write_unlock(&ring_mgmt_lock); -+ break; -+ } -+ ret = -ENONET; -+ break; -+ -+ case SO_ADD_TO_CLUSTER: -+ if (optlen!=sizeof(val)) -+ return -EINVAL; -+ -+ if (copy_from_user(&cluster_id, optval, sizeof(cluster_id))) -+ return -EFAULT; -+ -+ write_lock(&ring_mgmt_lock); -+ ret = add_to_cluster(sock->sk, pfr, cluster_id); -+ write_unlock(&ring_mgmt_lock); -+ break; -+ -+ case SO_REMOVE_FROM_CLUSTER: -+ write_lock(&ring_mgmt_lock); -+ ret = remove_from_cluster(sock->sk, pfr); -+ write_unlock(&ring_mgmt_lock); -+ break; -+ -+ case SO_SET_REFLECTOR: -+ if(optlen >= (sizeof(devName)-1)) -+ return -EINVAL; -+ -+ if(optlen > 0) { -+ if(copy_from_user(devName, optval, optlen)) -+ return -EFAULT; -+ } -+ -+ devName[optlen] = '\0'; -+ -+#if defined(RING_DEBUG) -+ printk("+++ SO_SET_REFLECTOR(%s)\n", devName); -+#endif -+ -+ write_lock(&ring_mgmt_lock); -+ pfr->reflector_dev = dev_get_by_name(devName); -+ write_unlock(&ring_mgmt_lock); -+ -+#if defined(RING_DEBUG) -+ if(pfr->reflector_dev != NULL) -+ printk("SO_SET_REFLECTOR(%s): succeeded\n", devName); -+ else -+ printk("SO_SET_REFLECTOR(%s): device unknown\n", devName); -+#endif -+ break; -+ -+ default: -+ found = 0; -+ break; -+ } -+ -+ if(found) -+ return(ret); -+ else -+ return(sock_setsockopt(sock, level, optname, optval, optlen)); -+} -+ -+/* ************************************* */ -+ -+static int ring_ioctl(struct
socket *sock, -+ unsigned int cmd, unsigned long arg) -+{ -+ switch(cmd) -+ { -+ case SIOCGIFFLAGS: -+ case SIOCSIFFLAGS: -+ case SIOCGIFCONF: -+ case SIOCGIFMETRIC: -+ case SIOCSIFMETRIC: -+ case SIOCGIFMEM: -+ case SIOCSIFMEM: -+ case SIOCGIFMTU: -+ case SIOCSIFMTU: -+ case SIOCSIFLINK: -+ case SIOCGIFHWADDR: -+ case SIOCSIFHWADDR: -+ case SIOCSIFMAP: -+ case SIOCGIFMAP: -+ case SIOCSIFSLAVE: -+ case SIOCGIFSLAVE: -+ case SIOCGIFINDEX: -+ case SIOCGIFNAME: -+ case SIOCGIFCOUNT: -+ case SIOCSIFHWBROADCAST: -+ return(dev_ioctl(cmd,(void *) arg)); -+ -+ default: -+ return -EOPNOTSUPP; -+ } -+ -+ return 0; -+} -+ -+/* ************************************* */ -+ -+static struct proto_ops ring_ops = { -+ .family = PF_RING, -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)) -+ .owner = THIS_MODULE, -+#endif -+ -+ /* Operations that make no sense on ring sockets. */ -+ .connect = sock_no_connect, -+ .socketpair = sock_no_socketpair, -+ .accept = sock_no_accept, -+ .getname = sock_no_getname, -+ .listen = sock_no_listen, -+ .shutdown = sock_no_shutdown, -+ .sendpage = sock_no_sendpage, -+ .sendmsg = sock_no_sendmsg, -+ .getsockopt = sock_no_getsockopt, -+ -+ /* Now the operations that really occur. */ -+ .release = ring_release, -+ .bind = ring_bind, -+ .mmap = ring_mmap, -+ .poll = ring_poll, -+ .setsockopt = ring_setsockopt, -+ .ioctl = ring_ioctl, -+ .recvmsg = ring_recvmsg, -+}; -+ -+/* ************************************ */ -+ -+static struct net_proto_family ring_family_ops = { -+ .family = PF_RING, -+ .create = ring_create, -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)) -+ .owner = THIS_MODULE, -+#endif -+}; -+ -+// BD: API changed in 2.6.12, ref: -+// http://svn.clkao.org/svnweb/linux/revision/?rev=28201 -+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,11)) -+static struct proto ring_proto = { -+ .name = "PF_RING", -+ .owner = THIS_MODULE, -+ .obj_size = sizeof(struct sock), -+}; -+#endif -+ -+/* ************************************ */ -+ -+static void __exit ring_exit(void) -+{ -+ struct list_head *ptr; -+ struct ring_element *entry; -+ -+ for(ptr = ring_table.next; ptr != &ring_table; ptr = ptr->next) { -+ entry = list_entry(ptr, struct ring_element, list); -+ kfree(entry); -+ } -+ -+ while(ring_cluster_list != NULL) { -+ struct ring_cluster *next = ring_cluster_list->next; -+ kfree(ring_cluster_list); -+ ring_cluster_list = next; -+ } -+ -+ set_skb_ring_handler(NULL); -+ set_buffer_ring_handler(NULL); -+ sock_unregister(PF_RING); -+ -+ printk("PF_RING shut down.\n"); -+} -+ -+/* ************************************ */ -+ -+static int __init ring_init(void) -+{ -+ printk("Welcome to PF_RING %s\n(C) 2004 L.Deri <deri@ntop.org>\n", -+ RING_VERSION); -+ -+ INIT_LIST_HEAD(&ring_table); -+ ring_cluster_list = NULL; -+ -+ sock_register(&ring_family_ops); -+ -+ set_skb_ring_handler(skb_ring_handler); -+ set_buffer_ring_handler(buffer_ring_handler); -+ -+ if(get_buffer_ring_handler() != buffer_ring_handler) { -+ printk("PF_RING: set_buffer_ring_handler FAILED\n"); -+ -+ set_skb_ring_handler(NULL); -+ set_buffer_ring_handler(NULL); -+ sock_unregister(PF_RING); -+ return -1; -+ } else { -+ printk("PF_RING: bucket length %d bytes\n", bucket_len); -+ printk("PF_RING: ring slots %d\n", num_slots); -+ printk("PF_RING: sample rate %d [1=no sampling]\n", sample_rate); -+ printk("PF_RING: capture TX %s\n", -+ enable_tx_capture ? "Yes [RX+TX]" : "No [RX only]"); -+ printk("PF_RING: transparent mode %s\n", -+ transparent_mode ? 
"Yes" : "No"); -+ -+ printk("PF_RING initialized correctly.\n"); -+ return 0; -+ } -+} -+ -+module_init(ring_init); -+module_exit(ring_exit); -+MODULE_LICENSE("GPL"); -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)) -+MODULE_ALIAS_NETPROTO(PF_RING); -+#endif |