--- /dev/null
+++ b/drivers/net/ethernet/cavium/cns3xxx_eth.c
@@ -0,0 +1,1297 @@
+/*
+ * Cavium CNS3xxx Gigabit driver for Linux
+ *
+ * Copyright 2011 Gateworks Corporation
+ *               Chris Lang <clang@gateworks.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <mach/irqs.h>
+#include <mach/platform.h>
+
+#define DRV_NAME "cns3xxx_eth"
+
+#define RX_DESCS 512
+#define TX_DESCS 512
+#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
+
+#define RX_POOL_ALLOC_SIZE (sizeof(struct rx_desc) * RX_DESCS)
+#define TX_POOL_ALLOC_SIZE (sizeof(struct tx_desc) * TX_DESCS)
+#define REGS_SIZE 336
+#define MAX_MRU        9500
+
+#define NAPI_WEIGHT 64
+
+/* MDIO Defines */
+#define MDIO_CMD_COMPLETE 0x00008000
+#define MDIO_WRITE_COMMAND 0x00002000
+#define MDIO_READ_COMMAND 0x00004000
+#define MDIO_REG_OFFSET 8
+#define MDIO_VALUE_OFFSET 16
+
+/* Descriptor Defines */
+#define END_OF_RING 0x40000000
+#define FIRST_SEGMENT 0x20000000
+#define LAST_SEGMENT 0x10000000
+#define FORCE_ROUTE 0x04000000
+#define IP_CHECKSUM 0x00040000
+#define UDP_CHECKSUM 0x00020000
+#define TCP_CHECKSUM 0x00010000
+
+/* Port Config Defines */
+#define PORT_DISABLE 0x00040000
+#define PROMISC_OFFSET 29
+
+/* Global Config Defines */
+#define UNKNOWN_VLAN_TO_CPU 0x02000000
+#define ACCEPT_CRC_PACKET 0x00200000
+#define CRC_STRIPPING 0x00100000
+
+/* VLAN Config Defines */
+#define NIC_MODE 0x00008000
+#define VLAN_UNAWARE 0x00000001
+
+/* DMA AUTO Poll Defines */
+#define TS_POLL_EN 0x00000020
+#define TS_SUSPEND 0x00000010
+#define FS_POLL_EN 0x00000002
+#define FS_SUSPEND 0x00000001
+
+/* DMA Ring Control Defines */
+#define QUEUE_THRESHOLD 0x000000f0
+#define CLR_FS_STATE 0x80000000
+
+struct tx_desc
+{
+       u32 sdp; /* segment data pointer */
+
+       union {
+               struct {
+                       u32 sdl:16; /* segment data length */
+                       u32 tco:1;
+                       u32 uco:1;
+                       u32 ico:1;
+                       u32 rsv_1:3; /* reserve */
+                       u32 pri:3;
+                       u32 fp:1; /* force priority */
+                       u32 fr:1;
+                       u32 interrupt:1;
+                       u32 lsd:1;
+                       u32 fsd:1;
+                       u32 eor:1;
+                       u32 cown:1;
+               };
+               u32 config0;
+       };
+
+       union {
+               struct {
+                       u32 ctv:1;
+                       u32 stv:1;
+                       u32 sid:4;
+                       u32 inss:1;
+                       u32 dels:1;
+                       u32 rsv_2:9;
+                       u32 pmap:5;
+                       u32 mark:3;
+                       u32 ewan:1;
+                       u32 fewan:1;
+                       u32 rsv_3:5;
+               };
+               u32 config1;
+       };
+
+       union {
+               struct {
+                       u32 c_vid:12;
+                       u32 c_cfs:1;
+                       u32 c_pri:3;
+                       u32 s_vid:12;
+                       u32 s_dei:1;
+                       u32 s_pri:3;
+               };
+               u32 config2;
+       };
+
+       u8 alignment[16]; /* for 32 byte alignment */
+};
+
+struct rx_desc
+{
+       u32 sdp; /* segment data pointer */
+
+       union {
+               struct {
+                       u32 sdl:16; /* segment data length */
+                       u32 l4f:1;
+                       u32 ipf:1;
+                       u32 prot:4;
+                       u32 hr:6;
+                       u32 lsd:1;
+                       u32 fsd:1;
+                       u32 eor:1;
+                       u32 cown:1;
+               };
+               u32 config0;
+       };
+
+       union {
+               struct {
+                       u32 ctv:1;
+                       u32 stv:1;
+                       u32 unv:1;
+                       u32 iwan:1;
+                       u32 exdv:1;
+                       u32 e_wan:1;
+                       u32 rsv_1:2;
+                       u32 sp:3;
+                       u32 crc_err:1;
+                       u32 un_eth:1;
+                       u32 tc:2;
+                       u32 rsv_2:1;
+                       u32 ip_offset:5;
+                       u32 rsv_3:11;
+               };
+               u32 config1;
+       };
+
+       union {
+               struct {
+                       u32 c_vid:12;
+                       u32 c_cfs:1;
+                       u32 c_pri:3;
+                       u32 s_vid:12;
+                       u32 s_dei:1;
+                       u32 s_pri:3;
+               };
+               u32 config2;
+       };
+
+       u8 alignment[16]; /* for 32 byte alignment */
+};
+
+struct switch_regs {
+       u32 phy_control;
+       u32 phy_auto_addr;
+       u32 mac_glob_cfg;
+       u32 mac_cfg[4];
+       u32 mac_pri_ctrl[5], __res;
+       u32 etype[2];
+       u32 udp_range[4];
+       u32 prio_etype_udp;
+       u32 prio_ipdscp[8];
+       u32 tc_ctrl;
+       u32 rate_ctrl;
+       u32 fc_glob_thrs;
+       u32 fc_port_thrs;
+       u32 mc_fc_glob_thrs;
+       u32 dc_glob_thrs;
+       u32 arl_vlan_cmd;
+       u32 arl_ctrl[3];
+       u32 vlan_cfg;
+       u32 pvid[2];
+       u32 vlan_ctrl[3];
+       u32 session_id[8];
+       u32 intr_stat;
+       u32 intr_mask;
+       u32 sram_test;
+       u32 mem_queue;
+       u32 farl_ctrl;
+       u32 fc_input_thrs, __res1[2];
+       u32 clk_skew_ctrl;
+       u32 mac_glob_cfg_ext, __res2[2];
+       u32 dma_ring_ctrl;
+       u32 dma_auto_poll_cfg;
+       u32 delay_intr_cfg, __res3;
+       u32 ts_dma_ctrl0;
+       u32 ts_desc_ptr0;
+       u32 ts_desc_base_addr0, __res4;
+       u32 fs_dma_ctrl0;
+       u32 fs_desc_ptr0;
+       u32 fs_desc_base_addr0, __res5;
+       u32 ts_dma_ctrl1;
+       u32 ts_desc_ptr1;
+       u32 ts_desc_base_addr1, __res6;
+       u32 fs_dma_ctrl1;
+       u32 fs_desc_ptr1;
+       u32 fs_desc_base_addr1;
+};
+
+struct _tx_ring {
+       struct tx_desc *desc;
+       dma_addr_t phys_addr;
+       struct tx_desc *cur_addr;
+       struct sk_buff *buff_tab[TX_DESCS];
+       u32 free_index;
+       u32 count_index;
+       u32 cur_index;
+       int num_used;
+       int num_count;
+};
+
+struct _rx_ring {
+       struct rx_desc *desc;
+       dma_addr_t phys_addr;
+       struct rx_desc *cur_addr;
+       struct sk_buff *buff_tab[RX_DESCS];
+       u32 cur_index;
+       u32 alloc_index;
+       int alloc_count;
+};
+
+struct sw {
+       struct resource *mem_res;
+       struct switch_regs __iomem *regs;
+       struct napi_struct napi;
+       struct cns3xxx_plat_info *plat;
+       struct _tx_ring *tx_ring;
+       struct _rx_ring *rx_ring;
+       u32 mtu;
+};
+
+struct port {
+       struct net_device *netdev;
+       struct phy_device *phydev;
+       struct sw *sw;
+       int id;                 /* logical port ID */
+       int speed, duplex;
+       u32 mtu;
+};
+
+static spinlock_t mdio_lock;
+static spinlock_t tx_lock;
+static spinlock_t stat_lock;
+static struct switch_regs __iomem *mdio_regs; /* mdio command and status only */
+struct mii_bus *mdio_bus;
+static int ports_open;
+static struct port *switch_port_tab[3];
+static struct dma_pool *rx_dma_pool;
+static struct dma_pool *tx_dma_pool;
+struct net_device *napi_dev;
+
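+/*
+ * Issue a single MDIO read or write through the switch's PHY control
+ * register, busy-waiting (up to ~5 ms) for the command-complete flag.
+ */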
+static int cns3xxx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
+                          int write, u16 cmd)
+{
+       int cycles = 0;
+       u32 temp = 0;
+
+       temp = __raw_readl(&mdio_regs->phy_control);
+       temp |= MDIO_CMD_COMPLETE;
+       __raw_writel(temp, &mdio_regs->phy_control);
+       udelay(10);
+
+       if (write) {
+               temp = (cmd << MDIO_VALUE_OFFSET);
+               temp |= MDIO_WRITE_COMMAND;
+       } else {
+               temp = MDIO_READ_COMMAND;
+       }
+       temp |= ((location & 0x1f) << MDIO_REG_OFFSET);
+       temp |= (phy_id & 0x1f);
+
+       __raw_writel(temp, &mdio_regs->phy_control);
+
+       while (((__raw_readl(&mdio_regs->phy_control) & MDIO_CMD_COMPLETE) == 0)
+                       && cycles < 5000) {
+               udelay(1);
+               cycles++;
+       }
+
+       if (cycles == 5000) {
+               printk(KERN_ERR "%s #%i: MII transaction failed\n", bus->name,
+                      phy_id);
+               return -1;
+       }
+
+       temp = __raw_readl(&mdio_regs->phy_control);
+       temp |= MDIO_CMD_COMPLETE;
+       __raw_writel(temp, &mdio_regs->phy_control);
+
+       if (write)
+               return 0;
+
+       return ((temp >> MDIO_VALUE_OFFSET) & 0xFFFF);
+}
+
+static int cns3xxx_mdio_read(struct mii_bus *bus, int phy_id, int location)
+{
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&mdio_lock, flags);
+       ret = cns3xxx_mdio_cmd(bus, phy_id, location, 0, 0);
+       spin_unlock_irqrestore(&mdio_lock, flags);
+       return ret;
+}
+
+static int cns3xxx_mdio_write(struct mii_bus *bus, int phy_id, int location,
+                            u16 val)
+{
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&mdio_lock, flags);
+       ret = cns3xxx_mdio_cmd(bus, phy_id, location, 1, val);
+       spin_unlock_irqrestore(&mdio_lock, flags);
+       return ret;
+}
+
+static int cns3xxx_mdio_register(void)
+{
+       int err;
+
+       if (!(mdio_bus = mdiobus_alloc()))
+               return -ENOMEM;
+
+       mdio_regs = (struct switch_regs __iomem *)CNS3XXX_SWITCH_BASE_VIRT;
+
+       spin_lock_init(&mdio_lock);
+       mdio_bus->name = "CNS3xxx MII Bus";
+       mdio_bus->read = &cns3xxx_mdio_read;
+       mdio_bus->write = &cns3xxx_mdio_write;
+       strcpy(mdio_bus->id, "0");
+
+       if ((err = mdiobus_register(mdio_bus)))
+               mdiobus_free(mdio_bus);
+       return err;
+}
+
+static void cns3xxx_mdio_remove(void)
+{
+       mdiobus_unregister(mdio_bus);
+       mdiobus_free(mdio_bus);
+}
+
+static void cns3xxx_adjust_link(struct net_device *dev)
+{
+       struct port *port = netdev_priv(dev);
+       struct phy_device *phydev = port->phydev;
+
+       if (!phydev->link) {
+               if (port->speed) {
+                       port->speed = 0;
+                       printk(KERN_INFO "%s: link down\n", dev->name);
+               }
+               return;
+       }
+
+       if (port->speed == phydev->speed && port->duplex == phydev->duplex)
+               return;
+
+       port->speed = phydev->speed;
+       port->duplex = phydev->duplex;
+
+       printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n",
+              dev->name, port->speed, port->duplex ? "full" : "half");
+}
+
+irqreturn_t eth_rx_irq(int irq, void *pdev)
+{
+       struct net_device *dev = pdev;
+       struct sw *sw = netdev_priv(dev);
+       if (likely(napi_schedule_prep(&sw->napi))) {
+               disable_irq_nosync(IRQ_CNS3XXX_SW_R0RXC);
+               __napi_schedule(&sw->napi);
+       }
+       return (IRQ_HANDLED);
+}
+
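+/*
+ * Refill RX descriptors consumed by eth_poll(): allocate fresh skbs,
+ * map them for DMA and hand the descriptors back to the hardware.
+ */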
+static void cns3xxx_alloc_rx_buf(struct sw *sw, int received)
+{
+       struct _rx_ring *rx_ring = sw->rx_ring;
+       unsigned int i = rx_ring->alloc_index;
+       struct rx_desc *desc;
+       struct sk_buff *skb;
+       u32 mtu = sw->mtu;
+
+       rx_ring->alloc_count += received;
+
+       for (received = rx_ring->alloc_count; received > 0; received--) {
+               desc = &(rx_ring)->desc[i];
+
+               if ((skb = dev_alloc_skb(mtu))) {
+                       if (SKB_DMA_REALIGN)
+                               skb_reserve(skb, SKB_DMA_REALIGN);
+                       skb_reserve(skb, NET_IP_ALIGN);
+                       desc->sdp = dma_map_single(NULL, skb->data,
+                                   mtu, DMA_FROM_DEVICE);
+                       if (dma_mapping_error(NULL, desc->sdp)) {
+                               dev_kfree_skb(skb);
+                               /* Failed to map, better luck next time */
+                               goto out;
+                       }
+               } else {
+                       /* Failed to allocate skb, try again next time */
+                       goto out;
+               }
+
+               /* put the new buffer on RX-free queue */
+               rx_ring->buff_tab[i] = skb;
+
+               if (++i == RX_DESCS) {
+                       i = 0;
+                       desc->config0 = END_OF_RING | FIRST_SEGMENT |
+                                       LAST_SEGMENT | mtu;
+               } else {
+                       desc->config0 = FIRST_SEGMENT | LAST_SEGMENT | mtu;
+               }
+       }
+out:
+       rx_ring->alloc_count = received;
+       rx_ring->alloc_index = i;
+}
+
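+/*
+ * Walk TX descriptors the switch has handed back (cown set), account
+ * the transmitted packets and free their skbs.
+ */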
+static void update_tx_stats(struct sw *sw)
+{
+       struct _tx_ring *tx_ring = sw->tx_ring;
+       struct tx_desc *desc;
+       struct tx_desc *next_desc;
+       struct sk_buff *skb;
+       int i;
+       int index;
+       int num_count;
+
+       spin_lock_bh(&stat_lock);
+
+       num_count = tx_ring->num_count;
+
+       if (!num_count) {
+               spin_unlock_bh(&stat_lock);
+               return;
+       }
+
+       index = tx_ring->count_index;
+       desc = &(tx_ring)->desc[index];
+       for (i = 0; i < num_count; i++) {
+               skb = tx_ring->buff_tab[index];
+               if (desc->cown) {
+                       tx_ring->buff_tab[index] = 0;
+                       if (unlikely(++index == TX_DESCS)) index = 0;
+                       next_desc = &(tx_ring)->desc[index];
+                       prefetch(next_desc + 4);
+                       if (likely(skb)) {
+                               skb->dev->stats.tx_packets++;
+                               skb->dev->stats.tx_bytes += skb->len;
+                               dev_kfree_skb_any(skb);
+                       }
+                       desc = next_desc;
+               } else {
+                       break;
+               }
+       }
+       tx_ring->num_count -= i;
+       tx_ring->count_index = index;
+
+       spin_unlock_bh(&stat_lock);
+}
+
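+/*
+ * Reclaim the DMA mappings of completed TX descriptors; only bother
+ * once more than half of the ring is in use.
+ */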
+static void clear_tx_desc(struct sw *sw)
+{
+       struct _tx_ring *tx_ring = sw->tx_ring;
+       struct tx_desc *desc;
+       struct tx_desc *next_desc;
+       int i;
+       int index;
+       int num_used = tx_ring->num_used - tx_ring->num_count;
+
+       if (num_used < (TX_DESCS >> 1))
+               return;
+
+       index = tx_ring->free_index;
+       desc = &(tx_ring)->desc[index];
+       for (i = 0; i < num_used; i++) {
+               if (desc->cown) {
+                       if (unlikely(++index == TX_DESCS)) index = 0;
+                       next_desc = &(tx_ring)->desc[index];
+                       prefetch(next_desc);
+                       prefetch(next_desc + 4);
+                       if (likely(desc->sdp))
+                               dma_unmap_single(NULL, desc->sdp,
+                                       desc->sdl, DMA_TO_DEVICE);
+                       desc = next_desc;
+               } else {
+                       break;
+               }
+       }
+       tx_ring->free_index = index;
+       tx_ring->num_used -= i;
+}
+
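+/* NAPI poll: a single RX ring serves all ports; desc->sp selects the netdev. */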
+static int eth_poll(struct napi_struct *napi, int budget)
+{
+       struct sw *sw = container_of(napi, struct sw, napi);
+       struct net_device *dev;
+       struct _rx_ring *rx_ring = sw->rx_ring;
+       int received = 0;
+       unsigned int length;
+       unsigned int i = rx_ring->cur_index;
+       struct rx_desc *next_desc;
+       struct rx_desc *desc = &(rx_ring)->desc[i];
+       int port_id;
+
+       while (desc->cown) {
+               struct sk_buff *skb;
+
+               if (received >= budget)
+                       break;
+
+               skb = rx_ring->buff_tab[i];
+
+               if (++i == RX_DESCS) i = 0;
+               next_desc = &(rx_ring)->desc[i];
+               prefetch(next_desc);
+
+               port_id = desc->sp;
+               if (port_id == 4)
+                       dev = switch_port_tab[2]->netdev;
+               else
+                       dev = switch_port_tab[port_id]->netdev;
+
+               length = desc->sdl;
+               /* process received frame */
+               dma_unmap_single(&dev->dev, desc->sdp,
+                                length, DMA_FROM_DEVICE);
+
+               skb_put(skb, length);
+
+               skb->dev = dev;
+               skb->protocol = eth_type_trans(skb, dev);
+
+               dev->stats.rx_packets++;
+               dev->stats.rx_bytes += length;
+
+               switch (desc->prot) {
+                       case 1:
+                       case 2:
+                       case 5:
+                       case 6:
+                       case 13:
+                       case 14:
+                               if (desc->l4f)
+                                       skb->ip_summed = CHECKSUM_NONE;
+                               else
+                                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+                       break;
+                       default:
+                               skb->ip_summed = CHECKSUM_NONE;
+                       break;
+               }
+
+               napi_gro_receive(napi, skb);
+
+               received++;
+               desc = next_desc;
+       }
+
+       cns3xxx_alloc_rx_buf(sw, received);
+       rx_ring->cur_index = i;
+
+       if (received != budget) {
+               napi_complete(napi);
+               enable_irq(IRQ_CNS3XXX_SW_R0RXC);
+       }
+
+       return received;
+}
+
+static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct port *port = netdev_priv(dev);
+       struct sw *sw = port->sw;
+       struct _tx_ring *tx_ring = sw->tx_ring;
+       struct tx_desc *tx_desc;
+       int index;
+       int len = skb->len;
+       char pmap = (1 << port->id);
+
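+       /* logical port 3 occupies bit 4 of the hardware port map */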
+       if (pmap == 8)
+               pmap = (1 << 4);
+
+       if (unlikely(len > sw->mtu)) {
+               dev_kfree_skb(skb);
+               dev->stats.tx_errors++;
+               return NETDEV_TX_OK;
+       }
+
+       update_tx_stats(sw);
+
+       spin_lock_bh(&tx_lock);
+
+       clear_tx_desc(sw);
+
+       if (unlikely(tx_ring->num_used == TX_DESCS)) {
+               spin_unlock_bh(&tx_lock);
+               return NETDEV_TX_BUSY;
+       }
+
+       index = tx_ring->cur_index;
+
+       if (unlikely(++tx_ring->cur_index == TX_DESCS))
+               tx_ring->cur_index = 0;
+
+       tx_ring->num_used++;
+       tx_ring->num_count++;
+
+       spin_unlock_bh(&tx_lock);
+
+       tx_desc = &(tx_ring)->desc[index];
+
+       tx_desc->sdp = dma_map_single(NULL, skb->data, len,
+                                     DMA_TO_DEVICE);
+
+       if (dma_mapping_error(NULL, tx_desc->sdp)) {
+               dev_kfree_skb(skb);
+               dev->stats.tx_errors++;
+               return NETDEV_TX_OK;
+       }
+
+       tx_desc->pmap = pmap;
+       tx_ring->buff_tab[index] = skb;
+
+       if (index == TX_DESCS - 1) {
+               tx_desc->config0 = END_OF_RING | FIRST_SEGMENT | LAST_SEGMENT |
+                                  FORCE_ROUTE | IP_CHECKSUM | UDP_CHECKSUM |
+                                  TCP_CHECKSUM | len;
+       } else {
+               tx_desc->config0 = FIRST_SEGMENT | LAST_SEGMENT |
+                                  FORCE_ROUTE | IP_CHECKSUM | UDP_CHECKSUM |
+                                  TCP_CHECKSUM | len;
+       }
+
+       return NETDEV_TX_OK;
+}
+
+static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+       struct port *port = netdev_priv(dev);
+
+       if (!netif_running(dev))
+               return -EINVAL;
+       return phy_mii_ioctl(port->phydev, req, cmd);
+}
+
+/* ethtool support */
+
+static void cns3xxx_get_drvinfo(struct net_device *dev,
+                              struct ethtool_drvinfo *info)
+{
+       strcpy(info->driver, DRV_NAME);
+       strcpy(info->bus_info, "internal");
+}
+
+static int cns3xxx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct port *port = netdev_priv(dev);
+       return phy_ethtool_gset(port->phydev, cmd);
+}
+
+static int cns3xxx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct port *port = netdev_priv(dev);
+       return phy_ethtool_sset(port->phydev, cmd);
+}
+
+static int cns3xxx_nway_reset(struct net_device *dev)
+{
+       struct port *port = netdev_priv(dev);
+       return phy_start_aneg(port->phydev);
+}
+
+static struct ethtool_ops cns3xxx_ethtool_ops = {
+       .get_drvinfo = cns3xxx_get_drvinfo,
+       .get_settings = cns3xxx_get_settings,
+       .set_settings = cns3xxx_set_settings,
+       .nway_reset = cns3xxx_nway_reset,
+       .get_link = ethtool_op_get_link,
+};
+
+
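+/*
+ * Allocate the RX/TX descriptor rings from DMA pools and prime every
+ * RX slot with a mapped skb.
+ */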
+static int init_rings(struct sw *sw)
+{
+       int i;
+       struct _rx_ring *rx_ring = sw->rx_ring;
+       struct _tx_ring *tx_ring = sw->tx_ring;
+
+       __raw_writel(0, &sw->regs->fs_dma_ctrl0);
+       __raw_writel(TS_SUSPEND | FS_SUSPEND, &sw->regs->dma_auto_poll_cfg);
+       __raw_writel(QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl);
+       __raw_writel(CLR_FS_STATE | QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl);
+
+       __raw_writel(QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl);
+
+       if (!(rx_dma_pool = dma_pool_create(DRV_NAME, NULL,
+                                           RX_POOL_ALLOC_SIZE, 32, 0)))
+               return -ENOMEM;
+
+       if (!(rx_ring->desc = dma_pool_alloc(rx_dma_pool, GFP_KERNEL,
+                                             &rx_ring->phys_addr)))
+               return -ENOMEM;
+       memset(rx_ring->desc, 0, RX_POOL_ALLOC_SIZE);
+
+       /* Setup RX buffers */
+       for (i = 0; i < RX_DESCS; i++) {
+               struct rx_desc *desc = &(rx_ring)->desc[i];
+               struct sk_buff *skb;
+               if (!(skb = dev_alloc_skb(sw->mtu)))
+                       return -ENOMEM;
+               if (SKB_DMA_REALIGN)
+                       skb_reserve(skb, SKB_DMA_REALIGN);
+               skb_reserve(skb, NET_IP_ALIGN);
+               desc->sdl = sw->mtu;
+               if (i == (RX_DESCS - 1))
+                       desc->eor = 1;
+               desc->fsd = 1;
+               desc->lsd = 1;
+
+               desc->sdp = dma_map_single(NULL, skb->data,
+                                           sw->mtu, DMA_FROM_DEVICE);
+               if (dma_mapping_error(NULL, desc->sdp)) {
+                       return -EIO;
+               }
+               rx_ring->buff_tab[i] = skb;
+               desc->cown = 0;
+       }
+       __raw_writel(rx_ring->phys_addr, &sw->regs->fs_desc_ptr0);
+       __raw_writel(rx_ring->phys_addr, &sw->regs->fs_desc_base_addr0);
+
+       if (!(tx_dma_pool = dma_pool_create(DRV_NAME, NULL,
+                                           TX_POOL_ALLOC_SIZE, 32, 0)))
+               return -ENOMEM;
+
+       if (!(tx_ring->desc = dma_pool_alloc(tx_dma_pool, GFP_KERNEL,
+                                             &tx_ring->phys_addr)))
+               return -ENOMEM;
+       memset(tx_ring->desc, 0, TX_POOL_ALLOC_SIZE);
+
+       /* Setup TX buffers */
+       for (i = 0; i < TX_DESCS; i++) {
+               struct tx_desc *desc = &(tx_ring)->desc[i];
+               tx_ring->buff_tab[i] = 0;
+
+               if (i == (TX_DESCS - 1))
+                       desc->eor = 1;
+               desc->cown = 1;
+       }
+       __raw_writel(tx_ring->phys_addr, &sw->regs->ts_desc_ptr0);
+       __raw_writel(tx_ring->phys_addr, &sw->regs->ts_desc_base_addr0);
+
+       return 0;
+}
+
+static void destroy_rings(struct sw *sw)
+{
+       int i;
+       if (sw->rx_ring->desc) {
+               for (i = 0; i < RX_DESCS; i++) {
+                       struct _rx_ring *rx_ring = sw->rx_ring;
+                       struct rx_desc *desc = &(rx_ring)->desc[i];
+                       struct sk_buff *skb = sw->rx_ring->buff_tab[i];
+                       if (skb) {
+                               dma_unmap_single(NULL,
+                                                desc->sdp,
+                                                sw->mtu, DMA_FROM_DEVICE);
+                               dev_kfree_skb(skb);
+                       }
+               }
+               dma_pool_free(rx_dma_pool, sw->rx_ring->desc, sw->rx_ring->phys_addr);
+               dma_pool_destroy(rx_dma_pool);
+               rx_dma_pool = 0;
+               sw->rx_ring->desc = 0;
+       }
+       if (sw->tx_ring->desc) {
+               for (i = 0; i < TX_DESCS; i++) {
+                       struct _tx_ring *tx_ring = sw->tx_ring;
+                       struct tx_desc *desc = &(tx_ring)->desc[i];
+                       struct sk_buff *skb = sw->tx_ring->buff_tab[i];
+                       if (skb) {
+                               dma_unmap_single(NULL, desc->sdp,
+                                       skb->len, DMA_TO_DEVICE);
+                               dev_kfree_skb(skb);
+                       }
+               }
+               dma_pool_free(tx_dma_pool, sw->tx_ring->desc, sw->tx_ring->phys_addr);
+               dma_pool_destroy(tx_dma_pool);
+               tx_dma_pool = 0;
+               sw->tx_ring->desc = 0;
+       }
+}
+
+static int eth_open(struct net_device *dev)
+{
+       struct port *port = netdev_priv(dev);
+       struct sw *sw = port->sw;
+       u32 temp;
+
+       port->speed = 0;        /* force "link up" message */
+       phy_start(port->phydev);
+
+       netif_start_queue(dev);
+
+       if (!ports_open) {
+               request_irq(IRQ_CNS3XXX_SW_R0RXC, eth_rx_irq, IRQF_SHARED, "gig_switch", napi_dev);
+               napi_enable(&sw->napi);
+               netif_start_queue(napi_dev);
+               //enable_irq(IRQ_CNS3XXX_SW_R0RXC);
+
+               temp = __raw_readl(&sw->regs->mac_cfg[2]);
+               temp &= ~(PORT_DISABLE);
+               __raw_writel(temp, &sw->regs->mac_cfg[2]);
+
+               temp = __raw_readl(&sw->regs->dma_auto_poll_cfg);
+               temp &= ~(TS_SUSPEND | FS_SUSPEND);
+               __raw_writel(temp, &sw->regs->dma_auto_poll_cfg);
+
+               __raw_writel((TS_POLL_EN | FS_POLL_EN), &sw->regs->dma_auto_poll_cfg);
+       }
+       temp = __raw_readl(&sw->regs->mac_cfg[port->id]);
+       temp &= ~(PORT_DISABLE);
+       __raw_writel(temp, &sw->regs->mac_cfg[port->id]);
+
+       ports_open++;
+       netif_carrier_on(dev);
+
+       return 0;
+}
+
+static int eth_close(struct net_device *dev)
+{
+       struct port *port = netdev_priv(dev);
+       struct sw *sw = port->sw;
+       u32 temp;
+
+       ports_open--;
+
+       temp = __raw_readl(&sw->regs->mac_cfg[port->id]);
+       temp |= (PORT_DISABLE);
+       __raw_writel(temp, &sw->regs->mac_cfg[port->id]);
+
+       netif_stop_queue(dev);
+
+       phy_stop(port->phydev);
+
+       if (!ports_open) {
+               disable_irq(IRQ_CNS3XXX_SW_R0RXC);
+               free_irq(IRQ_CNS3XXX_SW_R0RXC, napi_dev);
+               napi_disable(&sw->napi);
+               netif_stop_queue(napi_dev);
+               temp = __raw_readl(&sw->regs->mac_cfg[2]);
+               temp |= (PORT_DISABLE);
+               __raw_writel(temp, &sw->regs->mac_cfg[2]);
+
+               __raw_writel(TS_SUSPEND | FS_SUSPEND,
+                            &sw->regs->dma_auto_poll_cfg);
+       }
+
+       netif_carrier_off(dev);
+       return 0;
+}
+
+static void eth_rx_mode(struct net_device *dev)
+{
+       struct port *port = netdev_priv(dev);
+       struct sw *sw = port->sw;
+       u32 temp;
+
+       temp = __raw_readl(&sw->regs->mac_glob_cfg);
+
+       if (dev->flags & IFF_PROMISC) {
+               if (port->id == 3)
+                       temp |= ((1 << 2) << PROMISC_OFFSET);
+               else
+                       temp |= ((1 << port->id) << PROMISC_OFFSET);
+       } else {
+               if (port->id == 3)
+                       temp &= ~((1 << 2) << PROMISC_OFFSET);
+               else
+                       temp &= ~((1 << port->id) << PROMISC_OFFSET);
+       }
+       __raw_writel(temp, &sw->regs->mac_glob_cfg);
+}
+
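+/*
+ * The switch resolves destinations via its ARL table: invalidate the
+ * old MAC's entry, then install a static entry for the new address.
+ */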
+static int eth_set_mac(struct net_device *netdev, void *p)
+{
+       struct port *port = netdev_priv(netdev);
+       struct sw *sw = port->sw;
+       struct sockaddr *addr = p;
+       u32 cycles = 0;
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       /* Invalidate old ARL Entry */
+       if (port->id == 3)
+               __raw_writel((port->id << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
+       else
+               __raw_writel(((port->id + 1) << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
+       __raw_writel( ((netdev->dev_addr[0] << 24) | (netdev->dev_addr[1] << 16) |
+                       (netdev->dev_addr[2] << 8) | (netdev->dev_addr[3])),
+                       &sw->regs->arl_ctrl[1]);
+
+       __raw_writel( ((netdev->dev_addr[4] << 24) | (netdev->dev_addr[5] << 16) |
+                       (1 << 1)),
+                       &sw->regs->arl_ctrl[2]);
+       __raw_writel((1 << 19), &sw->regs->arl_vlan_cmd);
+
+       while (((__raw_readl(&sw->regs->arl_vlan_cmd) & (1 << 21)) == 0)
+                       && cycles < 5000) {
+               udelay(1);
+               cycles++;
+       }
+
+       cycles = 0;
+       memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+
+       if (port->id == 3)
+               __raw_writel((port->id << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
+       else
+               __raw_writel(((port->id + 1) << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
+       __raw_writel( ((addr->sa_data[0] << 24) | (addr->sa_data[1] << 16) |
+                       (addr->sa_data[2] << 8) | (addr->sa_data[3])),
+                       &sw->regs->arl_ctrl[1]);
+
+       __raw_writel( ((addr->sa_data[4] << 24) | (addr->sa_data[5] << 16) |
+                       (7 << 4) | (1 << 1)), &sw->regs->arl_ctrl[2]);
+       __raw_writel((1 << 19), &sw->regs->arl_vlan_cmd);
+
+       while (((__raw_readl(&sw->regs->arl_vlan_cmd) & (1 << 21)) == 0)
+               && cycles < 5000) {
+               udelay(1);
+               cycles++;
+       }
+       return 0;
+}
+
+static int cns3xxx_change_mtu(struct net_device *netdev, int new_mtu)
+{
+       struct port *port = netdev_priv(netdev);
+       struct sw *sw = port->sw;
+       u32 temp;
+       int i;
+       struct _rx_ring *rx_ring = sw->rx_ring;
+       struct rx_desc *desc;
+       struct sk_buff *skb;
+
+       if (new_mtu > MAX_MRU)
+               return -EINVAL;
+
+       netdev->mtu = new_mtu;
+
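+       /* reserve extra room beyond the MTU: 36 bytes plus any DMA realignment */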
+       new_mtu += 36 + SKB_DMA_REALIGN;
+       port->mtu = new_mtu;
+
+       new_mtu = 0;
+       for (i = 0; i < 3; i++) {
+               if (switch_port_tab[i]) {
+                       if (switch_port_tab[i]->mtu > new_mtu)
+                               new_mtu = switch_port_tab[i]->mtu;
+               }
+       }
+
+
+       if (new_mtu == sw->mtu)
+               return 0;
+
+       disable_irq(IRQ_CNS3XXX_SW_R0RXC);
+
+       sw->mtu = new_mtu;
+
+       /* Disable DMA */
+       __raw_writel(TS_SUSPEND | FS_SUSPEND, &sw->regs->dma_auto_poll_cfg);
+
+       for (i = 0; i < RX_DESCS; i++) {
+               desc = &(rx_ring)->desc[i];
+               /* Check if we own it, if we do, it will get set correctly
+                * when it is re-used */
+               if (!desc->cown) {
+                       skb = rx_ring->buff_tab[i];
+                       dma_unmap_single(NULL, desc->sdp, desc->sdl,
+                                        DMA_FROM_DEVICE);
+                       dev_kfree_skb(skb);
+
+                       if ((skb = dev_alloc_skb(new_mtu))) {
+                               if (SKB_DMA_REALIGN)
+                                       skb_reserve(skb, SKB_DMA_REALIGN);
+                               skb_reserve(skb, NET_IP_ALIGN);
+                               desc->sdp = dma_map_single(NULL, skb->data,
+                                           new_mtu, DMA_FROM_DEVICE);
+                               if (dma_mapping_error(NULL, desc->sdp)) {
+                                       dev_kfree_skb(skb);
+                                       skb = NULL;
+                               }
+                       }
+
+                       /* put the new buffer on RX-free queue */
+                       rx_ring->buff_tab[i] = skb;
+
+                       if (i == RX_DESCS - 1)
+                               desc->config0 = END_OF_RING | FIRST_SEGMENT |
+                                               LAST_SEGMENT | new_mtu;
+                       else
+                               desc->config0 = FIRST_SEGMENT |
+                                               LAST_SEGMENT | new_mtu;
+               }
+       }
+
+       /* Re-ENABLE DMA */
+       temp = __raw_readl(&sw->regs->dma_auto_poll_cfg);
+       temp &= ~(TS_SUSPEND | FS_SUSPEND);
+       __raw_writel(temp, &sw->regs->dma_auto_poll_cfg);
+
+       __raw_writel((TS_POLL_EN | FS_POLL_EN), &sw->regs->dma_auto_poll_cfg);
+
+       enable_irq(IRQ_CNS3XXX_SW_R0RXC);
+
+       return 0;
+}
+
+static const struct net_device_ops cns3xxx_netdev_ops = {
+       .ndo_open = eth_open,
+       .ndo_stop = eth_close,
+       .ndo_start_xmit = eth_xmit,
+       .ndo_set_rx_mode = eth_rx_mode,
+       .ndo_do_ioctl = eth_ioctl,
+       .ndo_change_mtu = cns3xxx_change_mtu,
+       .ndo_set_mac_address = eth_set_mac,
+       .ndo_validate_addr = eth_validate_addr,
+};
+
+static int __devinit eth_init_one(struct platform_device *pdev)
+{
+       int i;
+       struct port *port;
+       struct sw *sw;
+       struct net_device *dev;
+       struct cns3xxx_plat_info *plat = pdev->dev.platform_data;
+       u32 regs_phys;
+       char phy_id[MII_BUS_ID_SIZE + 3];
+       int err;
+       u32 temp;
+
+       spin_lock_init(&tx_lock);
+       spin_lock_init(&stat_lock);
+
+       if (!(napi_dev = alloc_etherdev(sizeof(struct sw))))
+               return -ENOMEM;
+       strcpy(napi_dev->name, "switch%d");
+
+       SET_NETDEV_DEV(napi_dev, &pdev->dev);
+       sw = netdev_priv(napi_dev);
+       memset(sw, 0, sizeof(struct sw));
+       sw->regs = (struct switch_regs __iomem *)CNS3XXX_SWITCH_BASE_VIRT;
+       regs_phys = CNS3XXX_SWITCH_BASE;
+       sw->mem_res = request_mem_region(regs_phys, REGS_SIZE, napi_dev->name);
+       if (!sw->mem_res) {
+               err = -EBUSY;
+               goto err_free;
+       }
+
+       sw->mtu = 1536 + SKB_DMA_REALIGN;
+
+       for (i = 0; i < 4; i++) {
+               temp = __raw_readl(&sw->regs->mac_cfg[i]);
+               temp |= (PORT_DISABLE) | 0x80000000;
+               __raw_writel(temp, &sw->regs->mac_cfg[i]);
+       }
+
+       temp = PORT_DISABLE;
+       __raw_writel(temp, &sw->regs->mac_cfg[2]);
+
+       temp = __raw_readl(&sw->regs->vlan_cfg);
+       temp |= NIC_MODE | VLAN_UNAWARE;
+       __raw_writel(temp, &sw->regs->vlan_cfg);
+
+       __raw_writel(UNKNOWN_VLAN_TO_CPU | ACCEPT_CRC_PACKET |
+                    CRC_STRIPPING, &sw->regs->mac_glob_cfg);
+
+       if (!(sw->rx_ring = kmalloc(sizeof(struct _rx_ring), GFP_KERNEL))) {
+               err = -ENOMEM;
+               goto err_free;
+       }
+       memset(sw->rx_ring, 0, sizeof(struct _rx_ring));
+
+       if (!(sw->tx_ring = kmalloc(sizeof(struct _tx_ring), GFP_KERNEL))) {
+               err = -ENOMEM;
+               goto err_free_rx;
+       }
+       memset(sw->tx_ring, 0, sizeof(struct _tx_ring));
+
+       if ((err = init_rings(sw)) != 0) {
+               destroy_rings(sw);
+               err = -ENOMEM;
+               goto err_free_rings;
+       }
+       platform_set_drvdata(pdev, napi_dev);
+
+       netif_napi_add(napi_dev, &sw->napi, eth_poll, NAPI_WEIGHT);
+
+       for (i = 0; i < 3; i++) {
+               if (!(plat->ports & (1 << i))) {
+                       continue;
+               }
+
+               if (!(dev = alloc_etherdev(sizeof(struct port)))) {
+                       goto free_ports;
+               }
+
+               //SET_NETDEV_DEV(dev, &pdev->dev);
+               port = netdev_priv(dev);
+               port->netdev = dev;
+               if (i == 2)
+                       port->id = 3;
+               else
+                       port->id = i;
+               port->sw = sw;
+               port->mtu = sw->mtu;
+
+               temp = __raw_readl(&sw->regs->mac_cfg[port->id]);
+               temp |= (PORT_DISABLE);
+               __raw_writel(temp, &sw->regs->mac_cfg[port->id]);
+
+               dev->netdev_ops = &cns3xxx_netdev_ops;
+               dev->ethtool_ops = &cns3xxx_ethtool_ops;
+               dev->tx_queue_len = 1000;
+               dev->features = NETIF_F_HW_CSUM;
+
+               dev->vlan_features = NETIF_F_HW_CSUM;
+
+               switch_port_tab[i] = port;
+               memcpy(dev->dev_addr, &plat->hwaddr[i], ETH_ALEN);
+
+               snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, "0", plat->phy[i]);
+               port->phydev = phy_connect(dev, phy_id, &cns3xxx_adjust_link, 0,
+                       PHY_INTERFACE_MODE_RGMII);
+               if ((err = IS_ERR(port->phydev))) {
+                       switch_port_tab[i] = 0;
+                       free_netdev(dev);
+                       goto free_ports;
+               }
+
+               port->phydev->irq = PHY_POLL;
+
+               if ((err = register_netdev(dev))) {
+                       phy_disconnect(port->phydev);
+                       switch_port_tab[i] = 0;
+                       free_netdev(dev);
+                       goto free_ports;
+               }
+
+               printk(KERN_INFO "%s: RGMII PHY %i on cns3xxx Switch\n", dev->name, plat->phy[i]);
+               netif_carrier_off(dev);
+               dev = 0;
+       }
+
+       return 0;
+
+free_ports:
+       err = -ENOMEM;
+       for (--i; i >= 0; i--) {
+               if (switch_port_tab[i]) {
+                       port = switch_port_tab[i];
+                       dev = port->netdev;
+                       unregister_netdev(dev);
+                       phy_disconnect(port->phydev);
+                       switch_port_tab[i] = 0;
+                       free_netdev(dev);
+               }
+       }
+err_free_rings:
+       kfree(sw->tx_ring);
+err_free_rx:
+       kfree(sw->rx_ring);
+err_free:
+       free_netdev(napi_dev);
+       return err;
+}
+
+static int __devexit eth_remove_one(struct platform_device *pdev)
+{
+       struct net_device *dev = platform_get_drvdata(pdev);
+       struct sw *sw = netdev_priv(dev);
+       int i;
+       destroy_rings(sw);
+
+       for (i = 2; i >= 0; i--) {
+               if (switch_port_tab[i]) {
+                       struct port *port = switch_port_tab[i];
+                       struct net_device *dev = port->netdev;
+                       unregister_netdev(dev);
+                       phy_disconnect(port->phydev);
+                       switch_port_tab[i] = 0;
+                       free_netdev(dev);
+               }
+       }
+
+       release_resource(sw->mem_res);
+       free_netdev(napi_dev);
+       return 0;
+}
+
+static struct platform_driver cns3xxx_eth_driver = {
+       .driver.name    = DRV_NAME,
+       .probe          = eth_init_one,
+       .remove         = eth_remove_one,
+};
+
+static int __init eth_init_module(void)
+{
+       int err;
+       if ((err = cns3xxx_mdio_register()))
+               return err;
+       return platform_driver_register(&cns3xxx_eth_driver);
+}
+
+static void __exit eth_cleanup_module(void)
+{
+       platform_driver_unregister(&cns3xxx_eth_driver);
+       cns3xxx_mdio_remove();
+}
+
+module_init(eth_init_module);
+module_exit(eth_cleanup_module);
+
+MODULE_AUTHOR("Chris Lang");
+MODULE_DESCRIPTION("Cavium CNS3xxx Ethernet driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:cns3xxx_eth");
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/include/mach/platform.h
@@ -0,0 +1,26 @@
+/*
+ * arch/arm/mach-cns3xxx/include/mach/platform.h
+ *
+ * Copyright 2011 Gateworks Corporation
+ *               Chris Lang <clang@gateworks.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ASM_ARCH_PLATFORM_H
+#define __ASM_ARCH_PLATFORM_H
+
+#ifndef __ASSEMBLY__
+
+/* Information about built-in Ethernet MAC interfaces */
+struct cns3xxx_plat_info {
+       u8 ports; /* Bitmap of enabled Ports */
+       u8 hwaddr[4][6];
+       u32 phy[3];
+};
+
+#endif /* __ASM_ARCH_PLATFORM_H */
+#endif
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -32,6 +32,7 @@ source "drivers/net/ethernet/calxeda/Kco
 source "drivers/net/ethernet/chelsio/Kconfig"
 source "drivers/net/ethernet/cirrus/Kconfig"
 source "drivers/net/ethernet/cisco/Kconfig"
+source "drivers/net/ethernet/cavium/Kconfig"
 source "drivers/net/ethernet/davicom/Kconfig"
 
 config DNET
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_NET_BFIN) += adi/
 obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
 obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/
 obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
+obj-$(CONFIG_NET_VENDOR_CAVIUM) += cavium/
 obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
 obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
 obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
--- /dev/null
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -0,0 +1,24 @@
+config NET_VENDOR_CAVIUM
+       bool "Cavium devices"
+       default y
+       depends on ARCH_CNS3XXX
+       ---help---
+         If you have a network (Ethernet) chipset belonging to this class,
+         say Y.
+
+         Note that the answer to this question does not directly affect
+         the kernel: saying N will just cause the configurator to skip all
+         the questions regarding Cavium chipsets. If you say Y, you will be asked
+         for your specific chipset/driver in the following questions.
+
+if NET_VENDOR_CAVIUM
+
+config CNS3XXX_ETH
+       tristate "Cavium CNS3xxx Ethernet support"
+       depends on ARCH_CNS3XXX
+       select PHYLIB
+       help
+         Say Y here if you want to use the built-in Ethernet ports
+         on the CNS3xxx processor.
+
+endif
--- /dev/null
+++ b/drivers/net/ethernet/cavium/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Cavium ethernet device drivers.
+#
+
+obj-$(CONFIG_CNS3XXX_ETH) += cns3xxx_eth.o