target/linux/cns3xxx/patches/051-cns3xxx_gigabit.patch
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2071,6 +2071,14 @@ config ACENIC_OMIT_TIGON_I
 
          The safe and default value for this is N.
 
+config CNS3XXX_ETH
+       tristate "Cavium CNS3xxx Ethernet support"
+       depends on ARCH_CNS3XXX
+       select PHYLIB
+       help
+         Say Y here if you want to use the built-in Ethernet ports
+         on the CNS3xxx processor.
+
 config DL2K
        tristate "DL2000/TC902x-based Gigabit Ethernet support"
        depends on PCI
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -240,6 +240,7 @@ obj-$(CONFIG_MAC89x0) += mac89x0.o
 obj-$(CONFIG_TUN) += tun.o
 obj-$(CONFIG_VETH) += veth.o
 obj-$(CONFIG_NET_NETX) += netx-eth.o
+obj-$(CONFIG_CNS3XXX_ETH) += cns3xxx_eth.o
 obj-$(CONFIG_DL2K) += dl2k.o
 obj-$(CONFIG_R8169) += r8169.o
 obj-$(CONFIG_AMD8111_ETH) += amd8111e.o
--- /dev/null
+++ b/drivers/net/cns3xxx_eth.c
@@ -0,0 +1,1269 @@
+/*
+ * Cavium CNS3xxx Gigabit driver for Linux
+ *
+ * Copyright 2011 Gateworks Corporation
+ *               Chris Lang <clang@gateworks.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <mach/irqs.h>
+#include <mach/platform.h>
+
+#define DRV_NAME "cns3xxx_eth"
+
+#define RX_DESCS 512
+#define TX_DESCS 512
+#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
+
+#define RX_POOL_ALLOC_SIZE (sizeof(struct rx_desc) * RX_DESCS)
+#define TX_POOL_ALLOC_SIZE (sizeof(struct tx_desc) * TX_DESCS)
+#define REGS_SIZE 336
+#define MAX_MRU        9500
+
+#define NAPI_WEIGHT 64
+
+/* MDIO Defines */
+#define MDIO_CMD_COMPLETE 0x00008000
+#define MDIO_WRITE_COMMAND 0x00002000
+#define MDIO_READ_COMMAND 0x00004000
+#define MDIO_REG_OFFSET 8
+#define MDIO_VALUE_OFFSET 16
+/* Descriptor Defines */
+#define END_OF_RING 0x40000000
+#define FIRST_SEGMENT 0x20000000
+#define LAST_SEGMENT 0x10000000
+#define FORCE_ROUTE 0x04000000
+#define IP_CHECKSUM 0x00040000
+#define UDP_CHECKSUM 0x00020000
+#define TCP_CHECKSUM 0x00010000
+
+/* Port Config Defines */
+#define PORT_DISABLE 0x00040000
+#define PROMISC_OFFSET 29
+
+/* Global Config Defines */
+#define UNKNOWN_VLAN_TO_CPU 0x02000000
+#define ACCEPT_CRC_PACKET 0x00200000
+#define CRC_STRIPPING 0x00100000
+
+/* VLAN Config Defines */
+#define NIC_MODE 0x00008000
+#define VLAN_UNAWARE 0x00000001
+
+/* DMA AUTO Poll Defines */
+#define TS_POLL_EN 0x00000020
+#define TS_SUSPEND 0x00000010
+#define FS_POLL_EN 0x00000002
+#define FS_SUSPEND 0x00000001
+
+/* DMA Ring Control Defines */
+#define QUEUE_THRESHOLD 0x000000f0
+#define CLR_FS_STATE 0x80000000
+
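+/*
+ * Hardware DMA descriptors, 32 bytes each.  Ownership is tracked via
+ * the cown ("CPU own") bit: the driver hands a descriptor to the
+ * switch by clearing cown, and the switch sets it again once the
+ * frame has been received or transmitted.  eor marks the end of the
+ * ring; fsd/lsd flag the first/last segment of a frame.
+ */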
+struct tx_desc {
+       u32 sdp; /* segment data pointer */
+
+       union {
+               struct {
+                       u32 sdl:16; /* segment data length */
+                       u32 tco:1;
+                       u32 uco:1;
+                       u32 ico:1;
+                       u32 rsv_1:3; /* reserve */
+                       u32 pri:3;
+                       u32 fp:1; /* force priority */
+                       u32 fr:1;
+                       u32 interrupt:1;
+                       u32 lsd:1;
+                       u32 fsd:1;
+                       u32 eor:1;
+                       u32 cown:1;
+               };
+               u32 config0;
+       };
+
+       union {
+               struct {
+                       u32 ctv:1;
+                       u32 stv:1;
+                       u32 sid:4;
+                       u32 inss:1;
+                       u32 dels:1;
+                       u32 rsv_2:9;
+                       u32 pmap:5;
+                       u32 mark:3;
+                       u32 ewan:1;
+                       u32 fewan:1;
+                       u32 rsv_3:5;
+               };
+               u32 config1;
+       };
+
+       union {
+               struct {
+                       u32 c_vid:12;
+                       u32 c_cfs:1;
+                       u32 c_pri:3;
+                       u32 s_vid:12;
+                       u32 s_dei:1;
+                       u32 s_pri:3;
+               };
+               u32 config2;
+       };
+
+       u8 alignment[16]; /* for 32 byte alignment */
+};
+
+struct rx_desc {
+       u32 sdp; /* segment data pointer */
+
+       union {
+               struct {
+                       u32 sdl:16; /* segment data length */
+                       u32 l4f:1;
+                       u32 ipf:1;
+                       u32 prot:4;
+                       u32 hr:6;
+                       u32 lsd:1;
+                       u32 fsd:1;
+                       u32 eor:1;
+                       u32 cown:1;
+               };
+               u32 config0;
+       };
+
+       union {
+               struct {
+                       u32 ctv:1;
+                       u32 stv:1;
+                       u32 unv:1;
+                       u32 iwan:1;
+                       u32 exdv:1;
+                       u32 e_wan:1;
+                       u32 rsv_1:2;
+                       u32 sp:3;
+                       u32 crc_err:1;
+                       u32 un_eth:1;
+                       u32 tc:2;
+                       u32 rsv_2:1;
+                       u32 ip_offset:5;
+                       u32 rsv_3:11;
+               };
+               u32 config1;
+       };
+
+       union {
+               struct {
+                       u32 c_vid:12;
+                       u32 c_cfs:1;
+                       u32 c_pri:3;
+                       u32 s_vid:12;
+                       u32 s_dei:1;
+                       u32 s_pri:3;
+               };
+               u32 config2;
+       };
+
+       u8 alignment[16]; /* for 32 byte alignment */
+};
+
+struct switch_regs {
+       u32 phy_control;
+       u32 phy_auto_addr;
+       u32 mac_glob_cfg;
+       u32 mac_cfg[4];
+       u32 mac_pri_ctrl[5], __res;
+       u32 etype[2];
+       u32 udp_range[4];
+       u32 prio_etype_udp;
+       u32 prio_ipdscp[8];
+       u32 tc_ctrl;
+       u32 rate_ctrl;
+       u32 fc_glob_thrs;
+       u32 fc_port_thrs;
+       u32 mc_fc_glob_thrs;
+       u32 dc_glob_thrs;
+       u32 arl_vlan_cmd;
+       u32 arl_ctrl[3];
+       u32 vlan_cfg;
+       u32 pvid[2];
+       u32 vlan_ctrl[3];
+       u32 session_id[8];
+       u32 intr_stat;
+       u32 intr_mask;
+       u32 sram_test;
+       u32 mem_queue;
+       u32 farl_ctrl;
+       u32 fc_input_thrs, __res1[2];
+       u32 clk_skew_ctrl;
+       u32 mac_glob_cfg_ext, __res2[2];
+       u32 dma_ring_ctrl;
+       u32 dma_auto_poll_cfg;
+       u32 delay_intr_cfg, __res3;
+       u32 ts_dma_ctrl0;
+       u32 ts_desc_ptr0;
+       u32 ts_desc_base_addr0, __res4;
+       u32 fs_dma_ctrl0;
+       u32 fs_desc_ptr0;
+       u32 fs_desc_base_addr0, __res5;
+       u32 ts_dma_ctrl1;
+       u32 ts_desc_ptr1;
+       u32 ts_desc_base_addr1, __res6;
+       u32 fs_dma_ctrl1;
+       u32 fs_desc_ptr1;
+       u32 fs_desc_base_addr1;
+};
+
+struct _tx_ring {
+       struct tx_desc *desc;
+       dma_addr_t phys_addr;
+       struct tx_desc *cur_addr;
+       struct sk_buff *buff_tab[TX_DESCS];
+       u32 free_index;
+       u32 count_index;
+       u32 cur_index;
+       int num_used;
+       int num_count;
+};
+
+struct _rx_ring {
+       struct rx_desc *desc;
+       dma_addr_t phys_addr;
+       struct rx_desc *cur_addr;
+       struct sk_buff *buff_tab[RX_DESCS];
+       u32 cur_index;
+       u32 alloc_index;
+       int alloc_count;
+};
+
+struct sw {
+       struct resource *mem_res;
+       struct switch_regs __iomem *regs;
+       struct napi_struct napi;
+       struct cns3xxx_plat_info *plat;
+       struct _tx_ring *tx_ring;
+       struct _rx_ring *rx_ring;
+       u32 mtu;
+};
+
+struct port {
+       struct net_device *netdev;
+       struct phy_device *phydev;
+       struct sw *sw;
+       int id;                 /* logical port ID */
+       int speed, duplex;
+       u32 mtu;
+};
+
+static spinlock_t mdio_lock;
+static spinlock_t tx_lock;
+static spinlock_t stat_lock;
+static struct switch_regs __iomem *mdio_regs; /* mdio command and status only */
+struct mii_bus *mdio_bus;
+static int ports_open;
+static struct port *switch_port_tab[3];
+static struct dma_pool *rx_dma_pool;
+static struct dma_pool *tx_dma_pool;
+struct net_device *napi_dev;
+
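+/*
+ * MDIO transactions go through the switch's phy_control register:
+ * write the PHY address, register offset and an optional value
+ * together with a read/write command bit, then poll for
+ * MDIO_CMD_COMPLETE.  The completion flag appears to be
+ * write-one-to-clear, hence the writes before and after each command.
+ */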
+static int cns3xxx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
+                          int write, u16 cmd)
+{
+       int cycles = 0;
+       u32 temp = 0;
+
+       temp = __raw_readl(&mdio_regs->phy_control);
+       temp |= MDIO_CMD_COMPLETE;
+       __raw_writel(temp, &mdio_regs->phy_control);
+       udelay(10);
+
+       if (write) {
+               temp = (cmd << MDIO_VALUE_OFFSET);
+               temp |= MDIO_WRITE_COMMAND;
+       } else {
+               temp = MDIO_READ_COMMAND;
+       }
+       temp |= ((location & 0x1f) << MDIO_REG_OFFSET);
+       temp |= (phy_id & 0x1f);
+
+       __raw_writel(temp, &mdio_regs->phy_control);
+
+       while (((__raw_readl(&mdio_regs->phy_control) & MDIO_CMD_COMPLETE) == 0)
+                       && cycles < 5000) {
+               udelay(1);
+               cycles++;
+       }
+
+       if (cycles == 5000) {
+               printk(KERN_ERR "%s #%i: MII transaction failed\n", bus->name,
+                      phy_id);
+               return -1;
+       }
+
+       temp = __raw_readl(&mdio_regs->phy_control);
+       temp |= MDIO_CMD_COMPLETE;
+       __raw_writel(temp, &mdio_regs->phy_control);
+
+       if (write)
+               return 0;
+
+       return ((temp >> MDIO_VALUE_OFFSET) & 0xFFFF);
+}
+
+static int cns3xxx_mdio_read(struct mii_bus *bus, int phy_id, int location)
+{
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&mdio_lock, flags);
+       ret = cns3xxx_mdio_cmd(bus, phy_id, location, 0, 0);
+       spin_unlock_irqrestore(&mdio_lock, flags);
+       return ret;
+}
+
+static int cns3xxx_mdio_write(struct mii_bus *bus, int phy_id, int location,
+                            u16 val)
+{
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&mdio_lock, flags);
+       ret = cns3xxx_mdio_cmd(bus, phy_id, location, 1, val);
+       spin_unlock_irqrestore(&mdio_lock, flags);
+       return ret;
+}
+
+static int cns3xxx_mdio_register(void)
+{
+       int err;
+
+       if (!(mdio_bus = mdiobus_alloc()))
+               return -ENOMEM;
+
+       mdio_regs = (struct switch_regs __iomem *)CNS3XXX_SWITCH_BASE_VIRT;
+
+       spin_lock_init(&mdio_lock);
+       mdio_bus->name = "CNS3xxx MII Bus";
+       mdio_bus->read = &cns3xxx_mdio_read;
+       mdio_bus->write = &cns3xxx_mdio_write;
+       strcpy(mdio_bus->id, "0");
+
+       if ((err = mdiobus_register(mdio_bus)))
+               mdiobus_free(mdio_bus);
+       return err;
+}
+
+static void cns3xxx_mdio_remove(void)
+{
+       mdiobus_unregister(mdio_bus);
+       mdiobus_free(mdio_bus);
+}
+
+static void cns3xxx_adjust_link(struct net_device *dev)
+{
+       struct port *port = netdev_priv(dev);
+       struct phy_device *phydev = port->phydev;
+
+       if (!phydev->link) {
+               if (port->speed) {
+                       port->speed = 0;
+                       printk(KERN_INFO "%s: link down\n", dev->name);
+               }
+               return;
+       }
+
+       if (port->speed == phydev->speed && port->duplex == phydev->duplex)
+               return;
+
+       port->speed = phydev->speed;
+       port->duplex = phydev->duplex;
+
+       printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n",
+              dev->name, port->speed, port->duplex ? "full" : "half");
+}
+
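+/*
+ * RX interrupts stay masked while NAPI polling is in progress; the
+ * handler just schedules the poller, and eth_poll() re-enables the
+ * interrupt once the ring has been drained below the budget.
+ */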
+irqreturn_t eth_rx_irq(int irq, void *pdev)
+{
+       struct net_device *dev = pdev;
+       struct sw *sw = netdev_priv(dev);
+       if (likely(napi_schedule_prep(&sw->napi))) {
+               disable_irq_nosync(IRQ_CNS3XXX_SW_R0RXC);
+               __napi_schedule(&sw->napi);
+       }
+       return (IRQ_HANDLED);
+}
+
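+/*
+ * Refill the RX ring with freshly allocated buffers.  Allocation or
+ * mapping failures are not fatal: alloc_count remembers how many
+ * slots are still empty so the next poll can retry the refill.
+ */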
+static void cns3xxx_alloc_rx_buf(struct sw *sw, int received)
+{
+       struct _rx_ring *rx_ring = sw->rx_ring;
+       unsigned int i = rx_ring->alloc_index;
+       struct rx_desc *desc;
+       struct sk_buff *skb;
+       u32 mtu = sw->mtu;
+
+       rx_ring->alloc_count += received;
+
+       for (received = rx_ring->alloc_count; received > 0; received--) {
+               desc = &(rx_ring)->desc[i];
+
+               if ((skb = dev_alloc_skb(mtu))) {
+                       if (SKB_DMA_REALIGN)
+                               skb_reserve(skb, SKB_DMA_REALIGN);
+                       skb_reserve(skb, NET_IP_ALIGN);
+                       desc->sdp = dma_map_single(NULL, skb->data,
+                                   mtu, DMA_FROM_DEVICE);
+                       if (dma_mapping_error(NULL, desc->sdp)) {
+                               dev_kfree_skb(skb);
+                               /* Failed to map, better luck next time */
+                               goto out;
+                       }
+               } else {
+                       /* Failed to allocate skb, try again next time */
+                       goto out;
+               }
+
+               /* put the new buffer on RX-free queue */
+               rx_ring->buff_tab[i] = skb;
+
+               if (++i == RX_DESCS) {
+                       i = 0;
+                       desc->config0 = END_OF_RING | FIRST_SEGMENT |
+                                       LAST_SEGMENT | mtu;
+               } else {
+                       desc->config0 = FIRST_SEGMENT | LAST_SEGMENT | mtu;
+               }
+       }
+out:
+       rx_ring->alloc_count = received;
+       rx_ring->alloc_index = i;
+}
+
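+/*
+ * First stage of TX reclaim: walk the descriptors the hardware has
+ * completed (cown set), account tx_packets/tx_bytes and free the skbs.
+ */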
+static void update_tx_stats(struct sw *sw)
+{
+       struct _tx_ring *tx_ring = sw->tx_ring;
+       struct tx_desc *desc;
+       struct tx_desc *next_desc;
+       struct sk_buff *skb;
+       int i;
+       int index;
+       int num_count;
+
+       spin_lock_bh(&stat_lock);
+
+       num_count = tx_ring->num_count;
+
+       if (!num_count) {
+               spin_unlock_bh(&stat_lock);
+               return;
+       }
+
+       index = tx_ring->count_index;
+       desc = &(tx_ring)->desc[index];
+       for (i = 0; i < num_count; i++) {
+               skb = tx_ring->buff_tab[index];
+               if (desc->cown) {
+                       tx_ring->buff_tab[index] = 0;
+                       if (unlikely(++index == TX_DESCS)) index = 0;
+                       next_desc = &(tx_ring)->desc[index];
+                       prefetch(next_desc + 4);
+                       if (likely(skb)) {
+                               skb->dev->stats.tx_packets++;
+                               skb->dev->stats.tx_bytes += skb->len;
+                               dev_kfree_skb_any(skb);
+                       }
+                       desc = next_desc;
+               } else {
+                       break;
+               }
+       }
+       tx_ring->num_count -= i;
+       tx_ring->count_index = index;
+
+       spin_unlock_bh(&stat_lock);
+}
+
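+/*
+ * Second stage of TX reclaim: unmap the DMA buffers of completed
+ * descriptors.  This is deferred until at least half of the ring is
+ * outstanding, presumably to batch the unmap work.
+ */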
+static void clear_tx_desc(struct sw *sw)
+{
+       struct _tx_ring *tx_ring = sw->tx_ring;
+       struct tx_desc *desc;
+       struct tx_desc *next_desc;
+       int i;
+       int index;
+       int num_used = tx_ring->num_used - tx_ring->num_count;
+
+       if (num_used < (TX_DESCS >> 1))
+               return;
+
+       index = tx_ring->free_index;
+       desc = &(tx_ring)->desc[index];
+       for (i = 0; i < num_used; i++) {
+               if (desc->cown) {
+                       if (unlikely(++index == TX_DESCS)) index = 0;
+                       next_desc = &(tx_ring)->desc[index];
+                       prefetch(next_desc);
+                       prefetch(next_desc + 4);
+                       if (likely(desc->sdp))
+                               dma_unmap_single(NULL, desc->sdp,
+                                       desc->sdl, DMA_TO_DEVICE);
+                       desc = next_desc;
+               } else {
+                       break;
+               }
+       }
+       tx_ring->free_index = index;
+       tx_ring->num_used -= i;
+}
+
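+/*
+ * NAPI poll handler: drain completed RX descriptors up to the budget,
+ * refill the ring, and re-enable the RX interrupt once less than a
+ * full budget of packets was processed.
+ */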
+static int eth_poll(struct napi_struct *napi, int budget)
+{
+       struct sw *sw = container_of(napi, struct sw, napi);
+       struct net_device *dev;
+       struct _rx_ring *rx_ring = sw->rx_ring;
+       int received = 0;
+       unsigned int length;
+       unsigned int i = rx_ring->cur_index;
+       struct rx_desc *next_desc;
+       struct rx_desc *desc = &(rx_ring)->desc[i];
+       int port_id;
+
+       while (desc->cown) {
+               struct sk_buff *skb;
+
+               if (received >= budget)
+                       break;
+
+               skb = rx_ring->buff_tab[i];
+
+               if (++i == RX_DESCS) i = 0;
+               next_desc = &(rx_ring)->desc[i];
+               prefetch(next_desc);
+
+               port_id = desc->sp;
+               if (port_id == 4)
+                       dev = switch_port_tab[2]->netdev;
+               else
+                       dev = switch_port_tab[port_id]->netdev;
+
+               length = desc->sdl;
+               /* process received frame */
+               dma_unmap_single(&dev->dev, desc->sdp,
+                                length, DMA_FROM_DEVICE);
+
+               skb_put(skb, length);
+
+               skb->dev = dev;
+               skb->protocol = eth_type_trans(skb, dev);
+
+               dev->stats.rx_packets++;
+               dev->stats.rx_bytes += length;
+
+               switch (desc->prot) {
+               case 1:
+               case 2:
+               case 5:
+               case 6:
+               case 13:
+               case 14:
+                       if (desc->l4f)
+                               skb->ip_summed = CHECKSUM_NONE;
+                       else
+                               skb->ip_summed = CHECKSUM_UNNECESSARY;
+                       break;
+               default:
+                       skb->ip_summed = CHECKSUM_NONE;
+                       break;
+               }
+
+               napi_gro_receive(napi, skb);
+
+               received++;
+               desc = next_desc;
+       }
+
+       cns3xxx_alloc_rx_buf(sw, received);
+       rx_ring->cur_index = i;
+
+       if (received != budget) {
+               napi_complete(napi);
+               enable_irq(IRQ_CNS3XXX_SW_R0RXC);
+       }
+
+       return received;
+}
+
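+/*
+ * pmap is a bitmap of destination switch ports.  Logical port 3 is
+ * apparently wired to physical switch port 4, hence the remap of
+ * 1 << 3 to 1 << 4 below.
+ */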
+static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct port *port = netdev_priv(dev);
+       struct sw *sw = port->sw;
+       struct _tx_ring *tx_ring = sw->tx_ring;
+       struct tx_desc *tx_desc;
+       int index;
+       int len = skb->len;
+       char pmap = (1 << port->id);
+
+       if (pmap == 8)
+               pmap = (1 << 4);
+
+       if (unlikely(len > sw->mtu)) {
+               dev_kfree_skb(skb);
+               dev->stats.tx_errors++;
+               return NETDEV_TX_OK;
+       }
+
+       update_tx_stats(sw);
+
+       spin_lock_bh(&tx_lock);
+
+       clear_tx_desc(sw);
+
+       if (unlikely(tx_ring->num_used == TX_DESCS)) {
+               spin_unlock_bh(&tx_lock);
+               return NETDEV_TX_BUSY;
+       }
+
+       index = tx_ring->cur_index;
+
+       if (unlikely(++tx_ring->cur_index == TX_DESCS))
+               tx_ring->cur_index = 0;
+
+       tx_ring->num_used++;
+       tx_ring->num_count++;
+
+       spin_unlock_bh(&tx_lock);
+
+       tx_desc = &(tx_ring)->desc[index];
+
+       tx_desc->sdp = dma_map_single(NULL, skb->data, len,
+                                     DMA_TO_DEVICE);
+
+       if (dma_mapping_error(NULL, tx_desc->sdp)) {
+               dev_kfree_skb(skb);
+               dev->stats.tx_errors++;
+               return NETDEV_TX_OK;
+       }
+
+       tx_desc->pmap = pmap;
+       tx_ring->buff_tab[index] = skb;
+
+       if (index == TX_DESCS - 1) {
+               tx_desc->config0 = END_OF_RING | FIRST_SEGMENT | LAST_SEGMENT |
+                                  FORCE_ROUTE | IP_CHECKSUM | UDP_CHECKSUM |
+                                  TCP_CHECKSUM | len;
+       } else {
+               tx_desc->config0 = FIRST_SEGMENT | LAST_SEGMENT |
+                                  FORCE_ROUTE | IP_CHECKSUM | UDP_CHECKSUM |
+                                  TCP_CHECKSUM | len;
+       }
+
+       return NETDEV_TX_OK;
+}
+
+static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+       struct port *port = netdev_priv(dev);
+
+       if (!netif_running(dev))
+               return -EINVAL;
+       return phy_mii_ioctl(port->phydev, req, cmd);
+}
+
+/* ethtool support */
+
+static void cns3xxx_get_drvinfo(struct net_device *dev,
+                              struct ethtool_drvinfo *info)
+{
+       strcpy(info->driver, DRV_NAME);
+       strcpy(info->bus_info, "internal");
+}
+
+static int cns3xxx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct port *port = netdev_priv(dev);
+       return phy_ethtool_gset(port->phydev, cmd);
+}
+
+static int cns3xxx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct port *port = netdev_priv(dev);
+       return phy_ethtool_sset(port->phydev, cmd);
+}
+
+static int cns3xxx_nway_reset(struct net_device *dev)
+{
+       struct port *port = netdev_priv(dev);
+       return phy_start_aneg(port->phydev);
+}
+
+static struct ethtool_ops cns3xxx_ethtool_ops = {
+       .get_drvinfo = cns3xxx_get_drvinfo,
+       .get_settings = cns3xxx_get_settings,
+       .set_settings = cns3xxx_set_settings,
+       .nway_reset = cns3xxx_nway_reset,
+       .get_link = ethtool_op_get_link,
+};
+
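+/*
+ * Allocate both descriptor rings from 32-byte-aligned DMA pools
+ * (matching the descriptor size) and point the switch's FS (RX) and
+ * TS (TX) DMA engines at them.
+ */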
+static int init_rings(struct sw *sw)
+{
+       int i;
+       struct _rx_ring *rx_ring = sw->rx_ring;
+       struct _tx_ring *tx_ring = sw->tx_ring;
+
+       __raw_writel(0, &sw->regs->fs_dma_ctrl0);
+       __raw_writel(TS_SUSPEND | FS_SUSPEND, &sw->regs->dma_auto_poll_cfg);
+       __raw_writel(QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl);
+       __raw_writel(CLR_FS_STATE | QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl);
+
+       __raw_writel(QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl);
+
+       if (!(rx_dma_pool = dma_pool_create(DRV_NAME, NULL,
+                                           RX_POOL_ALLOC_SIZE, 32, 0)))
+               return -ENOMEM;
+
+       if (!(rx_ring->desc = dma_pool_alloc(rx_dma_pool, GFP_KERNEL,
+                                             &rx_ring->phys_addr)))
+               return -ENOMEM;
+       memset(rx_ring->desc, 0, RX_POOL_ALLOC_SIZE);
+
+       /* Setup RX buffers */
+       for (i = 0; i < RX_DESCS; i++) {
+               struct rx_desc *desc = &(rx_ring)->desc[i];
+               struct sk_buff *skb;
+               if (!(skb = dev_alloc_skb(sw->mtu)))
+                       return -ENOMEM;
+               if (SKB_DMA_REALIGN)
+                       skb_reserve(skb, SKB_DMA_REALIGN);
+               skb_reserve(skb, NET_IP_ALIGN);
+               desc->sdl = sw->mtu;
+               if (i == (RX_DESCS - 1))
+                       desc->eor = 1;
+               desc->fsd = 1;
+               desc->lsd = 1;
+
+               desc->sdp = dma_map_single(NULL, skb->data,
+                                           sw->mtu, DMA_FROM_DEVICE);
+               if (dma_mapping_error(NULL, desc->sdp))
+                       return -EIO;
+               rx_ring->buff_tab[i] = skb;
+               desc->cown = 0;
+       }
+       __raw_writel(rx_ring->phys_addr, &sw->regs->fs_desc_ptr0);
+       __raw_writel(rx_ring->phys_addr, &sw->regs->fs_desc_base_addr0);
+
+       if (!(tx_dma_pool = dma_pool_create(DRV_NAME, NULL,
+                                           TX_POOL_ALLOC_SIZE, 32, 0)))
+               return -ENOMEM;
+
+       if (!(tx_ring->desc = dma_pool_alloc(tx_dma_pool, GFP_KERNEL,
+                                             &tx_ring->phys_addr)))
+               return -ENOMEM;
+       memset(tx_ring->desc, 0, TX_POOL_ALLOC_SIZE);
+
+       /* Setup TX buffers */
+       for (i = 0; i < TX_DESCS; i++) {
+               struct tx_desc *desc = &(tx_ring)->desc[i];
+               tx_ring->buff_tab[i] = 0;
+
+               if (i == (TX_DESCS - 1))
+                       desc->eor = 1;
+               desc->cown = 1;
+       }
+       __raw_writel(tx_ring->phys_addr, &sw->regs->ts_desc_ptr0);
+       __raw_writel(tx_ring->phys_addr, &sw->regs->ts_desc_base_addr0);
+
+       return 0;
+}
+
+static void destroy_rings(struct sw *sw)
+{
+       int i;
+       if (sw->rx_ring->desc) {
+               for (i = 0; i < RX_DESCS; i++) {
+                       struct _rx_ring *rx_ring = sw->rx_ring;
+                       struct rx_desc *desc = &(rx_ring)->desc[i];
+                       struct sk_buff *skb = sw->rx_ring->buff_tab[i];
+                       if (skb) {
+                               dma_unmap_single(NULL,
+                                                desc->sdp,
+                                                sw->mtu, DMA_FROM_DEVICE);
+                               dev_kfree_skb(skb);
+                       }
+               }
+               dma_pool_free(rx_dma_pool, sw->rx_ring->desc, sw->rx_ring->phys_addr);
+               dma_pool_destroy(rx_dma_pool);
+               rx_dma_pool = 0;
+               sw->rx_ring->desc = 0;
+       }
+       if (sw->tx_ring->desc) {
+               for (i = 0; i < TX_DESCS; i++) {
+                       struct _tx_ring *tx_ring = sw->tx_ring;
+                       struct tx_desc *desc = &(tx_ring)->desc[i];
+                       struct sk_buff *skb = sw->tx_ring->buff_tab[i];
+                       if (skb) {
+                               dma_unmap_single(NULL, desc->sdp,
+                                       skb->len, DMA_TO_DEVICE);
+                               dev_kfree_skb(skb);
+                       }
+               }
+               dma_pool_free(tx_dma_pool, sw->tx_ring->desc, sw->tx_ring->phys_addr);
+               dma_pool_destroy(tx_dma_pool);
+               tx_dma_pool = 0;
+               sw->tx_ring->desc = 0;
+       }
+}
+
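+/*
+ * The RX interrupt, the NAPI context and mac_cfg[2] (seemingly the
+ * CPU-facing port) are shared by all logical ports, so they are only
+ * set up when the first port opens; ports_open is the refcount.
+ */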
+static int eth_open(struct net_device *dev)
+{
+       struct port *port = netdev_priv(dev);
+       struct sw *sw = port->sw;
+       u32 temp;
+
+       port->speed = 0;        /* force "link up" message */
+       phy_start(port->phydev);
+
+       netif_start_queue(dev);
+
+       if (!ports_open) {
+               request_irq(IRQ_CNS3XXX_SW_R0RXC, eth_rx_irq, IRQF_SHARED, "gig_switch", napi_dev);
+               napi_enable(&sw->napi);
+               netif_start_queue(napi_dev);
+
+               temp = __raw_readl(&sw->regs->mac_cfg[2]);
+               temp &= ~(PORT_DISABLE);
+               __raw_writel(temp, &sw->regs->mac_cfg[2]);
+
+               temp = __raw_readl(&sw->regs->dma_auto_poll_cfg);
+               temp &= ~(TS_SUSPEND | FS_SUSPEND);
+               __raw_writel(temp, &sw->regs->dma_auto_poll_cfg);
+
+               __raw_writel((TS_POLL_EN | FS_POLL_EN), &sw->regs->dma_auto_poll_cfg);
+       }
+       temp = __raw_readl(&sw->regs->mac_cfg[port->id]);
+       temp &= ~(PORT_DISABLE);
+       __raw_writel(temp, &sw->regs->mac_cfg[port->id]);
+
+       ports_open++;
+       netif_carrier_on(dev);
+
+       return 0;
+}
+
+static int eth_close(struct net_device *dev)
+{
+       struct port *port = netdev_priv(dev);
+       struct sw *sw = port->sw;
+       u32 temp;
+
+       ports_open--;
+
+       temp = __raw_readl(&sw->regs->mac_cfg[port->id]);
+       temp |= (PORT_DISABLE);
+       __raw_writel(temp, &sw->regs->mac_cfg[port->id]);
+
+       netif_stop_queue(dev);
+
+       phy_stop(port->phydev);
+
+       if (!ports_open) {
+               disable_irq(IRQ_CNS3XXX_SW_R0RXC);
+               free_irq(IRQ_CNS3XXX_SW_R0RXC, napi_dev);
+               napi_disable(&sw->napi);
+               netif_stop_queue(napi_dev);
+               temp = __raw_readl(&sw->regs->mac_cfg[2]);
+               temp |= (PORT_DISABLE);
+               __raw_writel(temp, &sw->regs->mac_cfg[2]);
+
+               __raw_writel(TS_SUSPEND | FS_SUSPEND,
+                            &sw->regs->dma_auto_poll_cfg);
+       }
+
+       netif_carrier_off(dev);
+       return 0;
+}
+
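+/*
+ * Per-port promiscuous-mode bits live in mac_glob_cfg starting at
+ * bit PROMISC_OFFSET (29); logical port 3 maps onto bit 2 of that
+ * field.
+ */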
+static void eth_rx_mode(struct net_device *dev)
+{
+       struct port *port = netdev_priv(dev);
+       struct sw *sw = port->sw;
+       u32 temp;
+
+       temp = __raw_readl(&sw->regs->mac_glob_cfg);
+
+       if (dev->flags & IFF_PROMISC) {
+               if (port->id == 3)
+                       temp |= ((1 << 2) << PROMISC_OFFSET);
+               else
+                       temp |= ((1 << port->id) << PROMISC_OFFSET);
+       } else {
+               if (port->id == 3)
+                       temp &= ~((1 << 2) << PROMISC_OFFSET);
+               else
+                       temp &= ~((1 << port->id) << PROMISC_OFFSET);
+       }
+       __raw_writel(temp, &sw->regs->mac_glob_cfg);
+}
+
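+/*
+ * MAC addresses are programmed into the switch's ARL table: the old
+ * entry is invalidated and a fresh one written through the arl_ctrl
+ * registers, polling arl_vlan_cmd bit 21 for command completion.
+ */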
+static int eth_set_mac(struct net_device *netdev, void *p)
+{
+       struct port *port = netdev_priv(netdev);
+       struct sw *sw = port->sw;
+       struct sockaddr *addr = p;
+       u32 cycles = 0;
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       /* Invalidate old ARL Entry */
+       if (port->id == 3)
+               __raw_writel((port->id << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
+       else
+               __raw_writel(((port->id + 1) << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
+       __raw_writel(((netdev->dev_addr[0] << 24) | (netdev->dev_addr[1] << 16) |
+                       (netdev->dev_addr[2] << 8) | (netdev->dev_addr[3])),
+                       &sw->regs->arl_ctrl[1]);
+
+       __raw_writel(((netdev->dev_addr[4] << 24) | (netdev->dev_addr[5] << 16) |
+                       (1 << 1)),
+                       &sw->regs->arl_ctrl[2]);
+       __raw_writel((1 << 19), &sw->regs->arl_vlan_cmd);
+
+       while (((__raw_readl(&sw->regs->arl_vlan_cmd) & (1 << 21)) == 0)
+                       && cycles < 5000) {
+               udelay(1);
+               cycles++;
+       }
+
+       cycles = 0;
+       memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+
+       if (port->id == 3)
+               __raw_writel((port->id << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
+       else
+               __raw_writel(((port->id + 1) << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
+       __raw_writel(((addr->sa_data[0] << 24) | (addr->sa_data[1] << 16) |
+                       (addr->sa_data[2] << 8) | (addr->sa_data[3])),
+                       &sw->regs->arl_ctrl[1]);
+
+       __raw_writel(((addr->sa_data[4] << 24) | (addr->sa_data[5] << 16) |
+                       (7 << 4) | (1 << 1)), &sw->regs->arl_ctrl[2]);
+       __raw_writel((1 << 19), &sw->regs->arl_vlan_cmd);
+
+       while (((__raw_readl(&sw->regs->arl_vlan_cmd) & (1 << 21)) == 0)
+                       && cycles < 5000) {
+               udelay(1);
+               cycles++;
+       }
+       return 0;
+}
+
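+/*
+ * All ports share a single RX ring, so the ring buffers are sized for
+ * the largest per-port MTU.  The extra 36 bytes presumably cover the
+ * Ethernet header, VLAN tags and FCS on top of the L3 MTU.
+ */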
+static int cns3xxx_change_mtu(struct net_device *netdev, int new_mtu)
+{
+       struct port *port = netdev_priv(netdev);
+       struct sw *sw = port->sw;
+       u32 temp;
+       int i;
+       struct _rx_ring *rx_ring = sw->rx_ring;
+       struct rx_desc *desc;
+       struct sk_buff *skb;
+
+       if (new_mtu > MAX_MRU)
+               return -EINVAL;
+
+       netdev->mtu = new_mtu;
+
+       new_mtu += 36 + SKB_DMA_REALIGN;
+       port->mtu = new_mtu;
+
+       new_mtu = 0;
+       for (i = 0; i < 3; i++) {
+               if (switch_port_tab[i]) {
+                       if (switch_port_tab[i]->mtu > new_mtu)
+                               new_mtu = switch_port_tab[i]->mtu;
+               }
+       }
+
+       if (new_mtu == sw->mtu)
+               return 0;
+
+       disable_irq(IRQ_CNS3XXX_SW_R0RXC);
+
+       sw->mtu = new_mtu;
+
+       /* Disable DMA */
+       __raw_writel(TS_SUSPEND | FS_SUSPEND, &sw->regs->dma_auto_poll_cfg);
+
+       for (i = 0; i < RX_DESCS; i++) {
+               desc = &(rx_ring)->desc[i];
+               /* Check if we own it, if we do, it will get set correctly
+                * when it is re-used */
+               if (!desc->cown) {
+                       skb = rx_ring->buff_tab[i];
+                       dma_unmap_single(NULL, desc->sdp, desc->sdl,
+                                        DMA_FROM_DEVICE);
+                       dev_kfree_skb(skb);
+
+                       if ((skb = dev_alloc_skb(new_mtu))) {
+                               if (SKB_DMA_REALIGN)
+                                       skb_reserve(skb, SKB_DMA_REALIGN);
+                               skb_reserve(skb, NET_IP_ALIGN);
+                               desc->sdp = dma_map_single(NULL, skb->data,
+                                           new_mtu, DMA_FROM_DEVICE);
+                               if (dma_mapping_error(NULL, desc->sdp)) {
+                                       dev_kfree_skb(skb);
+                                       skb = NULL;
+                               }
+                       }
+
+                       /* put the new buffer on RX-free queue */
+                       rx_ring->buff_tab[i] = skb;
+
+                       if (i == RX_DESCS - 1)
+                               desc->config0 = END_OF_RING | FIRST_SEGMENT |
+                                               LAST_SEGMENT | new_mtu;
+                       else
+                               desc->config0 = FIRST_SEGMENT |
+                                               LAST_SEGMENT | new_mtu;
+               }
+       }
+
+       /* Re-enable DMA */
+       temp = __raw_readl(&sw->regs->dma_auto_poll_cfg);
+       temp &= ~(TS_SUSPEND | FS_SUSPEND);
+       __raw_writel(temp, &sw->regs->dma_auto_poll_cfg);
+
+       __raw_writel((TS_POLL_EN | FS_POLL_EN), &sw->regs->dma_auto_poll_cfg);
+
+       enable_irq(IRQ_CNS3XXX_SW_R0RXC);
+
+       return 0;
+}
+
+static const struct net_device_ops cns3xxx_netdev_ops = {
+       .ndo_open = eth_open,
+       .ndo_stop = eth_close,
+       .ndo_start_xmit = eth_xmit,
+       .ndo_set_rx_mode = eth_rx_mode,
+       .ndo_do_ioctl = eth_ioctl,
+       .ndo_change_mtu = cns3xxx_change_mtu,
+       .ndo_set_mac_address = eth_set_mac,
+       .ndo_validate_addr = eth_validate_addr,
+};
+
+static int __devinit eth_init_one(struct platform_device *pdev)
+{
+       int i;
+       struct port *port;
+       struct sw *sw;
+       struct net_device *dev;
+       struct cns3xxx_plat_info *plat = pdev->dev.platform_data;
+       u32 regs_phys;
+       char phy_id[MII_BUS_ID_SIZE + 3];
+       int err;
+       u32 temp;
+
+       spin_lock_init(&tx_lock);
+       spin_lock_init(&stat_lock);
+
+       if (!(napi_dev = alloc_etherdev(sizeof(struct sw))))
+               return -ENOMEM;
+       strcpy(napi_dev->name, "switch%d");
+
+       SET_NETDEV_DEV(napi_dev, &pdev->dev);
+       sw = netdev_priv(napi_dev);
+       memset(sw, 0, sizeof(struct sw));
+       sw->regs = (struct switch_regs __iomem *)CNS3XXX_SWITCH_BASE_VIRT;
+       regs_phys = CNS3XXX_SWITCH_BASE;
+       sw->mem_res = request_mem_region(regs_phys, REGS_SIZE, napi_dev->name);
+       if (!sw->mem_res) {
+               err = -EBUSY;
+               goto err_free;
+       }
+
+       sw->mtu = 1536 + SKB_DMA_REALIGN;
+
+       for (i = 0; i < 4; i++) {
+               temp = __raw_readl(&sw->regs->mac_cfg[i]);
+               temp |= (PORT_DISABLE) | 0x80000000;
+               __raw_writel(temp, &sw->regs->mac_cfg[i]);
+       }
+
+       temp = PORT_DISABLE;
+       __raw_writel(temp, &sw->regs->mac_cfg[2]);
+
+       temp = __raw_readl(&sw->regs->vlan_cfg);
+       temp |= NIC_MODE | VLAN_UNAWARE;
+       __raw_writel(temp, &sw->regs->vlan_cfg);
+
+       __raw_writel(UNKNOWN_VLAN_TO_CPU | ACCEPT_CRC_PACKET |
+                    CRC_STRIPPING, &sw->regs->mac_glob_cfg);
+
+       if (!(sw->rx_ring = kmalloc(sizeof(struct _rx_ring), GFP_KERNEL))) {
+               err = -ENOMEM;
+               goto err_free;
+       }
+       memset(sw->rx_ring, 0, sizeof(struct _rx_ring));
+
+       if (!(sw->tx_ring = kmalloc(sizeof(struct _tx_ring), GFP_KERNEL))) {
+               err = -ENOMEM;
+               goto err_free_rx;
+       }
+       memset(sw->tx_ring, 0, sizeof(struct _tx_ring));
+
+       if ((err = init_rings(sw)) != 0) {
+               destroy_rings(sw);
+               err = -ENOMEM;
+               goto err_free_rings;
+       }
+       platform_set_drvdata(pdev, napi_dev);
+
+       netif_napi_add(napi_dev, &sw->napi, eth_poll, NAPI_WEIGHT);
+
+       for (i = 0; i < 3; i++) {
+               if (!(plat->ports & (1 << i)))
+                       continue;
+
+               if (!(dev = alloc_etherdev(sizeof(struct port))))
+                       goto free_ports;
+
+               port = netdev_priv(dev);
+               port->netdev = dev;
+               if (i == 2)
+                       port->id = 3;
+               else
+                       port->id = i;
+               port->sw = sw;
+               port->mtu = sw->mtu;
+
+               temp = __raw_readl(&sw->regs->mac_cfg[port->id]);
+               temp |= (PORT_DISABLE);
+               __raw_writel(temp, &sw->regs->mac_cfg[port->id]);
+
+               dev->netdev_ops = &cns3xxx_netdev_ops;
+               dev->ethtool_ops = &cns3xxx_ethtool_ops;
+               dev->tx_queue_len = 1000;
+               dev->features = NETIF_F_HW_CSUM;
+
+               dev->vlan_features = NETIF_F_HW_CSUM;
+
+               switch_port_tab[i] = port;
+               memcpy(dev->dev_addr, &plat->hwaddr[i], ETH_ALEN);
+
+               snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, "0", plat->phy[i]);
+               port->phydev = phy_connect(dev, phy_id, &cns3xxx_adjust_link, 0,
+                       PHY_INTERFACE_MODE_RGMII);
+               if (IS_ERR(port->phydev)) {
+                       switch_port_tab[i] = 0;
+                       free_netdev(dev);
+                       goto free_ports;
+               }
+
+               port->phydev->irq = PHY_POLL;
+
+               if ((err = register_netdev(dev))) {
+                       phy_disconnect(port->phydev);
+                       switch_port_tab[i] = 0;
+                       free_netdev(dev);
+                       goto free_ports;
+               }
+
+               printk(KERN_INFO "%s: RGMII PHY %i on cns3xxx Switch\n", dev->name, plat->phy[i]);
+               netif_carrier_off(dev);
+               dev = 0;
+       }
+
+       return 0;
+
+free_ports:
+       err = -ENOMEM;
+       for (--i; i >= 0; i--) {
+               if (switch_port_tab[i]) {
+                       port = switch_port_tab[i];
+                       dev = port->netdev;
+                       unregister_netdev(dev);
+                       phy_disconnect(port->phydev);
+                       switch_port_tab[i] = 0;
+                       free_netdev(dev);
+               }
+       }
+err_free_rings:
+       kfree(sw->tx_ring);
+err_free_rx:
+       kfree(sw->rx_ring);
+err_free:
+       free_netdev(napi_dev);
+       return err;
+}
+
+static int __devexit eth_remove_one(struct platform_device *pdev)
+{
+       struct net_device *dev = platform_get_drvdata(pdev);
+       struct sw *sw = netdev_priv(dev);
+       int i;
+
+       destroy_rings(sw);
+
+       for (i = 2; i >= 0; i--) {
+               if (switch_port_tab[i]) {
+                       struct port *port = switch_port_tab[i];
+                       struct net_device *dev = port->netdev;
+                       unregister_netdev(dev);
+                       phy_disconnect(port->phydev);
+                       switch_port_tab[i] = 0;
+                       free_netdev(dev);
+               }
+       }
+
+       release_resource(sw->mem_res);
+       free_netdev(napi_dev);
+       return 0;
+}
+
+static struct platform_driver cns3xxx_eth_driver = {
+       .driver.name    = DRV_NAME,
+       .probe          = eth_init_one,
+       .remove         = eth_remove_one,
+};
+
+static int __init eth_init_module(void)
+{
+       int err;
+       if ((err = cns3xxx_mdio_register()))
+               return err;
+       return platform_driver_register(&cns3xxx_eth_driver);
+}
+
+static void __exit eth_cleanup_module(void)
+{
+       platform_driver_unregister(&cns3xxx_eth_driver);
+       cns3xxx_mdio_remove();
+}
+
+module_init(eth_init_module);
+module_exit(eth_cleanup_module);
+
+MODULE_AUTHOR("Chris Lang");
+MODULE_DESCRIPTION("Cavium CNS3xxx Ethernet driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:cns3xxx_eth");
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/include/mach/platform.h
@@ -0,0 +1,26 @@
+/*
+ * arch/arm/mach-cns3xxx/include/mach/platform.h
+ *
+ * Copyright 2011 Gateworks Corporation
+ *               Chris Lang <clang@gateworks.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ASM_ARCH_PLATFORM_H
+#define __ASM_ARCH_PLATFORM_H
+
+#ifndef __ASSEMBLY__
+
+/* Information about built-in Ethernet MAC interfaces */
+struct cns3xxx_plat_info {
+       u8 ports; /* Bitmap of enabled Ports */
+       u8 hwaddr[4][6];
+       u32 phy[3];
+};
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_ARCH_PLATFORM_H */