target/linux/brcm63xx/patches-2.6.27/009-add_integrated_ethernet_mac_support.patch
1 From 49aa7ffcd9bd2d9a0af99fced7b8511160dbf345 Mon Sep 17 00:00:00 2001
2 From: Maxime Bizon <mbizon@freebox.fr>
3 Date: Sun, 21 Sep 2008 03:43:26 +0200
4 Subject: [PATCH] [MIPS] BCM63XX: Add integrated ethernet mac support.
5
6 Signed-off-by: Maxime Bizon <mbizon@freebox.fr>
7 ---
8  arch/mips/bcm63xx/Makefile                       |    1 +
9  arch/mips/bcm63xx/dev-enet.c                     |  156 ++
10  drivers/net/Kconfig                              |    9 +
11  drivers/net/Makefile                             |    1 +
12  drivers/net/bcm63xx_enet.c                       | 1894 ++++++++++++++++++++++
13  drivers/net/bcm63xx_enet.h                       |  294 ++++
14  include/asm-mips/mach-bcm63xx/bcm63xx_dev_enet.h |   45 +
15  7 files changed, 2400 insertions(+), 0 deletions(-)
16  create mode 100644 arch/mips/bcm63xx/dev-enet.c
17  create mode 100644 drivers/net/bcm63xx_enet.c
18  create mode 100644 drivers/net/bcm63xx_enet.h
19  create mode 100644 include/asm-mips/mach-bcm63xx/bcm63xx_dev_enet.h
20
21 --- a/arch/mips/bcm63xx/Makefile
22 +++ b/arch/mips/bcm63xx/Makefile
23 @@ -3,4 +3,5 @@ obj-y           += dev-uart.o
24  obj-y          += dev-pcmcia.o
25  obj-y          += dev-usb-ohci.o
26  obj-y          += dev-usb-ehci.o
27 +obj-y          += dev-enet.o
28  obj-$(CONFIG_EARLY_PRINTK)     += early_printk.o
29 --- /dev/null
30 +++ b/arch/mips/bcm63xx/dev-enet.c
31 @@ -0,0 +1,156 @@
32 +/*
33 + * This file is subject to the terms and conditions of the GNU General Public
34 + * License.  See the file "COPYING" in the main directory of this archive
35 + * for more details.
36 + *
37 + * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
38 + */
39 +
40 +#include <linux/init.h>
41 +#include <linux/kernel.h>
42 +#include <linux/platform_device.h>
43 +#include <bcm63xx_dev_enet.h>
44 +#include <bcm63xx_io.h>
45 +#include <bcm63xx_regs.h>
46 +
47 +static struct resource shared_res[] = {
48 +       {
49 +               .start          = -1, /* filled at runtime */
50 +               .end            = -1, /* filled at runtime */
51 +               .flags          = IORESOURCE_MEM,
52 +       },
53 +};
54 +
55 +static struct platform_device bcm63xx_enet_shared_device = {
56 +       .name           = "bcm63xx_enet_shared",
57 +       .id             = 0,
58 +       .num_resources  = ARRAY_SIZE(shared_res),
59 +       .resource       = shared_res,
60 +};
61 +
62 +static int shared_device_registered = 0;
63 +
64 +static struct resource enet0_res[] = {
65 +       {
66 +               .start          = -1, /* filled at runtime */
67 +               .end            = -1, /* filled at runtime */
68 +               .flags          = IORESOURCE_MEM,
69 +       },
70 +       {
71 +               .start          = -1, /* filled at runtime */
72 +               .flags          = IORESOURCE_IRQ,
73 +       },
74 +       {
75 +               .start          = -1, /* filled at runtime */
77 +               .flags          = IORESOURCE_IRQ,
78 +       },
79 +       {
80 +               .start          = -1, /* filled at runtime */
82 +               .flags          = IORESOURCE_IRQ,
83 +       },
84 +};
85 +
86 +static struct bcm63xx_enet_platform_data enet0_pd;
87 +
88 +static struct platform_device bcm63xx_enet0_device = {
89 +       .name           = "bcm63xx_enet",
90 +       .id             = 0,
91 +       .num_resources  = ARRAY_SIZE(enet0_res),
92 +       .resource       = enet0_res,
93 +       .dev            = {
94 +               .platform_data = &enet0_pd,
95 +       },
96 +};
97 +
98 +static struct resource enet1_res[] = {
99 +       {
100 +               .start          = -1, /* filled at runtime */
101 +               .end            = -1, /* filled at runtime */
102 +               .flags          = IORESOURCE_MEM,
103 +       },
104 +       {
105 +               .start          = -1, /* filled at runtime */
106 +               .flags          = IORESOURCE_IRQ,
107 +       },
108 +       {
109 +               .start          = -1, /* filled at runtime */
110 +               .flags          = IORESOURCE_IRQ,
111 +       },
112 +       {
113 +               .start          = -1, /* filled at runtime */
114 +               .flags          = IORESOURCE_IRQ,
115 +       },
116 +};
117 +
118 +static struct bcm63xx_enet_platform_data enet1_pd;
119 +
120 +static struct platform_device bcm63xx_enet1_device = {
121 +       .name           = "bcm63xx_enet",
122 +       .id             = 1,
123 +       .num_resources  = ARRAY_SIZE(enet1_res),
124 +       .resource       = enet1_res,
125 +       .dev            = {
126 +               .platform_data = &enet1_pd,
127 +       },
128 +};
129 +
130 +int __init bcm63xx_enet_register(int unit,
131 +                                const struct bcm63xx_enet_platform_data *pd)
132 +{
133 +       struct platform_device *pdev;
134 +       struct bcm63xx_enet_platform_data *dpd;
135 +       int ret;
136 +
137 +       if (unit > 1)
138 +               return -ENODEV;
139 +
140 +       if (!shared_device_registered) {
141 +               shared_res[0].start = bcm63xx_regset_address(RSET_ENETDMA);
142 +               shared_res[0].end = shared_res[0].start;
143 +               shared_res[0].end += RSET_ENETDMA_SIZE - 1;
144 +
145 +               ret = platform_device_register(&bcm63xx_enet_shared_device);
146 +               if (ret)
147 +                       return ret;
148 +               shared_device_registered = 1;
149 +       }
150 +
151 +       if (unit == 0) {
152 +               enet0_res[0].start = bcm63xx_regset_address(RSET_ENET0);
153 +               enet0_res[0].end = enet0_res[0].start;
154 +               enet0_res[0].end += RSET_ENET_SIZE - 1;
155 +               enet0_res[1].start = bcm63xx_get_irq_number(IRQ_ENET0);
156 +               enet0_res[2].start = bcm63xx_get_irq_number(IRQ_ENET0_RXDMA);
157 +               enet0_res[3].start = bcm63xx_get_irq_number(IRQ_ENET0_TXDMA);
158 +               pdev = &bcm63xx_enet0_device;
159 +       } else {
160 +               enet1_res[0].start = bcm63xx_regset_address(RSET_ENET1);
161 +               enet1_res[0].end = enet1_res[0].start;
162 +               enet1_res[0].end += RSET_ENET_SIZE - 1;
163 +               enet1_res[1].start = bcm63xx_get_irq_number(IRQ_ENET1);
164 +               enet1_res[2].start = bcm63xx_get_irq_number(IRQ_ENET1_RXDMA);
165 +               enet1_res[3].start = bcm63xx_get_irq_number(IRQ_ENET1_TXDMA);
166 +               pdev = &bcm63xx_enet1_device;
167 +       }
168 +
169 +       /* copy given platform data */
170 +       dpd = pdev->dev.platform_data;
171 +       memcpy(dpd, pd, sizeof (*pd));
172 +
173 +       /* adjust them in case internal phy is used */
174 +       if (dpd->use_internal_phy) {
175 +
176 +               /* internal phy only exists for enet0 */
177 +               if (unit == 1)
178 +                       return -ENODEV;
179 +
180 +               dpd->phy_id = 1;
181 +               dpd->has_phy_interrupt = 1;
182 +               dpd->phy_interrupt = bcm63xx_get_irq_number(IRQ_ENET_PHY);
183 +       }
184 +
185 +       ret = platform_device_register(pdev);
186 +       if (ret)
187 +               return ret;
188 +       return 0;
189 +}
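
For context: board-support code consumes the helper above at init time. The following is a minimal, hypothetical sketch (not part of the patch) that registers ENET0 behind the integrated PHY. It uses only the platform-data field visible in this file, since bcm63xx_enet_register() then fills in phy_id and the PHY interrupt itself; the initcall name is made up for illustration.

#include <linux/init.h>
#include <bcm63xx_dev_enet.h>

/* hypothetical board data: request the integrated PHY and let
 * dev-enet.c pick phy_id and the PHY interrupt */
static struct bcm63xx_enet_platform_data board_enet0_pd = {
	.use_internal_phy	= 1,
};

static int __init board_enet_setup(void)
{
	/* unit 0 only: the internal PHY does not exist for enet1 */
	return bcm63xx_enet_register(0, &board_enet0_pd);
}
device_initcall(board_enet_setup);
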
190 --- a/drivers/net/Kconfig
191 +++ b/drivers/net/Kconfig
192 @@ -1963,6 +1963,15 @@ config NE_H8300
193           Say Y here if you want to use the NE2000 compatible
194           controller on the Renesas H8/300 processor.
195  
196 +config BCM63XX_ENET
197 +       tristate "Broadcom 63xx internal mac support"
198 +       depends on BCM63XX
199 +       select MII
200 +       select PHYLIB
201 +       help
202 +         This driver supports the ethernet MACs in the Broadcom 63xx
203 +         MIPS chipset family (BCM63XX).
204 +
205  source "drivers/net/fs_enet/Kconfig"
206  
207  endif # NET_ETHERNET
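
Since the new entry selects MII and PHYLIB, enabling the driver needs no further symbols beyond the platform dependency. A sketch of the resulting .config fragment, assuming the BCM63XX platform symbol named in the "depends on" line:

CONFIG_BCM63XX=y
CONFIG_BCM63XX_ENET=y
# CONFIG_MII and CONFIG_PHYLIB are pulled in by the "select" statements
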
208 --- a/drivers/net/Makefile
209 +++ b/drivers/net/Makefile
210 @@ -123,6 +123,7 @@ obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
211  obj-$(CONFIG_B44) += b44.o
212  obj-$(CONFIG_FORCEDETH) += forcedeth.o
213  obj-$(CONFIG_NE_H8300) += ne-h8300.o
214 +obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
215  obj-$(CONFIG_AX88796) += ax88796.o
216  
217  obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o
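
The driver below shares one descriptor convention between its rx and tx paths: the buffer length sits in the upper bits of the len_stat word, the OWNER bit hands the descriptor to the DMA engine, and WRAP marks the last ring entry. As a reading aid, here is a hypothetical helper (not part of the patch) condensing what bcm_enet_refill_rx() and bcm_enet_start_xmit() each do inline, using the mask names the patch defines in bcm63xx_enet.h:

/* illustration only -- not in the driver */
static inline u32 enet_mk_len_stat(unsigned int len, int last_in_ring)
{
	u32 len_stat;

	/* buffer length, clamped to the field width */
	len_stat = (len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
	/* hand ownership of the descriptor to hardware */
	len_stat |= DMADESC_OWNER_MASK;
	/* hardware wraps back to the ring base after this entry */
	if (last_in_ring)
		len_stat |= DMADESC_WRAP_MASK;
	return len_stat;
}
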
218 --- /dev/null
219 +++ b/drivers/net/bcm63xx_enet.c
220 @@ -0,0 +1,1894 @@
221 +/*
222 + * Driver for BCM963xx builtin Ethernet mac
223 + *
224 + * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
225 + *
226 + * This program is free software; you can redistribute it and/or modify
227 + * it under the terms of the GNU General Public License as published by
228 + * the Free Software Foundation; either version 2 of the License, or
229 + * (at your option) any later version.
230 + *
231 + * This program is distributed in the hope that it will be useful,
232 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
233 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
234 + * GNU General Public License for more details.
235 + *
236 + * You should have received a copy of the GNU General Public License
237 + * along with this program; if not, write to the Free Software
238 + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
239 + */
240 +#include <linux/init.h>
241 +#include <linux/module.h>
242 +#include <linux/clk.h>
243 +#include <linux/etherdevice.h>
244 +#include <linux/delay.h>
245 +#include <linux/ethtool.h>
246 +#include <linux/crc32.h>
247 +#include <linux/err.h>
248 +#include <linux/dma-mapping.h>
249 +#include <linux/platform_device.h>
250 +
251 +#include <bcm63xx_dev_enet.h>
252 +#include "bcm63xx_enet.h"
253 +
254 +static char bcm_enet_driver_name[] = "bcm63xx_enet";
255 +static char bcm_enet_driver_version[] = "1.0";
256 +
257 +static int copybreak __read_mostly = 128;
258 +module_param(copybreak, int, 0);
259 +MODULE_PARM_DESC(copybreak, "Receive copy threshold");
260 +
261 +/* io memory shared between all devices */
262 +static void __iomem *bcm_enet_shared_base;
263 +
264 +/*
265 + * io helpers to access mac registers
266 + */
267 +static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
268 +{
269 +       return bcm_readl(priv->base + off);
270 +}
271 +
272 +static inline void enet_writel(struct bcm_enet_priv *priv,
273 +                              u32 val, u32 off)
274 +{
275 +       bcm_writel(val, priv->base + off);
276 +}
277 +
278 +/*
279 + * io helpers to access shared registers
280 + */
281 +static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
282 +{
283 +       return bcm_readl(bcm_enet_shared_base + off);
284 +}
285 +
286 +static inline void enet_dma_writel(struct bcm_enet_priv *priv,
287 +                                      u32 val, u32 off)
288 +{
289 +       bcm_writel(val, bcm_enet_shared_base + off);
290 +}
291 +
292 +/*
293 + * write given data into mii register and wait for transfer to end
294 + * with timeout (average measured transfer time is 25us)
295 + */
296 +static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
297 +{
298 +       int limit;
299 +
300 +       /* make sure mii interrupt status is cleared */
301 +       enet_writel(priv, ENET_IR_MII, ENET_IR_REG);
302 +
303 +       enet_writel(priv, data, ENET_MIIDATA_REG);
304 +       wmb();
305 +
306 +       /* busy wait on mii interrupt bit, with timeout */
307 +       limit = 1000;
308 +       do {
309 +               if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
310 +                       break;
311 +               udelay(1);
312 +       } while (limit-- >= 0);
313 +
314 +       return (limit < 0) ? 1 : 0;
315 +}
316 +
317 +/*
318 + * MII internal read callback
319 + */
320 +static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
321 +                             int regnum)
322 +{
323 +       u32 tmp, val;
324 +
325 +       tmp = regnum << ENET_MIIDATA_REG_SHIFT;
326 +       tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
327 +       tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
328 +       tmp |= ENET_MIIDATA_OP_READ_MASK;
329 +
330 +       if (do_mdio_op(priv, tmp))
331 +               return -1;
332 +
333 +       val = enet_readl(priv, ENET_MIIDATA_REG);
334 +       val &= 0xffff;
335 +       return val;
336 +}
337 +
338 +/*
339 + * MII internal write callback
340 + */
341 +static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
342 +                              int regnum, u16 value)
343 +{
344 +       u32 tmp;
345 +
346 +       tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
347 +       tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
348 +       tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
349 +       tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
350 +       tmp |= ENET_MIIDATA_OP_WRITE_MASK;
351 +
352 +       (void)do_mdio_op(priv, tmp);
353 +       return 0;
354 +}
355 +
356 +/*
357 + * MII read callback from phylib
358 + */
359 +static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
360 +                                    int regnum)
361 +{
362 +       return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
363 +}
364 +
365 +/*
366 + * MII write callback from phylib
367 + */
368 +static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
369 +                                     int regnum, u16 value)
370 +{
371 +       return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
372 +}
373 +
374 +/*
375 + * MII read callback from mii core
376 + */
377 +static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
378 +                                 int regnum)
379 +{
380 +       return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
381 +}
382 +
383 +/*
384 + * MII write callback from mii core
385 + */
386 +static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
387 +                                   int regnum, int value)
388 +{
389 +       bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
390 +}
391 +
392 +/*
393 + * refill rx queue
394 + */
395 +static int bcm_enet_refill_rx(struct net_device *dev)
396 +{
397 +       struct bcm_enet_priv *priv;
398 +
399 +       priv = netdev_priv(dev);
400 +
401 +       while (priv->rx_desc_count < priv->rx_ring_size) {
402 +               struct bcm_enet_desc *desc;
403 +               struct sk_buff *skb;
404 +               dma_addr_t p;
405 +               int desc_idx;
406 +               u32 len_stat;
407 +
408 +               desc_idx = priv->rx_dirty_desc;
409 +               desc = &priv->rx_desc_cpu[desc_idx];
410 +
411 +               if (!priv->rx_skb[desc_idx]) {
412 +                       skb = netdev_alloc_skb(dev, BCMENET_MAX_RX_SIZE);
413 +                       if (!skb)
414 +                               break;
415 +                       priv->rx_skb[desc_idx] = skb;
416 +
417 +                       p = dma_map_single(&priv->pdev->dev, skb->data,
418 +                                          BCMENET_MAX_RX_SIZE,
419 +                                          DMA_FROM_DEVICE);
420 +                       desc->address = p;
421 +               }
422 +
423 +               len_stat = BCMENET_MAX_RX_SIZE << DMADESC_LENGTH_SHIFT;
424 +               len_stat |= DMADESC_OWNER_MASK;
425 +               if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
426 +                       len_stat |= DMADESC_WRAP_MASK;
427 +                       priv->rx_dirty_desc = 0;
428 +               } else {
429 +                       priv->rx_dirty_desc++;
430 +               }
431 +               wmb();
432 +               desc->len_stat = len_stat;
433 +
434 +               priv->rx_desc_count++;
435 +
436 +               /* tell dma engine we allocated one buffer */
437 +               enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
438 +       }
439 +
440 +       /* If rx ring is still empty, set a timer to try allocating
441 +        * again at a later time. */
442 +       if (priv->rx_desc_count == 0 && netif_running(dev)) {
443 +               dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
444 +               priv->rx_timeout.expires = jiffies + HZ;
445 +               add_timer(&priv->rx_timeout);
446 +       }
447 +
448 +       return 0;
449 +}
450 +
451 +/*
452 + * timer callback to defer refill rx queue in case we're OOM
453 + */
454 +static void bcm_enet_refill_rx_timer(unsigned long data)
455 +{
456 +       struct net_device *dev;
457 +       struct bcm_enet_priv *priv;
458 +
459 +       dev = (struct net_device *)data;
460 +       priv = netdev_priv(dev);
461 +
462 +       spin_lock(&priv->rx_lock);
463 +       bcm_enet_refill_rx(dev);
464 +       spin_unlock(&priv->rx_lock);
465 +}
466 +
467 +/*
468 + * extract packet from rx queue
469 + */
470 +static int bcm_enet_receive_queue(struct net_device *dev, int budget)
471 +{
472 +       struct bcm_enet_priv *priv;
473 +       struct device *kdev;
474 +       int processed;
475 +
476 +       priv = netdev_priv(dev);
477 +       kdev = &priv->pdev->dev;
478 +       processed = 0;
479 +
480 +       /* don't scan the ring further than the number of refilled
481 +        * descriptors */
482 +       if (budget > priv->rx_desc_count)
483 +               budget = priv->rx_desc_count;
484 +
485 +       do {
486 +               struct bcm_enet_desc *desc;
487 +               struct sk_buff *skb;
488 +               int desc_idx;
489 +               u32 len_stat;
490 +               unsigned int len;
491 +
492 +               desc_idx = priv->rx_curr_desc;
493 +               desc = &priv->rx_desc_cpu[desc_idx];
494 +
495 +               /* make sure we actually read the descriptor status at
496 +                * each loop */
497 +               rmb();
498 +
499 +               len_stat = desc->len_stat;
500 +
501 +               /* break if dma ownership belongs to hw */
502 +               if (len_stat & DMADESC_OWNER_MASK)
503 +                       break;
504 +
505 +               processed++;
506 +               priv->rx_curr_desc++;
507 +               if (priv->rx_curr_desc == priv->rx_ring_size)
508 +                       priv->rx_curr_desc = 0;
509 +               priv->rx_desc_count--;
510 +
511 +               /* if the packet does not have start of packet _and_
512 +                * end of packet flag set, then just recycle it */
513 +               if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
514 +                       priv->stats.rx_dropped++;
515 +                       continue;
516 +               }
517 +
518 +               /* recycle packet if it's marked as bad */
519 +               if (unlikely(len_stat & DMADESC_ERR_MASK)) {
520 +                       priv->stats.rx_errors++;
521 +
522 +                       if (len_stat & DMADESC_OVSIZE_MASK)
523 +                               priv->stats.rx_length_errors++;
524 +                       if (len_stat & DMADESC_CRC_MASK)
525 +                               priv->stats.rx_crc_errors++;
526 +                       if (len_stat & DMADESC_UNDER_MASK)
527 +                               priv->stats.rx_frame_errors++;
528 +                       if (len_stat & DMADESC_OV_MASK)
529 +                               priv->stats.rx_fifo_errors++;
530 +                       continue;
531 +               }
532 +
533 +               /* valid packet */
534 +               skb = priv->rx_skb[desc_idx];
535 +               len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
536 +               /* don't include FCS */
537 +               len -= 4;
538 +
539 +               if (len < copybreak) {
540 +                       struct sk_buff *nskb;
541 +
542 +                       nskb = netdev_alloc_skb(dev, len + 2);
543 +                       if (!nskb) {
544 +                               /* forget packet, just rearm desc */
545 +                               priv->stats.rx_dropped++;
546 +                               continue;
547 +                       }
548 +
549 +                       /* since we're copying the data, we can align
550 +                        * them properly */
551 +                       skb_reserve(nskb, NET_IP_ALIGN);
552 +                       dma_sync_single_for_cpu(kdev, desc->address,
553 +                                               len, DMA_FROM_DEVICE);
554 +                       memcpy(nskb->data, skb->data, len);
555 +                       dma_sync_single_for_device(kdev, desc->address,
556 +                                                  len, DMA_FROM_DEVICE);
557 +                       skb = nskb;
558 +               } else {
559 +                       dma_unmap_single(&priv->pdev->dev, desc->address,
560 +                                        BCMENET_MAX_RX_SIZE, DMA_FROM_DEVICE);
561 +                       priv->rx_skb[desc_idx] = NULL;
562 +               }
563 +
564 +               skb_put(skb, len);
565 +               skb->dev = dev;
566 +               skb->protocol = eth_type_trans(skb, dev);
567 +               priv->stats.rx_packets++;
568 +               priv->stats.rx_bytes += len;
569 +               dev->last_rx = jiffies;
570 +               netif_receive_skb(skb);
571 +
572 +       } while (--budget > 0);
573 +
574 +       if (processed || !priv->rx_desc_count) {
575 +               bcm_enet_refill_rx(dev);
576 +
577 +               /* kick rx dma */
578 +               enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
579 +                               ENETDMA_CHANCFG_REG(priv->rx_chan));
580 +       }
581 +
582 +       return processed;
583 +}
584 +
585 +
586 +/*
587 + * try to or force reclaim of transmitted buffers
588 + */
589 +static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
590 +{
591 +       struct bcm_enet_priv *priv;
592 +       int released;
593 +
594 +       priv = netdev_priv(dev);
595 +       released = 0;
596 +
597 +       while (priv->tx_desc_count < priv->tx_ring_size) {
598 +               struct bcm_enet_desc *desc;
599 +               struct sk_buff *skb;
600 +
601 +               /* We run in a bh and fight against start_xmit, which
602 +        * is called with bh disabled */
603 +               spin_lock(&priv->tx_lock);
604 +
605 +               desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];
606 +
607 +               if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
608 +                       spin_unlock(&priv->tx_lock);
609 +                       break;
610 +               }
611 +
612 +               /* ensure other fields of the descriptor were not
613 +                * read before we checked ownership */
614 +               rmb();
615 +
616 +               skb = priv->tx_skb[priv->tx_dirty_desc];
617 +               priv->tx_skb[priv->tx_dirty_desc] = NULL;
618 +               dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
619 +                                DMA_TO_DEVICE);
620 +
621 +               priv->tx_dirty_desc++;
622 +               if (priv->tx_dirty_desc == priv->tx_ring_size)
623 +                       priv->tx_dirty_desc = 0;
624 +               priv->tx_desc_count++;
625 +
626 +               spin_unlock(&priv->tx_lock);
627 +
628 +               if (desc->len_stat & DMADESC_UNDER_MASK)
629 +                       priv->stats.tx_errors++;
630 +
631 +               dev_kfree_skb(skb);
632 +               released++;
633 +       }
634 +
635 +       if (netif_queue_stopped(dev) && released)
636 +               netif_wake_queue(dev);
637 +
638 +       return released;
639 +}
640 +
641 +/*
642 + * poll func, called by network core
643 + */
644 +static int bcm_enet_poll(struct napi_struct *napi, int budget)
645 +{
646 +       struct bcm_enet_priv *priv;
647 +       struct net_device *dev;
648 +       int tx_work_done, rx_work_done;
649 +
650 +       priv = container_of(napi, struct bcm_enet_priv, napi);
651 +       dev = priv->net_dev;
652 +
653 +       /* ack interrupts */
654 +       enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
655 +                       ENETDMA_IR_REG(priv->rx_chan));
656 +       enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
657 +                       ENETDMA_IR_REG(priv->tx_chan));
658 +
659 +       /* reclaim sent skb */
660 +       tx_work_done = bcm_enet_tx_reclaim(dev, 0);
661 +
662 +       spin_lock(&priv->rx_lock);
663 +       rx_work_done = bcm_enet_receive_queue(dev, budget);
664 +       spin_unlock(&priv->rx_lock);
665 +
666 +       if (rx_work_done >= budget || tx_work_done > 0) {
667 +               /* rx/tx queue is not yet empty/clean */
668 +               return rx_work_done;
669 +       }
670 +
671 +       /* no more packet in rx/tx queue, remove device from poll
672 +        * queue */
673 +       netif_rx_complete(dev, napi);
674 +
675 +       /* restore rx/tx interrupt */
676 +       enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
677 +                       ENETDMA_IRMASK_REG(priv->rx_chan));
678 +       enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
679 +                       ENETDMA_IRMASK_REG(priv->tx_chan));
680 +
681 +       return rx_work_done;
682 +}
683 +
684 +/*
685 + * mac interrupt handler
686 + */
687 +static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
688 +{
689 +       struct net_device *dev;
690 +       struct bcm_enet_priv *priv;
691 +       u32 stat;
692 +
693 +       dev = dev_id;
694 +       priv = netdev_priv(dev);
695 +
696 +       stat = enet_readl(priv, ENET_IR_REG);
697 +       if (!(stat & ENET_IR_MIB))
698 +               return IRQ_NONE;
699 +
700 +       /* clear & mask interrupt */
701 +       enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
702 +       enet_writel(priv, 0, ENET_IRMASK_REG);
703 +
704 +       /* read mib registers in workqueue */
705 +       schedule_work(&priv->mib_update_task);
706 +
707 +       return IRQ_HANDLED;
708 +}
709 +
710 +/*
711 + * rx/tx dma interrupt handler
712 + */
713 +static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
714 +{
715 +       struct net_device *dev;
716 +       struct bcm_enet_priv *priv;
717 +
718 +       dev = dev_id;
719 +       priv = netdev_priv(dev);
720 +
721 +       /* mask rx/tx interrupts */
722 +       enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
723 +       enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
724 +
725 +       netif_rx_schedule(dev, &priv->napi);
726 +
727 +       return IRQ_HANDLED;
728 +}
729 +
730 +/*
731 + * tx request callback
732 + */
733 +static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
734 +{
735 +       struct bcm_enet_priv *priv;
736 +       struct bcm_enet_desc *desc;
737 +       u32 len_stat;
738 +       int ret;
739 +
740 +       priv = netdev_priv(dev);
741 +
742 +       /* lock against tx reclaim */
743 +       spin_lock(&priv->tx_lock);
744 +
745 +       /* make sure the tx hw queue is not full; this should not
746 +        * happen since we stop the queue before that's the case */
747 +       if (unlikely(!priv->tx_desc_count)) {
748 +               netif_stop_queue(dev);
749 +               dev_err(&priv->pdev->dev, "xmit called with no tx desc "
750 +                       "available?\n");
751 +               ret = NETDEV_TX_BUSY;
752 +               goto out_unlock;
753 +       }
754 +
755 +       /* point to the next available desc */
756 +       desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
757 +       priv->tx_skb[priv->tx_curr_desc] = skb;
758 +
759 +       /* fill descriptor */
760 +       desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
761 +                                      DMA_TO_DEVICE);
762 +
763 +       len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
764 +       len_stat |= DMADESC_ESOP_MASK |
765 +               DMADESC_APPEND_CRC |
766 +               DMADESC_OWNER_MASK;
767 +
768 +       priv->tx_curr_desc++;
769 +       if (priv->tx_curr_desc == priv->tx_ring_size) {
770 +               priv->tx_curr_desc = 0;
771 +               len_stat |= DMADESC_WRAP_MASK;
772 +       }
773 +       priv->tx_desc_count--;
774 +
775 +       /* dma might be already polling, make sure we update desc
776 +        * fields in correct order */
777 +       wmb();
778 +       desc->len_stat = len_stat;
779 +       wmb();
780 +
781 +       /* kick tx dma */
782 +       enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
783 +                       ENETDMA_CHANCFG_REG(priv->tx_chan));
784 +
785 +       /* stop queue if no more desc available */
786 +       if (!priv->tx_desc_count)
787 +               netif_stop_queue(dev);
788 +
789 +       priv->stats.tx_bytes += skb->len;
790 +       priv->stats.tx_packets++;
791 +       dev->trans_start = jiffies;
792 +       ret = NETDEV_TX_OK;
793 +
794 +out_unlock:
795 +       spin_unlock(&priv->tx_lock);
796 +       return ret;
797 +}
798 +
799 +/*
800 + * Change the interface's mac address.
801 + */
802 +static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
803 +{
804 +       struct bcm_enet_priv *priv;
805 +       struct sockaddr *addr = p;
806 +       u32 val;
807 +
808 +       priv = netdev_priv(dev);
809 +       memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
810 +
811 +       /* use perfect match register 0 to store my mac address */
812 +       val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
813 +               (dev->dev_addr[4] << 8) | dev->dev_addr[5];
814 +       enet_writel(priv, val, ENET_PML_REG(0));
815 +
816 +       val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
817 +       val |= ENET_PMH_DATAVALID_MASK;
818 +       enet_writel(priv, val, ENET_PMH_REG(0));
819 +
820 +       return 0;
821 +}
822 +
823 +/*
824 + * Change rx mode (promiscuous/allmulti) and update multicast list
825 + */
826 +static void bcm_enet_set_multicast_list(struct net_device *dev)
827 +{
828 +       struct bcm_enet_priv *priv;
829 +       struct dev_mc_list *mc_list;
830 +       u32 val;
831 +       int i;
832 +
833 +       priv = netdev_priv(dev);
834 +
835 +       val = enet_readl(priv, ENET_RXCFG_REG);
836 +
837 +       if (dev->flags & IFF_PROMISC)
838 +               val |= ENET_RXCFG_PROMISC_MASK;
839 +       else
840 +               val &= ~ENET_RXCFG_PROMISC_MASK;
841 +
842 +       /* only 3 perfect match registers left, first one is used for
843 +        * own mac address */
844 +       if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > 3)
845 +               val |= ENET_RXCFG_ALLMCAST_MASK;
846 +       else
847 +               val &= ~ENET_RXCFG_ALLMCAST_MASK;
848 +
849 +       /* no need to set perfect match registers if we catch all
850 +        * multicast */
851 +       if (val & ENET_RXCFG_ALLMCAST_MASK) {
852 +               enet_writel(priv, val, ENET_RXCFG_REG);
853 +               return;
854 +       }
855 +
856 +       for (i = 0, mc_list = dev->mc_list;
857 +            (mc_list != NULL) && (i < dev->mc_count) && (i < 3);
858 +            i++, mc_list = mc_list->next) {
859 +               u8 *dmi_addr;
860 +               u32 tmp;
861 +
862 +               /* filter out non-ethernet addresses */
863 +               if (mc_list->dmi_addrlen != 6)
864 +                       continue;
865 +
866 +               /* update perfect match registers */
867 +               dmi_addr = mc_list->dmi_addr;
868 +               tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
869 +                       (dmi_addr[4] << 8) | dmi_addr[5];
870 +               enet_writel(priv, tmp, ENET_PML_REG(i + 1));
871 +
872 +               tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
873 +               tmp |= ENET_PMH_DATAVALID_MASK;
874 +               enet_writel(priv, tmp, ENET_PMH_REG(i + 1));
875 +       }
876 +
877 +       for (; i < 3; i++) {
878 +               enet_writel(priv, 0, ENET_PML_REG(i + 1));
879 +               enet_writel(priv, 0, ENET_PMH_REG(i + 1));
880 +       }
881 +
882 +       enet_writel(priv, val, ENET_RXCFG_REG);
883 +}
884 +
885 +/*
886 + * set mac duplex parameters
887 + */
888 +static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
889 +{
890 +       u32 val;
891 +
892 +       val = enet_readl(priv, ENET_TXCTL_REG);
893 +       if (fullduplex)
894 +               val |= ENET_TXCTL_FD_MASK;
895 +       else
896 +               val &= ~ENET_TXCTL_FD_MASK;
897 +       enet_writel(priv, val, ENET_TXCTL_REG);
898 +}
899 +
900 +/*
901 + * set mac flow control parameters
902 + */
903 +static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
904 +{
905 +       u32 val;
906 +
907 +       /* rx flow control (pause frame handling) */
908 +       val = enet_readl(priv, ENET_RXCFG_REG);
909 +       if (rx_en)
910 +               val |= ENET_RXCFG_ENFLOW_MASK;
911 +       else
912 +               val &= ~ENET_RXCFG_ENFLOW_MASK;
913 +       enet_writel(priv, val, ENET_RXCFG_REG);
914 +
915 +       /* tx flow control (pause frame generation) */
916 +       val = enet_dma_readl(priv, ENETDMA_CFG_REG);
917 +       if (tx_en)
918 +               val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
919 +       else
920 +               val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
921 +       enet_dma_writel(priv, val, ENETDMA_CFG_REG);
922 +}
923 +
924 +/*
925 + * link changed callback (from phylib)
926 + */
927 +static void bcm_enet_adjust_phy_link(struct net_device *dev)
928 +{
929 +       struct bcm_enet_priv *priv;
930 +       struct phy_device *phydev;
931 +       int status_changed;
932 +
933 +       priv = netdev_priv(dev);
934 +       phydev = priv->phydev;
935 +       status_changed = 0;
936 +
937 +       if (priv->old_link != phydev->link) {
938 +               status_changed = 1;
939 +               priv->old_link = phydev->link;
940 +       }
941 +
942 +       /* reflect duplex change in mac configuration */
943 +       if (phydev->link && phydev->duplex != priv->old_duplex) {
944 +               bcm_enet_set_duplex(priv,
945 +                                   (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
946 +               status_changed = 1;
947 +               priv->old_duplex = phydev->duplex;
948 +       }
949 +
950 +       /* enable flow control if remote advertises it (trust phylib
951 +        * to check that duplex is full) */
952 +       if (phydev->link && phydev->pause != priv->old_pause) {
953 +               int rx_pause_en, tx_pause_en;
954 +
955 +               if (phydev->pause) {
956 +                       /* pause was advertised by lpa and us */
957 +                       rx_pause_en = 1;
958 +                       tx_pause_en = 1;
959 +               } else if (!priv->pause_auto) {
960 +                       /* pause setting overridden by user */
961 +                       rx_pause_en = priv->pause_rx;
962 +                       tx_pause_en = priv->pause_tx;
963 +               } else {
964 +                       rx_pause_en = 0;
965 +                       tx_pause_en = 0;
966 +               }
967 +
968 +               bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
969 +               status_changed = 1;
970 +               priv->old_pause = phydev->pause;
971 +       }
972 +
973 +       if (status_changed) {
974 +               pr_info("%s: link %s", dev->name, phydev->link ?
975 +                       "UP" : "DOWN");
976 +               if (phydev->link)
977 +                       printk(" - %d/%s - flow control %s", phydev->speed,
978 +                              DUPLEX_FULL == phydev->duplex ? "full" : "half",
979 +                              phydev->pause == 1 ? "rx&tx" : "off");
980 +
981 +               printk("\n");
982 +       }
983 +}
984 +
985 +/*
986 + * link changed callback (if phylib is not used)
987 + */
988 +static void bcm_enet_adjust_link(struct net_device *dev)
989 +{
990 +       struct bcm_enet_priv *priv;
991 +
992 +       priv = netdev_priv(dev);
993 +       bcm_enet_set_duplex(priv, priv->force_duplex_full);
994 +       bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
995 +
996 +       pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
997 +               dev->name,
998 +               priv->force_speed_100 ? 100 : 10,
999 +               priv->force_duplex_full ? "full" : "half",
1000 +               priv->pause_rx ? "rx" : "off",
1001 +               priv->pause_tx ? "tx" : "off");
1002 +}
1003 +
1004 +/*
1005 + * open callback, allocate dma rings & buffers and start rx operation
1006 + */
1007 +static int bcm_enet_open(struct net_device *dev)
1008 +{
1009 +       struct bcm_enet_priv *priv;
1010 +       struct sockaddr addr;
1011 +       struct device *kdev;
1012 +       struct phy_device *phydev;
1013 +       int irq_requested, i, ret;
1014 +       unsigned int size;
1015 +       char phy_id[BUS_ID_SIZE];
1016 +       void *p;
1017 +       u32 val;
1018 +
1019 +       priv = netdev_priv(dev);
1020 +       priv->rx_desc_cpu = priv->tx_desc_cpu = NULL;
1021 +       priv->rx_skb = priv->tx_skb = NULL;
1022 +
1023 +       kdev = &priv->pdev->dev;
1024 +
1025 +       if (priv->has_phy) {
1026 +               /* connect to PHY */
1027 +               snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT,
1028 +                        priv->mac_id ? "1" : "0", priv->phy_id);
1029 +
1030 +               phydev = phy_connect(dev, phy_id, &bcm_enet_adjust_phy_link, 0,
1031 +                                    PHY_INTERFACE_MODE_MII);
1032 +
1033 +               if (IS_ERR(phydev)) {
1034 +                       dev_err(kdev, "could not attach to PHY\n");
1035 +                       return PTR_ERR(phydev);
1036 +               }
1037 +
1038 +               /* mask with MAC supported features */
1039 +               phydev->supported &= (SUPPORTED_10baseT_Half |
1040 +                                     SUPPORTED_10baseT_Full |
1041 +                                     SUPPORTED_100baseT_Half |
1042 +                                     SUPPORTED_100baseT_Full |
1043 +                                     SUPPORTED_Autoneg |
1044 +                                     SUPPORTED_Pause |
1045 +                                     SUPPORTED_MII);
1046 +               phydev->advertising = phydev->supported;
1047 +
1048 +               if (priv->pause_auto && priv->pause_rx && priv->pause_tx)
1049 +                       phydev->advertising |= SUPPORTED_Pause;
1050 +               else
1051 +                       phydev->advertising &= ~SUPPORTED_Pause;
1052 +
1053 +               dev_info(kdev, "attached PHY at address %d [%s]\n",
1054 +                        phydev->addr, phydev->drv->name);
1055 +
1056 +               priv->old_link = 0;
1057 +               priv->old_duplex = -1;
1058 +               priv->old_pause = -1;
1059 +               priv->phydev = phydev;
1060 +       }
1061 +
1062 +       /* mask all interrupts and request them */
1063 +       enet_writel(priv, 0, ENET_IRMASK_REG);
1064 +       enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
1065 +       enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
1066 +
1067 +       irq_requested = 0;
1068 +       ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
1069 +       if (ret)
1070 +               goto out;
1071 +       irq_requested++;
1072 +
1073 +       ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
1074 +                         IRQF_SAMPLE_RANDOM | IRQF_DISABLED, dev->name, dev);
1075 +       if (ret)
1076 +               goto out;
1077 +       irq_requested++;
1078 +
1079 +       ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
1080 +                         IRQF_DISABLED, dev->name, dev);
1081 +       if (ret)
1082 +               goto out;
1083 +       irq_requested++;
1084 +
1085 +       /* initialize perfect match registers */
1086 +       for (i = 0; i < 4; i++) {
1087 +               enet_writel(priv, 0, ENET_PML_REG(i));
1088 +               enet_writel(priv, 0, ENET_PMH_REG(i));
1089 +       }
1090 +
1091 +       /* write device mac address */
1092 +       memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
1093 +       bcm_enet_set_mac_address(dev, &addr);
1094 +
1095 +       /* allocate rx dma ring */
1096 +       size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
1097 +       p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
1098 +       if (!p) {
1099 +               dev_err(kdev, "cannot allocate rx ring %u\n", size);
1100 +               ret = -ENOMEM;
1101 +               goto out;
1102 +       }
1103 +
1104 +       memset(p, 0, size);
1105 +       priv->rx_desc_alloc_size = size;
1106 +       priv->rx_desc_cpu = p;
1107 +
1108 +       /* allocate tx dma ring */
1109 +       size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
1110 +       p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
1111 +       if (!p) {
1112 +               dev_err(kdev, "cannot allocate tx ring\n");
1113 +               ret = -ENOMEM;
1114 +               goto out;
1115 +       }
1116 +
1117 +       memset(p, 0, size);
1118 +       priv->tx_desc_alloc_size = size;
1119 +       priv->tx_desc_cpu = p;
1120 +
1121 +       priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
1122 +                              GFP_KERNEL);
1123 +       if (!priv->tx_skb) {
1124 +               dev_err(kdev, "cannot allocate rx skb queue\n");
1125 +               ret = -ENOMEM;
1126 +               goto out;
1127 +       }
1128 +
1129 +       priv->tx_desc_count = priv->tx_ring_size;
1130 +       priv->tx_dirty_desc = 0;
1131 +       priv->tx_curr_desc = 0;
1132 +       spin_lock_init(&priv->tx_lock);
1133 +
1134 +       /* init & fill rx ring with skbs */
1135 +       priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size,
1136 +                              GFP_KERNEL);
1137 +       if (!priv->rx_skb) {
1138 +               dev_err(kdev, "cannot allocate rx skb queue\n");
1139 +               ret = -ENOMEM;
1140 +               goto out;
1141 +       }
1142 +
1143 +       priv->rx_desc_count = 0;
1144 +       priv->rx_dirty_desc = 0;
1145 +       priv->rx_curr_desc = 0;
1146 +
1147 +       /* initialize flow control buffer allocation */
1148 +       enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
1149 +                       ENETDMA_BUFALLOC_REG(priv->rx_chan));
1150 +
1151 +       if (bcm_enet_refill_rx(dev)) {
1152 +               dev_err(kdev, "cannot allocate rx skb queue\n");
1153 +               ret = -ENOMEM;
1154 +               goto out;
1155 +       }
1156 +
1157 +       /* write rx & tx ring addresses */
1158 +       enet_dma_writel(priv, priv->rx_desc_dma,
1159 +                       ENETDMA_RSTART_REG(priv->rx_chan));
1160 +       enet_dma_writel(priv, priv->tx_desc_dma,
1161 +                       ENETDMA_RSTART_REG(priv->tx_chan));
1162 +
1163 +       /* clear remaining state ram for rx & tx channel */
1164 +       enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan));
1165 +       enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan));
1166 +       enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan));
1167 +       enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan));
1168 +       enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan));
1169 +       enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan));
1170 +
1171 +       /* set max rx/tx length */
1172 +       enet_writel(priv, BCMENET_MAX_RX_SIZE, ENET_RXMAXLEN_REG);
1173 +       enet_writel(priv, BCMENET_MAX_TX_SIZE, ENET_TXMAXLEN_REG);
1174 +
1175 +       /* set dma maximum burst len */
1176 +       enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
1177 +                       ENETDMA_MAXBURST_REG(priv->rx_chan));
1178 +       enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
1179 +                       ENETDMA_MAXBURST_REG(priv->tx_chan));
1180 +
1181 +       /* set correct transmit fifo watermark */
1182 +       enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);
1183 +
1184 +       /* set flow control low/high threshold to 1/3 / 2/3 */
1185 +       val = priv->rx_ring_size / 3;
1186 +       enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
1187 +       val = (priv->rx_ring_size * 2) / 3;
1188 +       enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
1189 +
1190 +       /* all set, enable mac and interrupts, start dma engine and
1191 +        * kick rx dma channel */
1192 +       wmb();
1193 +       enet_writel(priv, ENET_CTL_ENABLE_MASK, ENET_CTL_REG);
1194 +       enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
1195 +       enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
1196 +                       ENETDMA_CHANCFG_REG(priv->rx_chan));
1197 +
1198 +       /* watch "mib counters about to overflow" interrupt */
1199 +       enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
1200 +       enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
1201 +
1202 +       /* watch "packet transferred" interrupt in rx and tx */
1203 +       enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
1204 +                       ENETDMA_IR_REG(priv->rx_chan));
1205 +       enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
1206 +                       ENETDMA_IR_REG(priv->tx_chan));
1207 +
1208 +       /* make sure we enable napi before rx interrupt */
1209 +       napi_enable(&priv->napi);
1210 +
1211 +       enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
1212 +                       ENETDMA_IRMASK_REG(priv->rx_chan));
1213 +       enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
1214 +                       ENETDMA_IRMASK_REG(priv->tx_chan));
1215 +
1216 +       if (priv->has_phy)
1217 +               phy_start(priv->phydev);
1218 +       else
1219 +               bcm_enet_adjust_link(dev);
1220 +
1221 +       netif_start_queue(dev);
1222 +       return 0;
1223 +
1224 +out:
1225 +       if (priv->has_phy) phy_disconnect(priv->phydev);
1226 +       if (irq_requested > 2)
1227 +               free_irq(priv->irq_tx, dev);
1228 +       if (irq_requested > 1)
1229 +               free_irq(priv->irq_rx, dev);
1230 +       if (irq_requested > 0)
1231 +               free_irq(dev->irq, dev);
1232 +       for (i = 0; i < priv->rx_ring_size; i++) {
1233 +               struct bcm_enet_desc *desc;
1234 +
1235 +               if (!priv->rx_skb[i])
1236 +                       continue;
1237 +
1238 +               desc = &priv->rx_desc_cpu[i];
1239 +               dma_unmap_single(kdev, desc->address, BCMENET_MAX_RX_SIZE,
1240 +                                DMA_FROM_DEVICE);
1241 +               kfree_skb(priv->rx_skb[i]);
1242 +       }
1243 +       if (priv->rx_desc_cpu)
1244 +               dma_free_coherent(kdev, priv->rx_desc_alloc_size,
1245 +                                 priv->rx_desc_cpu, priv->rx_desc_dma);
1246 +       if (priv->tx_desc_cpu)
1247 +               dma_free_coherent(kdev, priv->tx_desc_alloc_size,
1248 +                                 priv->tx_desc_cpu, priv->tx_desc_dma);
1249 +       kfree(priv->rx_skb);
1250 +       kfree(priv->tx_skb);
1251 +       return ret;
1252 +}
1253 +
1254 +/*
1255 + * disable mac
1256 + */
1257 +static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
1258 +{
1259 +       int limit;
1260 +       u32 val;
1261 +
1262 +       val = enet_readl(priv, ENET_CTL_REG);
1263 +       val |= ENET_CTL_DISABLE_MASK;
1264 +       enet_writel(priv, val, ENET_CTL_REG);
1265 +
1266 +       limit = 1000;
1267 +       do {
1268 +               u32 val;
1269 +
1270 +               val = enet_readl(priv, ENET_CTL_REG);
1271 +               if (!(val & ENET_CTL_DISABLE_MASK))
1272 +                       break;
1273 +               udelay(1);
1274 +       } while (limit--);
1275 +}
1276 +
1277 +/*
1278 + * disable dma in given channel
1279 + */
1280 +static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
1281 +{
1282 +       int limit;
1283 +
1284 +       enet_dma_writel(priv, 0, ENETDMA_CHANCFG_REG(chan));
1285 +
1286 +       limit = 1000;
1287 +       do {
1288 +               u32 val;
1289 +
1290 +               val = enet_dma_readl(priv, ENETDMA_CHANCFG_REG(chan));
1291 +               if (!(val & ENETDMA_CHANCFG_EN_MASK))
1292 +                       break;
1293 +               udelay(1);
1294 +       } while (limit--);
1295 +}
1296 +
1297 +/*
1298 + * stop callback
1299 + */
1300 +static int bcm_enet_stop(struct net_device *dev)
1301 +{
1302 +       struct bcm_enet_priv *priv;
1303 +       struct device *kdev;
1304 +       int i;
1305 +
1306 +       priv = netdev_priv(dev);
1307 +       kdev = &priv->pdev->dev;
1308 +
1309 +       netif_stop_queue(dev);
1310 +       napi_disable(&priv->napi);
1311 +       if (priv->has_phy)
1312 +               phy_stop(priv->phydev);
1313 +       del_timer_sync(&priv->rx_timeout);
1314 +
1315 +       /* mask all interrupts */
1316 +       enet_writel(priv, 0, ENET_IRMASK_REG);
1317 +       enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
1318 +       enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
1319 +
1320 +       /* make sure no mib update is scheduled */
1321 +       flush_scheduled_work();
1322 +
1323 +       /* disable dma & mac */
1324 +       bcm_enet_disable_dma(priv, priv->tx_chan);
1325 +       bcm_enet_disable_dma(priv, priv->rx_chan);
1326 +       bcm_enet_disable_mac(priv);
1327 +
1328 +       /* force reclaim of all tx buffers */
1329 +       bcm_enet_tx_reclaim(dev, 1);
1330 +
1331 +       /* free the rx skb ring */
1332 +       for (i = 0; i < priv->rx_ring_size; i++) {
1333 +               struct bcm_enet_desc *desc;
1334 +
1335 +               if (!priv->rx_skb[i])
1336 +                       continue;
1337 +
1338 +               desc = &priv->rx_desc_cpu[i];
1339 +               dma_unmap_single(kdev, desc->address, BCMENET_MAX_RX_SIZE,
1340 +                                DMA_FROM_DEVICE);
1341 +               kfree_skb(priv->rx_skb[i]);
1342 +       }
1343 +
1344 +       /* free remaining allocated memory */
1345 +       kfree(priv->rx_skb);
1346 +       kfree(priv->tx_skb);
1347 +       dma_free_coherent(kdev, priv->rx_desc_alloc_size,
1348 +                         priv->rx_desc_cpu, priv->rx_desc_dma);
1349 +       dma_free_coherent(kdev, priv->tx_desc_alloc_size,
1350 +                         priv->tx_desc_cpu, priv->tx_desc_dma);
1351 +       free_irq(priv->irq_tx, dev);
1352 +       free_irq(priv->irq_rx, dev);
1353 +       free_irq(dev->irq, dev);
1354 +
1355 +       /* release phy */
1356 +       if (priv->has_phy) {
1357 +               phy_disconnect(priv->phydev);
1358 +               priv->phydev = NULL;
1359 +       }
1360 +
1361 +       return 0;
1362 +}
1363 +
1364 +/*
1365 + * core request to return device rx/tx stats
1366 + */
1367 +static struct net_device_stats *bcm_enet_get_stats(struct net_device *dev)
1368 +{
1369 +       struct bcm_enet_priv *priv;
1370 +
1371 +       priv = netdev_priv(dev);
1372 +       return &priv->stats;
1373 +}
1374 +
1375 +/*
1376 + * ethtool callbacks
1377 + */
1378 +struct bcm_enet_stats {
1379 +       char stat_string[ETH_GSTRING_LEN];
1380 +       int sizeof_stat;
1381 +       int stat_offset;
1382 +       int mib_reg;
1383 +};
1384 +
1385 +#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m),            \
1386 +                    offsetof(struct bcm_enet_priv, m)
1387 +
1388 +static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
1389 +       { "rx_packets", GEN_STAT(stats.rx_packets), -1 },
1390 +       { "tx_packets", GEN_STAT(stats.tx_packets), -1 },
1391 +       { "rx_bytes", GEN_STAT(stats.rx_bytes), -1 },
1392 +       { "tx_bytes", GEN_STAT(stats.tx_bytes), -1 },
1393 +       { "rx_errors", GEN_STAT(stats.rx_errors), -1 },
1394 +       { "tx_errors", GEN_STAT(stats.tx_errors), -1 },
1395 +       { "rx_dropped", GEN_STAT(stats.rx_dropped), -1 },
1396 +       { "tx_dropped", GEN_STAT(stats.tx_dropped), -1 },
1397 +
1398 +       { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS},
1399 +       { "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
1400 +       { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
1401 +       { "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
1402 +       { "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
1403 +       { "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
1404 +       { "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
1405 +       { "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
1406 +       { "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
1407 +       { "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
1408 +       { "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
1409 +       { "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
1410 +       { "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
1411 +       { "rx_dropped", GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
1412 +       { "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
1413 +       { "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
1414 +       { "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
1415 +       { "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
1416 +       { "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
1417 +       { "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
1418 +       { "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },
1419 +
1420 +       { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
1421 +       { "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
1422 +       { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
1423 +       { "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
1424 +       { "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
1425 +       { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
1426 +       { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
1427 +       { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
1428 +       { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023},
1429 +       { "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
1430 +       { "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
1431 +       { "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
1432 +       { "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
1433 +       { "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
1434 +       { "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
1435 +       { "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
1436 +       { "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
1437 +       { "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
1438 +       { "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
1439 +       { "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
1440 +       { "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
1441 +       { "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },
1442 +
1443 +};
1444 +
1445 +#define BCM_ENET_STATS_LEN     \
1446 +       (sizeof(bcm_enet_gstrings_stats) / sizeof(struct bcm_enet_stats))
1447 +
1448 +static const u32 unused_mib_regs[] = {
1449 +       ETH_MIB_TX_ALL_OCTETS,
1450 +       ETH_MIB_TX_ALL_PKTS,
1451 +       ETH_MIB_RX_ALL_OCTETS,
1452 +       ETH_MIB_RX_ALL_PKTS,
1453 +};
1454 +
1455 +
1456 +static void bcm_enet_get_drvinfo(struct net_device *netdev,
1457 +                                struct ethtool_drvinfo *drvinfo)
1458 +{
1459 +       strncpy(drvinfo->driver, bcm_enet_driver_name, 32);
1460 +       strncpy(drvinfo->version, bcm_enet_driver_version, 32);
1461 +       strncpy(drvinfo->fw_version, "N/A", 32);
1462 +       strncpy(drvinfo->bus_info, "bcm63xx", 32);
1463 +       drvinfo->n_stats = BCM_ENET_STATS_LEN;
1464 +}
1465 +
1466 +static int bcm_enet_get_stats_count(struct net_device *netdev)
1467 +{
1468 +       return BCM_ENET_STATS_LEN;
1469 +}
1470 +
1471 +static void bcm_enet_get_strings(struct net_device *netdev,
1472 +                                u32 stringset, u8 *data)
1473 +{
1474 +       int i;
1475 +
1476 +       switch (stringset) {
1477 +       case ETH_SS_STATS:
1478 +               for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
1479 +                       memcpy(data + i * ETH_GSTRING_LEN,
1480 +                              bcm_enet_gstrings_stats[i].stat_string,
1481 +                              ETH_GSTRING_LEN);
1482 +               }
1483 +               break;
1484 +       }
1485 +}
1486 +
1487 +static void update_mib_counters(struct bcm_enet_priv *priv)
1488 +{
1489 +       int i;
1490 +
1491 +       for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
1492 +               const struct bcm_enet_stats *s;
1493 +               u32 val;
1494 +               char *p;
1495 +
1496 +               s = &bcm_enet_gstrings_stats[i];
1497 +               if (s->mib_reg == -1)
1498 +                       continue;
1499 +
1500 +               val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
1501 +               p = (char *)priv + s->stat_offset;
1502 +
1503 +               if (s->sizeof_stat == sizeof(u64))
1504 +                       *(u64 *)p += val;
1505 +               else
1506 +                       *(u32 *)p += val;
1507 +       }
1508 +
1509 +       /* also drain the unused mib counters to make sure the mib
1510 +        * counter overflow interrupt is cleared */
1511 +       for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
1512 +               (void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
1513 +}
1514 +
1515 +static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
1516 +{
1517 +       struct bcm_enet_priv *priv;
1518 +
1519 +       priv = container_of(t, struct bcm_enet_priv, mib_update_task);
1520 +       mutex_lock(&priv->mib_update_lock);
1521 +       update_mib_counters(priv);
1522 +       mutex_unlock(&priv->mib_update_lock);
1523 +
1524 +       /* reenable mib interrupt */
1525 +       if (netif_running(priv->net_dev))
1526 +               enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
1527 +}
1528 +
1529 +static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
1530 +                                      struct ethtool_stats *stats,
1531 +                                      u64 *data)
1532 +{
1533 +       struct bcm_enet_priv *priv;
1534 +       int i;
1535 +
1536 +       priv = netdev_priv(netdev);
1537 +
1538 +       mutex_lock(&priv->mib_update_lock);
1539 +       update_mib_counters(priv);
1540 +
1541 +       for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
1542 +               const struct bcm_enet_stats *s;
1543 +               char *p;
1544 +
1545 +               s = &bcm_enet_gstrings_stats[i];
1546 +               p = (char *)priv + s->stat_offset;
1547 +               data[i] = (s->sizeof_stat == sizeof(u64)) ?
1548 +                       *(u64 *)p : *(u32 *)p;
1549 +       }
1550 +       mutex_unlock(&priv->mib_update_lock);
1551 +}
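
Illustrative userspace sketch (not part of the patch): the counters exported above are retrieved through the standard SIOCETHTOOL/ETHTOOL_GSTATS interface, which is what "ethtool -S" uses. The helper name is made up, and n_stats would normally come from ETHTOOL_GDRVINFO (drvinfo.n_stats, filled by bcm_enet_get_drvinfo above).

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* dump n_stats 64-bit counters for ifname; error handling trimmed */
static void dump_enet_stats(const char *ifname, unsigned int n_stats)
{
	struct ethtool_stats *stats;
	struct ifreq ifr;
	unsigned int i;
	int fd;

	stats = calloc(1, sizeof(*stats) + n_stats * sizeof(__u64));
	if (!stats)
		return;
	stats->cmd = ETHTOOL_GSTATS;
	stats->n_stats = n_stats;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)stats;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd >= 0 && ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		for (i = 0; i < n_stats; i++)
			printf("stat[%u] = %llu\n", i,
			       (unsigned long long)stats->data[i]);
	free(stats);
}
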
1552 +
1553 +static int bcm_enet_get_settings(struct net_device *dev,
1554 +                                struct ethtool_cmd *cmd)
1555 +{
1556 +       struct bcm_enet_priv *priv;
1557 +
1558 +       priv = netdev_priv(dev);
1559 +
1560 +       cmd->maxrxpkt = 0;
1561 +       cmd->maxtxpkt = 0;
1562 +
1563 +       if (priv->has_phy) {
1564 +               if (!priv->phydev)
1565 +                       return -ENODEV;
1566 +               return phy_ethtool_gset(priv->phydev, cmd);
1567 +       } else {
1568 +               cmd->autoneg = 0;
1569 +               cmd->speed = (priv->force_speed_100) ? SPEED_100 : SPEED_10;
1570 +               cmd->duplex = (priv->force_duplex_full) ?
1571 +                       DUPLEX_FULL : DUPLEX_HALF;
1572 +               cmd->supported = SUPPORTED_10baseT_Half |
1573 +                       SUPPORTED_10baseT_Full |
1574 +                       SUPPORTED_100baseT_Half |
1575 +                       SUPPORTED_100baseT_Full;
1576 +               cmd->advertising = 0;
1577 +               cmd->port = PORT_MII;
1578 +               cmd->transceiver = XCVR_EXTERNAL;
1579 +       }
1580 +       return 0;
1581 +}
1582 +
1583 +static int bcm_enet_set_settings(struct net_device *dev,
1584 +                                struct ethtool_cmd *cmd)
1585 +{
1586 +       struct bcm_enet_priv *priv;
1587 +
1588 +       priv = netdev_priv(dev);
1589 +       if (priv->has_phy) {
1590 +               if (!priv->phydev)
1591 +                       return -ENODEV;
1592 +               return phy_ethtool_sset(priv->phydev, cmd);
1593 +       } else {
1594 +
1595 +               if (cmd->autoneg ||
1596 +                   (cmd->speed != SPEED_100 && cmd->speed != SPEED_10) ||
1597 +                   cmd->port != PORT_MII)
1598 +                       return -EINVAL;
1599 +
1600 +               priv->force_speed_100 = (cmd->speed == SPEED_100) ? 1 : 0;
1601 +               priv->force_duplex_full = (cmd->duplex == DUPLEX_FULL) ? 1 : 0;
1602 +
1603 +               if (netif_running(dev))
1604 +                       bcm_enet_adjust_link(dev);
1605 +               return 0;
1606 +       }
1607 +}
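
A hedged userspace sketch (not part of the patch) of the ETHTOOL_GSET/ETHTOOL_SSET round trip this handler serves. Since bcm_enet_set_settings() above rejects autoneg and non-MII ports on a phy-less port, a valid forced configuration looks like this; the function name is illustrative:

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* force 100 Mbit/full duplex; fd is an AF_INET datagram socket */
static int force_100_full(int fd, const char *ifname)
{
	struct ethtool_cmd ecmd;
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&ecmd;

	/* fetch current settings first, then modify */
	ecmd.cmd = ETHTOOL_GSET;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return -1;

	ecmd.cmd = ETHTOOL_SSET;
	ecmd.autoneg = AUTONEG_DISABLE;
	ecmd.speed = SPEED_100;
	ecmd.duplex = DUPLEX_FULL;
	ecmd.port = PORT_MII;
	return ioctl(fd, SIOCETHTOOL, &ifr);
}
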
1608 +
1609 +static void bcm_enet_get_ringparam(struct net_device *dev,
1610 +                                  struct ethtool_ringparam *ering)
1611 +{
1612 +       struct bcm_enet_priv *priv;
1613 +
1614 +       priv = netdev_priv(dev);
1615 +
1616 +       /* rx/tx ring is actually only limited by memory */
1617 +       ering->rx_max_pending = 8192;
1618 +       ering->tx_max_pending = 8192;
1619 +       ering->rx_mini_max_pending = 0;
1620 +       ering->rx_jumbo_max_pending = 0;
1621 +       ering->rx_pending = priv->rx_ring_size;
1622 +       ering->tx_pending = priv->tx_ring_size;
1623 +}
1624 +
1625 +static int bcm_enet_set_ringparam(struct net_device *dev,
1626 +                                 struct ethtool_ringparam *ering)
1627 +{
1628 +       struct bcm_enet_priv *priv;
1629 +       int was_running;
1630 +
1631 +       priv = netdev_priv(dev);
1632 +
1633 +       was_running = 0;
1634 +       if (netif_running(dev)) {
1635 +               bcm_enet_stop(dev);
1636 +               was_running = 1;
1637 +       }
1638 +
1639 +       priv->rx_ring_size = ering->rx_pending;
1640 +       priv->tx_ring_size = ering->tx_pending;
1641 +
1642 +       if (was_running) {
1643 +               int err;
1644 +
1645 +               err = bcm_enet_open(dev);
1646 +               if (err)
1647 +                       dev_close(dev);
1648 +               else
1649 +                       bcm_enet_set_multicast_list(dev);
1650 +       }
1651 +       return 0;
1652 +}
1653 +
1654 +static void bcm_enet_get_pauseparam(struct net_device *dev,
1655 +                                   struct ethtool_pauseparam *ecmd)
1656 +{
1657 +       struct bcm_enet_priv *priv;
1658 +
1659 +       priv = netdev_priv(dev);
1660 +       ecmd->autoneg = priv->pause_auto;
1661 +       ecmd->rx_pause = priv->pause_rx;
1662 +       ecmd->tx_pause = priv->pause_tx;
1663 +}
1664 +
1665 +static int bcm_enet_set_pauseparam(struct net_device *dev,
1666 +                                  struct ethtool_pauseparam *ecmd)
1667 +{
1668 +       struct bcm_enet_priv *priv;
1669 +
1670 +       priv = netdev_priv(dev);
1671 +
1672 +       if (priv->has_phy) {
1673 +               if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
1674 +                       /* asymmetric pause mode not supported,
1675 +                        * actually possible but the integrated PHY has a
1676 +                        * read-only asym_pause bit */
1677 +                       return -EINVAL;
1678 +               }
1679 +       } else {
1680 +               /* no pause autoneg on direct mii connection */
1681 +               if (ecmd->autoneg)
1682 +                       return -EINVAL;
1683 +       }
1684 +
1685 +       priv->pause_auto = ecmd->autoneg;
1686 +       priv->pause_rx = ecmd->rx_pause;
1687 +       priv->pause_tx = ecmd->tx_pause;
1688 +
1689 +       return 0;
1690 +}
1691 +
1692 +static struct ethtool_ops bcm_enet_ethtool_ops = {
1693 +       .get_strings            = bcm_enet_get_strings,
1694 +       .get_stats_count        = bcm_enet_get_stats_count,
1695 +       .get_ethtool_stats      = bcm_enet_get_ethtool_stats,
1696 +       .get_settings           = bcm_enet_get_settings,
1697 +       .set_settings           = bcm_enet_set_settings,
1698 +       .get_drvinfo            = bcm_enet_get_drvinfo,
1699 +       .get_link               = ethtool_op_get_link,
1700 +       .get_ringparam          = bcm_enet_get_ringparam,
1701 +       .set_ringparam          = bcm_enet_set_ringparam,
1702 +       .get_pauseparam         = bcm_enet_get_pauseparam,
1703 +       .set_pauseparam         = bcm_enet_set_pauseparam,
1704 +};
1705 +
1706 +static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1707 +{
1708 +       struct bcm_enet_priv *priv;
1709 +
1710 +       priv = netdev_priv(dev);
1711 +       if (priv->has_phy) {
1712 +               if (!priv->phydev)
1713 +                       return -ENODEV;
1714 +               return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
1715 +       } else {
1716 +               struct mii_if_info mii;
1717 +
1718 +               mii.dev = dev;
1719 +               mii.mdio_read = bcm_enet_mdio_read_mii;
1720 +               mii.mdio_write = bcm_enet_mdio_write_mii;
1721 +               mii.phy_id = 0;
1722 +               mii.phy_id_mask = 0x3f;
1723 +               mii.reg_num_mask = 0x1f;
1724 +               return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
1725 +       }
1726 +}
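
And the matching userspace side of the ioctl above (sketch only, not part of the patch): reading a PHY register through SIOCGMIIPHY/SIOCGMIIREG, the same path mii-tool uses.

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

/* read one MII register (e.g. MII_BMSR) via the MII ioctls serviced
 * by bcm_enet_ioctl(); fd is an AF_INET datagram socket */
static int read_mii_reg(int fd, const char *ifname, int reg)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0)   /* fills mii->phy_id */
		return -1;
	mii->reg_num = reg;
	if (ioctl(fd, SIOCGMIIREG, &ifr) < 0)
		return -1;
	return mii->val_out;
}
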
1727 +
1728 +/*
1729 + * preinit hardware to allow mii operation while device is down
1730 + */
1731 +static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
1732 +{
1733 +       u32 val;
1734 +       int limit;
1735 +
1736 +       /* make sure mac is disabled */
1737 +       bcm_enet_disable_mac(priv);
1738 +
1739 +       /* soft reset mac */
1740 +       val = ENET_CTL_SRESET_MASK;
1741 +       enet_writel(priv, val, ENET_CTL_REG);
1742 +       wmb();
1743 +
1744 +       limit = 1000;
1745 +       do {
1746 +               val = enet_readl(priv, ENET_CTL_REG);
1747 +               if (!(val & ENET_CTL_SRESET_MASK))
1748 +                       break;
1749 +               udelay(1);
1750 +       } while (limit--);
1751 +
1752 +       /* select correct mii interface */
1753 +       val = enet_readl(priv, ENET_CTL_REG);
1754 +       if (priv->use_external_mii)
1755 +               val |= ENET_CTL_EPHYSEL_MASK;
1756 +       else
1757 +               val &= ~ENET_CTL_EPHYSEL_MASK;
1758 +       enet_writel(priv, val, ENET_CTL_REG);
1759 +
1760 +       /* turn on mdc clock */
1761 +       enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
1762 +                   ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);
1763 +
1764 +       /* set mib counters to self-clear when read */
1765 +       val = enet_readl(priv, ENET_MIBCTL_REG);
1766 +       val |= ENET_MIBCTL_RDCLEAR_MASK;
1767 +       enet_writel(priv, val, ENET_MIBCTL_REG);
1768 +}
1769 +
1770 +/*
1771 + * allocate netdevice, request register memory and register device.
1772 + */
1773 +static int __devinit bcm_enet_probe(struct platform_device *pdev)
1774 +{
1775 +       struct bcm_enet_priv *priv;
1776 +       struct net_device *dev;
1777 +       struct bcm63xx_enet_platform_data *pd;
1778 +       struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
1779 +       struct mii_bus *bus;
1780 +       const char *clk_name;
1781 +       unsigned int iomem_size;
1782 +       int i, ret, mdio_registered, mem_requested;
1783 +
1784 +       /* stop if the shared driver failed; assume driver->probe is
1785 +        * called in the same order we registered the devices (correct?) */
1786 +       if (!bcm_enet_shared_base)
1787 +               return -ENODEV;
1788 +
1789 +       mdio_registered = mem_requested = 0;
1790 +
1791 +       res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1792 +       res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1793 +       res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
1794 +       res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
1795 +       if (!res_mem || !res_irq || !res_irq_rx || !res_irq_tx)
1796 +               return -ENODEV;
1797 +
1798 +       ret = 0;
1799 +       dev = alloc_etherdev(sizeof(*priv));
1800 +       if (!dev)
1801 +               return -ENOMEM;
1802 +       priv = netdev_priv(dev);
1803 +       memset(priv, 0, sizeof(*priv));
1804 +
1805 +       iomem_size = res_mem->end - res_mem->start + 1;
1806 +       if (!request_mem_region(res_mem->start, iomem_size, "bcm63xx_enet")) {
1807 +               ret = -EBUSY;
1808 +               goto err;
1809 +       }
1810 +       mem_requested = 1;
1811 +
1812 +       priv->base = ioremap(res_mem->start, iomem_size);
1813 +       if (priv->base == NULL) {
1814 +               ret = -ENOMEM;
1815 +               goto err;
1816 +       }
1817 +       dev->irq = priv->irq = res_irq->start;
1818 +       priv->irq_rx = res_irq_rx->start;
1819 +       priv->irq_tx = res_irq_tx->start;
1820 +       priv->mac_id = pdev->id;
1821 +
1822 +       /* get rx & tx dma channel id for this mac */
1823 +       if (priv->mac_id == 0) {
1824 +               priv->rx_chan = 0;
1825 +               priv->tx_chan = 1;
1826 +               clk_name = "enet0";
1827 +       } else {
1828 +               priv->rx_chan = 2;
1829 +               priv->tx_chan = 3;
1830 +               clk_name = "enet1";
1831 +       }
1832 +
1833 +       priv->mac_clk = clk_get(&pdev->dev, clk_name);
1834 +       if (IS_ERR(priv->mac_clk)) {
1835 +               ret = PTR_ERR(priv->mac_clk);
1836 +               priv->mac_clk = NULL;
1837 +               goto err;
1838 +       }
1839 +       clk_enable(priv->mac_clk);
1840 +
1841 +       /* initialize defaults and fetch platform data */
1842 +       priv->rx_ring_size = BCMENET_DEF_RX_DESC;
1843 +       priv->tx_ring_size = BCMENET_DEF_TX_DESC;
1844 +
1845 +       pd = pdev->dev.platform_data;
1846 +       if (pd) {
1847 +               memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
1848 +               priv->has_phy = pd->has_phy;
1849 +               priv->phy_id = pd->phy_id;
1850 +               priv->has_phy_interrupt = pd->has_phy_interrupt;
1851 +               priv->phy_interrupt = pd->phy_interrupt;
1852 +               priv->use_external_mii = !pd->use_internal_phy;
1853 +               priv->pause_auto = pd->pause_auto;
1854 +               priv->pause_rx = pd->pause_rx;
1855 +               priv->pause_tx = pd->pause_tx;
1856 +               priv->force_duplex_full = pd->force_duplex_full;
1857 +               priv->force_speed_100 = pd->force_speed_100;
1858 +       }
1859 +
1860 +       if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) {
1861 +               /* using internal PHY, enable clock */
1862 +               priv->phy_clk = clk_get(&pdev->dev, "ephy");
1863 +               if (IS_ERR(priv->phy_clk)) {
1864 +                       ret = PTR_ERR(priv->phy_clk);
1865 +                       priv->phy_clk = NULL;
1866 +                       goto err;
1867 +               }
1868 +               clk_enable(priv->phy_clk);
1869 +       }
1870 +
1871 +       /* do minimal hardware init to be able to probe mii bus */
1872 +       bcm_enet_hw_preinit(priv);
1873 +
1874 +       /* MII bus registration */
1875 +       if (priv->has_phy) {
1876 +               bus = &priv->mii_bus;
1877 +               bus->name = "bcm63xx_enet MII bus";
1878 +               bus->dev = &pdev->dev;
1879 +               bus->priv = priv;
1880 +               bus->read = bcm_enet_mdio_read_phylib;
1881 +               bus->write = bcm_enet_mdio_write_phylib;
1882 +               sprintf(bus->id, "%d", priv->mac_id);
1883 +
1884 +               /* only probe the bus where we think the PHY is, because
1885 +                * the mdio read operation returns 0 instead of 0xffff
1886 +                * if no slave is present on the hw */
1887 +               bus->phy_mask = ~(1 << priv->phy_id);
1888 +
1889 +               bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
1890 +               if (!bus->irq) {
1891 +                       ret = -ENOMEM;
1892 +                       goto err;
1893 +               }
1894 +
1895 +               if (priv->has_phy_interrupt)
1896 +                       bus->irq[priv->phy_id] = priv->phy_interrupt;
1897 +               else
1898 +                       bus->irq[priv->phy_id] = PHY_POLL;
1899 +
1900 +               ret = mdiobus_register(bus);
1901 +               if (ret) {
1902 +                       dev_err(&pdev->dev, "unable to register mdio bus\n");
1903 +                       goto err;
1904 +               }
1905 +               mdio_registered = 1;
1906 +       } else {
1907 +               /* run platform code to initialize the PHY device */
1908 +               if (pd && pd->mii_config &&
1909 +                   pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
1910 +                                  bcm_enet_mdio_write_mii)) {
1911 +                       dev_err(&pdev->dev, "unable to configure mdio bus\n");
1912 +                       ret = -ENODEV;
1913 +                       goto err;
1914 +               }
1915 +       }
1916 +
1917 +       spin_lock_init(&priv->rx_lock);
1918 +
1919 +       /* init rx timeout (used for oom) */
1920 +       init_timer(&priv->rx_timeout);
1921 +       priv->rx_timeout.function = bcm_enet_refill_rx_timer;
1922 +       priv->rx_timeout.data = (unsigned long)dev;
1923 +
1924 +       /* init the mib update lock&work */
1925 +       mutex_init(&priv->mib_update_lock);
1926 +       INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);
1927 +
1928 +       /* zero mib counters */
1929 +       for (i = 0; i < ENET_MIB_REG_COUNT; i++)
1930 +               enet_writel(priv, 0, ENET_MIB_REG(i));
1931 +
1932 +       /* register netdevice */
1933 +       dev->open = bcm_enet_open;
1934 +       dev->stop = bcm_enet_stop;
1935 +       dev->hard_start_xmit = bcm_enet_start_xmit;
1936 +       dev->get_stats = bcm_enet_get_stats;
1937 +       dev->set_mac_address = bcm_enet_set_mac_address;
1938 +       dev->set_multicast_list = bcm_enet_set_multicast_list;
1939 +       netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
1940 +       dev->do_ioctl = bcm_enet_ioctl;
1941 +#ifdef CONFIG_NET_POLL_CONTROLLER
1942 +       dev->poll_controller = bcm_enet_netpoll;
1943 +#endif
1944 +
1945 +       SET_ETHTOOL_OPS(dev, &bcm_enet_ethtool_ops);
1946 +
1947 +       ret = register_netdev(dev);
1948 +       if (ret)
1949 +               goto err;
1950 +
1951 +       platform_set_drvdata(pdev, dev);
1952 +       priv->pdev = pdev;
1953 +       priv->net_dev = dev;
1954 +       SET_NETDEV_DEV(dev, &pdev->dev);
1955 +
1956 +       return 0;
1957 +
1958 +err:
1959 +       if (mem_requested)
1960 +               release_mem_region(res_mem->start, iomem_size);
1961 +       if (mdio_registered)
1962 +               mdiobus_unregister(&priv->mii_bus);
1963 +       kfree(priv->mii_bus.irq);
1964 +       if (priv->mac_clk) {
1965 +               clk_disable(priv->mac_clk);
1966 +               clk_put(priv->mac_clk);
1967 +       }
1968 +       if (priv->phy_clk) {
1969 +               clk_disable(priv->phy_clk);
1970 +               clk_put(priv->phy_clk);
1971 +       }
1972 +       if (priv->base) {
1973 +               /* turn off mdc clock */
1974 +               enet_writel(priv, 0, ENET_MIISC_REG);
1975 +               iounmap(priv->base);
1976 +       }
1977 +       free_netdev(dev);
1978 +       return ret;
1979 +}
1980 +
1981 +
1982 +/*
1983 + * exit func, stops hardware and unregisters netdevice
1984 + */
1985 +static int __devexit bcm_enet_remove(struct platform_device *pdev)
1986 +{
1987 +       struct bcm_enet_priv *priv;
1988 +       struct net_device *dev;
1989 +       struct resource *res;
1990 +
1991 +       /* stop netdevice */
1992 +       dev = platform_get_drvdata(pdev);
1993 +       priv = netdev_priv(dev);
1994 +       unregister_netdev(dev);
1995 +
1996 +       /* turn off mdc clock */
1997 +       enet_writel(priv, 0, ENET_MIISC_REG);
1998 +
1999 +       if (priv->has_phy) {
2000 +               mdiobus_unregister(&priv->mii_bus);
2001 +               kfree(priv->mii_bus.irq);
2002 +       } else {
2003 +               struct bcm63xx_enet_platform_data *pd;
2004 +
2005 +               pd = pdev->dev.platform_data;
2006 +               if (pd && pd->mii_config)
2007 +                       pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
2008 +                                      bcm_enet_mdio_write_mii);
2009 +       }
2010 +
2011 +       /* release device resources */
2012 +       iounmap(priv->base);
2013 +       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2014 +       release_mem_region(res->start, res->end - res->start + 1);
2015 +
2016 +       /* disable hw block clocks */
2017 +       if (priv->phy_clk) {
2018 +               clk_disable(priv->phy_clk);
2019 +               clk_put(priv->phy_clk);
2020 +       }
2021 +       clk_disable(priv->mac_clk);
2022 +       clk_put(priv->mac_clk);
2023 +
2024 +       free_netdev(dev);
2025 +       return 0;
2026 +}
2027 +
2028 +struct platform_driver bcm63xx_enet_driver = {
2029 +       .probe  = bcm_enet_probe,
2030 +       .remove = __devexit_p(bcm_enet_remove),
2031 +       .driver = {
2032 +               .name   = "bcm63xx_enet",
2033 +               .owner  = THIS_MODULE,
2034 +       },
2035 +};
2036 +
2037 +/*
2038 + * reserve & remap memory space shared between all macs
2039 + */
2040 +static int __devinit bcm_enet_shared_probe(struct platform_device *pdev)
2041 +{
2042 +       struct resource *res;
2043 +       unsigned int iomem_size;
2044 +
2045 +       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2046 +       if (!res)
2047 +               return -ENODEV;
2048 +
2049 +       iomem_size = res->end - res->start + 1;
2050 +       if (!request_mem_region(res->start, iomem_size, "bcm63xx_enet_dma"))
2051 +               return -EBUSY;
2052 +
2053 +       bcm_enet_shared_base = ioremap(res->start, iomem_size);
2054 +       if (!bcm_enet_shared_base) {
2055 +               release_mem_region(res->start, iomem_size);
2056 +               return -ENOMEM;
2057 +       }
2058 +       return 0;
2059 +}
2060 +
2061 +static int __devexit bcm_enet_shared_remove(struct platform_device *pdev)
2062 +{
2063 +       struct resource *res;
2064 +
2065 +       iounmap(bcm_enet_shared_base);
2066 +       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2067 +       release_mem_region(res->start, res->end - res->start + 1);
2068 +       return 0;
2069 +}
2070 +
2071 +/*
2072 + * this "shared" driver is needed because both macs share a single
2073 + * address space
2074 + */
2075 +struct platform_driver bcm63xx_enet_shared_driver = {
2076 +       .probe  = bcm_enet_shared_probe,
2077 +       .remove = __devexit_p(bcm_enet_shared_remove),
2078 +       .driver = {
2079 +               .name   = "bcm63xx_enet_shared",
2080 +               .owner  = THIS_MODULE,
2081 +       },
2082 +};
2083 +
2084 +/*
2085 + * entry point
2086 + */
2087 +static int __init bcm_enet_init(void)
2088 +{
2089 +       int ret;
2090 +
2091 +       ret = platform_driver_register(&bcm63xx_enet_shared_driver);
2092 +       if (ret)
2093 +               return ret;
2094 +
2095 +       ret = platform_driver_register(&bcm63xx_enet_driver);
2096 +       if (ret)
2097 +               platform_driver_unregister(&bcm63xx_enet_shared_driver);
2098 +
2099 +       return ret;
2100 +}
2101 +
2102 +static void __exit bcm_enet_exit(void)
2103 +{
2104 +       platform_driver_unregister(&bcm63xx_enet_driver);
2105 +       platform_driver_unregister(&bcm63xx_enet_shared_driver);
2106 +}
2107 +
2108 +
2109 +module_init(bcm_enet_init);
2110 +module_exit(bcm_enet_exit);
2111 +
2112 +MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver");
2113 +MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
2114 +MODULE_LICENSE("GPL");
2115 --- /dev/null
2116 +++ b/drivers/net/bcm63xx_enet.h
2117 @@ -0,0 +1,294 @@
2118 +#ifndef BCM63XX_ENET_H_
2119 +#define BCM63XX_ENET_H_
2120 +
2121 +#include <linux/types.h>
2122 +#include <linux/mii.h>
2123 +#include <linux/mutex.h>
2124 +#include <linux/phy.h>
2125 +#include <linux/platform_device.h>
2126 +
2127 +#include <bcm63xx_regs.h>
2128 +#include <bcm63xx_irq.h>
2129 +#include <bcm63xx_io.h>
2130 +
2131 +/* default number of descriptors */
2132 +#define BCMENET_DEF_RX_DESC    64
2133 +#define BCMENET_DEF_TX_DESC    32
2134 +
2135 +/* maximum burst len for dma (in 4 byte units) */
2136 +#define BCMENET_DMA_MAXBURST   16
2137 +
2138 +/* tx fifo threshold (in 4 byte units): the fifo is 256 bytes (64
2139 + * words), so the threshold must stay low enough that a 16 word burst
2140 + * on top of 32 queued words (48 total) cannot overflow it */
2141 +#define BCMENET_TX_FIFO_TRESH  32
2142 +
2143 +/* maximum rx/tx packet size */
2144 +#define BCMENET_MAX_RX_SIZE    (ETH_FRAME_LEN + 4)
2145 +#define BCMENET_MAX_TX_SIZE    (ETH_FRAME_LEN + 4)
2146 +
2147 +/*
2148 + * rx/tx dma descriptor
2149 + */
2150 +struct bcm_enet_desc {
2151 +       u32 len_stat;
2152 +       u32 address;
2153 +};
2154 +
2155 +#define DMADESC_LENGTH_SHIFT   16
2156 +#define DMADESC_LENGTH_MASK    (0xfff << DMADESC_LENGTH_SHIFT)
2157 +#define DMADESC_OWNER_MASK     (1 << 15)
2158 +#define DMADESC_EOP_MASK       (1 << 14)
2159 +#define DMADESC_SOP_MASK       (1 << 13)
2160 +#define DMADESC_ESOP_MASK      (DMADESC_EOP_MASK | DMADESC_SOP_MASK)
2161 +#define DMADESC_WRAP_MASK      (1 << 12)
2162 +
2163 +#define DMADESC_UNDER_MASK     (1 << 9)
2164 +#define DMADESC_APPEND_CRC     (1 << 8)
2165 +#define DMADESC_OVSIZE_MASK    (1 << 4)
2166 +#define DMADESC_RXER_MASK      (1 << 2)
2167 +#define DMADESC_CRC_MASK       (1 << 1)
2168 +#define DMADESC_OV_MASK                (1 << 0)
2169 +#define DMADESC_ERR_MASK       (DMADESC_UNDER_MASK | \
2170 +                               DMADESC_OVSIZE_MASK | \
2171 +                               DMADESC_RXER_MASK | \
2172 +                               DMADESC_CRC_MASK | \
2173 +                               DMADESC_OV_MASK)
2174 +
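
Illustrative sketch (not part of the patch): arming one single-buffer transmit descriptor with the masks above, to make the bit layout concrete. The driver's xmit path does essentially this, but the helper shown here is made up.

/* a frame that fits one buffer is both start and end of packet,
 * hardware appends the CRC, and ownership is handed to the DMA
 * engine; the wrap bit marks the ring's last slot */
static void arm_tx_desc(struct bcm_enet_desc *desc, u32 dma_addr,
			unsigned int len, int last_in_ring)
{
	u32 len_stat;

	len_stat = (len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
	len_stat |= DMADESC_ESOP_MASK | DMADESC_APPEND_CRC;
	len_stat |= DMADESC_OWNER_MASK;
	if (last_in_ring)
		len_stat |= DMADESC_WRAP_MASK;

	desc->address = dma_addr;
	desc->len_stat = len_stat;
}
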
2175 +
2176 +/*
2177 + * MIB Counters register definitions
2178 + */
2179 +#define ETH_MIB_TX_GD_OCTETS                   0
2180 +#define ETH_MIB_TX_GD_PKTS                     1
2181 +#define ETH_MIB_TX_ALL_OCTETS                  2
2182 +#define ETH_MIB_TX_ALL_PKTS                    3
2183 +#define ETH_MIB_TX_BRDCAST                     4
2184 +#define ETH_MIB_TX_MULT                                5
2185 +#define ETH_MIB_TX_64                          6
2186 +#define ETH_MIB_TX_65_127                      7
2187 +#define ETH_MIB_TX_128_255                     8
2188 +#define ETH_MIB_TX_256_511                     9
2189 +#define ETH_MIB_TX_512_1023                    10
2190 +#define ETH_MIB_TX_1024_MAX                    11
2191 +#define ETH_MIB_TX_JAB                         12
2192 +#define ETH_MIB_TX_OVR                         13
2193 +#define ETH_MIB_TX_FRAG                                14
2194 +#define ETH_MIB_TX_UNDERRUN                    15
2195 +#define ETH_MIB_TX_COL                         16
2196 +#define ETH_MIB_TX_1_COL                       17
2197 +#define ETH_MIB_TX_M_COL                       18
2198 +#define ETH_MIB_TX_EX_COL                      19
2199 +#define ETH_MIB_TX_LATE                                20
2200 +#define ETH_MIB_TX_DEF                         21
2201 +#define ETH_MIB_TX_CRS                         22
2202 +#define ETH_MIB_TX_PAUSE                       23
2203 +
2204 +#define ETH_MIB_RX_GD_OCTETS                   32
2205 +#define ETH_MIB_RX_GD_PKTS                     33
2206 +#define ETH_MIB_RX_ALL_OCTETS                  34
2207 +#define ETH_MIB_RX_ALL_PKTS                    35
2208 +#define ETH_MIB_RX_BRDCAST                     36
2209 +#define ETH_MIB_RX_MULT                                37
2210 +#define ETH_MIB_RX_64                          38
2211 +#define ETH_MIB_RX_65_127                      39
2212 +#define ETH_MIB_RX_128_255                     40
2213 +#define ETH_MIB_RX_256_511                     41
2214 +#define ETH_MIB_RX_512_1023                    42
2215 +#define ETH_MIB_RX_1024_MAX                    43
2216 +#define ETH_MIB_RX_JAB                         44
2217 +#define ETH_MIB_RX_OVR                         45
2218 +#define ETH_MIB_RX_FRAG                                46
2219 +#define ETH_MIB_RX_DROP                                47
2220 +#define ETH_MIB_RX_CRC_ALIGN                   48
2221 +#define ETH_MIB_RX_UND                         49
2222 +#define ETH_MIB_RX_CRC                         50
2223 +#define ETH_MIB_RX_ALIGN                       51
2224 +#define ETH_MIB_RX_SYM                         52
2225 +#define ETH_MIB_RX_PAUSE                       53
2226 +#define ETH_MIB_RX_CNTRL                       54
2227 +
2228 +
2229 +struct bcm_enet_mib_counters {
2230 +       u64 tx_gd_octets;
2231 +       u32 tx_gd_pkts;
2232 +       u32 tx_all_octets;
2233 +       u32 tx_all_pkts;
2234 +       u32 tx_brdcast;
2235 +       u32 tx_mult;
2236 +       u32 tx_64;
2237 +       u32 tx_65_127;
2238 +       u32 tx_128_255;
2239 +       u32 tx_256_511;
2240 +       u32 tx_512_1023;
2241 +       u32 tx_1024_max;
2242 +       u32 tx_jab;
2243 +       u32 tx_ovr;
2244 +       u32 tx_frag;
2245 +       u32 tx_underrun;
2246 +       u32 tx_col;
2247 +       u32 tx_1_col;
2248 +       u32 tx_m_col;
2249 +       u32 tx_ex_col;
2250 +       u32 tx_late;
2251 +       u32 tx_def;
2252 +       u32 tx_crs;
2253 +       u32 tx_pause;
2254 +       u64 rx_gd_octets;
2255 +       u32 rx_gd_pkts;
2256 +       u32 rx_all_octets;
2257 +       u32 rx_all_pkts;
2258 +       u32 rx_brdcast;
2259 +       u32 rx_mult;
2260 +       u32 rx_64;
2261 +       u32 rx_65_127;
2262 +       u32 rx_128_255;
2263 +       u32 rx_256_511;
2264 +       u32 rx_512_1023;
2265 +       u32 rx_1024_max;
2266 +       u32 rx_jab;
2267 +       u32 rx_ovr;
2268 +       u32 rx_frag;
2269 +       u32 rx_drop;
2270 +       u32 rx_crc_align;
2271 +       u32 rx_und;
2272 +       u32 rx_crc;
2273 +       u32 rx_align;
2274 +       u32 rx_sym;
2275 +       u32 rx_pause;
2276 +       u32 rx_cntrl;
2277 +};
2278 +
2279 +
2280 +struct bcm_enet_priv {
2281 +
2282 +       /* mac id (from platform device id) */
2283 +       int mac_id;
2284 +
2285 +       /* base remapped address of device */
2286 +       void __iomem *base;
2287 +
2288 +       /* mac irq, rx_dma irq, tx_dma irq */
2289 +       int irq;
2290 +       int irq_rx;
2291 +       int irq_tx;
2292 +
2293 +       /* hw view of rx & tx dma ring */
2294 +       dma_addr_t rx_desc_dma;
2295 +       dma_addr_t tx_desc_dma;
2296 +
2297 +       /* allocated size (in bytes) for rx & tx dma ring */
2298 +       unsigned int rx_desc_alloc_size;
2299 +       unsigned int tx_desc_alloc_size;
2300 +
2301 +
2302 +       struct napi_struct napi;
2303 +
2304 +       /* dma channel id for rx */
2305 +       int rx_chan;
2306 +
2307 +       /* number of dma desc in rx ring */
2308 +       int rx_ring_size;
2309 +
2310 +       /* cpu view of rx dma ring */
2311 +       struct bcm_enet_desc *rx_desc_cpu;
2312 +
2313 +       /* current number of armed descriptors given to hardware for rx */
2314 +       int rx_desc_count;
2315 +
2316 +       /* next rx descriptor to fetch from hardware */
2317 +       int rx_curr_desc;
2318 +
2319 +       /* next dirty rx descriptor to refill */
2320 +       int rx_dirty_desc;
2321 +
2322 +       /* list of skb given to hw for rx */
2323 +       struct sk_buff **rx_skb;
2324 +
2325 +       /* used when rx skb allocation fails, so we can defer the
2326 +        * rx queue refill */
2327 +       struct timer_list rx_timeout;
2328 +
2329 +       /* lock rx_timeout against rx normal operation */
2330 +       spinlock_t rx_lock;
2331 +
2332 +
2333 +       /* dma channel id for tx */
2334 +       int tx_chan;
2335 +
2336 +       /* number of dma desc in tx ring */
2337 +       int tx_ring_size;
2338 +
2339 +       /* cpu view of tx dma ring */
2340 +       struct bcm_enet_desc *tx_desc_cpu;
2341 +
2342 +       /* number of available descriptors for tx */
2343 +       int tx_desc_count;
2344 +
2345 +       /* next available tx descriptor */
2346 +       int tx_curr_desc;
2347 +
2348 +       /* next dirty tx descriptor to reclaim */
2349 +       int tx_dirty_desc;
2350 +
2351 +       /* list of skb given to hw for tx */
2352 +       struct sk_buff **tx_skb;
2353 +
2354 +       /* lock used by tx reclaim and xmit */
2355 +       spinlock_t tx_lock;
2356 +
2357 +
2358 +       /* set if internal phy is ignored and external mii interface
2359 +        * is selected */
2360 +       int use_external_mii;
2361 +
2362 +       /* set if a phy is connected; its address must be known,
2363 +        * as probing is not possible */
2364 +       int has_phy;
2365 +       int phy_id;
2366 +
2367 +       /* set if connected phy has an associated irq */
2368 +       int has_phy_interrupt;
2369 +       int phy_interrupt;
2370 +
2371 +       /* used when a phy is connected (phylib used) */
2372 +       struct mii_bus mii_bus;
2373 +       struct phy_device *phydev;
2374 +       int old_link;
2375 +       int old_duplex;
2376 +       int old_pause;
2377 +
2378 +       /* used when no phy is connected */
2379 +       int force_speed_100;
2380 +       int force_duplex_full;
2381 +
2382 +       /* pause parameters */
2383 +       int pause_auto;
2384 +       int pause_rx;
2385 +       int pause_tx;
2386 +
2387 +       /* stats */
2388 +       struct net_device_stats stats;
2389 +       struct bcm_enet_mib_counters mib;
2390 +
2391 +       /* after a mib interrupt, mib registers are updated from
2392 +        * this work queue */
2393 +       struct work_struct mib_update_task;
2394 +
2395 +       /* lock mib update between userspace request and workqueue */
2396 +       struct mutex mib_update_lock;
2397 +
2398 +       /* mac clock */
2399 +       struct clk *mac_clk;
2400 +
2401 +       /* phy clock if internal phy is used */
2402 +       struct clk *phy_clk;
2403 +
2404 +       /* network device reference */
2405 +       struct net_device *net_dev;
2406 +
2407 +       /* platform device reference */
2408 +       struct platform_device *pdev;
2409 +};
2410 +
2411 +#endif /* ! BCM63XX_ENET_H_ */
2412 --- /dev/null
2413 +++ b/include/asm-mips/mach-bcm63xx/bcm63xx_dev_enet.h
2414 @@ -0,0 +1,45 @@
2415 +#ifndef BCM63XX_DEV_ENET_H_
2416 +#define BCM63XX_DEV_ENET_H_
2417 +
2418 +#include <linux/if_ether.h>
2419 +#include <linux/init.h>
2420 +
2421 +/*
2422 + * on-board ethernet platform data
2423 + */
2424 +struct bcm63xx_enet_platform_data {
2425 +       char mac_addr[ETH_ALEN];
2426 +
2427 +       int has_phy;
2428 +
2429 +       /* if has_phy, then set use_internal_phy */
2430 +       int use_internal_phy;
2431 +
2432 +       /* or fill phy info to use an external one */
2433 +       int phy_id;
2434 +       int has_phy_interrupt;
2435 +       int phy_interrupt;
2436 +
2437 +       /* if has_phy, use autonegotiated pause parameters or force
2438 +        * them */
2439 +       int pause_auto;
2440 +       int pause_rx;
2441 +       int pause_tx;
2442 +
2443 +       /* if !has_phy, set desired forced speed/duplex */
2444 +       int force_speed_100;
2445 +       int force_duplex_full;
2446 +
2447 +       /* if !has_phy, set callback to perform mii device
2448 +        * init/remove */
2449 +       int (*mii_config)(struct net_device *dev, int probe,
2450 +                         int (*mii_read)(struct net_device *dev,
2451 +                                         int phy_id, int reg),
2452 +                         void (*mii_write)(struct net_device *dev,
2453 +                                           int phy_id, int reg, int val));
2454 +};
2455 +
2456 +int __init bcm63xx_enet_register(int unit,
2457 +                                const struct bcm63xx_enet_platform_data *pd);
2458 +
2459 +#endif /* ! BCM63XX_DEV_ENET_H_ */
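
Finally, a hedged sketch of how board support code might consume this header. The MAC address is a locally administered placeholder and the function names are made up; only bcm63xx_enet_register() and the platform data fields come from the header above.

#include <bcm63xx_dev_enet.h>

/* register enet0 with the integrated PHY, taking pause parameters
 * from autonegotiation */
static struct bcm63xx_enet_platform_data enet0_pd_example = {
	.mac_addr         = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	.has_phy          = 1,
	.use_internal_phy = 1,
	.pause_auto       = 1,
};

static int __init board_enet_init(void)
{
	return bcm63xx_enet_register(0, &enet0_pd_example);
}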