[brcm63xx] backport an upstream fix: We're not disabling IRQ, so we must call the...
[openwrt.git] target/linux/brcm63xx/files/drivers/net/bcm63xx_enet.c
/*
 * Driver for BCM963xx builtin Ethernet mac
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

#include <bcm63xx_dev_enet.h>
#include "bcm63xx_enet.h"

static char bcm_enet_driver_name[] = "bcm63xx_enet";
static char bcm_enet_driver_version[] = "1.0";

static int copybreak __read_mostly = 128;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");

/* io memory shared between all devices */
static void __iomem *bcm_enet_shared_base;

/*
 * io helpers to access mac registers
 */
static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
{
        return bcm_readl(priv->base + off);
}

static inline void enet_writel(struct bcm_enet_priv *priv,
                               u32 val, u32 off)
{
        bcm_writel(val, priv->base + off);
}

/*
 * io helpers to access shared registers
 */
static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
{
        return bcm_readl(bcm_enet_shared_base + off);
}

static inline void enet_dma_writel(struct bcm_enet_priv *priv,
                                   u32 val, u32 off)
{
        bcm_writel(val, bcm_enet_shared_base + off);
}

/*
 * write given data into mii register and wait for transfer to end
 * with timeout (average measured transfer time is 25us)
 */
static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
{
        int limit;

        /* make sure mii interrupt status is cleared */
        enet_writel(priv, ENET_IR_MII, ENET_IR_REG);

        enet_writel(priv, data, ENET_MIIDATA_REG);
        wmb();

        /* busy wait on mii interrupt bit, with timeout */
        limit = 1000;
        do {
                if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
                        break;
                udelay(1);
        } while (limit-- >= 0);

        return (limit < 0) ? 1 : 0;
}
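
/*
 * Note on the timeout above: 1000 iterations of udelay(1) budget roughly
 * 1ms for a transfer that averages 25us (per the comment above), so a
 * timeout here indicates a stuck MII bus rather than a merely slow one.
 */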

/*
 * MII internal read callback
 */
static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
                              int regnum)
{
        u32 tmp, val;

        tmp = regnum << ENET_MIIDATA_REG_SHIFT;
        tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
        tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
        tmp |= ENET_MIIDATA_OP_READ_MASK;

        if (do_mdio_op(priv, tmp))
                return -1;

        val = enet_readl(priv, ENET_MIIDATA_REG);
        val &= 0xffff;
        return val;
}

/*
 * MII internal write callback
 */
static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
                               int regnum, u16 value)
{
        u32 tmp;

        tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
        tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
        tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
        tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
        tmp |= ENET_MIIDATA_OP_WRITE_MASK;

        (void)do_mdio_op(priv, tmp);
        return 0;
}
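
/*
 * For reference, the MIIDATA word built by the two helpers above follows
 * the clause-22 MII management frame implied by the shift/mask names
 * (exact bit positions are defined in bcm63xx_enet.h):
 *
 *   opcode | phy address | register number | turnaround (0b10) | data
 *
 * A hedged usage sketch, assuming a PHY at address 1: reading MII_BMSR
 * would be bcm_enet_mdio_read(priv, 1, MII_BMSR), which returns the
 * 16-bit register value, or -1 if the bus timed out.
 */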

/*
 * MII read callback from phylib
 */
static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
                                     int regnum)
{
        return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
}

/*
 * MII write callback from phylib
 */
static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
                                      int regnum, u16 value)
{
        return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
}

/*
 * MII read callback from mii core
 */
static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
                                  int regnum)
{
        return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
}

/*
 * MII write callback from mii core
 */
static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
                                    int regnum, int value)
{
        bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
}

/*
 * refill rx queue
 */
static int bcm_enet_refill_rx(struct net_device *dev)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);

        while (priv->rx_desc_count < priv->rx_ring_size) {
                struct bcm_enet_desc *desc;
                struct sk_buff *skb;
                dma_addr_t p;
                int desc_idx;
                u32 len_stat;

                desc_idx = priv->rx_dirty_desc;
                desc = &priv->rx_desc_cpu[desc_idx];

                if (!priv->rx_skb[desc_idx]) {
                        skb = netdev_alloc_skb(dev, BCMENET_MAX_RX_SIZE);
                        if (!skb)
                                break;
                        priv->rx_skb[desc_idx] = skb;

                        p = dma_map_single(&priv->pdev->dev, skb->data,
                                           BCMENET_MAX_RX_SIZE,
                                           DMA_FROM_DEVICE);
                        desc->address = p;
                }

                len_stat = BCMENET_MAX_RX_SIZE << DMADESC_LENGTH_SHIFT;
                len_stat |= DMADESC_OWNER_MASK;
                if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
                        len_stat |= DMADESC_WRAP_MASK;
                        priv->rx_dirty_desc = 0;
                } else {
                        priv->rx_dirty_desc++;
                }
                wmb();
                desc->len_stat = len_stat;

                priv->rx_desc_count++;

                /* tell dma engine we allocated one buffer */
                enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
        }

        /* If rx ring is still empty, set a timer to try allocating
         * again at a later time. */
        if (priv->rx_desc_count == 0 && netif_running(dev)) {
                dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
                priv->rx_timeout.expires = jiffies + HZ;
                add_timer(&priv->rx_timeout);
        }

        return 0;
}
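
/*
 * Descriptor handoff protocol used above: the buffer address and length
 * are written first, then wmb() orders those stores before the len_stat
 * write that sets DMADESC_OWNER_MASK and hands the descriptor to the
 * hardware. DMADESC_WRAP_MASK on the last entry makes the engine loop
 * back to the ring base, and each BUFALLOC write credits one buffer to
 * the channel's flow-control accounting.
 */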

/*
 * timer callback to defer refill rx queue in case we're OOM
 */
static void bcm_enet_refill_rx_timer(unsigned long data)
{
        struct net_device *dev;
        struct bcm_enet_priv *priv;

        dev = (struct net_device *)data;
        priv = netdev_priv(dev);

        spin_lock(&priv->rx_lock);
        bcm_enet_refill_rx(dev);
        spin_unlock(&priv->rx_lock);
}
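
/*
 * Locking note: rx_lock is only ever taken from softirq context in this
 * file (the NAPI poll handler below and this timer callback), which is
 * why the plain spin_lock()/spin_unlock() variants suffice here.
 */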

/*
 * extract packet from rx queue
 */
static int bcm_enet_receive_queue(struct net_device *dev, int budget)
{
        struct bcm_enet_priv *priv;
        struct device *kdev;
        int processed;

        priv = netdev_priv(dev);
        kdev = &priv->pdev->dev;
        processed = 0;

        /* don't scan ring further than number of refilled
         * descriptor */
        if (budget > priv->rx_desc_count)
                budget = priv->rx_desc_count;

        do {
                struct bcm_enet_desc *desc;
                struct sk_buff *skb;
                int desc_idx;
                u32 len_stat;
                unsigned int len;

                desc_idx = priv->rx_curr_desc;
                desc = &priv->rx_desc_cpu[desc_idx];

                /* make sure we actually read the descriptor status at
                 * each loop */
                rmb();

                len_stat = desc->len_stat;

                /* break if dma ownership belongs to hw */
                if (len_stat & DMADESC_OWNER_MASK)
                        break;

                processed++;
                priv->rx_curr_desc++;
                if (priv->rx_curr_desc == priv->rx_ring_size)
                        priv->rx_curr_desc = 0;
                priv->rx_desc_count--;

                /* if the packet does not have start of packet _and_
                 * end of packet flag set, then just recycle it */
                if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
                        priv->stats.rx_dropped++;
                        continue;
                }

                /* recycle packet if it's marked as bad */
                if (unlikely(len_stat & DMADESC_ERR_MASK)) {
                        priv->stats.rx_errors++;

                        if (len_stat & DMADESC_OVSIZE_MASK)
                                priv->stats.rx_length_errors++;
                        if (len_stat & DMADESC_CRC_MASK)
                                priv->stats.rx_crc_errors++;
                        if (len_stat & DMADESC_UNDER_MASK)
                                priv->stats.rx_frame_errors++;
                        if (len_stat & DMADESC_OV_MASK)
                                priv->stats.rx_fifo_errors++;
                        continue;
                }

                /* valid packet */
                skb = priv->rx_skb[desc_idx];
                len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
                /* don't include FCS */
                len -= 4;

                if (len < copybreak) {
                        struct sk_buff *nskb;

                        nskb = netdev_alloc_skb(dev, len + 2);
                        if (!nskb) {
                                /* forget packet, just rearm desc */
                                priv->stats.rx_dropped++;
                                continue;
                        }

                        /* since we're copying the data, we can align
                         * them properly */
                        skb_reserve(nskb, NET_IP_ALIGN);
                        dma_sync_single_for_cpu(kdev, desc->address,
                                                len, DMA_FROM_DEVICE);
                        memcpy(nskb->data, skb->data, len);
                        dma_sync_single_for_device(kdev, desc->address,
                                                   len, DMA_FROM_DEVICE);
                        skb = nskb;
                } else {
                        dma_unmap_single(&priv->pdev->dev, desc->address,
                                         BCMENET_MAX_RX_SIZE, DMA_FROM_DEVICE);
                        priv->rx_skb[desc_idx] = NULL;
                }

                skb_put(skb, len);
                skb->dev = dev;
                skb->protocol = eth_type_trans(skb, dev);
                priv->stats.rx_packets++;
                priv->stats.rx_bytes += len;
                dev->last_rx = jiffies;
                netif_receive_skb(skb);

        } while (--budget > 0);

        if (processed || !priv->rx_desc_count) {
                bcm_enet_refill_rx(dev);

                /* kick rx dma */
                enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
                                ENETDMA_CHANCFG_REG(priv->rx_chan));
        }

        return processed;
}
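
/*
 * The copybreak path above trades a memcpy for buffer reuse: frames
 * shorter than the copybreak threshold (128 bytes by default) are copied
 * into a freshly allocated skb, reserving NET_IP_ALIGN bytes so the IP
 * header lands aligned, while the original BCMENET_MAX_RX_SIZE buffer
 * stays mapped in the ring. Larger frames are handed up as-is and their
 * ring slot is refilled with a new buffer instead.
 */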


/*
 * try to or force reclaim of transmitted buffers
 */
static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
{
        struct bcm_enet_priv *priv;
        int released;

        priv = netdev_priv(dev);
        released = 0;

        while (priv->tx_desc_count < priv->tx_ring_size) {
                struct bcm_enet_desc *desc;
                struct sk_buff *skb;

                /* We run in a bh and fight against start_xmit, which
                 * is called with bh disabled */
                spin_lock(&priv->tx_lock);

                desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];

                if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
                        spin_unlock(&priv->tx_lock);
                        break;
                }

                /* ensure the other fields of the descriptor were not
                 * read before we checked ownership */
                rmb();

                skb = priv->tx_skb[priv->tx_dirty_desc];
                priv->tx_skb[priv->tx_dirty_desc] = NULL;
                dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
                                 DMA_TO_DEVICE);

                priv->tx_dirty_desc++;
                if (priv->tx_dirty_desc == priv->tx_ring_size)
                        priv->tx_dirty_desc = 0;
                priv->tx_desc_count++;

                spin_unlock(&priv->tx_lock);

                if (desc->len_stat & DMADESC_UNDER_MASK)
                        priv->stats.tx_errors++;

                dev_kfree_skb(skb);
                released++;
        }

        if (netif_queue_stopped(dev) && released)
                netif_wake_queue(dev);

        return released;
}
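
/*
 * The force flag matters only at teardown: bcm_enet_stop() disables the
 * tx DMA channel first and then calls this with force=1, so that even
 * descriptors still marked hardware-owned get their skbs unmapped and
 * freed.
 */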

/*
 * poll func, called by network core
 */
static int bcm_enet_poll(struct napi_struct *napi, int budget)
{
        struct bcm_enet_priv *priv;
        struct net_device *dev;
        int tx_work_done, rx_work_done;

        priv = container_of(napi, struct bcm_enet_priv, napi);
        dev = priv->net_dev;

        /* ack interrupts */
        enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
                        ENETDMA_IR_REG(priv->rx_chan));
        enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
                        ENETDMA_IR_REG(priv->tx_chan));

        /* reclaim sent skb */
        tx_work_done = bcm_enet_tx_reclaim(dev, 0);

        spin_lock(&priv->rx_lock);
        rx_work_done = bcm_enet_receive_queue(dev, budget);
        spin_unlock(&priv->rx_lock);

        if (rx_work_done >= budget || tx_work_done > 0) {
                /* rx/tx queue is not yet empty/clean */
                return rx_work_done;
        }

        /* no more packet in rx/tx queue, remove device from poll
         * queue */
        netif_rx_complete(dev, napi);

        /* restore rx/tx interrupt */
        enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
                        ENETDMA_IRMASK_REG(priv->rx_chan));
        enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
                        ENETDMA_IRMASK_REG(priv->tx_chan));

        return rx_work_done;
}
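
/*
 * This follows the usual NAPI contract of this kernel generation:
 * bcm_enet_isr_dma() masks the channel interrupts and schedules the
 * poller; poll() returns early (staying scheduled) while work remains,
 * and only once both rings are idle does it call netif_rx_complete()
 * and unmask ENETDMA_IR_PKTDONE so the next packet raises an interrupt
 * again.
 */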

/*
 * mac interrupt handler
 */
static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
{
        struct net_device *dev;
        struct bcm_enet_priv *priv;
        u32 stat;

        dev = dev_id;
        priv = netdev_priv(dev);

        stat = enet_readl(priv, ENET_IR_REG);
        if (!(stat & ENET_IR_MIB))
                return IRQ_NONE;

        /* clear & mask interrupt */
        enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
        enet_writel(priv, 0, ENET_IRMASK_REG);

        /* read mib registers in workqueue */
        schedule_work(&priv->mib_update_task);

        return IRQ_HANDLED;
}

/*
 * rx/tx dma interrupt handler
 */
static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
{
        struct net_device *dev;
        struct bcm_enet_priv *priv;

        dev = dev_id;
        priv = netdev_priv(dev);

        /* mask rx/tx interrupts */
        enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
        enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));

        netif_rx_schedule(dev, &priv->napi);

        return IRQ_HANDLED;
}
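
/*
 * The MIB interrupt defers to a workqueue because update_mib_counters()
 * runs under priv->mib_update_lock, a mutex that may sleep and therefore
 * cannot be taken in hard-irq context; the handler just acks, masks, and
 * lets bcm_enet_update_mib_counters_defer() re-enable the interrupt.
 */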

/*
 * tx request callback
 */
static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bcm_enet_priv *priv;
        struct bcm_enet_desc *desc;
        u32 len_stat;
        int ret;

        priv = netdev_priv(dev);

        /* lock against tx reclaim */
        spin_lock(&priv->tx_lock);

        /* make sure the tx hw queue is not full, which should not
         * happen since we stop the queue before that's the case */
        if (unlikely(!priv->tx_desc_count)) {
                netif_stop_queue(dev);
                dev_err(&priv->pdev->dev, "xmit called with no tx desc "
                        "available?\n");
                ret = NETDEV_TX_BUSY;
                goto out_unlock;
        }

        /* point to the next available desc */
        desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
        priv->tx_skb[priv->tx_curr_desc] = skb;

        /* fill descriptor */
        desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
                                       DMA_TO_DEVICE);

        len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
        len_stat |= DMADESC_ESOP_MASK |
                DMADESC_APPEND_CRC |
                DMADESC_OWNER_MASK;

        priv->tx_curr_desc++;
        if (priv->tx_curr_desc == priv->tx_ring_size) {
                priv->tx_curr_desc = 0;
                len_stat |= DMADESC_WRAP_MASK;
        }
        priv->tx_desc_count--;

        /* dma might be already polling, make sure we update desc
         * fields in correct order */
        wmb();
        desc->len_stat = len_stat;
        wmb();

        /* kick tx dma */
        enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
                        ENETDMA_CHANCFG_REG(priv->tx_chan));

        /* stop queue if no more desc available */
        if (!priv->tx_desc_count)
                netif_stop_queue(dev);

        priv->stats.tx_bytes += skb->len;
        priv->stats.tx_packets++;
        dev->trans_start = jiffies;
        ret = NETDEV_TX_OK;

out_unlock:
        spin_unlock(&priv->tx_lock);
        return ret;
}
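
/*
 * Two barriers bracket the ownership handoff above: the first wmb()
 * makes the buffer address visible before len_stat transfers ownership
 * via DMADESC_OWNER_MASK, and the second orders the descriptor store
 * before the ENETDMA_CHANCFG doorbell write that (re)starts the channel.
 */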

/*
 * Change the interface's mac address.
 */
static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
{
        struct bcm_enet_priv *priv;
        struct sockaddr *addr = p;
        u32 val;

        priv = netdev_priv(dev);
        memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);

        /* use perfect match register 0 to store my mac address */
        val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
                (dev->dev_addr[4] << 8) | dev->dev_addr[5];
        enet_writel(priv, val, ENET_PML_REG(0));

        val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
        val |= ENET_PMH_DATAVALID_MASK;
        enet_writel(priv, val, ENET_PMH_REG(0));

        return 0;
}
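
/*
 * Worked example of the perfect-match packing above: for the MAC address
 * 00:11:22:33:44:55, PML(0) receives 0x22334455 (the low four bytes) and
 * PMH(0) receives 0x00000011 (the high two bytes) with
 * ENET_PMH_DATAVALID_MASK set to activate the entry.
 */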

/*
 * Change rx mode (promiscuous/allmulti) and update multicast list
 */
static void bcm_enet_set_multicast_list(struct net_device *dev)
{
        struct bcm_enet_priv *priv;
        struct dev_mc_list *mc_list;
        u32 val;
        int i;

        priv = netdev_priv(dev);

        val = enet_readl(priv, ENET_RXCFG_REG);

        if (dev->flags & IFF_PROMISC)
                val |= ENET_RXCFG_PROMISC_MASK;
        else
                val &= ~ENET_RXCFG_PROMISC_MASK;

        /* only 3 perfect match registers left, first one is used for
         * own mac address */
        if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > 3)
                val |= ENET_RXCFG_ALLMCAST_MASK;
        else
                val &= ~ENET_RXCFG_ALLMCAST_MASK;

        /* no need to set perfect match registers if we catch all
         * multicast */
        if (val & ENET_RXCFG_ALLMCAST_MASK) {
                enet_writel(priv, val, ENET_RXCFG_REG);
                return;
        }

        for (i = 0, mc_list = dev->mc_list;
             (mc_list != NULL) && (i < dev->mc_count) && (i < 3);
             i++, mc_list = mc_list->next) {
                u8 *dmi_addr;
                u32 tmp;

                /* filter non ethernet address */
                if (mc_list->dmi_addrlen != 6)
                        continue;

                /* update perfect match registers */
                dmi_addr = mc_list->dmi_addr;
                tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
                        (dmi_addr[4] << 8) | dmi_addr[5];
                enet_writel(priv, tmp, ENET_PML_REG(i + 1));

                tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
                tmp |= ENET_PMH_DATAVALID_MASK;
                enet_writel(priv, tmp, ENET_PMH_REG(i + 1));
        }

        for (; i < 3; i++) {
                enet_writel(priv, 0, ENET_PML_REG(i + 1));
                enet_writel(priv, 0, ENET_PMH_REG(i + 1));
        }

        enet_writel(priv, val, ENET_RXCFG_REG);
}

/*
 * set mac duplex parameters
 */
static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
{
        u32 val;

        val = enet_readl(priv, ENET_TXCTL_REG);
        if (fullduplex)
                val |= ENET_TXCTL_FD_MASK;
        else
                val &= ~ENET_TXCTL_FD_MASK;
        enet_writel(priv, val, ENET_TXCTL_REG);
}

/*
 * set mac flow control parameters
 */
static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
{
        u32 val;

        /* rx flow control (pause frame handling) */
        val = enet_readl(priv, ENET_RXCFG_REG);
        if (rx_en)
                val |= ENET_RXCFG_ENFLOW_MASK;
        else
                val &= ~ENET_RXCFG_ENFLOW_MASK;
        enet_writel(priv, val, ENET_RXCFG_REG);

        /* tx flow control (pause frame generation) */
        val = enet_dma_readl(priv, ENETDMA_CFG_REG);
        if (tx_en)
                val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
        else
                val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
        enet_dma_writel(priv, val, ENETDMA_CFG_REG);
}
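
/*
 * Note the asymmetry: honouring received pause frames is a MAC-level
 * RXCFG setting, while generating pause frames is driven by the DMA
 * block, keyed to the rx channel's buffer occupancy against the
 * FLOWCL/FLOWCH thresholds that bcm_enet_open() programs to 1/3 and 2/3
 * of the rx ring.
 */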

/*
 * link changed callback (from phylib)
 */
static void bcm_enet_adjust_phy_link(struct net_device *dev)
{
        struct bcm_enet_priv *priv;
        struct phy_device *phydev;
        int status_changed;

        priv = netdev_priv(dev);
        phydev = priv->phydev;
        status_changed = 0;

        if (priv->old_link != phydev->link) {
                status_changed = 1;
                priv->old_link = phydev->link;
        }

        /* reflect duplex change in mac configuration */
        if (phydev->link && phydev->duplex != priv->old_duplex) {
                bcm_enet_set_duplex(priv,
                                    (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
                status_changed = 1;
                priv->old_duplex = phydev->duplex;
        }

        /* enable flow control if remote advertises it (trust phylib to
         * check that duplex is full) */
        if (phydev->link && phydev->pause != priv->old_pause) {
                int rx_pause_en, tx_pause_en;

                if (phydev->pause) {
                        /* pause was advertised by lpa and us */
                        rx_pause_en = 1;
                        tx_pause_en = 1;
                } else if (!priv->pause_auto) {
                        /* pause setting overridden by user */
                        rx_pause_en = priv->pause_rx;
                        tx_pause_en = priv->pause_tx;
                } else {
                        rx_pause_en = 0;
                        tx_pause_en = 0;
                }

                bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
                status_changed = 1;
                priv->old_pause = phydev->pause;
        }

        if (status_changed) {
                pr_info("%s: link %s", dev->name, phydev->link ?
                        "UP" : "DOWN");
                if (phydev->link)
                        printk(" - %d/%s - flow control %s", phydev->speed,
                               DUPLEX_FULL == phydev->duplex ? "full" : "half",
                               phydev->pause == 1 ? "rx&tx" : "off");

                printk("\n");
        }
}

/*
 * link changed callback (if phylib is not used)
 */
static void bcm_enet_adjust_link(struct net_device *dev)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);
        bcm_enet_set_duplex(priv, priv->force_duplex_full);
        bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);

        pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
                dev->name,
                priv->force_speed_100 ? 100 : 10,
                priv->force_duplex_full ? "full" : "half",
                priv->pause_rx ? "rx" : "off",
                priv->pause_tx ? "tx" : "off");
}

/*
 * open callback, allocate dma rings & buffers and start rx operation
 */
static int bcm_enet_open(struct net_device *dev)
{
        struct bcm_enet_priv *priv;
        struct sockaddr addr;
        struct device *kdev;
        struct phy_device *phydev;
        int irq_requested, i, ret;
        unsigned int size;
        char phy_id[BUS_ID_SIZE];
        void *p;
        u32 val;

        priv = netdev_priv(dev);
        priv->rx_desc_cpu = priv->tx_desc_cpu = NULL;
        priv->rx_skb = priv->tx_skb = NULL;

        kdev = &priv->pdev->dev;

        if (priv->has_phy) {
                /* connect to PHY */
                snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT,
                         priv->mac_id ? "1" : "0", priv->phy_id);

                phydev = phy_connect(dev, phy_id, &bcm_enet_adjust_phy_link, 0,
                                     PHY_INTERFACE_MODE_MII);

                if (IS_ERR(phydev)) {
                        dev_err(kdev, "could not attach to PHY\n");
                        return PTR_ERR(phydev);
                }

                /* mask with MAC supported features */
                phydev->supported &= (SUPPORTED_10baseT_Half |
                                      SUPPORTED_10baseT_Full |
                                      SUPPORTED_100baseT_Half |
                                      SUPPORTED_100baseT_Full |
                                      SUPPORTED_Autoneg |
                                      SUPPORTED_Pause |
                                      SUPPORTED_MII);
                phydev->advertising = phydev->supported;

                if (priv->pause_auto && priv->pause_rx && priv->pause_tx)
                        phydev->advertising |= SUPPORTED_Pause;
                else
                        phydev->advertising &= ~SUPPORTED_Pause;

                dev_info(kdev, "attached PHY at address %d [%s]\n",
                         phydev->addr, phydev->drv->name);

                priv->old_link = 0;
                priv->old_duplex = -1;
                priv->old_pause = -1;
                priv->phydev = phydev;
        }

        /* mask all interrupts and request them */
        enet_writel(priv, 0, ENET_IRMASK_REG);
        enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
        enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));

        irq_requested = 0;
        ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
        if (ret)
                goto out;
        irq_requested++;

        ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
                          IRQF_SAMPLE_RANDOM | IRQF_DISABLED, dev->name, dev);
        if (ret)
                goto out;
        irq_requested++;

        ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
                          IRQF_DISABLED, dev->name, dev);
        if (ret)
                goto out;
        irq_requested++;

        /* initialize perfect match registers */
        for (i = 0; i < 4; i++) {
                enet_writel(priv, 0, ENET_PML_REG(i));
                enet_writel(priv, 0, ENET_PMH_REG(i));
        }

        /* write device mac address */
        memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
        bcm_enet_set_mac_address(dev, &addr);

        /* allocate rx dma ring */
        size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
        p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
        if (!p) {
                dev_err(kdev, "cannot allocate rx ring %u\n", size);
                ret = -ENOMEM;
                goto out;
        }

        memset(p, 0, size);
        priv->rx_desc_alloc_size = size;
        priv->rx_desc_cpu = p;

        /* allocate tx dma ring */
        size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
        p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
        if (!p) {
                dev_err(kdev, "cannot allocate tx ring\n");
                ret = -ENOMEM;
                goto out;
        }

        memset(p, 0, size);
        priv->tx_desc_alloc_size = size;
        priv->tx_desc_cpu = p;

        priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
                               GFP_KERNEL);
        if (!priv->tx_skb) {
                dev_err(kdev, "cannot allocate tx skb queue\n");
                ret = -ENOMEM;
                goto out;
        }

        priv->tx_desc_count = priv->tx_ring_size;
        priv->tx_dirty_desc = 0;
        priv->tx_curr_desc = 0;
        spin_lock_init(&priv->tx_lock);

        /* init & fill rx ring with skbs */
        priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size,
                               GFP_KERNEL);
        if (!priv->rx_skb) {
                dev_err(kdev, "cannot allocate rx skb queue\n");
                ret = -ENOMEM;
                goto out;
        }

        priv->rx_desc_count = 0;
        priv->rx_dirty_desc = 0;
        priv->rx_curr_desc = 0;

        /* initialize flow control buffer allocation */
        enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
                        ENETDMA_BUFALLOC_REG(priv->rx_chan));

        if (bcm_enet_refill_rx(dev)) {
                dev_err(kdev, "cannot allocate rx skb queue\n");
                ret = -ENOMEM;
                goto out;
        }

        /* write rx & tx ring addresses */
        enet_dma_writel(priv, priv->rx_desc_dma,
                        ENETDMA_RSTART_REG(priv->rx_chan));
        enet_dma_writel(priv, priv->tx_desc_dma,
                        ENETDMA_RSTART_REG(priv->tx_chan));

        /* clear remaining state ram for rx & tx channel */
        enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan));
        enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan));
        enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan));
        enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan));
        enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan));
        enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan));

        /* set max rx/tx length */
        enet_writel(priv, BCMENET_MAX_RX_SIZE, ENET_RXMAXLEN_REG);
        enet_writel(priv, BCMENET_MAX_TX_SIZE, ENET_TXMAXLEN_REG);

        /* set dma maximum burst len */
        enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
                        ENETDMA_MAXBURST_REG(priv->rx_chan));
        enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
                        ENETDMA_MAXBURST_REG(priv->tx_chan));

        /* set correct transmit fifo watermark */
        enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);

        /* set flow control low/high threshold to 1/3 / 2/3 */
        val = priv->rx_ring_size / 3;
        enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
        val = (priv->rx_ring_size * 2) / 3;
        enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));

        /* all set, enable mac and interrupts, start dma engine and
         * kick rx dma channel */
        wmb();
        enet_writel(priv, ENET_CTL_ENABLE_MASK, ENET_CTL_REG);
        enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
        enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
                        ENETDMA_CHANCFG_REG(priv->rx_chan));

        /* watch "mib counters about to overflow" interrupt */
        enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
        enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);

        /* watch "packet transferred" interrupt in rx and tx */
        enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
                        ENETDMA_IR_REG(priv->rx_chan));
        enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
                        ENETDMA_IR_REG(priv->tx_chan));

        /* make sure we enable napi before rx interrupt */
        napi_enable(&priv->napi);

        enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
                        ENETDMA_IRMASK_REG(priv->rx_chan));
        enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
                        ENETDMA_IRMASK_REG(priv->tx_chan));

        if (priv->has_phy)
                phy_start(priv->phydev);
        else
                bcm_enet_adjust_link(dev);

        netif_start_queue(dev);
        return 0;

out:
        /* only disconnect the PHY if we actually attached one, and only
         * walk the rx skb array if it was allocated */
        if (priv->has_phy)
                phy_disconnect(priv->phydev);
        if (irq_requested > 2)
                free_irq(priv->irq_tx, dev);
        if (irq_requested > 1)
                free_irq(priv->irq_rx, dev);
        if (irq_requested > 0)
                free_irq(dev->irq, dev);
        if (priv->rx_skb) {
                for (i = 0; i < priv->rx_ring_size; i++) {
                        struct bcm_enet_desc *desc;

                        if (!priv->rx_skb[i])
                                continue;

                        desc = &priv->rx_desc_cpu[i];
                        dma_unmap_single(kdev, desc->address,
                                         BCMENET_MAX_RX_SIZE,
                                         DMA_FROM_DEVICE);
                        kfree_skb(priv->rx_skb[i]);
                }
        }
        if (priv->rx_desc_cpu)
                dma_free_coherent(kdev, priv->rx_desc_alloc_size,
                                  priv->rx_desc_cpu, priv->rx_desc_dma);
        if (priv->tx_desc_cpu)
                dma_free_coherent(kdev, priv->tx_desc_alloc_size,
                                  priv->tx_desc_cpu, priv->tx_desc_dma);
        kfree(priv->rx_skb);
        kfree(priv->tx_skb);
        return ret;
}
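
/*
 * Bring-up ordering in bcm_enet_open() matters: rings and state RAM are
 * programmed while everything is masked, the MAC and DMA engine are
 * enabled, and only after napi_enable() are the PKTDONE interrupts
 * unmasked, so an early packet cannot schedule a disabled NAPI context.
 * The unified error path only undoes what was actually set up, tracked
 * via irq_requested and the NULL-initialized ring pointers.
 */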

/*
 * disable mac
 */
static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
{
        int limit;
        u32 val;

        val = enet_readl(priv, ENET_CTL_REG);
        val |= ENET_CTL_DISABLE_MASK;
        enet_writel(priv, val, ENET_CTL_REG);

        limit = 1000;
        do {
                u32 val;

                val = enet_readl(priv, ENET_CTL_REG);
                if (!(val & ENET_CTL_DISABLE_MASK))
                        break;
                udelay(1);
        } while (limit--);
}

/*
 * disable dma in given channel
 */
static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
{
        int limit;

        enet_dma_writel(priv, 0, ENETDMA_CHANCFG_REG(chan));

        limit = 1000;
        do {
                u32 val;

                val = enet_dma_readl(priv, ENETDMA_CHANCFG_REG(chan));
                if (!(val & ENETDMA_CHANCFG_EN_MASK))
                        break;
                udelay(1);
        } while (limit--);
}

/*
 * stop callback
 */
static int bcm_enet_stop(struct net_device *dev)
{
        struct bcm_enet_priv *priv;
        struct device *kdev;
        int i;

        priv = netdev_priv(dev);
        kdev = &priv->pdev->dev;

        netif_stop_queue(dev);
        napi_disable(&priv->napi);
        if (priv->has_phy)
                phy_stop(priv->phydev);
        del_timer_sync(&priv->rx_timeout);

        /* mask all interrupts */
        enet_writel(priv, 0, ENET_IRMASK_REG);
        enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
        enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));

        /* make sure no mib update is scheduled */
        flush_scheduled_work();

        /* disable dma & mac */
        bcm_enet_disable_dma(priv, priv->tx_chan);
        bcm_enet_disable_dma(priv, priv->rx_chan);
        bcm_enet_disable_mac(priv);

        /* force reclaim of all tx buffers */
        bcm_enet_tx_reclaim(dev, 1);

        /* free the rx skb ring */
        for (i = 0; i < priv->rx_ring_size; i++) {
                struct bcm_enet_desc *desc;

                if (!priv->rx_skb[i])
                        continue;

                desc = &priv->rx_desc_cpu[i];
                dma_unmap_single(kdev, desc->address, BCMENET_MAX_RX_SIZE,
                                 DMA_FROM_DEVICE);
                kfree_skb(priv->rx_skb[i]);
        }

        /* free remaining allocated memory */
        kfree(priv->rx_skb);
        kfree(priv->tx_skb);
        dma_free_coherent(kdev, priv->rx_desc_alloc_size,
                          priv->rx_desc_cpu, priv->rx_desc_dma);
        dma_free_coherent(kdev, priv->tx_desc_alloc_size,
                          priv->tx_desc_cpu, priv->tx_desc_dma);
        free_irq(priv->irq_tx, dev);
        free_irq(priv->irq_rx, dev);
        free_irq(dev->irq, dev);

        /* release phy */
        if (priv->has_phy) {
                phy_disconnect(priv->phydev);
                priv->phydev = NULL;
        }

        return 0;
}

/*
 * core request to return device rx/tx stats
 */
static struct net_device_stats *bcm_enet_get_stats(struct net_device *dev)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);
        return &priv->stats;
}

/*
 * ethtool callbacks
 */
struct bcm_enet_stats {
        char stat_string[ETH_GSTRING_LEN];
        int sizeof_stat;
        int stat_offset;
        int mib_reg;
};

#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m),             \
                     offsetof(struct bcm_enet_priv, m)
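
/*
 * GEN_STAT expands into the sizeof_stat and stat_offset initializers of
 * a bcm_enet_stats entry; for example GEN_STAT(stats.rx_packets) becomes
 * sizeof(((struct bcm_enet_priv *)0)->stats.rx_packets),
 * offsetof(struct bcm_enet_priv, stats.rx_packets), which lets
 * update_mib_counters() and bcm_enet_get_ethtool_stats() locate each
 * counter by pointer arithmetic on priv regardless of its type.
 */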

static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
        { "rx_packets", GEN_STAT(stats.rx_packets), -1 },
        { "tx_packets", GEN_STAT(stats.tx_packets), -1 },
        { "rx_bytes", GEN_STAT(stats.rx_bytes), -1 },
        { "tx_bytes", GEN_STAT(stats.tx_bytes), -1 },
        { "rx_errors", GEN_STAT(stats.rx_errors), -1 },
        { "tx_errors", GEN_STAT(stats.tx_errors), -1 },
        { "rx_dropped", GEN_STAT(stats.rx_dropped), -1 },
        { "tx_dropped", GEN_STAT(stats.tx_dropped), -1 },

        { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS},
        { "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
        { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
        { "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
        { "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
        { "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
        { "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
        { "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
        { "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
        { "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
        { "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
        { "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
        { "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
        { "rx_dropped", GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
        { "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
        { "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
        { "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
        { "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
        { "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
        { "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
        { "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },

        { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
        { "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
        { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
        { "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
        { "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
        { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
        { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
        { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
        { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023},
        { "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
        { "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
        { "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
        { "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
        { "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
        { "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
        { "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
        { "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
        { "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
        { "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
        { "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
        { "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
        { "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },
};

#define BCM_ENET_STATS_LEN      ARRAY_SIZE(bcm_enet_gstrings_stats)

static const u32 unused_mib_regs[] = {
        ETH_MIB_TX_ALL_OCTETS,
        ETH_MIB_TX_ALL_PKTS,
        ETH_MIB_RX_ALL_OCTETS,
        ETH_MIB_RX_ALL_PKTS,
};


static void bcm_enet_get_drvinfo(struct net_device *netdev,
                                 struct ethtool_drvinfo *drvinfo)
{
        strncpy(drvinfo->driver, bcm_enet_driver_name, 32);
        strncpy(drvinfo->version, bcm_enet_driver_version, 32);
        strncpy(drvinfo->fw_version, "N/A", 32);
        strncpy(drvinfo->bus_info, "bcm63xx", 32);
        drvinfo->n_stats = BCM_ENET_STATS_LEN;
}

static int bcm_enet_get_stats_count(struct net_device *netdev)
{
        return BCM_ENET_STATS_LEN;
}

static void bcm_enet_get_strings(struct net_device *netdev,
                                 u32 stringset, u8 *data)
{
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
                        memcpy(data + i * ETH_GSTRING_LEN,
                               bcm_enet_gstrings_stats[i].stat_string,
                               ETH_GSTRING_LEN);
                }
                break;
        }
}

static void update_mib_counters(struct bcm_enet_priv *priv)
{
        int i;

        for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
                const struct bcm_enet_stats *s;
                u32 val;
                char *p;

                s = &bcm_enet_gstrings_stats[i];
                if (s->mib_reg == -1)
                        continue;

                val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
                p = (char *)priv + s->stat_offset;

                if (s->sizeof_stat == sizeof(u64))
                        *(u64 *)p += val;
                else
                        *(u32 *)p += val;
        }

        /* also empty unused mib counters to make sure mib counter
         * overflow interrupt is cleared */
        for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
                (void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
}
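
/*
 * The hardware MIB counters are configured as clear-on-read in
 * bcm_enet_hw_preinit() (ENET_MIBCTL_RDCLEAR_MASK), so each read above
 * drains the hardware register and the running totals accumulate in
 * priv; the extra reads of unused_mib_regs exist purely to clear every
 * source of the "counters about to overflow" interrupt.
 */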

static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
{
        struct bcm_enet_priv *priv;

        priv = container_of(t, struct bcm_enet_priv, mib_update_task);
        mutex_lock(&priv->mib_update_lock);
        update_mib_counters(priv);
        mutex_unlock(&priv->mib_update_lock);

        /* reenable mib interrupt */
        if (netif_running(priv->net_dev))
                enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
}

static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
                                       struct ethtool_stats *stats,
                                       u64 *data)
{
        struct bcm_enet_priv *priv;
        int i;

        priv = netdev_priv(netdev);

        mutex_lock(&priv->mib_update_lock);
        update_mib_counters(priv);

        for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
                const struct bcm_enet_stats *s;
                char *p;

                s = &bcm_enet_gstrings_stats[i];
                p = (char *)priv + s->stat_offset;
                data[i] = (s->sizeof_stat == sizeof(u64)) ?
                        *(u64 *)p : *(u32 *)p;
        }
        mutex_unlock(&priv->mib_update_lock);
}

static int bcm_enet_get_settings(struct net_device *dev,
                                 struct ethtool_cmd *cmd)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);

        cmd->maxrxpkt = 0;
        cmd->maxtxpkt = 0;

        if (priv->has_phy) {
                if (!priv->phydev)
                        return -ENODEV;
                return phy_ethtool_gset(priv->phydev, cmd);
        } else {
                cmd->autoneg = 0;
                cmd->speed = (priv->force_speed_100) ? SPEED_100 : SPEED_10;
                cmd->duplex = (priv->force_duplex_full) ?
                        DUPLEX_FULL : DUPLEX_HALF;
                cmd->supported = SUPPORTED_10baseT_Half |
                        SUPPORTED_10baseT_Full |
                        SUPPORTED_100baseT_Half |
                        SUPPORTED_100baseT_Full;
                cmd->advertising = 0;
                cmd->port = PORT_MII;
                cmd->transceiver = XCVR_EXTERNAL;
        }
        return 0;
}

static int bcm_enet_set_settings(struct net_device *dev,
                                 struct ethtool_cmd *cmd)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);
        if (priv->has_phy) {
                if (!priv->phydev)
                        return -ENODEV;
                return phy_ethtool_sset(priv->phydev, cmd);
        } else {
                if (cmd->autoneg ||
                    (cmd->speed != SPEED_100 && cmd->speed != SPEED_10) ||
                    cmd->port != PORT_MII)
                        return -EINVAL;

                priv->force_speed_100 = (cmd->speed == SPEED_100) ? 1 : 0;
                priv->force_duplex_full = (cmd->duplex == DUPLEX_FULL) ? 1 : 0;

                if (netif_running(dev))
                        bcm_enet_adjust_link(dev);
                return 0;
        }
}

static void bcm_enet_get_ringparam(struct net_device *dev,
                                   struct ethtool_ringparam *ering)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);

        /* rx/tx ring is actually only limited by memory */
        ering->rx_max_pending = 8192;
        ering->tx_max_pending = 8192;
        ering->rx_mini_max_pending = 0;
        ering->rx_jumbo_max_pending = 0;
        ering->rx_pending = priv->rx_ring_size;
        ering->tx_pending = priv->tx_ring_size;
}

static int bcm_enet_set_ringparam(struct net_device *dev,
                                  struct ethtool_ringparam *ering)
{
        struct bcm_enet_priv *priv;
        int was_running;

        priv = netdev_priv(dev);

        was_running = 0;
        if (netif_running(dev)) {
                bcm_enet_stop(dev);
                was_running = 1;
        }

        priv->rx_ring_size = ering->rx_pending;
        priv->tx_ring_size = ering->tx_pending;

        if (was_running) {
                int err;

                err = bcm_enet_open(dev);
                if (err)
                        dev_close(dev);
                else
                        bcm_enet_set_multicast_list(dev);
        }
        return 0;
}

static void bcm_enet_get_pauseparam(struct net_device *dev,
                                    struct ethtool_pauseparam *ecmd)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);
        ecmd->autoneg = priv->pause_auto;
        ecmd->rx_pause = priv->pause_rx;
        ecmd->tx_pause = priv->pause_tx;
}

static int bcm_enet_set_pauseparam(struct net_device *dev,
                                   struct ethtool_pauseparam *ecmd)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);

        if (priv->has_phy) {
                if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
                        /* asymmetric pause mode not supported,
                         * actually possible but integrated PHY has RO
                         * asym_pause bit */
                        return -EINVAL;
                }
        } else {
                /* no pause autoneg on direct mii connection */
                if (ecmd->autoneg)
                        return -EINVAL;
        }

        priv->pause_auto = ecmd->autoneg;
        priv->pause_rx = ecmd->rx_pause;
        priv->pause_tx = ecmd->tx_pause;

        return 0;
}

static struct ethtool_ops bcm_enet_ethtool_ops = {
        .get_strings            = bcm_enet_get_strings,
        .get_stats_count        = bcm_enet_get_stats_count,
        .get_ethtool_stats      = bcm_enet_get_ethtool_stats,
        .get_settings           = bcm_enet_get_settings,
        .set_settings           = bcm_enet_set_settings,
        .get_drvinfo            = bcm_enet_get_drvinfo,
        .get_link               = ethtool_op_get_link,
        .get_ringparam          = bcm_enet_get_ringparam,
        .set_ringparam          = bcm_enet_set_ringparam,
        .get_pauseparam         = bcm_enet_get_pauseparam,
        .set_pauseparam         = bcm_enet_set_pauseparam,
};

static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);
        if (priv->has_phy) {
                if (!priv->phydev)
                        return -ENODEV;
                return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
        } else {
                struct mii_if_info mii;

                mii.dev = dev;
                mii.mdio_read = bcm_enet_mdio_read_mii;
                mii.mdio_write = bcm_enet_mdio_write_mii;
                mii.phy_id = 0;
                mii.phy_id_mask = 0x3f;
                mii.reg_num_mask = 0x1f;
                return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
        }
}
1507
1508 /*
1509  * preinit hardware to allow mii operation while device is down
1510  */
1511 static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
1512 {
1513         u32 val;
1514         int limit;
1515
1516         /* make sure mac is disabled */
1517         bcm_enet_disable_mac(priv);
1518
1519         /* soft reset mac */
1520         val = ENET_CTL_SRESET_MASK;
1521         enet_writel(priv, val, ENET_CTL_REG);
1522         wmb();
1523
1524         limit = 1000;
1525         do {
1526                 val = enet_readl(priv, ENET_CTL_REG);
1527                 if (!(val & ENET_CTL_SRESET_MASK))
1528                         break;
1529                 udelay(1);
1530         } while (limit--);
1531
1532         /* select correct mii interface */
1533         val = enet_readl(priv, ENET_CTL_REG);
1534         if (priv->use_external_mii)
1535                 val |= ENET_CTL_EPHYSEL_MASK;
1536         else
1537                 val &= ~ENET_CTL_EPHYSEL_MASK;
1538         enet_writel(priv, val, ENET_CTL_REG);
1539
1540         /* turn on mdc clock */
1541         enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
1542                     ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);
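             /* note: 0x1f presumably selects the largest, i.e. safest,
              * mdc divider; preamble generation is left enabled, the
              * conservative default for clause-22 phys */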
1543
1544         /* set mib counters to self-clear when read */
1545         val = enet_readl(priv, ENET_MIBCTL_REG);
1546         val |= ENET_MIBCTL_RDCLEAR_MASK;
1547         enet_writel(priv, val, ENET_MIBCTL_REG);
1548 }
1549
1550 /*
1551  * allocate netdevice, request register memory and register device.
1552  * allocate the netdevice, request register memory and register the device.
1553 static int __devinit bcm_enet_probe(struct platform_device *pdev)
1554 {
1555         struct bcm_enet_priv *priv;
1556         struct net_device *dev;
1557         struct bcm63xx_enet_platform_data *pd;
1558         struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
1559         struct mii_bus *bus;
1560         const char *clk_name;
1561         unsigned int iomem_size;
1562         int i, ret, mdio_registered, mem_requested;
1563
1564         /* stop if the shared driver failed; we rely on driver->probe
1565          * being called in the same order the devices were registered */
1566         if (!bcm_enet_shared_base)
1567                 return -ENODEV;
1568
1569         mdio_registered = mem_requested = 0;
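             /* track partial initialization so the error path only
              * releases what was actually acquired */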
1570
1571         res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1572         res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1573         res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
1574         res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
1575         if (!res_mem || !res_irq || !res_irq_rx || !res_irq_tx)
1576                 return -ENODEV;
1577
1578         ret = 0;
1579         dev = alloc_etherdev(sizeof(*priv));
1580         if (!dev)
1581                 return -ENOMEM;
1582         priv = netdev_priv(dev);
1583         memset(priv, 0, sizeof(*priv));
1584
1585         iomem_size = res_mem->end - res_mem->start + 1;
1586         if (!request_mem_region(res_mem->start, iomem_size, "bcm63xx_enet")) {
1587                 ret = -EBUSY;
1588                 goto err;
1589         }
1590         mem_requested = 1;
1591
1592         priv->base = ioremap(res_mem->start, iomem_size);
1593         if (priv->base == NULL) {
1594                 ret = -ENOMEM;
1595                 goto err;
1596         }
1597         dev->irq = priv->irq = res_irq->start;
1598         priv->irq_rx = res_irq_rx->start;
1599         priv->irq_tx = res_irq_tx->start;
1600         priv->mac_id = pdev->id;
1601
1602         /* get rx & tx dma channel id for this mac */
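             /* the shared dma engine hardwires a rx/tx channel pair to
              * each mac, so the ids follow from the mac index */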
1603         if (priv->mac_id == 0) {
1604                 priv->rx_chan = 0;
1605                 priv->tx_chan = 1;
1606                 clk_name = "enet0";
1607         } else {
1608                 priv->rx_chan = 2;
1609                 priv->tx_chan = 3;
1610                 clk_name = "enet1";
1611         }
1612
1613         priv->mac_clk = clk_get(&pdev->dev, clk_name);
1614         if (IS_ERR(priv->mac_clk)) {
1615                 ret = PTR_ERR(priv->mac_clk);
1616                 priv->mac_clk = NULL;
1617                 goto err;
1618         }
1619         clk_enable(priv->mac_clk);
1620
1621         /* initialize defaults and fetch platform data */
1622         priv->rx_ring_size = BCMENET_DEF_RX_DESC;
1623         priv->tx_ring_size = BCMENET_DEF_TX_DESC;
1624
1625         pd = pdev->dev.platform_data;
1626         if (pd) {
1627                 memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
1628                 priv->has_phy = pd->has_phy;
1629                 priv->phy_id = pd->phy_id;
1630                 priv->has_phy_interrupt = pd->has_phy_interrupt;
1631                 priv->phy_interrupt = pd->phy_interrupt;
1632                 priv->use_external_mii = !pd->use_internal_phy;
1633                 priv->pause_auto = pd->pause_auto;
1634                 priv->pause_rx = pd->pause_rx;
1635                 priv->pause_tx = pd->pause_tx;
1636                 priv->force_duplex_full = pd->force_duplex_full;
1637                 priv->force_speed_100 = pd->force_speed_100;
1638         }
1639
1640         if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) {
1641                 /* using internal PHY, enable clock */
1642                 priv->phy_clk = clk_get(&pdev->dev, "ephy");
1643                 if (IS_ERR(priv->phy_clk)) {
1644                         ret = PTR_ERR(priv->phy_clk);
1645                         priv->phy_clk = NULL;
1646                         goto err;
1647                 }
1648                 clk_enable(priv->phy_clk);
1649         }
1650
1651         /* do minimal hardware init to be able to probe mii bus */
1652         bcm_enet_hw_preinit(priv);
1653
1654         /* MII bus registration */
1655         if (priv->has_phy) {
1656                 bus = &priv->mii_bus;
1657                 bus->name = "bcm63xx_enet MII bus";
1658                 bus->dev = &pdev->dev;
1659                 bus->priv = priv;
1660                 bus->read = bcm_enet_mdio_read_phylib;
1661                 bus->write = bcm_enet_mdio_write_phylib;
1662                 sprintf(bus->id, "%d", priv->mac_id);
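                     /* the id only needs to be unique among mdio buses;
                      * the mac index is enough for that */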
1663
1664                 /* only probe the address where we expect the PHY,
1665                  * because the mdio read operation returns 0 instead
1666                  * of 0xffff when no slave is present on the bus */
1667                 bus->phy_mask = ~(1 << priv->phy_id);
1668
1669                 bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
1670                 if (!bus->irq) {
1671                         ret = -ENOMEM;
1672                         goto err;
1673                 }
1674
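                     /* only the slot for our PHY address is ever read,
                      * since every other address is masked off above */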
1675                 if (priv->has_phy_interrupt)
1676                         bus->irq[priv->phy_id] = priv->phy_interrupt;
1677                 else
1678                         bus->irq[priv->phy_id] = PHY_POLL;
1679
1680                 ret = mdiobus_register(bus);
1681                 if (ret) {
1682                         dev_err(&pdev->dev, "unable to register mdio bus\n");
1683                         goto err;
1684                 }
1685                 mdio_registered = 1;
1686         } else {
1687                 /* run platform code to initialize PHY device */
1688                 if (pd && pd->mii_config &&
1689                     pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
1690                                    bcm_enet_mdio_write_mii)) {
1691                         dev_err(&pdev->dev, "unable to configure mdio bus\n");
1692                         ret = -ENODEV;
1693                         goto err;
1694                 }
1695         }
1696
1697         spin_lock_init(&priv->rx_lock);
1698
1699         /* init rx timeout timer (used to retry rx refill after oom) */
1700         init_timer(&priv->rx_timeout);
1701         priv->rx_timeout.function = bcm_enet_refill_rx_timer;
1702         priv->rx_timeout.data = (unsigned long)dev;
1703
1704         /* init the mib update lock&work */
1705         mutex_init(&priv->mib_update_lock);
1706         INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);
1707
1708         /* zero mib counters */
1709         for (i = 0; i < ENET_MIB_REG_COUNT; i++)
1710                 enet_writel(priv, 0, ENET_MIB_REG(i));
1711
1712         /* register netdevice */
1713         dev->open = bcm_enet_open;
1714         dev->stop = bcm_enet_stop;
1715         dev->hard_start_xmit = bcm_enet_start_xmit;
1716         dev->get_stats = bcm_enet_get_stats;
1717         dev->set_mac_address = bcm_enet_set_mac_address;
1718         dev->set_multicast_list = bcm_enet_set_multicast_list;
1719         netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
1720         dev->do_ioctl = bcm_enet_ioctl;
1721 #ifdef CONFIG_NET_POLL_CONTROLLER
1722         dev->poll_controller = bcm_enet_netpoll;
1723 #endif
1724
1725         SET_ETHTOOL_OPS(dev, &bcm_enet_ethtool_ops);
1726
1727         ret = register_netdev(dev);
1728         if (ret)
1729                 goto err;
1730
1731         platform_set_drvdata(pdev, dev);
1732         priv->pdev = pdev;
1733         priv->net_dev = dev;
1734         SET_NETDEV_DEV(dev, &pdev->dev);
1735
1736         return 0;
1737
1738 err:
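             /* unwind in reverse order; the flags and NULL checks make
              * this safe whatever stage the failure happened at */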
1739         if (mem_requested)
1740                 release_mem_region(res_mem->start, iomem_size);
1741         if (mdio_registered)
1742                 mdiobus_unregister(&priv->mii_bus);
1743         kfree(priv->mii_bus.irq);
1744         if (priv->mac_clk) {
1745                 clk_disable(priv->mac_clk);
1746                 clk_put(priv->mac_clk);
1747         }
1748         if (priv->phy_clk) {
1749                 clk_disable(priv->phy_clk);
1750                 clk_put(priv->phy_clk);
1751         }
1752         if (priv->base) {
1753                 /* turn off mdc clock */
1754                 enet_writel(priv, 0, ENET_MIISC_REG);
1755                 iounmap(priv->base);
1756         }
1757         free_netdev(dev);
1758         return ret;
1759 }
1760
1761
1762 /*
1763  * remove callback: stops the hardware and unregisters the netdevice
1764  */
1765 static int __devexit bcm_enet_remove(struct platform_device *pdev)
1766 {
1767         struct bcm_enet_priv *priv;
1768         struct net_device *dev;
1769         struct resource *res;
1770
1771         /* stop netdevice */
1772         dev = platform_get_drvdata(pdev);
1773         priv = netdev_priv(dev);
1774         unregister_netdev(dev);
1775
1776         /* turn off mdc clock */
1777         enet_writel(priv, 0, ENET_MIISC_REG);
1778
1779         if (priv->has_phy) {
1780                 mdiobus_unregister(&priv->mii_bus);
1781                 kfree(priv->mii_bus.irq);
1782         } else {
1783                 struct bcm63xx_enet_platform_data *pd;
1784
1785                 pd = pdev->dev.platform_data;
1786                 if (pd && pd->mii_config)
1787                         pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
1788                                        bcm_enet_mdio_write_mii);
1789         }
1790
1791         /* release device resources */
1792         iounmap(priv->base);
1793         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1794         release_mem_region(res->start, res->end - res->start + 1);
1795
1796         /* disable hw block clocks */
1797         if (priv->phy_clk) {
1798                 clk_disable(priv->phy_clk);
1799                 clk_put(priv->phy_clk);
1800         }
1801         clk_disable(priv->mac_clk);
1802         clk_put(priv->mac_clk);
1803
1804         free_netdev(dev);
1805         return 0;
1806 }
1807
1808 struct platform_driver bcm63xx_enet_driver = {
1809         .probe  = bcm_enet_probe,
1810         .remove = __devexit_p(bcm_enet_remove),
1811         .driver = {
1812                 .name   = "bcm63xx_enet",
1813                 .owner  = THIS_MODULE,
1814         },
1815 };
1816
1817 /*
1818  * reserve & remap memory space shared between all macs
1819  */
1820 static int __devinit bcm_enet_shared_probe(struct platform_device *pdev)
1821 {
1822         struct resource *res;
1823         unsigned int iomem_size;
1824
1825         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1826         if (!res)
1827                 return -ENODEV;
1828
1829         iomem_size = res->end - res->start + 1;
1830         if (!request_mem_region(res->start, iomem_size, "bcm63xx_enet_dma"))
1831                 return -EBUSY;
1832
1833         bcm_enet_shared_base = ioremap(res->start, iomem_size);
1834         if (!bcm_enet_shared_base) {
1835                 release_mem_region(res->start, iomem_size);
1836                 return -ENOMEM;
1837         }
1838         return 0;
1839 }
1840
1841 static int __devexit bcm_enet_shared_remove(struct platform_device *pdev)
1842 {
1843         struct resource *res;
1844
1845         iounmap(bcm_enet_shared_base);
1846         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1847         release_mem_region(res->start, res->end - res->start + 1);
1848         return 0;
1849 }
1850
1851 /*
1852  * this "shared" driver is needed because both macs share a single
1853  * dma register address space
1854  */
1855 struct platform_driver bcm63xx_enet_shared_driver = {
1856         .probe  = bcm_enet_shared_probe,
1857         .remove = __devexit_p(bcm_enet_shared_remove),
1858         .driver = {
1859                 .name   = "bcm63xx_enet_shared",
1860                 .owner  = THIS_MODULE,
1861         },
1862 };
1863
1864 /*
1865  * entry point
1866  */
1867 static int __init bcm_enet_init(void)
1868 {
1869         int ret;
1870
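             /* register the shared (dma) driver first: its probe must run
              * before any mac probe dereferences bcm_enet_shared_base */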
1871         ret = platform_driver_register(&bcm63xx_enet_shared_driver);
1872         if (ret)
1873                 return ret;
1874
1875         ret = platform_driver_register(&bcm63xx_enet_driver);
1876         if (ret)
1877                 platform_driver_unregister(&bcm63xx_enet_shared_driver);
1878
1879         return ret;
1880 }
1881
1882 static void __exit bcm_enet_exit(void)
1883 {
1884         platform_driver_unregister(&bcm63xx_enet_driver);
1885         platform_driver_unregister(&bcm63xx_enet_shared_driver);
1886 }
1887
1888
1889 module_init(bcm_enet_init);
1890 module_exit(bcm_enet_exit);
1891
1892 MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver");
1893 MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
1894 MODULE_LICENSE("GPL");