1 Patches for bgmac backported from net-next/master
3 --- a/drivers/net/ethernet/broadcom/bgmac.c
4 +++ b/drivers/net/ethernet/broadcom/bgmac.c
5 @@ -149,6 +149,8 @@ static netdev_tx_t bgmac_dma_tx_add(stru
6 dma_desc->ctl0 = cpu_to_le32(ctl0);
7 dma_desc->ctl1 = cpu_to_le32(ctl1);
9 + netdev_sent_queue(net_dev, skb->len);
13 /* Increase ring->end to point empty slot. We tell hardware the first
14 @@ -178,6 +180,7 @@ static void bgmac_dma_tx_free(struct bgm
15 struct device *dma_dev = bgmac->core->dma_dev;
18 + unsigned bytes_compl = 0, pkts_compl = 0;
20 /* The last slot that hardware didn't consume yet */
21 empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
22 @@ -195,6 +198,9 @@ static void bgmac_dma_tx_free(struct bgm
23 slot->skb->len, DMA_TO_DEVICE);
26 + bytes_compl += slot->skb->len;
30 dev_kfree_skb(slot->skb);
32 @@ -208,6 +214,8 @@ static void bgmac_dma_tx_free(struct bgm
36 + netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);
38 if (freed && netif_queue_stopped(bgmac->net_dev))
39 netif_wake_queue(bgmac->net_dev);
41 @@ -244,31 +252,59 @@ static int bgmac_dma_rx_skb_for_slot(str
42 struct bgmac_slot_info *slot)
44 struct device *dma_dev = bgmac->core->dma_dev;
45 + struct sk_buff *skb;
46 + dma_addr_t dma_addr;
47 struct bgmac_rx_header *rx;
50 - slot->skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
52 + skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
56 /* Poison - if everything goes fine, hardware will overwrite it */
57 - rx = (struct bgmac_rx_header *)slot->skb->data;
58 + rx = (struct bgmac_rx_header *)skb->data;
59 rx->len = cpu_to_le16(0xdead);
60 rx->flags = cpu_to_le16(0xbeef);
62 /* Map skb for the DMA */
63 - slot->dma_addr = dma_map_single(dma_dev, slot->skb->data,
64 - BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
65 - if (dma_mapping_error(dma_dev, slot->dma_addr)) {
66 + dma_addr = dma_map_single(dma_dev, skb->data,
67 + BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
68 + if (dma_mapping_error(dma_dev, dma_addr)) {
69 bgmac_err(bgmac, "DMA mapping error\n");
74 + /* Update the slot */
76 + slot->dma_addr = dma_addr;
78 if (slot->dma_addr & 0xC0000000)
79 bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
84 +static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
85 + struct bgmac_dma_ring *ring, int desc_idx)
87 + struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
88 + u32 ctl0 = 0, ctl1 = 0;
90 + if (desc_idx == ring->num_slots - 1)
91 + ctl0 |= BGMAC_DESC_CTL0_EOT;
92 + ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
93 + /* Is there any BGMAC device that requires extension? */
94 + /* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
95 + * B43_DMA64_DCTL1_ADDREXT_MASK;
98 + dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
99 + dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
100 + dma_desc->ctl0 = cpu_to_le32(ctl0);
101 + dma_desc->ctl1 = cpu_to_le32(ctl1);
104 static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
107 @@ -287,7 +323,6 @@ static int bgmac_dma_rx_read(struct bgma
108 struct device *dma_dev = bgmac->core->dma_dev;
109 struct bgmac_slot_info *slot = &ring->slots[ring->start];
110 struct sk_buff *skb = slot->skb;
111 - struct sk_buff *new_skb;
112 struct bgmac_rx_header *rx;
115 @@ -300,38 +335,51 @@ static int bgmac_dma_rx_read(struct bgma
116 len = le16_to_cpu(rx->len);
117 flags = le16_to_cpu(rx->flags);
119 - /* Check for poison and drop or pass the packet */
120 - if (len == 0xdead && flags == 0xbeef) {
121 - bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
125 + dma_addr_t old_dma_addr = slot->dma_addr;
128 + /* Check for poison and drop or pass the packet */
129 + if (len == 0xdead && flags == 0xbeef) {
130 + bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
132 + dma_sync_single_for_device(dma_dev,
142 - new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len);
144 - skb_put(new_skb, len);
145 - skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET,
148 - skb_checksum_none_assert(skb);
149 - new_skb->protocol =
150 - eth_type_trans(new_skb, bgmac->net_dev);
151 - netif_receive_skb(new_skb);
154 - bgmac->net_dev->stats.rx_dropped++;
155 - bgmac_err(bgmac, "Allocation of skb for copying packet failed!\n");
156 + /* Prepare new skb as replacement */
157 + err = bgmac_dma_rx_skb_for_slot(bgmac, slot);
159 + /* Poison the old skb */
160 + rx->len = cpu_to_le16(0xdead);
161 + rx->flags = cpu_to_le16(0xbeef);
163 + dma_sync_single_for_device(dma_dev,
169 + bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);
171 - /* Poison the old skb */
172 - rx->len = cpu_to_le16(0xdead);
173 - rx->flags = cpu_to_le16(0xbeef);
176 - /* Make it back accessible to the hardware */
177 - dma_sync_single_for_device(dma_dev, slot->dma_addr,
178 - BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
179 + /* Unmap old skb, we'll pass it to the netif */
180 + dma_unmap_single(dma_dev, old_dma_addr,
181 + BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
183 + skb_put(skb, BGMAC_RX_FRAME_OFFSET + len);
184 + skb_pull(skb, BGMAC_RX_FRAME_OFFSET);
186 + skb_checksum_none_assert(skb);
187 + skb->protocol = eth_type_trans(skb, bgmac->net_dev);
188 + netif_receive_skb(skb);
192 if (++ring->start >= BGMAC_RX_RING_SLOTS)
194 @@ -495,8 +543,6 @@ err_dma_free:
195 static void bgmac_dma_init(struct bgmac *bgmac)
197 struct bgmac_dma_ring *ring;
198 - struct bgmac_dma_desc *dma_desc;
202 for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
203 @@ -529,23 +575,8 @@ static void bgmac_dma_init(struct bgmac
205 bgmac_dma_rx_enable(bgmac, ring);
207 - for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots;
211 - if (j == ring->num_slots - 1)
212 - ctl0 |= BGMAC_DESC_CTL0_EOT;
213 - ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
214 - /* Is there any BGMAC device that requires extension? */
215 - /* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
216 - * B43_DMA64_DCTL1_ADDREXT_MASK;
219 - dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[j].dma_addr));
220 - dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[j].dma_addr));
221 - dma_desc->ctl0 = cpu_to_le32(ctl0);
222 - dma_desc->ctl1 = cpu_to_le32(ctl1);
224 + for (j = 0; j < ring->num_slots; j++)
225 + bgmac_dma_rx_setup_desc(bgmac, ring, j);
227 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
229 @@ -988,6 +1019,8 @@ static void bgmac_chip_reset(struct bgma
230 bgmac_miiconfig(bgmac);
231 bgmac_phy_init(bgmac);
233 + netdev_reset_queue(bgmac->net_dev);
235 bgmac->int_status = 0;