fix some madwifi-testing bugs
[openwrt.git] / package / madwifi / patches-r3776 / 300-napi_polling.patch
1 Index: madwifi-trunk-r3776/ath/if_ath.c
2 ===================================================================
3 --- madwifi-trunk-r3776.orig/ath/if_ath.c       2008-07-18 23:26:43.000000000 +0200
4 +++ madwifi-trunk-r3776/ath/if_ath.c    2008-07-18 23:29:26.000000000 +0200
5 @@ -182,7 +182,11 @@
6         struct sk_buff *, int, int, u_int64_t);
7  static void ath_setdefantenna(struct ath_softc *, u_int);
8  static struct ath_txq *ath_txq_setup(struct ath_softc *, int, int);
9 -static void ath_rx_tasklet(TQUEUE_ARG);
10 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
11 +static int ath_rx_poll(struct napi_struct *napi, int budget);
12 +#else
13 +static int ath_rx_poll(struct net_device *dev, int *budget);
14 +#endif
15  static int ath_hardstart(struct sk_buff *, struct net_device *);
16  static int ath_mgtstart(struct ieee80211com *, struct sk_buff *);
17  #ifdef ATH_SUPERG_COMP
18 @@ -331,6 +335,9 @@
19  static u_int32_t ath_set_clamped_maxtxpower(struct ath_softc *sc, 
20                 u_int32_t new_clamped_maxtxpower);
21  
22 +static void ath_poll_disable(struct net_device *dev);
23 +static void ath_poll_enable(struct net_device *dev);
24 +
25  static void ath_scanbufs(struct ath_softc *sc);
26  static int ath_debug_iwpriv(struct ieee80211com *ic, 
27                 unsigned int param, unsigned int value);
28 @@ -518,7 +525,6 @@
29  
30         atomic_set(&sc->sc_txbuf_counter, 0);
31  
32 -       ATH_INIT_TQUEUE(&sc->sc_rxtq,           ath_rx_tasklet,         dev);
33         ATH_INIT_TQUEUE(&sc->sc_txtq,           ath_tx_tasklet,         dev);
34         ATH_INIT_TQUEUE(&sc->sc_bmisstq,        ath_bmiss_tasklet,      dev);
35         ATH_INIT_TQUEUE(&sc->sc_bstucktq,       ath_bstuck_tasklet,     dev);
36 @@ -833,6 +839,12 @@
37         dev->set_mac_address = ath_set_mac_address;
38         dev->change_mtu = ath_change_mtu;
39         dev->tx_queue_len = ATH_TXBUF - ATH_TXBUF_MGT_RESERVED;
40 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
41 +       netif_napi_add(dev, &sc->sc_napi, ath_rx_poll, 64);
42 +#else
43 +       dev->poll = ath_rx_poll;
44 +       dev->weight = 64;
45 +#endif
46  #ifdef USE_HEADERLEN_RESV
47         dev->hard_header_len += sizeof(struct ieee80211_qosframe) +
48                                 sizeof(struct llc) +
49 @@ -1770,7 +1782,7 @@
50  }
51  
52  static void
53 -ath_intr_process_rx_descriptors(struct ath_softc *sc, int *pneedmark, u_int64_t hw_tsf)
54 +ath_intr_process_rx_descriptors(struct ath_softc *sc, int *pneedmark, u_int64_t hw_tsf, int schedule)
55  {
56         struct ath_hal *ah = sc->sc_ah;
57         struct ath_desc *ds;
58 @@ -2252,8 +2264,25 @@
59         }
60  
61         /* If we got something to process, schedule rx queue to handle it */
62 -       if (count)
63 -               ATH_SCHEDULE_TQUEUE(&sc->sc_rxtq, pneedmark);
64 +       if (count) {
65 +               sc->sc_isr &= ~HAL_INT_RX;
66 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
67 +               if (netif_rx_schedule_prep(sc->sc_dev, &sc->sc_napi))
68 +#else
69 +               if (netif_rx_schedule_prep(sc->sc_dev))
70 +#endif
71 +               {
72 +#ifndef ATH_PRECISE_TSF
73 +                       sc->sc_imask &= ~HAL_INT_RX;
74 +                       ath_hal_intrset(ah, sc->sc_imask);
75 +#endif
76 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
77 +                       __netif_rx_schedule(sc->sc_dev, &sc->sc_napi);
78 +#else
79 +                       __netif_rx_schedule(sc->sc_dev);
80 +#endif
81 +               }
82 +       }
83         ATH_RXBUF_UNLOCK_IRQ(sc);
84  #undef PA2DESC
85  }
86 @@ -2343,6 +2372,7 @@
87                 (status & HAL_INT_GLOBAL)       ? " HAL_INT_GLOBAL"     : ""
88                 );
89  
90 +       sc->sc_isr = status;
91         status &= sc->sc_imask;                 /* discard unasked for bits */
92         /* As soon as we know we have a real interrupt we intend to service, 
93          * we will check to see if we need an initial hardware TSF reading. 
94 @@ -2400,7 +2430,7 @@
95                 }
96                 if (status & (HAL_INT_RX | HAL_INT_RXPHY)) {
97                         /* NB: Will schedule rx tasklet if necessary. */
98 -                       ath_intr_process_rx_descriptors(sc, &needmark, hw_tsf);
99 +                       ath_intr_process_rx_descriptors(sc, &needmark, hw_tsf, 1);
100                 }
101                 if (status & HAL_INT_TX) {
102  #ifdef ATH_SUPERG_DYNTURBO
103 @@ -2426,6 +2456,11 @@
104                                 }
105                         }
106  #endif
107 +                       /* disable transmit interrupt */
108 +                       sc->sc_isr &= ~HAL_INT_TX;
109 +                       ath_hal_intrset(ah, sc->sc_imask & ~HAL_INT_TX);
110 +                       sc->sc_imask &= ~HAL_INT_TX;
111 +
112                         ATH_SCHEDULE_TQUEUE(&sc->sc_txtq, &needmark);
113                 }
114                 if (status & HAL_INT_BMISS) {
115 @@ -2617,6 +2652,7 @@
116         if (sc->sc_tx99 != NULL)
117                 sc->sc_tx99->start(sc->sc_tx99);
118  #endif
119 +       ath_poll_enable(dev);
120  
121  done:
122         ATH_UNLOCK(sc);
123 @@ -2657,6 +2693,9 @@
124                 if (sc->sc_tx99 != NULL)
125                         sc->sc_tx99->stop(sc->sc_tx99);
126  #endif
127 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
128 +               ath_poll_disable(dev);
129 +#endif
130                 netif_stop_queue(dev);  /* XXX re-enabled by ath_newstate */
131                 dev->flags &= ~IFF_RUNNING;     /* NB: avoid recursion */
132                 ieee80211_stop_running(ic);     /* stop all VAPs */
133 @@ -4109,6 +4148,43 @@
134         return ath_keyset(sc, k, mac, vap->iv_bss);
135  }
136  
137 +static void ath_poll_disable(struct net_device *dev)
138 +{
139 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
140 +       struct ath_softc *sc = dev->priv;
141 +#endif
142 +
143 +       /*
144 +        * XXX Using in_softirq is not right since we might
145 +        * be called from other soft irq contexts than
146 +        * ath_rx_poll
147 +        */
148 +       if (!in_softirq()) {
149 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
150 +               napi_disable(&sc->sc_napi);
151 +#else
152 +               netif_poll_disable(dev);
153 +#endif
154 +       }
155 +}
156 +
157 +static void ath_poll_enable(struct net_device *dev)
158 +{
159 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
160 +       struct ath_softc *sc = dev->priv;
161 +#endif
162 +
163 +       /* NB: see above */
164 +       if (!in_softirq()) {
165 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
166 +               napi_enable(&sc->sc_napi);
167 +#else
168 +               netif_poll_enable(dev);
169 +#endif
170 +       }
171 +}
172 +
173 +
174  /*
175   * Block/unblock tx+rx processing while a key change is done.
176   * We assume the caller serializes key management operations
177 @@ -4119,33 +4195,23 @@
178  ath_key_update_begin(struct ieee80211vap *vap)
179  {
180         struct net_device *dev = vap->iv_ic->ic_dev;
181 -       struct ath_softc *sc = dev->priv;
182  
183 -       DPRINTF(sc, ATH_DEBUG_KEYCACHE, "Begin\n");
184         /*
185          * When called from the rx tasklet we cannot use
186          * tasklet_disable because it will block waiting
187          * for us to complete execution.
188 -        *
189 -        * XXX Using in_softirq is not right since we might
190 -        * be called from other soft irq contexts than
191 -        * ath_rx_tasklet.
192          */
193 -       if (!in_softirq())
194 -               tasklet_disable(&sc->sc_rxtq);
195 -       netif_stop_queue(dev);
196 +       if ((dev->flags & (IFF_UP|IFF_RUNNING)) == (IFF_UP|IFF_RUNNING))
197 +               netif_stop_queue(dev);
198  }
199  
200  static void
201  ath_key_update_end(struct ieee80211vap *vap)
202  {
203         struct net_device *dev = vap->iv_ic->ic_dev;
204 -       struct ath_softc *sc = dev->priv;
205  
206 -       DPRINTF(sc, ATH_DEBUG_KEYCACHE, "End\n");
207 -       netif_wake_queue(dev);
208 -       if (!in_softirq())              /* NB: see above */
209 -               tasklet_enable(&sc->sc_rxtq);
210 +       if ((dev->flags & (IFF_UP|IFF_RUNNING)) == (IFF_UP|IFF_RUNNING))
211 +               netif_wake_queue(dev);
212  }
213  
214  /*
215 @@ -6405,15 +6471,25 @@
216         sc->sc_numrxotherant = 0;
217  }
218  
219 -static void
220 -ath_rx_tasklet(TQUEUE_ARG data)
221 +static int
222 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
223 +ath_rx_poll(struct napi_struct *napi, int budget)
224 +#else
225 +ath_rx_poll(struct net_device *dev, int *budget)
226 +#endif
227  {
228  #define        PA2DESC(_sc, _pa) \
229         ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
230                 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
231 -       struct net_device *dev = (struct net_device *)data;
232 -       struct ath_buf *bf;
233 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
234 +       struct ath_softc *sc = container_of(napi, struct ath_softc, sc_napi);
235 +       struct net_device *dev = sc->sc_dev;
236 +       u_int rx_limit = budget;
237 +#else
238         struct ath_softc *sc = dev->priv;
239 +       u_int rx_limit = min(dev->quota, *budget);
240 +#endif
241 +       struct ath_buf *bf;
242         struct ieee80211com *ic = &sc->sc_ic;
243         struct ath_hal *ah = sc ? sc->sc_ah : NULL;
244         struct ath_desc *ds;
245 @@ -6421,6 +6497,7 @@
246         struct ieee80211_node *ni;
247         struct sk_buff *skb = NULL;
248         unsigned int len, phyerr, mic_fail = 0;
249 +       unsigned int early_stop = 0;
250         int type = -1; /* undefined */
251         int init_ret = 0;
252         int bf_processed = 0;
253 @@ -6428,6 +6505,7 @@
254         int errors       = 0;
255  
256         DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s started...\n", __func__);
257 +process_rx_again:
258         do {
259                 /* Get next RX buffer pending processing by RX tasklet...
260                  *  
261 @@ -6457,6 +6535,10 @@
262                         break;
263  
264                 bf_processed++;
265 +               if (rx_limit-- < 2) {
266 +                       early_stop = 1;
267 +                       break;
268 +               }
269                 ds  = bf->bf_desc;
270  
271  #ifdef AR_DEBUG
272 @@ -6491,6 +6573,7 @@
273                                 sc->sc_stats.ast_rx_phyerr++;
274                                 phyerr = rs->rs_phyerr & 0x1f;
275                                 sc->sc_stats.ast_rx_phy[phyerr]++;
276 +                               goto rx_next;
277                         }
278                         if (rs->rs_status & HAL_RXERR_DECRYPT) {
279                                 /* Decrypt error.  If the error occurred
280 @@ -6689,6 +6772,33 @@
281                 STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
282                 ATH_RXBUF_UNLOCK_IRQ(sc);
283         } while (1);
284 +       if (!early_stop) {
285 +               unsigned long flags;
286 +               /* Check if more data was received while we
287 +                * were processing the descriptor chain. */
288 +#ifndef ATH_PRECISE_TSF
289 +               local_irq_save(flags);
290 +               if (sc->sc_isr & HAL_INT_RX) {
291 +                       u_int64_t hw_tsf = ath_hal_gettsf64(ah);
292 +                       sc->sc_isr &= ~HAL_INT_RX;
293 +                       local_irq_restore(flags);
294 +                       ath_intr_process_rx_descriptors(sc, NULL, hw_tsf, 0);
295 +                       goto process_rx_again;
296 +               }
297 +               sc->sc_imask |= HAL_INT_RX;
298 +               ath_hal_intrset(ah, sc->sc_imask);
299 +               local_irq_restore(flags);
300 +#endif
301 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
302 +               netif_rx_complete(dev, napi);
303 +#else
304 +               netif_rx_complete(dev);
305 +#endif
306 +       }
307 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
308 +       *budget -= bf_processed;
309 +       dev->quota -= bf_processed;
310 +#endif
311  
312         if (sc->sc_useintmit) 
313                 ath_hal_rxmonitor(ah, &sc->sc_halstats, &sc->sc_curchan);
314 @@ -6701,6 +6811,12 @@
315                 " %d rx buf processed. %d were errors. %d skb accepted.\n",
316                 __func__, bf_processed, errors, skb_accepted);
317  #undef PA2DESC
318 +
319 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
320 +       return bf_processed;
321 +#else
322 +       return early_stop;
323 +#endif
324  }
325  
326  #ifdef ATH_SUPERG_XR
327 @@ -8306,12 +8422,24 @@
328  {
329         struct net_device *dev = (struct net_device *)data;
330         struct ath_softc *sc = dev->priv;
331 +       unsigned long flags;
332  
333 +process_tx_again:
334         if (txqactive(sc->sc_ah, 0))
335                 ath_tx_processq(sc, &sc->sc_txq[0]);
336         if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
337                 ath_tx_processq(sc, sc->sc_cabq);
338  
339 +       local_irq_save(flags);
340 +       if (sc->sc_isr & HAL_INT_TX) {
341 +               sc->sc_isr &= ~HAL_INT_TX;
342 +               local_irq_restore(flags);
343 +               goto process_tx_again;
344 +       }
345 +       sc->sc_imask |= HAL_INT_TX;
346 +       ath_hal_intrset(sc->sc_ah, sc->sc_imask);
347 +       local_irq_restore(flags);
348 +
349         netif_wake_queue(dev);
350  
351         if (sc->sc_softled)
352 @@ -8327,7 +8455,9 @@
353  {
354         struct net_device *dev = (struct net_device *)data;
355         struct ath_softc *sc = dev->priv;
356 +       unsigned long flags;
357  
358 +process_tx_again:
359         /*
360          * Process each active queue.
361          */
362 @@ -8357,6 +8487,16 @@
363         if (sc->sc_uapsdq && txqactive(sc->sc_ah, sc->sc_uapsdq->axq_qnum))
364                 ath_tx_processq(sc, sc->sc_uapsdq);
365  
366 +       local_irq_save(flags);
367 +       if (sc->sc_isr & HAL_INT_TX) {
368 +               sc->sc_isr &= ~HAL_INT_TX;
369 +               local_irq_restore(flags);
370 +               goto process_tx_again;
371 +       }
372 +       sc->sc_imask |= HAL_INT_TX;
373 +       ath_hal_intrset(sc->sc_ah, sc->sc_imask);
374 +       local_irq_restore(flags);
375 +
376         netif_wake_queue(dev);
377  
378         if (sc->sc_softled)
379 @@ -10322,9 +10462,9 @@
380         dev->mtu = mtu;
381         if ((dev->flags & IFF_RUNNING) && !sc->sc_invalid) {
382                 /* NB: the rx buffers may need to be reallocated */
383 -               tasklet_disable(&sc->sc_rxtq);
384 +               ath_poll_disable(dev);
385                 error = ath_reset(dev);
386 -               tasklet_enable(&sc->sc_rxtq);
387 +               ath_poll_enable(dev);
388         }
389         ATH_UNLOCK(sc);
390  
391 Index: madwifi-trunk-r3776/ath/if_athvar.h
392 ===================================================================
393 --- madwifi-trunk-r3776.orig/ath/if_athvar.h    2008-07-18 23:26:22.000000000 +0200
394 +++ madwifi-trunk-r3776/ath/if_athvar.h 2008-07-18 23:27:47.000000000 +0200
395 @@ -56,6 +56,10 @@
396  # include      <asm/bitops.h>
397  #endif
398  
399 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
400 +#define irqs_disabled()                        0
401 +#endif
402 +
403  /*
404   * Deduce if tasklets are available.  If not then
405   * fall back to using the immediate work queue.
406 @@ -644,6 +648,9 @@
407  struct ath_softc {
408         struct ieee80211com sc_ic;              /* NB: must be first */
409         struct net_device *sc_dev;
410 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
411 +       struct napi_struct sc_napi;
412 +#endif
413         void __iomem *sc_iobase;                /* address of the device */
414         struct semaphore sc_lock;               /* dev-level lock */
415         struct net_device_stats sc_devstats;    /* device statistics */
416 @@ -756,7 +763,6 @@
417         struct ath_buf *sc_rxbufcur;            /* current rx buffer */
418         u_int32_t *sc_rxlink;                   /* link ptr in last RX desc */
419         spinlock_t sc_rxbuflock;
420 -       struct ATH_TQ_STRUCT sc_rxtq;           /* rx intr tasklet */
421         struct ATH_TQ_STRUCT sc_rxorntq;        /* rxorn intr tasklet */
422         u_int16_t sc_cachelsz;                  /* cache line size */
423  
424 @@ -769,6 +775,7 @@
425         u_int sc_txintrperiod;                  /* tx interrupt batching */
426         struct ath_txq sc_txq[HAL_NUM_TX_QUEUES];
427         struct ath_txq *sc_ac2q[WME_NUM_AC];    /* WME AC -> h/w qnum */
428 +       HAL_INT sc_isr;                         /* unmasked ISR state */
429         struct ATH_TQ_STRUCT sc_txtq;           /* tx intr tasklet */
430         u_int8_t sc_grppoll_str[GRPPOLL_RATE_STR_LEN];
431         struct ath_descdma sc_bdma;             /* beacon descriptors */
432 @@ -888,6 +895,8 @@
433  #define        ATH_TXBUF_LOCK_CHECK(_sc)
434  #endif
435  
436 +#define ATH_DISABLE_INTR               local_irq_disable
437 +#define ATH_ENABLE_INTR                local_irq_enable
438  
439  #define        ATH_RXBUF_LOCK_INIT(_sc)        spin_lock_init(&(_sc)->sc_rxbuflock)
440  #define        ATH_RXBUF_LOCK_DESTROY(_sc)
441 Index: madwifi-trunk-r3776/net80211/ieee80211_skb.c
442 ===================================================================
443 --- madwifi-trunk-r3776.orig/net80211/ieee80211_skb.c   2008-07-18 23:26:22.000000000 +0200
444 +++ madwifi-trunk-r3776/net80211/ieee80211_skb.c        2008-07-18 23:27:47.000000000 +0200
445 @@ -73,7 +73,7 @@
446  #undef dev_queue_xmit
447  #undef kfree_skb
448  #undef kfree_skb_fast
449 -#undef netif_rx
450 +#undef netif_receive_skb
451  #undef pskb_copy
452  #undef skb_clone
453  #undef skb_copy
454 @@ -581,8 +581,8 @@
455                 grp, vlan_tag);
456  }
457  
458 -int netif_rx_debug(struct sk_buff *skb, const char *func, int line) {
459 -       return netif_rx(untrack_skb(skb, 0, __func__, __LINE__));
460 +int netif_receive_skb_debug(struct sk_buff *skb, const char *func, int line) {
461 +       return netif_receive_skb(untrack_skb(skb, 0, __func__, __LINE__));
462  }
463  
464  struct sk_buff *alloc_skb_debug(unsigned int length, gfp_t gfp_mask,
465 @@ -707,7 +707,7 @@
466  }
467  
468  EXPORT_SYMBOL(vlan_hwaccel_rx_debug);
469 -EXPORT_SYMBOL(netif_rx_debug);
470 +EXPORT_SYMBOL(netif_receive_skb_debug);
471  EXPORT_SYMBOL(alloc_skb_debug);
472  EXPORT_SYMBOL(dev_alloc_skb_debug);
473  EXPORT_SYMBOL(skb_clone_debug);
474 Index: madwifi-trunk-r3776/net80211/ieee80211_skb.h
475 ===================================================================
476 --- madwifi-trunk-r3776.orig/net80211/ieee80211_skb.h   2008-07-18 23:26:22.000000000 +0200
477 +++ madwifi-trunk-r3776/net80211/ieee80211_skb.h        2008-07-18 23:27:47.000000000 +0200
478 @@ -115,7 +115,7 @@
479  
480  int vlan_hwaccel_rx_debug(struct sk_buff *skb, struct vlan_group *grp,
481                 unsigned short vlan_tag, const char *func, int line);
482 -int netif_rx_debug(struct sk_buff *skb, const char *func, int line);
483 +int netif_receive_skb_debug(struct sk_buff *skb, const char *func, int line);
484  struct sk_buff *alloc_skb_debug(unsigned int length, gfp_t gfp_mask,
485                 const char *func, int line);
486  struct sk_buff *dev_alloc_skb_debug(unsigned int length,
487 @@ -150,7 +150,7 @@
488  #undef dev_queue_xmit
489  #undef kfree_skb
490  #undef kfree_skb_fast
491 -#undef netif_rx
492 +#undef netif_receive_skb
493  #undef pskb_copy
494  #undef skb_clone
495  #undef skb_copy
496 @@ -167,8 +167,8 @@
497         skb_copy_expand_debug(_skb, _newheadroom, _newtailroom, _gfp_mask, __func__, __LINE__)
498  #define vlan_hwaccel_rx(_skb, _grp, _tag) \
499         vlan_hwaccel_rx_debug(_skb, _grp, _tag, __func__, __LINE__)
500 -#define netif_rx(_skb) \
501 -       netif_rx_debug(_skb, __func__, __LINE__)
502 +#define netif_receive_skb(_skb) \
503 +       netif_receive_skb_debug(_skb, __func__, __LINE__)
504  #define        alloc_skb(_length, _gfp_mask) \
505         alloc_skb_debug(_length, _gfp_mask, __func__, __LINE__)
506  #define        dev_alloc_skb(_length) \
507 Index: madwifi-trunk-r3776/net80211/ieee80211_input.c
508 ===================================================================
509 --- madwifi-trunk-r3776.orig/net80211/ieee80211_input.c 2008-07-18 23:26:22.000000000 +0200
510 +++ madwifi-trunk-r3776/net80211/ieee80211_input.c      2008-07-18 23:27:47.000000000 +0200
511 @@ -1185,7 +1185,7 @@
512                         ret = vlan_hwaccel_rx(skb,
513                                         vap->iv_vlgrp, ni->ni_vlan);
514                 else
515 -                       ret = netif_rx(skb);
516 +                       ret = netif_receive_skb(skb);
517                 if (ret == NET_RX_DROP)
518                         vap->iv_devstats.rx_dropped++;
519                 if (tni != NULL)
520 @@ -2285,7 +2285,7 @@
521  
522                 if (SKB_NI(skb1) != NULL)
523                         ieee80211_unref_node(&SKB_NI(skb1));
524 -               if (netif_rx(skb1) == NET_RX_DROP)
525 +               if (netif_receive_skb(skb1) == NET_RX_DROP)
526                         vap->iv_devstats.rx_dropped++;
527         }
528  }
529 Index: madwifi-trunk-r3776/net80211/ieee80211_monitor.c
530 ===================================================================
531 --- madwifi-trunk-r3776.orig/net80211/ieee80211_monitor.c       2008-07-18 23:26:22.000000000 +0200
532 +++ madwifi-trunk-r3776/net80211/ieee80211_monitor.c    2008-07-18 23:27:47.000000000 +0200
533 @@ -580,7 +580,7 @@
534  
535                         if (SKB_NI(skb1) != NULL)
536                                 ieee80211_unref_node(&SKB_NI(skb1));
537 -                       if (netif_rx(skb1) == NET_RX_DROP)
538 +                       if (netif_receive_skb(skb1) == NET_RX_DROP)
539                                 vap->iv_devstats.rx_dropped++;
540                         skb1 = NULL;
541                 }