huge madwifi update (work in progress, disabled by default, compiles but breaks at...
[openwrt.git] / package / madwifi / patches-r3776 / 300-napi_polling.patch
1 Index: madwifi-trunk-r3776/ath/if_ath.c
2 ===================================================================
3 --- madwifi-trunk-r3776.orig/ath/if_ath.c       2008-07-17 01:20:11.000000000 +0200
4 +++ madwifi-trunk-r3776/ath/if_ath.c    2008-07-17 01:46:37.000000000 +0200
5 @@ -182,7 +182,11 @@
6         struct sk_buff *, int, int, u_int64_t);
7  static void ath_setdefantenna(struct ath_softc *, u_int);
8  static struct ath_txq *ath_txq_setup(struct ath_softc *, int, int);
9 -static void ath_rx_tasklet(TQUEUE_ARG);
10 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
11 +static int ath_rx_poll(struct napi_struct *napi, int budget);
12 +#else
13 +static int ath_rx_poll(struct net_device *dev, int *budget);
14 +#endif
15  static int ath_hardstart(struct sk_buff *, struct net_device *);
16  static int ath_mgtstart(struct ieee80211com *, struct sk_buff *);
17  #ifdef ATH_SUPERG_COMP
18 @@ -331,6 +335,9 @@
19  static u_int32_t ath_set_clamped_maxtxpower(struct ath_softc *sc, 
20                 u_int32_t new_clamped_maxtxpower);
21  
22 +static void ath_poll_disable(struct net_device *dev);
23 +static void ath_poll_enable(struct net_device *dev);
24 +
25  static void ath_scanbufs(struct ath_softc *sc);
26  static int ath_debug_iwpriv(struct ieee80211com *ic, 
27                 unsigned int param, unsigned int value);
28 @@ -518,7 +525,6 @@
29  
30         atomic_set(&sc->sc_txbuf_counter, 0);
31  
32 -       ATH_INIT_TQUEUE(&sc->sc_rxtq,           ath_rx_tasklet,         dev);
33         ATH_INIT_TQUEUE(&sc->sc_txtq,           ath_tx_tasklet,         dev);
34         ATH_INIT_TQUEUE(&sc->sc_bmisstq,        ath_bmiss_tasklet,      dev);
35         ATH_INIT_TQUEUE(&sc->sc_bstucktq,       ath_bstuck_tasklet,     dev);
36 @@ -833,6 +839,12 @@
37         dev->set_mac_address = ath_set_mac_address;
38         dev->change_mtu = ath_change_mtu;
39         dev->tx_queue_len = ATH_TXBUF - ATH_TXBUF_MGT_RESERVED;
40 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
41 +       netif_napi_add(dev, &sc->sc_napi, ath_rx_poll, 64);
42 +#else
43 +       dev->poll = ath_rx_poll;
44 +       dev->weight = 64;
45 +#endif
46  #ifdef USE_HEADERLEN_RESV
47         dev->hard_header_len += sizeof(struct ieee80211_qosframe) +
48                                 sizeof(struct llc) +
49 @@ -1770,7 +1782,7 @@
50  }
51  
52  static void
53 -ath_intr_process_rx_descriptors(struct ath_softc *sc, int *pneedmark, u_int64_t hw_tsf)
54 +ath_intr_process_rx_descriptors(struct ath_softc *sc, int *pneedmark, u_int64_t hw_tsf, int schedule)
55  {
56         struct ath_hal *ah = sc->sc_ah;
57         struct ath_desc *ds;
58 @@ -2252,8 +2264,25 @@
59         }
60  
61         /* If we got something to process, schedule rx queue to handle it */
62 -       if (count)
63 -               ATH_SCHEDULE_TQUEUE(&sc->sc_rxtq, pneedmark);
64 +       if (count) {
65 +               sc->sc_isr &= ~HAL_INT_RX;
66 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
67 +               if (netif_rx_schedule_prep(sc->sc_dev, &sc->sc_napi))
68 +#else
69 +               if (netif_rx_schedule_prep(sc->sc_dev))
70 +#endif
71 +               {
72 +#ifndef ATH_PRECISE_TSF
73 +                       sc->sc_imask &= ~HAL_INT_RX;
74 +                       ath_hal_intrset(ah, sc->sc_imask);
75 +#endif
76 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
77 +                       __netif_rx_schedule(sc->sc_dev, &sc->sc_napi);
78 +#else
79 +                       __netif_rx_schedule(sc->sc_dev);
80 +#endif
81 +               }
82 +       }
83         ATH_RXBUF_UNLOCK_IRQ(sc);
84  #undef PA2DESC
85  }
86 @@ -2343,6 +2372,7 @@
87                 (status & HAL_INT_GLOBAL)       ? " HAL_INT_GLOBAL"     : ""
88                 );
89  
90 +       sc->sc_isr = status;
91         status &= sc->sc_imask;                 /* discard unasked for bits */
92         /* As soon as we know we have a real interrupt we intend to service, 
93          * we will check to see if we need an initial hardware TSF reading. 
94 @@ -2400,7 +2430,7 @@
95                 }
96                 if (status & (HAL_INT_RX | HAL_INT_RXPHY)) {
97                         /* NB: Will schedule rx tasklet if necessary. */
98 -                       ath_intr_process_rx_descriptors(sc, &needmark, hw_tsf);
99 +                       ath_intr_process_rx_descriptors(sc, &needmark, hw_tsf, 1);
100                 }
101                 if (status & HAL_INT_TX) {
102  #ifdef ATH_SUPERG_DYNTURBO
103 @@ -2426,6 +2456,11 @@
104                                 }
105                         }
106  #endif
107 +                       /* disable transmit interrupt */
108 +                       sc->sc_isr &= ~HAL_INT_TX;
109 +                       ath_hal_intrset(ah, sc->sc_imask & ~HAL_INT_TX);
110 +                       sc->sc_imask &= ~HAL_INT_TX;
111 +
112                         ATH_SCHEDULE_TQUEUE(&sc->sc_txtq, &needmark);
113                 }
114                 if (status & HAL_INT_BMISS) {
115 @@ -2617,6 +2652,7 @@
116         if (sc->sc_tx99 != NULL)
117                 sc->sc_tx99->start(sc->sc_tx99);
118  #endif
119 +       ath_poll_enable(dev);
120  
121  done:
122         ATH_UNLOCK(sc);
123 @@ -2657,6 +2693,9 @@
124                 if (sc->sc_tx99 != NULL)
125                         sc->sc_tx99->stop(sc->sc_tx99);
126  #endif
127 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
128 +               ath_poll_disable(dev);
129 +#endif
130                 netif_stop_queue(dev);  /* XXX re-enabled by ath_newstate */
131                 dev->flags &= ~IFF_RUNNING;     /* NB: avoid recursion */
132                 ieee80211_stop_running(ic);     /* stop all VAPs */
133 @@ -4109,6 +4148,39 @@
134         return ath_keyset(sc, k, mac, vap->iv_bss);
135  }
136  
137 +static void ath_poll_disable(struct net_device *dev)
138 +{
139 +       struct ath_softc *sc = dev->priv;
140 +
141 +       /*
142 +        * XXX Using in_softirq is not right since we might
143 +        * be called from other soft irq contexts than
144 +        * ath_rx_poll
145 +        */
146 +       if (!in_softirq()) {
147 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
148 +               napi_disable(&sc->sc_napi);
149 +#else
150 +               netif_poll_disable(dev);
151 +#endif
152 +       }
153 +}
154 +
155 +static void ath_poll_enable(struct net_device *dev)
156 +{
157 +       struct ath_softc *sc = dev->priv;
158 +
159 +       /* NB: see above */
160 +       if (!in_softirq()) {
161 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
162 +               napi_enable(&sc->sc_napi);
163 +#else
164 +               netif_poll_enable(dev);
165 +#endif
166 +       }
167 +}
168 +
169 +
170  /*
171   * Block/unblock tx+rx processing while a key change is done.
172   * We assume the caller serializes key management operations
173 @@ -4119,33 +4191,24 @@
174  ath_key_update_begin(struct ieee80211vap *vap)
175  {
176         struct net_device *dev = vap->iv_ic->ic_dev;
177 -       struct ath_softc *sc = dev->priv;
178 
179 -       DPRINTF(sc, ATH_DEBUG_KEYCACHE, "Begin\n");
180         /*
181          * When called from the rx tasklet we cannot use
182          * tasklet_disable because it will block waiting
183          * for us to complete execution.
184 -        *
185 -        * XXX Using in_softirq is not right since we might
186 -        * be called from other soft irq contexts than
187 -        * ath_rx_tasklet.
188          */
189 -       if (!in_softirq())
190 -               tasklet_disable(&sc->sc_rxtq);
191 -       netif_stop_queue(dev);
192 +       if ((dev->flags & (IFF_UP|IFF_RUNNING)) == (IFF_UP|IFF_RUNNING))
193 +               netif_stop_queue(dev);
194  }
195  
196  static void
197  ath_key_update_end(struct ieee80211vap *vap)
198  {
199         struct net_device *dev = vap->iv_ic->ic_dev;
200 -       struct ath_softc *sc = dev->priv;
201 
202 -       DPRINTF(sc, ATH_DEBUG_KEYCACHE, "End\n");
203 -       netif_wake_queue(dev);
204 -       if (!in_softirq())              /* NB: see above */
205 -               tasklet_enable(&sc->sc_rxtq);
206 +
207 +       if ((dev->flags & (IFF_UP|IFF_RUNNING)) == (IFF_UP|IFF_RUNNING))
208 +               netif_wake_queue(dev);
209  }
210  
211  /*
212 @@ -6405,15 +6470,25 @@
213         sc->sc_numrxotherant = 0;
214  }
215  
216 -static void
217 -ath_rx_tasklet(TQUEUE_ARG data)
218 +static int
219 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
220 +ath_rx_poll(struct napi_struct *napi, int budget)
221 +#else
222 +ath_rx_poll(struct net_device *dev, int *budget)
223 +#endif
224  {
225  #define        PA2DESC(_sc, _pa) \
226         ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
227                 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
228 -       struct net_device *dev = (struct net_device *)data;
229 -       struct ath_buf *bf;
230 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
231 +       struct ath_softc *sc = container_of(napi, struct ath_softc, sc_napi);
232 +       struct net_device *dev = sc->sc_dev;
233 +       u_int rx_limit = budget;
234 +#else
235         struct ath_softc *sc = dev->priv;
236 +       u_int rx_limit = min(dev->quota, *budget);
237 +#endif
238 +       struct ath_buf *bf;
239         struct ieee80211com *ic = &sc->sc_ic;
240         struct ath_hal *ah = sc ? sc->sc_ah : NULL;
241         struct ath_desc *ds;
242 @@ -6421,6 +6496,7 @@
243         struct ieee80211_node *ni;
244         struct sk_buff *skb = NULL;
245         unsigned int len, phyerr, mic_fail = 0;
246 +       unsigned int early_stop = 0;
247         int type = -1; /* undefined */
248         int init_ret = 0;
249         int bf_processed = 0;
250 @@ -6428,6 +6504,7 @@
251         int errors       = 0;
252  
253         DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s started...\n", __func__);
254 +process_rx_again:
255         do {
256                 /* Get next RX buffer pending processing by RX tasklet...
257                  *  
258 @@ -6457,6 +6534,10 @@
259                         break;
260  
261                 bf_processed++;
262 +               if (rx_limit-- < 2) {
263 +                       early_stop = 1;
264 +                       break;
265 +               }
266                 ds  = bf->bf_desc;
267  
268  #ifdef AR_DEBUG
269 @@ -6491,6 +6572,7 @@
270                                 sc->sc_stats.ast_rx_phyerr++;
271                                 phyerr = rs->rs_phyerr & 0x1f;
272                                 sc->sc_stats.ast_rx_phy[phyerr]++;
273 +                               goto rx_next;
274                         }
275                         if (rs->rs_status & HAL_RXERR_DECRYPT) {
276                                 /* Decrypt error.  If the error occurred
277 @@ -6689,6 +6771,33 @@
278                 STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
279                 ATH_RXBUF_UNLOCK_IRQ(sc);
280         } while (1);
281 +       if (!early_stop) {
282 +               unsigned long flags;
283 +               /* Check if more data is received while we were
284 +                * processing the descriptor chain.
285 +                */
286 +#ifndef ATH_PRECISE_TSF
287 +               local_irq_save(flags);
288 +               if (sc->sc_isr & HAL_INT_RX) {
289 +                       u_int64_t hw_tsf = ath_hal_gettsf64(ah);
290 +                       sc->sc_isr &= ~HAL_INT_RX;
291 +                       local_irq_restore(flags);
292 +                       ath_intr_process_rx_descriptors(sc, NULL, hw_tsf, 0);
293 +                       goto process_rx_again;
294 +               }
295 +               sc->sc_imask |= HAL_INT_RX;
296 +               ath_hal_intrset(ah, sc->sc_imask);
297 +               local_irq_restore(flags);
298 +#endif
299 +
300 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
301 +               netif_rx_complete(dev, napi);
302 +#else
303 +               netif_rx_complete(dev);
304 +               *budget -= bf_processed;
305 +               dev->quota -= bf_processed;
306 +#endif
307 +       }
308  
309         if (sc->sc_useintmit) 
310                 ath_hal_rxmonitor(ah, &sc->sc_halstats, &sc->sc_curchan);
311 @@ -6701,6 +6810,12 @@
312                 " %d rx buf processed. %d were errors. %d skb accepted.\n",
313                 __func__, bf_processed, errors, skb_accepted);
314  #undef PA2DESC
315 +
316 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
317 +       return bf_processed;
318 +#else
319 +       return early_stop;
320 +#endif
321  }
322  
323  #ifdef ATH_SUPERG_XR
324 @@ -8306,12 +8421,24 @@
325  {
326         struct net_device *dev = (struct net_device *)data;
327         struct ath_softc *sc = dev->priv;
328 +       unsigned long flags;
329  
330 +process_tx_again:
331         if (txqactive(sc->sc_ah, 0))
332                 ath_tx_processq(sc, &sc->sc_txq[0]);
333         if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
334                 ath_tx_processq(sc, sc->sc_cabq);
335  
336 +       local_irq_save(flags);
337 +       if (sc->sc_isr & HAL_INT_TX) {
338 +               sc->sc_isr &= ~HAL_INT_TX;
339 +               local_irq_restore(flags);
340 +               goto process_tx_again;
341 +       }
342 +       sc->sc_imask |= HAL_INT_TX;
343 +       ath_hal_intrset(sc->sc_ah, sc->sc_imask);
344 +       local_irq_restore(flags);
345 +
346         netif_wake_queue(dev);
347  
348         if (sc->sc_softled)
349 @@ -8327,7 +8454,9 @@
350  {
351         struct net_device *dev = (struct net_device *)data;
352         struct ath_softc *sc = dev->priv;
353 +       unsigned long flags;
354  
355 +process_tx_again:
356         /*
357          * Process each active queue.
358          */
359 @@ -8357,6 +8486,16 @@
360         if (sc->sc_uapsdq && txqactive(sc->sc_ah, sc->sc_uapsdq->axq_qnum))
361                 ath_tx_processq(sc, sc->sc_uapsdq);
362  
363 +       local_irq_save(flags);
364 +       if (sc->sc_isr & HAL_INT_TX) {
365 +               sc->sc_isr &= ~HAL_INT_TX;
366 +               local_irq_restore(flags);
367 +               goto process_tx_again;
368 +       }
369 +       sc->sc_imask |= HAL_INT_TX;
370 +       ath_hal_intrset(sc->sc_ah, sc->sc_imask);
371 +       local_irq_restore(flags);
372 +
373         netif_wake_queue(dev);
374  
375         if (sc->sc_softled)
376 @@ -10322,9 +10461,9 @@
377         dev->mtu = mtu;
378         if ((dev->flags & IFF_RUNNING) && !sc->sc_invalid) {
379                 /* NB: the rx buffers may need to be reallocated */
380 -               tasklet_disable(&sc->sc_rxtq);
381 +               ath_poll_disable(dev);
382                 error = ath_reset(dev);
383 -               tasklet_enable(&sc->sc_rxtq);
384 +               ath_poll_enable(dev);
385         }
386         ATH_UNLOCK(sc);
387  
388 Index: madwifi-trunk-r3776/ath/if_athvar.h
389 ===================================================================
390 --- madwifi-trunk-r3776.orig/ath/if_athvar.h    2008-07-17 00:52:28.000000000 +0200
391 +++ madwifi-trunk-r3776/ath/if_athvar.h 2008-07-17 01:27:21.000000000 +0200
392 @@ -56,6 +56,10 @@
393  # include      <asm/bitops.h>
394  #endif
395  
396 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
397 +#define irqs_disabled()                        0
398 +#endif
399 +
400  /*
401   * Deduce if tasklets are available.  If not then
402   * fall back to using the immediate work queue.
403 @@ -644,6 +648,9 @@
404  struct ath_softc {
405         struct ieee80211com sc_ic;              /* NB: must be first */
406         struct net_device *sc_dev;
407 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
408 +       struct napi_struct sc_napi;
409 +#endif
410         void __iomem *sc_iobase;                /* address of the device */
411         struct semaphore sc_lock;               /* dev-level lock */
412         struct net_device_stats sc_devstats;    /* device statistics */
413 @@ -756,7 +763,6 @@
414         struct ath_buf *sc_rxbufcur;            /* current rx buffer */
415         u_int32_t *sc_rxlink;                   /* link ptr in last RX desc */
416         spinlock_t sc_rxbuflock;
417 -       struct ATH_TQ_STRUCT sc_rxtq;           /* rx intr tasklet */
418         struct ATH_TQ_STRUCT sc_rxorntq;        /* rxorn intr tasklet */
419         u_int16_t sc_cachelsz;                  /* cache line size */
420  
421 @@ -769,6 +775,7 @@
422         u_int sc_txintrperiod;                  /* tx interrupt batching */
423         struct ath_txq sc_txq[HAL_NUM_TX_QUEUES];
424         struct ath_txq *sc_ac2q[WME_NUM_AC];    /* WME AC -> h/w qnum */
425 +       HAL_INT sc_isr;                         /* unmasked ISR state */
426         struct ATH_TQ_STRUCT sc_txtq;           /* tx intr tasklet */
427         u_int8_t sc_grppoll_str[GRPPOLL_RATE_STR_LEN];
428         struct ath_descdma sc_bdma;             /* beacon descriptors */
429 @@ -888,6 +895,8 @@
430  #define        ATH_TXBUF_LOCK_CHECK(_sc)
431  #endif
432  
433 +#define ATH_DISABLE_INTR               local_irq_disable
434 +#define ATH_ENABLE_INTR                local_irq_enable
435  
436  #define        ATH_RXBUF_LOCK_INIT(_sc)        spin_lock_init(&(_sc)->sc_rxbuflock)
437  #define        ATH_RXBUF_LOCK_DESTROY(_sc)
438 Index: madwifi-trunk-r3776/net80211/ieee80211_skb.c
439 ===================================================================
440 --- madwifi-trunk-r3776.orig/net80211/ieee80211_skb.c   2008-07-17 00:21:29.000000000 +0200
441 +++ madwifi-trunk-r3776/net80211/ieee80211_skb.c        2008-07-17 01:42:17.000000000 +0200
442 @@ -73,7 +73,7 @@
443  #undef dev_queue_xmit
444  #undef kfree_skb
445  #undef kfree_skb_fast
446 -#undef netif_rx
447 +#undef netif_receive_skb
448  #undef pskb_copy
449  #undef skb_clone
450  #undef skb_copy
451 @@ -581,8 +581,8 @@
452                 grp, vlan_tag);
453  }
454  
455 -int netif_rx_debug(struct sk_buff *skb, const char *func, int line) {
456 -       return netif_rx(untrack_skb(skb, 0, __func__, __LINE__));
457 +int netif_receive_skb_debug(struct sk_buff *skb, const char *func, int line) {
458 +       return netif_receive_skb(untrack_skb(skb, 0, __func__, __LINE__));
459  }
460  
461  struct sk_buff *alloc_skb_debug(unsigned int length, gfp_t gfp_mask,
462 @@ -707,7 +707,7 @@
463  }
464  
465  EXPORT_SYMBOL(vlan_hwaccel_rx_debug);
466 -EXPORT_SYMBOL(netif_rx_debug);
467 +EXPORT_SYMBOL(netif_receive_skb_debug);
468  EXPORT_SYMBOL(alloc_skb_debug);
469  EXPORT_SYMBOL(dev_alloc_skb_debug);
470  EXPORT_SYMBOL(skb_clone_debug);
471 Index: madwifi-trunk-r3776/net80211/ieee80211_skb.h
472 ===================================================================
473 --- madwifi-trunk-r3776.orig/net80211/ieee80211_skb.h   2008-07-17 00:21:29.000000000 +0200
474 +++ madwifi-trunk-r3776/net80211/ieee80211_skb.h        2008-07-17 01:42:43.000000000 +0200
475 @@ -115,7 +115,7 @@
476  
477  int vlan_hwaccel_rx_debug(struct sk_buff *skb, struct vlan_group *grp,
478                 unsigned short vlan_tag, const char *func, int line);
479 -int netif_rx_debug(struct sk_buff *skb, const char *func, int line);
480 +int netif_receive_skb_debug(struct sk_buff *skb, const char *func, int line);
481  struct sk_buff *alloc_skb_debug(unsigned int length, gfp_t gfp_mask,
482                 const char *func, int line);
483  struct sk_buff *dev_alloc_skb_debug(unsigned int length,
484 @@ -150,7 +150,7 @@
485  #undef dev_queue_xmit
486  #undef kfree_skb
487  #undef kfree_skb_fast
488 -#undef netif_rx
489 +#undef netif_receive_skb
490  #undef pskb_copy
491  #undef skb_clone
492  #undef skb_copy
493 @@ -167,8 +167,8 @@
494         skb_copy_expand_debug(_skb, _newheadroom, _newtailroom, _gfp_mask, __func__, __LINE__)
495  #define vlan_hwaccel_rx(_skb, _grp, _tag) \
496         vlan_hwaccel_rx_debug(_skb, _grp, _tag, __func__, __LINE__)
497 -#define netif_rx(_skb) \
498 -       netif_rx_debug(_skb, __func__, __LINE__)
499 +#define netif_receive_skb(_skb) \
500 +       netif_receive_skb_debug(_skb, __func__, __LINE__)
501  #define        alloc_skb(_length, _gfp_mask) \
502         alloc_skb_debug(_length, _gfp_mask, __func__, __LINE__)
503  #define        dev_alloc_skb(_length) \
504 Index: madwifi-trunk-r3776/net80211/ieee80211_input.c
505 ===================================================================
506 --- madwifi-trunk-r3776.orig/net80211/ieee80211_input.c 2008-07-17 00:21:29.000000000 +0200
507 +++ madwifi-trunk-r3776/net80211/ieee80211_input.c      2008-07-17 01:41:16.000000000 +0200
508 @@ -1185,7 +1185,7 @@
509                         ret = vlan_hwaccel_rx(skb,
510                                         vap->iv_vlgrp, ni->ni_vlan);
511                 else
512 -                       ret = netif_rx(skb);
513 +                       ret = netif_receive_skb(skb);
514                 if (ret == NET_RX_DROP)
515                         vap->iv_devstats.rx_dropped++;
516                 if (tni != NULL)
517 @@ -2285,7 +2285,7 @@
518  
519                 if (SKB_NI(skb1) != NULL)
520                         ieee80211_unref_node(&SKB_NI(skb1));
521 -               if (netif_rx(skb1) == NET_RX_DROP)
522 +               if (netif_receive_skb(skb1) == NET_RX_DROP)
523                         vap->iv_devstats.rx_dropped++;
524         }
525  }
526 Index: madwifi-trunk-r3776/net80211/ieee80211_monitor.c
527 ===================================================================
528 --- madwifi-trunk-r3776.orig/net80211/ieee80211_monitor.c       2008-07-17 00:21:29.000000000 +0200
529 +++ madwifi-trunk-r3776/net80211/ieee80211_monitor.c    2008-07-17 01:41:51.000000000 +0200
530 @@ -580,7 +580,7 @@
531  
532                         if (SKB_NI(skb1) != NULL)
533                                 ieee80211_unref_node(&SKB_NI(skb1));
534 -                       if (netif_rx(skb1) == NET_RX_DROP)
535 +                       if (netif_receive_skb(skb1) == NET_RX_DROP)
536                                 vap->iv_devstats.rx_dropped++;
537                         skb1 = NULL;
538                 }