author:    nbd <nbd@3c298f89-4303-0410-b956-a3cf2f4a3e73>  2013-08-06 10:31:10 +0000
committer: nbd <nbd@3c298f89-4303-0410-b956-a3cf2f4a3e73>  2013-08-06 10:31:10 +0000
commit:    25b23d67d93da4fc739c3d44bcb33520d5f32351 (patch)
tree:      9c7954c29fc2f3e5bb461f4ee05fc60e127895f0 /package/kernel/mac80211/patches/300-pending_work.patch
parent:    9a1c4c883a346aa6f777c921932fa8182c7bc78c (diff)
ath9k: fold the aggregation rework into 300-pending_work.patch (tracked as upstream submission)
Signed-off-by: Felix Fietkau <nbd@openwrt.org>
git-svn-id: svn://svn.openwrt.org/openwrt/trunk@37716 3c298f89-4303-0410-b956-a3cf2f4a3e73
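
The aggregation rework folded in below splits each TID's software queue in two: frames that have already been on the air once wait on a separate retry_q, which is always drained ahead of buf_q. The sketch below illustrates that queueing model only; the type and helper names are simplified stand-ins for struct ath_atx_tid and the ath_tid_* helpers added by the patch, not the driver code itself.

```c
#include <linux/skbuff.h>
#include <linux/types.h>

/* Stand-in for the relevant part of struct ath_atx_tid (illustrative only). */
struct tid_sketch {
	struct sk_buff_head buf_q;    /* frames queued by mac80211, not yet sent */
	struct sk_buff_head retry_q;  /* frames waiting for retransmission */
};

/* True if the TID still has anything to schedule on either queue. */
static bool tid_has_buffered(struct tid_sketch *tid)
{
	return !skb_queue_empty(&tid->buf_q) ||
	       !skb_queue_empty(&tid->retry_q);
}

/* Retransmissions are dequeued before new frames. */
static struct sk_buff *tid_dequeue(struct tid_sketch *tid)
{
	struct sk_buff *skb;

	skb = __skb_dequeue(&tid->retry_q);
	if (!skb)
		skb = __skb_dequeue(&tid->buf_q);

	return skb;
}
```

Draining retransmissions first is what lets the BlockAck window advance instead of stalling behind unacknowledged subframes.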
Diffstat (limited to 'package/kernel/mac80211/patches/300-pending_work.patch')
-rw-r--r-- | package/kernel/mac80211/patches/300-pending_work.patch | 988 |
1 file changed, 970 insertions, 18 deletions
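
Several hunks in the diff below replace open-coded QoS-control parsing with a single TID lookup helper (ath_get_skb_tid). The lookup boils down to the mac80211 header accessors shown here; skb_to_tidno is a hypothetical stand-alone name used for illustration — the patch's helper additionally maps the number to the node's struct ath_atx_tid.

```c
#include <linux/ieee80211.h>
#include <linux/skbuff.h>

/* Extract the QoS TID a data frame belongs to; non-QoS frames map to TID 0. */
static u8 skb_to_tidno(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	u8 tidno = 0;

	if (ieee80211_is_data_qos(hdr->frame_control))
		tidno = ieee80211_get_qos_ctl(hdr)[0];

	return tidno & IEEE80211_QOS_CTL_TID_MASK;
}
```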
diff --git a/package/kernel/mac80211/patches/300-pending_work.patch b/package/kernel/mac80211/patches/300-pending_work.patch index 0e5ceb8b92..55449db40f 100644 --- a/package/kernel/mac80211/patches/300-pending_work.patch +++ b/package/kernel/mac80211/patches/300-pending_work.patch @@ -278,7 +278,17 @@ WLAN_STA_BLOCK_BA, --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c -@@ -146,6 +146,28 @@ static void ath_set_rates(struct ieee802 +@@ -135,6 +135,9 @@ static struct ath_frame_info *get_frame_ + + static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno) + { ++ if (!tid->an->sta) ++ return; ++ + ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno, + seqno << IEEE80211_SEQ_SEQ_SHIFT); + } +@@ -146,6 +149,93 @@ static void ath_set_rates(struct ieee802 ARRAY_SIZE(bf->rates)); } @@ -304,34 +314,780 @@ + } +} + ++static struct ath_atx_tid * ++ath_get_skb_tid(struct ath_softc *sc, struct ath_node *an, struct sk_buff *skb) ++{ ++ struct ieee80211_hdr *hdr; ++ u8 tidno = 0; ++ ++ hdr = (struct ieee80211_hdr *) skb->data; ++ if (ieee80211_is_data_qos(hdr->frame_control)) ++ tidno = ieee80211_get_qos_ctl(hdr)[0]; ++ ++ tidno &= IEEE80211_QOS_CTL_TID_MASK; ++ return ATH_AN_2_TID(an, tidno); ++} ++ ++static bool ath_tid_has_buffered(struct ath_atx_tid *tid) ++{ ++ return !skb_queue_empty(&tid->buf_q) || !skb_queue_empty(&tid->retry_q); ++} ++ ++static struct sk_buff *ath_tid_dequeue(struct ath_atx_tid *tid) ++{ ++ struct sk_buff *skb; ++ ++ skb = __skb_dequeue(&tid->retry_q); ++ if (!skb) ++ skb = __skb_dequeue(&tid->buf_q); ++ ++ return skb; ++} ++ ++/* ++ * ath_tx_tid_change_state: ++ * - clears a-mpdu flag of previous session ++ * - force sequence number allocation to fix next BlockAck Window ++ */ ++static void ++ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid) ++{ ++ struct ath_txq *txq = tid->ac->txq; ++ struct ieee80211_tx_info *tx_info; ++ struct sk_buff *skb, *tskb; ++ struct ath_buf *bf; ++ struct ath_frame_info *fi; ++ ++ skb_queue_walk_safe(&tid->buf_q, skb, tskb) { ++ fi = get_frame_info(skb); ++ bf = fi->bf; ++ ++ tx_info = IEEE80211_SKB_CB(skb); ++ tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU; ++ ++ if (bf) ++ continue; ++ ++ bf = ath_tx_setup_buffer(sc, txq, tid, skb); ++ if (!bf) { ++ __skb_unlink(skb, &tid->buf_q); ++ ath_txq_skb_done(sc, txq, skb); ++ ieee80211_free_txskb(sc->hw, skb); ++ continue; ++ } ++ } ++ ++} ++ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) { struct ath_txq *txq = tid->ac->txq; -@@ -167,6 +189,7 @@ static void ath_tx_flush_tid(struct ath_ +@@ -160,27 +250,22 @@ static void ath_tx_flush_tid(struct ath_ + + memset(&ts, 0, sizeof(ts)); + +- while ((skb = __skb_dequeue(&tid->buf_q))) { ++ while ((skb = __skb_dequeue(&tid->retry_q))) { + fi = get_frame_info(skb); + bf = fi->bf; +- if (!bf) { +- bf = ath_tx_setup_buffer(sc, txq, tid, skb); +- if (!bf) { +- ieee80211_free_txskb(sc->hw, skb); +- continue; +- } ++ ath_txq_skb_done(sc, txq, skb); ++ ieee80211_free_txskb(sc->hw, skb); ++ continue; + } + +- if (fi->retries) { +- list_add_tail(&bf->list, &bf_head); ++ if (fi->baw_tracked) { + ath_tx_update_baw(sc, tid, bf->bf_state.seqno); +- ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); + sendbar = true; +- } else { +- ath_set_rates(tid->an->vif, tid->an->sta, bf); +- ath_tx_send_normal(sc, txq, NULL, skb); + } ++ ++ list_add_tail(&bf->list, &bf_head); ++ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); + } + + if (sendbar) { +@@ -209,13 +294,16 @@ static void 
ath_tx_update_baw(struct ath + } + + static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid, +- u16 seqno) ++ struct ath_buf *bf) + { ++ struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu); ++ u16 seqno = bf->bf_state.seqno; + int index, cindex; + + index = ATH_BA_INDEX(tid->seq_start, seqno); + cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); + __set_bit(cindex, tid->tx_buf); ++ fi->baw_tracked = 1; + + if (index >= ((tid->baw_tail - tid->baw_head) & + (ATH_TID_MAX_BUFS - 1))) { +@@ -243,7 +331,7 @@ static void ath_tid_drain(struct ath_sof + memset(&ts, 0, sizeof(ts)); + INIT_LIST_HEAD(&bf_head); + +- while ((skb = __skb_dequeue(&tid->buf_q))) { ++ while ((skb = ath_tid_dequeue(tid))) { + fi = get_frame_info(skb); + bf = fi->bf; + +@@ -380,7 +468,6 @@ static void ath_tx_complete_aggr(struct + struct ieee80211_tx_rate rates[4]; + struct ath_frame_info *fi; + int nframes; +- u8 tidno; + bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH); + int i, retries; + int bar_index = -1; +@@ -417,8 +504,7 @@ static void ath_tx_complete_aggr(struct + } + + an = (struct ath_node *)sta->drv_priv; +- tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK; +- tid = ATH_AN_2_TID(an, tidno); ++ tid = ath_get_skb_tid(sc, an, skb); + seq_first = tid->seq_start; + isba = ts->ts_flags & ATH9K_TX_BA; + +@@ -430,7 +516,7 @@ static void ath_tx_complete_aggr(struct + * Only BlockAcks have a TID and therefore normal Acks cannot be + * checked + */ +- if (isba && tidno != ts->tid) ++ if (isba && tid->tidno != ts->tid) + txok = false; + + isaggr = bf_isaggr(bf); +@@ -466,7 +552,8 @@ static void ath_tx_complete_aggr(struct + tx_info = IEEE80211_SKB_CB(skb); + fi = get_frame_info(skb); + +- if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) { ++ if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno) || ++ !tid->active) { + /* + * Outside of the current BlockAck window, + * maybe part of a previous session +@@ -560,7 +647,7 @@ static void ath_tx_complete_aggr(struct + if (an->sleeping) + ieee80211_sta_set_buffered(sta, tid->tidno, true); + +- skb_queue_splice(&bf_pending, &tid->buf_q); ++ skb_queue_splice_tail(&bf_pending, &tid->retry_q); + if (!an->sleeping) { + ath_tx_queue_tid(txq, tid); + +@@ -618,7 +705,7 @@ static void ath_tx_process_buffer(struct + } else + ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok); + +- if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && !flush) ++ if (!flush) + ath_txq_schedule(sc, txq); + } + +@@ -792,15 +879,20 @@ static int ath_compute_num_delims(struct + + static struct ath_buf * + ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq, +- struct ath_atx_tid *tid) ++ struct ath_atx_tid *tid, struct sk_buff_head **q) + { ++ struct ieee80211_tx_info *tx_info; + struct ath_frame_info *fi; + struct sk_buff *skb; + struct ath_buf *bf; + u16 seqno; + + while (1) { +- skb = skb_peek(&tid->buf_q); ++ *q = &tid->retry_q; ++ if (skb_queue_empty(*q)) ++ *q = &tid->buf_q; ++ ++ skb = skb_peek(*q); + if (!skb) + break; + +@@ -810,11 +902,22 @@ ath_tx_get_tid_subframe(struct ath_softc bf = ath_tx_setup_buffer(sc, txq, tid, skb); - if (!bf) { -+ ath_txq_skb_done(sc, txq, skb); - ieee80211_free_txskb(sc->hw, skb); - continue; - } -@@ -811,6 +834,7 @@ ath_tx_get_tid_subframe(struct ath_softc if (!bf) { - __skb_unlink(skb, &tid->buf_q); +- __skb_unlink(skb, &tid->buf_q); ++ __skb_unlink(skb, *q); + ath_txq_skb_done(sc, txq, skb); ieee80211_free_txskb(sc->hw, skb); continue; } -@@ -1824,6 +1848,7 @@ static void ath_tx_send_ampdu(struct ath - bf 
= ath_tx_setup_buffer(sc, txq, tid, skb); - if (!bf) { -+ ath_txq_skb_done(sc, txq, skb); - ieee80211_free_txskb(sc->hw, skb); ++ bf->bf_next = NULL; ++ bf->bf_lastbf = bf; ++ ++ tx_info = IEEE80211_SKB_CB(skb); ++ tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT; ++ if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) { ++ bf->bf_state.bf_type = 0; ++ return bf; ++ } ++ + bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR; + seqno = bf->bf_state.seqno; + +@@ -828,73 +931,52 @@ ath_tx_get_tid_subframe(struct ath_softc + + INIT_LIST_HEAD(&bf_head); + list_add(&bf->list, &bf_head); +- __skb_unlink(skb, &tid->buf_q); ++ __skb_unlink(skb, *q); + ath_tx_update_baw(sc, tid, seqno); + ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); + continue; + } + +- bf->bf_next = NULL; +- bf->bf_lastbf = bf; + return bf; + } + + return NULL; + } + +-static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, +- struct ath_txq *txq, +- struct ath_atx_tid *tid, +- struct list_head *bf_q, +- int *aggr_len) ++static bool ++ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq, ++ struct ath_atx_tid *tid, struct list_head *bf_q, ++ struct ath_buf *bf_first, struct sk_buff_head *tid_q, ++ int *aggr_len) + { + #define PADBYTES(_len) ((4 - ((_len) % 4)) % 4) +- struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL; +- int rl = 0, nframes = 0, ndelim, prev_al = 0; ++ struct ath_buf *bf = bf_first, *bf_prev = NULL; ++ int nframes = 0, ndelim; + u16 aggr_limit = 0, al = 0, bpad = 0, +- al_delta, h_baw = tid->baw_size / 2; +- enum ATH_AGGR_STATUS status = ATH_AGGR_DONE; ++ al_delta, h_baw = tid->baw_size / 2; + struct ieee80211_tx_info *tx_info; + struct ath_frame_info *fi; + struct sk_buff *skb; ++ bool closed = false; + +- do { +- bf = ath_tx_get_tid_subframe(sc, txq, tid); +- if (!bf) { +- status = ATH_AGGR_BAW_CLOSED; +- break; +- } ++ bf = bf_first; ++ aggr_limit = ath_lookup_rate(sc, bf, tid); + ++ do { + skb = bf->bf_mpdu; + fi = get_frame_info(skb); + +- if (!bf_first) +- bf_first = bf; +- +- if (!rl) { +- ath_set_rates(tid->an->vif, tid->an->sta, bf); +- aggr_limit = ath_lookup_rate(sc, bf, tid); +- rl = 1; +- } +- + /* do not exceed aggregation limit */ + al_delta = ATH_AGGR_DELIM_SZ + fi->framelen; ++ if (nframes) { ++ if (aggr_limit < al + bpad + al_delta || ++ ath_lookup_legacy(bf) || nframes >= h_baw) ++ break; + +- if (nframes && +- ((aggr_limit < (al + bpad + al_delta + prev_al)) || +- ath_lookup_legacy(bf))) { +- status = ATH_AGGR_LIMITED; +- break; +- } +- +- tx_info = IEEE80211_SKB_CB(bf->bf_mpdu); +- if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) +- break; +- +- /* do not exceed subframe limit */ +- if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) { +- status = ATH_AGGR_LIMITED; +- break; ++ tx_info = IEEE80211_SKB_CB(bf->bf_mpdu); ++ if ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) || ++ !(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) ++ break; + } + + /* add padding for previous frame to aggregation length */ +@@ -912,22 +994,37 @@ static enum ATH_AGGR_STATUS ath_tx_form_ + bf->bf_next = NULL; + + /* link buffers of this frame to the aggregate */ +- if (!fi->retries) +- ath_tx_addto_baw(sc, tid, bf->bf_state.seqno); ++ if (!fi->baw_tracked) ++ ath_tx_addto_baw(sc, tid, bf); + bf->bf_state.ndelim = ndelim; + +- __skb_unlink(skb, &tid->buf_q); ++ __skb_unlink(skb, tid_q); + list_add_tail(&bf->list, bf_q); + if (bf_prev) + bf_prev->bf_next = bf; + + bf_prev = bf; + +- } while (!skb_queue_empty(&tid->buf_q)); ++ bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q); 
++ if (!bf) { ++ closed = true; ++ break; ++ } ++ } while (ath_tid_has_buffered(tid)); ++ ++ bf = bf_first; ++ bf->bf_lastbf = bf_prev; ++ ++ if (bf == bf_prev) { ++ al = get_frame_info(bf->bf_mpdu)->framelen; ++ bf->bf_state.bf_type = BUF_AMPDU; ++ } else { ++ TX_STAT_INC(txq->axq_qnum, a_aggr); ++ } + + *aggr_len = al; + +- return status; ++ return closed; + #undef PADBYTES + } + +@@ -1188,53 +1285,86 @@ static void ath_tx_fill_desc(struct ath_ + } + } + +-static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq, +- struct ath_atx_tid *tid) ++static void ++ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq, ++ struct ath_atx_tid *tid, struct list_head *bf_q, ++ struct ath_buf *bf_first, struct sk_buff_head *tid_q) + { +- struct ath_buf *bf; +- enum ATH_AGGR_STATUS status; +- struct ieee80211_tx_info *tx_info; +- struct list_head bf_q; +- int aggr_len; ++ struct ath_buf *bf = bf_first, *bf_prev = NULL; ++ struct sk_buff *skb; ++ int nframes = 0; + + do { +- if (skb_queue_empty(&tid->buf_q)) +- return; ++ struct ieee80211_tx_info *tx_info; ++ skb = bf->bf_mpdu; + +- INIT_LIST_HEAD(&bf_q); ++ nframes++; ++ __skb_unlink(skb, tid_q); ++ list_add_tail(&bf->list, bf_q); ++ if (bf_prev) ++ bf_prev->bf_next = bf; ++ bf_prev = bf; + +- status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len); ++ if (nframes >= 2) ++ break; + +- /* +- * no frames picked up to be aggregated; +- * block-ack window is not open. +- */ +- if (list_empty(&bf_q)) ++ bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q); ++ if (!bf) + break; + +- bf = list_first_entry(&bf_q, struct ath_buf, list); +- bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list); + tx_info = IEEE80211_SKB_CB(bf->bf_mpdu); ++ if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) ++ break; + +- if (tid->ac->clear_ps_filter) { +- tid->ac->clear_ps_filter = false; +- tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; +- } else { +- tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT; +- } ++ ath_set_rates(tid->an->vif, tid->an->sta, bf); ++ } while (1); ++} + +- /* if only one frame, send as non-aggregate */ +- if (bf == bf->bf_lastbf) { +- aggr_len = get_frame_info(bf->bf_mpdu)->framelen; +- bf->bf_state.bf_type = BUF_AMPDU; +- } else { +- TX_STAT_INC(txq->axq_qnum, a_aggr); +- } ++static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq, ++ struct ath_atx_tid *tid, bool *stop) ++{ ++ struct ath_buf *bf; ++ struct ieee80211_tx_info *tx_info; ++ struct sk_buff_head *tid_q; ++ struct list_head bf_q; ++ int aggr_len = 0; ++ bool aggr, last = true; ++ ++ if (!ath_tid_has_buffered(tid)) ++ return false; ++ ++ INIT_LIST_HEAD(&bf_q); ++ ++ bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q); ++ if (!bf) ++ return false; ++ ++ tx_info = IEEE80211_SKB_CB(bf->bf_mpdu); ++ aggr = !!(tx_info->flags & IEEE80211_TX_CTL_AMPDU); ++ if ((aggr && txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) || ++ (!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH)) { ++ *stop = true; ++ return false; ++ } ++ ++ ath_set_rates(tid->an->vif, tid->an->sta, bf); ++ if (aggr) ++ last = ath_tx_form_aggr(sc, txq, tid, &bf_q, bf, ++ tid_q, &aggr_len); ++ else ++ ath_tx_form_burst(sc, txq, tid, &bf_q, bf, tid_q); ++ ++ if (list_empty(&bf_q)) ++ return false; ++ ++ if (tid->ac->clear_ps_filter || tid->an->no_ps_filter) { ++ tid->ac->clear_ps_filter = false; ++ tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; ++ } + +- ath_tx_fill_desc(sc, bf, txq, aggr_len); +- ath_tx_txqaddbuf(sc, txq, &bf_q, false); +- } while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH && +- 
status != ATH_AGGR_BAW_CLOSED); ++ ath_tx_fill_desc(sc, bf, txq, aggr_len); ++ ath_tx_txqaddbuf(sc, txq, &bf_q, false); ++ return true; + } + + int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, +@@ -1258,6 +1388,9 @@ int ath_tx_aggr_start(struct ath_softc * + an->mpdudensity = density; + } + ++ /* force sequence number allocation for pending frames */ ++ ath_tx_tid_change_state(sc, txtid); ++ + txtid->active = true; + txtid->paused = true; + *ssn = txtid->seq_start = txtid->seq_next; +@@ -1277,8 +1410,9 @@ void ath_tx_aggr_stop(struct ath_softc * + + ath_txq_lock(sc, txq); + txtid->active = false; +- txtid->paused = true; ++ txtid->paused = false; + ath_tx_flush_tid(sc, txtid); ++ ath_tx_tid_change_state(sc, txtid); + ath_txq_unlock_complete(sc, txq); + } + +@@ -1302,7 +1436,7 @@ void ath_tx_aggr_sleep(struct ieee80211_ + + ath_txq_lock(sc, txq); + +- buffered = !skb_queue_empty(&tid->buf_q); ++ buffered = ath_tid_has_buffered(tid); + + tid->sched = false; + list_del(&tid->list); +@@ -1334,7 +1468,7 @@ void ath_tx_aggr_wakeup(struct ath_softc + ath_txq_lock(sc, txq); + ac->clear_ps_filter = true; + +- if (!skb_queue_empty(&tid->buf_q) && !tid->paused) { ++ if (!tid->paused && ath_tid_has_buffered(tid)) { + ath_tx_queue_tid(txq, tid); + ath_txq_schedule(sc, txq); + } +@@ -1359,7 +1493,7 @@ void ath_tx_aggr_resume(struct ath_softc + tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor; + tid->paused = false; + +- if (!skb_queue_empty(&tid->buf_q)) { ++ if (ath_tid_has_buffered(tid)) { + ath_tx_queue_tid(txq, tid); + ath_txq_schedule(sc, txq); + } +@@ -1379,6 +1513,7 @@ void ath9k_release_buffered_frames(struc + struct ieee80211_tx_info *info; + struct list_head bf_q; + struct ath_buf *bf_tail = NULL, *bf; ++ struct sk_buff_head *tid_q; + int sent = 0; + int i; + +@@ -1394,15 +1529,15 @@ void ath9k_release_buffered_frames(struc + continue; + + ath_txq_lock(sc, tid->ac->txq); +- while (!skb_queue_empty(&tid->buf_q) && nframes > 0) { +- bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid); ++ while (nframes > 0) { ++ bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid, &tid_q); + if (!bf) + break; + +- __skb_unlink(bf->bf_mpdu, &tid->buf_q); ++ __skb_unlink(bf->bf_mpdu, tid_q); + list_add_tail(&bf->list, &bf_q); + ath_set_rates(tid->an->vif, tid->an->sta, bf); +- ath_tx_addto_baw(sc, tid, bf->bf_state.seqno); ++ ath_tx_addto_baw(sc, tid, bf); + bf->bf_state.bf_type &= ~BUF_AGGR; + if (bf_tail) + bf_tail->bf_next = bf; +@@ -1412,7 +1547,7 @@ void ath9k_release_buffered_frames(struc + sent++; + TX_STAT_INC(txq->axq_qnum, a_queued_hw); + +- if (skb_queue_empty(&tid->buf_q)) ++ if (an->sta && !ath_tid_has_buffered(tid)) + ieee80211_sta_set_buffered(an->sta, i, false); + } + ath_txq_unlock_complete(sc, tid->ac->txq); +@@ -1665,25 +1800,27 @@ void ath_tx_cleanupq(struct ath_softc *s + */ + void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) + { +- struct ath_atx_ac *ac, *ac_tmp, *last_ac; ++ struct ath_atx_ac *ac, *last_ac; + struct ath_atx_tid *tid, *last_tid; ++ bool sent = false; + + if (test_bit(SC_OP_HW_RESET, &sc->sc_flags) || +- list_empty(&txq->axq_acq) || +- txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) ++ list_empty(&txq->axq_acq)) return; + + rcu_read_lock(); + +- ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list); + last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list); ++ while (!list_empty(&txq->axq_acq)) { ++ bool stop = false; + +- list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) { ++ ac = 
list_first_entry(&txq->axq_acq, struct ath_atx_ac, list); + last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list); + list_del(&ac->list); + ac->sched = false; + + while (!list_empty(&ac->tid_q)) { ++ + tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, + list); + list_del(&tid->list); +@@ -1692,17 +1829,17 @@ void ath_txq_schedule(struct ath_softc * + if (tid->paused) + continue; + +- ath_tx_sched_aggr(sc, txq, tid); ++ if (ath_tx_sched_aggr(sc, txq, tid, &stop)) ++ sent = true; + + /* + * add tid to round-robin queue if more frames + * are pending for the tid + */ +- if (!skb_queue_empty(&tid->buf_q)) ++ if (ath_tid_has_buffered(tid)) + ath_tx_queue_tid(txq, tid); + +- if (tid == last_tid || +- txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) ++ if (stop || tid == last_tid) + break; + } + +@@ -1711,9 +1848,17 @@ void ath_txq_schedule(struct ath_softc * + list_add_tail(&ac->list, &txq->axq_acq); + } + +- if (ac == last_ac || +- txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) ++ if (stop) + break; ++ ++ if (ac == last_ac) { ++ if (!sent) ++ break; ++ ++ sent = false; ++ last_ac = list_entry(txq->axq_acq.prev, ++ struct ath_atx_ac, list); ++ } + } + + rcu_read_unlock(); +@@ -1792,57 +1937,6 @@ static void ath_tx_txqaddbuf(struct ath_ + } + } + +-static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_txq *txq, +- struct ath_atx_tid *tid, struct sk_buff *skb, +- struct ath_tx_control *txctl) +-{ +- struct ath_frame_info *fi = get_frame_info(skb); +- struct list_head bf_head; +- struct ath_buf *bf; +- +- /* +- * Do not queue to h/w when any of the following conditions is true: +- * - there are pending frames in software queue +- * - the TID is currently paused for ADDBA/BAR request +- * - seqno is not within block-ack window +- * - h/w queue depth exceeds low water mark +- */ +- if ((!skb_queue_empty(&tid->buf_q) || tid->paused || +- !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) || +- txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) && +- txq != sc->tx.uapsdq) { +- /* +- * Add this frame to software queue for scheduling later +- * for aggregation. 
+- */ +- TX_STAT_INC(txq->axq_qnum, a_queued_sw); +- __skb_queue_tail(&tid->buf_q, skb); +- if (!txctl->an || !txctl->an->sleeping) +- ath_tx_queue_tid(txq, tid); +- return; +- } +- +- bf = ath_tx_setup_buffer(sc, txq, tid, skb); +- if (!bf) { +- ieee80211_free_txskb(sc->hw, skb); +- return; +- } +- +- ath_set_rates(tid->an->vif, tid->an->sta, bf); +- bf->bf_state.bf_type = BUF_AMPDU; +- INIT_LIST_HEAD(&bf_head); +- list_add(&bf->list, &bf_head); +- +- /* Add sub-frame to BAW */ +- ath_tx_addto_baw(sc, tid, bf->bf_state.seqno); +- +- /* Queue to h/w without aggregation */ +- TX_STAT_INC(txq->axq_qnum, a_queued_hw); +- bf->bf_lastbf = bf; +- ath_tx_fill_desc(sc, bf, txq, fi->framelen); +- ath_tx_txqaddbuf(sc, txq, &bf_head, false); +-} +- + static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, + struct ath_atx_tid *tid, struct sk_buff *skb) + { +@@ -1985,6 +2079,7 @@ static int ath_tx_prepare(struct ieee802 + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_sta *sta = txctl->sta; + struct ieee80211_vif *vif = info->control.vif; ++ struct ath_vif *avp; + struct ath_softc *sc = hw->priv; + int frmlen = skb->len + FCS_LEN; + int padpos, padsize; +@@ -1992,6 +2087,10 @@ static int ath_tx_prepare(struct ieee802 + /* NOTE: sta can be NULL according to net/mac80211.h */ + if (sta) + txctl->an = (struct ath_node *)sta->drv_priv; ++ else if (vif && ieee80211_is_data(hdr->frame_control)) { ++ avp = (void *)vif->drv_priv; ++ txctl->an = &avp->mcast_node; ++ } + + if (info->control.hw_key) + frmlen += info->control.hw_key->icv_len; +@@ -2041,7 +2140,6 @@ int ath_tx_start(struct ieee80211_hw *hw + struct ath_txq *txq = txctl->txq; + struct ath_atx_tid *tid = NULL; + struct ath_buf *bf; +- u8 tidno; + int q; + int ret; + +@@ -2069,27 +2167,31 @@ int ath_tx_start(struct ieee80211_hw *hw + ath_txq_unlock(sc, txq); + txq = sc->tx.uapsdq; + ath_txq_lock(sc, txq); +- } +- +- if (txctl->an && ieee80211_is_data_qos(hdr->frame_control)) { +- tidno = ieee80211_get_qos_ctl(hdr)[0] & +- IEEE80211_QOS_CTL_TID_MASK; +- tid = ATH_AN_2_TID(txctl->an, tidno); ++ } else if (txctl->an && ++ ieee80211_is_data_present(hdr->frame_control)) { ++ tid = ath_get_skb_tid(sc, txctl->an, skb); + + WARN_ON(tid->ac->txq != txctl->txq); +- } + +- if ((info->flags & IEEE80211_TX_CTL_AMPDU) && tid) { ++ if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) ++ tid->ac->clear_ps_filter = true; ++ + /* +- * Try aggregation if it's a unicast data frame +- * and the destination is HT capable. ++ * Add this frame to software queue for scheduling later ++ * for aggregation. 
+ */ +- ath_tx_send_ampdu(sc, txq, tid, skb, txctl); ++ TX_STAT_INC(txq->axq_qnum, a_queued_sw); ++ __skb_queue_tail(&tid->buf_q, skb); ++ if (!txctl->an->sleeping) ++ ath_tx_queue_tid(txq, tid); ++ ++ ath_txq_schedule(sc, txq); + goto out; } -@@ -2090,6 +2115,7 @@ int ath_tx_start(struct ieee80211_hw *hw bf = ath_tx_setup_buffer(sc, txq, tid, skb); if (!bf) { @@ -339,7 +1095,7 @@ if (txctl->paprd) dev_kfree_skb_any(skb); else -@@ -2189,7 +2215,7 @@ static void ath_tx_complete(struct ath_s +@@ -2189,7 +2291,7 @@ static void ath_tx_complete(struct ath_s struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data; @@ -348,7 +1104,7 @@ unsigned long flags; ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb); -@@ -2225,21 +2251,7 @@ static void ath_tx_complete(struct ath_s +@@ -2225,21 +2327,7 @@ static void ath_tx_complete(struct ath_s spin_unlock_irqrestore(&sc->sc_pm_lock, flags); __skb_queue_tail(&txq->complete_q, skb); @@ -371,9 +1127,84 @@ } static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, +@@ -2360,8 +2448,7 @@ static void ath_tx_processq(struct ath_s + + if (list_empty(&txq->axq_q)) { + txq->axq_link = NULL; +- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) +- ath_txq_schedule(sc, txq); ++ ath_txq_schedule(sc, txq); + break; + } + bf = list_first_entry(&txq->axq_q, struct ath_buf, list); +@@ -2583,6 +2670,7 @@ void ath_tx_node_init(struct ath_softc * + tid->paused = false; + tid->active = false; + __skb_queue_head_init(&tid->buf_q); ++ __skb_queue_head_init(&tid->retry_q); + acno = TID_TO_WME_AC(tidno); + tid->ac = &an->ac[acno]; + } +@@ -2590,6 +2678,7 @@ void ath_tx_node_init(struct ath_softc * + for (acno = 0, ac = &an->ac[acno]; + acno < IEEE80211_NUM_ACS; acno++, ac++) { + ac->sched = false; ++ ac->clear_ps_filter = true; + ac->txq = sc->tx.txq_map[acno]; + INIT_LIST_HEAD(&ac->tid_q); + } --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c -@@ -2094,7 +2094,7 @@ static void ath9k_wow_add_pattern(struct +@@ -966,6 +966,8 @@ static int ath9k_add_interface(struct ie + struct ath_softc *sc = hw->priv; + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); ++ struct ath_vif *avp = (void *)vif->drv_priv; ++ struct ath_node *an = &avp->mcast_node; + + mutex_lock(&sc->mutex); + +@@ -979,6 +981,12 @@ static int ath9k_add_interface(struct ie + if (ath9k_uses_beacons(vif->type)) + ath9k_beacon_assign_slot(sc, vif); + ++ an->sc = sc; ++ an->sta = NULL; ++ an->vif = vif; ++ an->no_ps_filter = true; ++ ath_tx_node_init(sc, an); ++ + mutex_unlock(&sc->mutex); + return 0; + } +@@ -1016,6 +1024,7 @@ static void ath9k_remove_interface(struc + { + struct ath_softc *sc = hw->priv; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); ++ struct ath_vif *avp = (void *)vif->drv_priv; + + ath_dbg(common, CONFIG, "Detach Interface\n"); + +@@ -1030,6 +1039,8 @@ static void ath9k_remove_interface(struc + ath9k_calculate_summary_state(hw, NULL); + ath9k_ps_restore(sc); + ++ ath_tx_node_cleanup(sc, &avp->mcast_node); ++ + mutex_unlock(&sc->mutex); + } + +@@ -1374,9 +1385,6 @@ static void ath9k_sta_notify(struct ieee + struct ath_softc *sc = hw->priv; + struct ath_node *an = (struct ath_node *) sta->drv_priv; + +- if (!sta->ht_cap.ht_supported) +- return; +- + switch (cmd) { + case STA_NOTIFY_SLEEP: + an->sleeping = true; +@@ -2094,7 +2102,7 @@ static void ath9k_wow_add_pattern(struct { struct ath_hw 
*ah = sc->sc_ah; struct ath9k_wow_pattern *wow_pattern = NULL; @@ -1402,3 +2233,124 @@ #define REALTEK_USB_VENQT_READ 0xC0 #define REALTEK_USB_VENQT_WRITE 0x40 +--- a/drivers/net/wireless/ath/ath9k/ath9k.h ++++ b/drivers/net/wireless/ath/ath9k/ath9k.h +@@ -137,7 +137,8 @@ int ath_descdma_setup(struct ath_softc * + #define ATH_AGGR_ENCRYPTDELIM 10 + /* minimum h/w qdepth to be sustained to maximize aggregation */ + #define ATH_AGGR_MIN_QDEPTH 2 +-#define ATH_AMPDU_SUBFRAME_DEFAULT 32 ++/* minimum h/w qdepth for non-aggregated traffic */ ++#define ATH_NON_AGGR_MIN_QDEPTH 8 + + #define IEEE80211_SEQ_SEQ_SHIFT 4 + #define IEEE80211_SEQ_MAX 4096 +@@ -174,12 +175,6 @@ int ath_descdma_setup(struct ath_softc * + + #define ATH_TX_COMPLETE_POLL_INT 1000 + +-enum ATH_AGGR_STATUS { +- ATH_AGGR_DONE, +- ATH_AGGR_BAW_CLOSED, +- ATH_AGGR_LIMITED, +-}; +- + #define ATH_TXFIFO_DEPTH 8 + struct ath_txq { + int mac80211_qnum; /* mac80211 queue number, -1 means not mac80211 Q */ +@@ -212,8 +207,9 @@ struct ath_frame_info { + int framelen; + enum ath9k_key_type keytype; + u8 keyix; +- u8 retries; + u8 rtscts_rate; ++ u8 retries : 7; ++ u8 baw_tracked : 1; + }; + + struct ath_buf_state { +@@ -241,6 +237,7 @@ struct ath_buf { + struct ath_atx_tid { + struct list_head list; + struct sk_buff_head buf_q; ++ struct sk_buff_head retry_q; + struct ath_node *an; + struct ath_atx_ac *ac; + unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)]; +@@ -268,6 +265,7 @@ struct ath_node { + u8 mpdudensity; + + bool sleeping; ++ bool no_ps_filter; + + #if defined(CPTCFG_MAC80211_DEBUGFS) && defined(CPTCFG_ATH9K_DEBUGFS) + struct dentry *node_stat; +@@ -367,6 +365,7 @@ void ath9k_release_buffered_frames(struc + /********/ + + struct ath_vif { ++ struct ath_node mcast_node; + int av_bslot; + bool primary_sta_vif; + __le64 tsf_adjust; /* TSF adjustment for staggered beacons */ +--- a/drivers/net/wireless/ath/ath9k/debug.c ++++ b/drivers/net/wireless/ath/ath9k/debug.c +@@ -607,6 +607,28 @@ static ssize_t read_file_xmit(struct fil + return retval; + } + ++static ssize_t print_queue(struct ath_softc *sc, struct ath_txq *txq, ++ char *buf, ssize_t size) ++{ ++ ssize_t len = 0; ++ ++ ath_txq_lock(sc, txq); ++ ++ len += snprintf(buf + len, size - len, "%s: %d ", ++ "qnum", txq->axq_qnum); ++ len += snprintf(buf + len, size - len, "%s: %2d ", ++ "qdepth", txq->axq_depth); ++ len += snprintf(buf + len, size - len, "%s: %2d ", ++ "ampdu-depth", txq->axq_ampdu_depth); ++ len += snprintf(buf + len, size - len, "%s: %3d ", ++ "pending", txq->pending_frames); ++ len += snprintf(buf + len, size - len, "%s: %d\n", ++ "stopped", txq->stopped); ++ ++ ath_txq_unlock(sc, txq); ++ return len; ++} ++ + static ssize_t read_file_queues(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) + { +@@ -624,24 +646,13 @@ static ssize_t read_file_queues(struct f + + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + txq = sc->tx.txq_map[i]; +- len += snprintf(buf + len, size - len, "(%s): ", qname[i]); +- +- ath_txq_lock(sc, txq); +- +- len += snprintf(buf + len, size - len, "%s: %d ", +- "qnum", txq->axq_qnum); +- len += snprintf(buf + len, size - len, "%s: %2d ", +- "qdepth", txq->axq_depth); +- len += snprintf(buf + len, size - len, "%s: %2d ", +- "ampdu-depth", txq->axq_ampdu_depth); +- len += snprintf(buf + len, size - len, "%s: %3d ", +- "pending", txq->pending_frames); +- len += snprintf(buf + len, size - len, "%s: %d\n", +- "stopped", txq->stopped); +- +- ath_txq_unlock(sc, txq); ++ len += snprintf(buf + len, size - len, "(%s): ", qname[i]); 
++ len += print_queue(sc, txq, buf + len, size - len); + } + ++ len += snprintf(buf + len, size - len, "(CAB): "); ++ len += print_queue(sc, sc->beacon.cabq, buf + len, size - len); ++ + if (len > size) + len = size; + |
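
One detail worth noting from the hunks above: whether a frame already sits in the BlockAck window is now remembered in a one-bit baw_tracked flag packed beside the retry counter, so retried frames are not added to the window twice. The following is a reduced sketch of that bookkeeping under stand-in names and sizes (BAW_MAX_BUFS, struct baw_sketch); the real definitions are the ATH_TID_MAX_BUFS bitmap in ath9k.h and ath_tx_addto_baw() in xmit.c.

```c
#include <linux/bitops.h>
#include <linux/types.h>

#define BAW_MAX_BUFS	64	/* stand-in for ATH_TID_MAX_BUFS */
#define SEQ_MAX		4096	/* stand-in for IEEE80211_SEQ_MAX */

/* Per-frame state: 7 bits of retry count, 1 bit "already in the BAW". */
struct frame_state_sketch {
	u8 retries : 7;
	u8 baw_tracked : 1;
};

/* Per-TID window state (illustrative subset). */
struct baw_sketch {
	u16 seq_start;				/* first seqno in the window */
	u16 baw_head;				/* bitmap slot of seq_start */
	unsigned long tx_buf[BITS_TO_LONGS(BAW_MAX_BUFS)];
};

/* Mark a sequence number as outstanding, at most once per frame. */
static void baw_add(struct baw_sketch *baw, struct frame_state_sketch *fs,
		    u16 seqno)
{
	int index = (seqno - baw->seq_start) & (SEQ_MAX - 1);
	int cindex = (baw->baw_head + index) & (BAW_MAX_BUFS - 1);

	if (fs->baw_tracked)
		return;		/* a retried frame is already in the window */

	__set_bit(cindex, baw->tx_buf);
	fs->baw_tracked = 1;
}
```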