commit 07c9738dcc3090d264fe80b81d842c730e701067
Author: Jiri Slaby
Date:   Fri Jul 6 23:20:47 2007 +0200

    move ath_rxbuf_init after tasklets

diff --git a/ath/if_ath_pci.c b/ath/if_ath_pci.c
index b93d0ed..4665fa4 100644
--- a/ath/if_ath_pci.c
+++ b/ath/if_ath_pci.c
@@ -139,6 +139,68 @@ static void ath_printtxbuf(struct ath_buf *bf, int done)
 }
 #endif
 
+static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
+{
+	struct ieee80211_tx_status txs = {};
+	struct ath_buf *bf, *bf0;
+	struct ath_desc *ds;
+	struct sk_buff *skb;
+	int ret;
+
+	spin_lock(&txq->lock);
+	list_for_each_entry_safe(bf, bf0, &txq->q, list) {
+		ds = bf->desc;
+
+		ret = sc->ah->ah_proc_tx_desc(sc->ah, ds);
+		if (ret == -EINPROGRESS)
+			break;
+		else if (ret) {
+			printk(KERN_ERR "ath: error %d while processing "
+				"queue %u\n", ret, txq->qnum);
+			break;
+		}
+
+		skb = bf->skb;
+		bf->skb = NULL;
+		pci_unmap_single(sc->pdev, bf->skbaddr, skb->len,
+				PCI_DMA_TODEVICE);
+
+		txs.control = bf->ctl;
+		txs.retry_count = ds->ds_txstat.ts_shortretry +
+			ds->ds_txstat.ts_longretry;
+		if (ds->ds_txstat.ts_status) {
+			if (ds->ds_txstat.ts_status & AR5K_TXERR_XRETRY) {
+				txs.excessive_retries = 1;
+			} else if (ds->ds_txstat.ts_status & AR5K_TXERR_FILT) {
+				txs.flags |= IEEE80211_TX_STATUS_TX_FILTERED;
+			}
+		} else {
+			txs.flags |= IEEE80211_TX_STATUS_ACK;
+			txs.ack_signal = ds->ds_txstat.ts_rssi;
+		}
+
+		ieee80211_tx_status(sc->hw, skb, &txs);
+
+		printk(KERN_DEBUG "DONE skb: %p, rssi: %d, stat: %x, seq: %u, stamp: %u\n", skb, ds->ds_txstat.ts_rssi, ds->ds_txstat.ts_status, ds->ds_txstat.ts_seqnum, ds->ds_txstat.ts_tstamp);
+
+		spin_lock(&sc->txbuflock);
+		list_move_tail(&bf->list, &sc->txbuf);
+		spin_unlock(&sc->txbuflock);
+	}
+	if (list_empty(&txq->q))
+		txq->link = NULL;
+	spin_unlock(&txq->lock);
+}
+
+static void ath_tasklet_tx(unsigned long data)
+{
+	struct ath_softc *sc = (void *)data;
+
+	ath_tx_processq(sc, sc->txq);
+
+	ieee80211_wake_queue(sc->hw, 0);
+}
+
 static int ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
 {
 	struct ath_hw *ah = sc->ah;
@@ -209,68 +271,6 @@ static int ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
 	return 0;
 }
 
-static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
-{
-	struct ieee80211_tx_status txs = {};
-	struct ath_buf *bf, *bf0;
-	struct ath_desc *ds;
-	struct sk_buff *skb;
-	int ret;
-
-	spin_lock(&txq->lock);
-	list_for_each_entry_safe(bf, bf0, &txq->q, list) {
-		ds = bf->desc;
-
-		ret = sc->ah->ah_proc_tx_desc(sc->ah, ds);
-		if (ret == -EINPROGRESS)
-			break;
-		else if (ret) {
-			printk(KERN_ERR "ath: error %d while processing "
-				"queue %u\n", ret, txq->qnum);
-			break;
-		}
-
-		skb = bf->skb;
-		bf->skb = NULL;
-		pci_unmap_single(sc->pdev, bf->skbaddr, skb->len,
-				PCI_DMA_TODEVICE);
-
-		txs.control = bf->ctl;
-		txs.retry_count = ds->ds_txstat.ts_shortretry +
-			ds->ds_txstat.ts_longretry;
-		if (ds->ds_txstat.ts_status) {
-			if (ds->ds_txstat.ts_status & AR5K_TXERR_XRETRY) {
-				txs.excessive_retries = 1;
-			} else if (ds->ds_txstat.ts_status & AR5K_TXERR_FILT) {
-				txs.flags |= IEEE80211_TX_STATUS_TX_FILTERED;
-			}
-		} else {
-			txs.flags |= IEEE80211_TX_STATUS_ACK;
-			txs.ack_signal = ds->ds_txstat.ts_rssi;
-		}
-
-		ieee80211_tx_status(sc->hw, skb, &txs);
-
-		printk(KERN_DEBUG "DONE skb: %p, rssi: %d, stat: %x, seq: %u, stamp: %u\n", skb, ds->ds_txstat.ts_rssi, ds->ds_txstat.ts_status, ds->ds_txstat.ts_seqnum, ds->ds_txstat.ts_tstamp);
-
-		spin_lock(&sc->txbuflock);
-		list_move_tail(&bf->list, &sc->txbuf);
-		spin_unlock(&sc->txbuflock);
-	}
-	if (list_empty(&txq->q))
-		txq->link = NULL;
-	spin_unlock(&txq->lock);
-}
-
-static void ath_tasklet_tx(unsigned long data)
-{
-	struct ath_softc *sc = (void *)data;
-
-	ath_tx_processq(sc, sc->txq);
-
-	ieee80211_wake_queue(sc->hw, 0);
-}
-
 /*
  * Calculate the receive filter according to the
  * operating mode and state:
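
Note: the patch only reorders the two functions so that ath_tasklet_tx is defined before the code that registers it as a tasklet, avoiding a forward declaration. A minimal sketch of how such a handler is typically bound during attach is shown below; the field name sc->txtq and the ath_tx_attach_tasklet() helper are assumptions for illustration, not part of this patch. Only tasklet_init()/tasklet_schedule() are standard kernel API (as of this 2007-era code).

	#include <linux/interrupt.h>

	/*
	 * Sketch: bind ath_tasklet_tx to a tasklet at attach time.
	 * sc->txtq is an assumed struct tasklet_struct member.
	 */
	static void ath_tx_attach_tasklet(struct ath_softc *sc)
	{
		/* run ath_tasklet_tx((unsigned long)sc) in softirq context */
		tasklet_init(&sc->txtq, ath_tasklet_tx, (unsigned long)sc);
	}

	/* later, from the interrupt handler, on a TX-done interrupt: */
	/* tasklet_schedule(&sc->txtq); */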