commit 50519d604a0638bfa5fe2519b1f45ea147e7a331
Author: Jiri Slaby
Date:   Mon Jun 25 16:02:02 2007 +0200

    setup queues

diff --git a/ath/if_ath.c b/ath/if_ath.c
index beae81d..44628a9 100644
--- a/ath/if_ath.c
+++ b/ath/if_ath.c
@@ -1609,24 +1609,6 @@ ath_updateslot(struct net_device *dev)
 	else
 		ath_setslottime(sc);
 }
-#endif
-/*
- * Setup a h/w transmit queue for beacons.
- */
-static int
-ath_beaconq_setup(struct ath_hal *ah)
-{
-	struct ath5k_txq_info qi;
-
-	memset(&qi, 0, sizeof(qi));
-	qi.tqi_aifs = AR5K_TXQ_USEDEFAULT;
-	qi.tqi_cw_min = AR5K_TXQ_USEDEFAULT;
-	qi.tqi_cw_max = AR5K_TXQ_USEDEFAULT;
-	/* NB: for dynamic turbo, don't enable any other interrupts */
-	qi.tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE;
-	return ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_BEACON, &qi);
-}
-#ifdef BLE
 /*
  * Setup the transmit queue parameters for the beacon queue.
  */
@@ -3079,63 +3061,6 @@ rx_next:
 #undef PA2DESC
 }
 
-/*
- * Setup a h/w transmit queue.
- */
-static struct ath_txq *
-ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
-{
-#define N(a) (sizeof(a)/sizeof(a[0]))
-	struct ath_hal *ah = sc->sc_ah;
-	struct ath5k_txq_info qi;
-	int qnum;
-
-	memset(&qi, 0, sizeof(qi));
-	qi.tqi_subtype = subtype;
-	qi.tqi_aifs = AR5K_TXQ_USEDEFAULT;
-	qi.tqi_cw_min = AR5K_TXQ_USEDEFAULT;
-	qi.tqi_cw_max = AR5K_TXQ_USEDEFAULT;
-	/*
-	 * Enable interrupts only for EOL and DESC conditions.
-	 * We mark tx descriptors to receive a DESC interrupt
-	 * when a tx queue gets deep; otherwise waiting for the
-	 * EOL to reap descriptors. Note that this is done to
-	 * reduce interrupt load and this only defers reaping
-	 * descriptors, never transmitting frames. Aside from
-	 * reducing interrupts this also permits more concurrency.
-	 * The only potential downside is if the tx queue backs
-	 * up in which case the top half of the kernel may backup
-	 * due to a lack of tx descriptors.
-	 */
-	qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE | AR5K_TXQ_FLAG_TXDESCINT_ENABLE;
-	qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi);
-	if (qnum == -1) {
-		/*
-		 * NB: don't print a message, this happens
-		 * normally on parts with too few tx queues
-		 */
-		return NULL;
-	}
-	if (qnum >= N(sc->sc_txq)) {
-		printk("%s: hal qnum %u out of range, max %u!\n",
-			"BLE"/*sc->sc_dev->name*/, qnum, (unsigned int) N(sc->sc_txq));
-		ath5k_hw_release_tx_queue(ah, qnum);
-		return NULL;
-	}
-	if (!ATH_TXQ_SETUP(sc, qnum)) {
-		struct ath_txq *txq = &sc->sc_txq[qnum];
-
-		txq->axq_qnum = qnum;
-		txq->axq_depth = 0;
-		txq->axq_intrcnt = 0;
-		txq->axq_link = NULL;
-//		STAILQ_INIT(&txq->axq_q);
-		ATH_TXQ_LOCK_INIT(sc, txq);
-		sc->sc_txqsetup |= 1<<qnum;
-	}
-	return &sc->sc_txq[qnum];
-#undef N
-}
 #ifdef BLE
 /*
  * Setup a hardware data transmit queue for the specified
@@ -3211,34 +3136,7 @@ ath_wme_update(struct ieee80211com *ic)
 		!ath_txq_update(sc, WME_AC_VI) ||
 		!ath_txq_update(sc, WME_AC_VO) ? EIO : 0;
 }
-#endif
-/*
- * Reclaim resources for a setup queue.
- */
-static void
-ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
-{
-	ath5k_hw_release_tx_queue(sc->sc_ah, txq->axq_qnum);
-	ATH_TXQ_LOCK_DESTROY(txq);
-	sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
-}
-/*
- * Reclaim all tx queue resources.
- */
-static void
-ath_tx_cleanup(struct ath_softc *sc)
-{
-	int i;
-
-	ATH_LOCK_DESTROY(sc);
-	ATH_TXBUF_LOCK_DESTROY(sc);
-	for (i = 0; i < AR5K_NUM_TX_QUEUES; i++) {
-		if (ATH_TXQ_SETUP(sc, i))
-			ath_tx_cleanupq(sc, &sc->sc_txq[i]);
-	}
-}
-#ifdef BLE
 
 static int
 ath_tx_start(struct net_device *dev, struct ieee80211_node *ni,
 	struct ath_buf *bf, struct sk_buff *skb)
diff --git a/ath/if_ath_pci.c b/ath/if_ath_pci.c
index d62bf6f..6faf778 100644
--- a/ath/if_ath_pci.c
+++ b/ath/if_ath_pci.c
@@ -573,6 +573,85 @@ static void ath_desc_free(struct ath_softc *sc, struct pci_dev *pdev)
 	sc->bufptr = NULL;
 }
 
+static int ath_beaconq_setup(struct ath_hw *ah)
+{
+	struct ath5k_txq_info qi;
+
+	memset(&qi, 0, sizeof(qi));
+	qi.tqi_aifs = AR5K_TXQ_USEDEFAULT;
+	qi.tqi_cw_min = AR5K_TXQ_USEDEFAULT;
+	qi.tqi_cw_max = AR5K_TXQ_USEDEFAULT;
+	/* NB: for dynamic turbo, don't enable any other interrupts */
+	qi.tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE;
+
+	return ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_BEACON, &qi);
+}
+
+static struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype,
+		int subtype)
+{
+	struct ath_hw *ah = sc->ah;
+	struct ath5k_txq_info qi;
+	int qnum;
+
+	memset(&qi, 0, sizeof(qi));
+	qi.tqi_subtype = subtype;
+	qi.tqi_aifs = AR5K_TXQ_USEDEFAULT;
+	qi.tqi_cw_min = AR5K_TXQ_USEDEFAULT;
+	qi.tqi_cw_max = AR5K_TXQ_USEDEFAULT;
+	/*
+	 * Enable interrupts only for EOL and DESC conditions.
+	 * We mark tx descriptors to receive a DESC interrupt
+	 * when a tx queue gets deep; otherwise waiting for the
+	 * EOL to reap descriptors. Note that this is done to
+	 * reduce interrupt load and this only defers reaping
+	 * descriptors, never transmitting frames. Aside from
+	 * reducing interrupts this also permits more concurrency.
+	 * The only potential downside is if the tx queue backs
+	 * up in which case the top half of the kernel may backup
+	 * due to a lack of tx descriptors.
+	 */
+	qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE |
+			AR5K_TXQ_FLAG_TXDESCINT_ENABLE;
+	qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi);
+	if (qnum < 0) {
+		/*
+		 * NB: don't print a message, this happens
+		 * normally on parts with too few tx queues
+		 */
+		return ERR_PTR(qnum);
+	}
+	if (qnum >= ARRAY_SIZE(sc->txq)) {
+		printk(KERN_ERR "hal qnum %u out of range, max %u!\n",
+			qnum, ARRAY_SIZE(sc->txq));
+		ath5k_hw_release_tx_queue(ah, qnum);
+		return ERR_PTR(-EINVAL);
+	}
+	if (!test_bit(qnum, sc->txqsetup)) {
+		struct ath_txq *txq = &sc->txq[qnum];
+
+		txq->axq_qnum = qnum;
+		txq->axq_depth = 0;
+		txq->axq_intrcnt = 0;
+		txq->axq_link = NULL;
+		INIT_LIST_HEAD(&txq->axq_q);
+		spin_lock_init(&txq->axq_lock);
+		set_bit(qnum, sc->txqsetup);
+	}
+	return &sc->txq[qnum];
+}
+
+static void ath_tx_cleanup(struct ath_softc *sc)
+{
+	unsigned int i;
+
+	for (i = 0; i < AR5K_NUM_TX_QUEUES; i++)
+		if (test_bit(i, sc->txqsetup)) {
+			ath5k_hw_release_tx_queue(sc->ah, sc->txq[i].axq_qnum);
+			clear_bit(i, sc->txqsetup);
+		}
+}
+
 static int ath_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
 {
 	struct ath_softc *sc = hw->priv;
@@ -642,7 +721,6 @@ static int ath_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
 		dev_err(&pdev->dev, "can't allocate descriptors\n");
 		goto err;
 	}
-#ifdef BLE
 
 	/*
 	 * Allocate hardware transmit queues: one queue for
@@ -652,18 +730,22 @@ static int ath_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
 	 *
 	 * XXX PS-Poll
 	 */
-	sc->sc_bhalq = ath_beaconq_setup(ah);
-	if (sc->sc_bhalq == (u_int) -1) {
-		printk(KERN_ERR "unable to setup a beacon xmit queue!\n");
-		goto bad2;
+	ret = ath_beaconq_setup(ah);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "can't setup a beacon xmit queue\n");
+		goto err_desc;
 	}
-	sc->sc_cabq = ath_txq_setup(sc, AR5K_TX_QUEUE_CAB, 0);
-	if (sc->sc_cabq == NULL) {
-		printk(KERN_ERR "unable to setup CAB xmit queue!\n");
-		error = EIO;
-		goto bad2;
+	sc->bhalq = ret;
+
+	sc->cabq = ath_txq_setup(sc, AR5K_TX_QUEUE_CAB, 0);
+	if (IS_ERR(sc->cabq)) {
+		dev_err(&pdev->dev, "can't setup CAB xmit queue\n");
+		ret = PTR_ERR(sc->cabq);
+		sc->cabq = NULL;
+		goto err_queues;
 	}
+#ifdef BLE
 
 	/* NB: insure BK queue is the lowest priority h/w queue */
 	if (!ath_tx_setup(sc, WME_AC_BK, AR5K_WME_AC_BK)) {
 		printk(KERN_ERR "unable to setup xmit queue for %s traffic!\n",
@@ -911,9 +993,11 @@ bad2:
 	if (sc->sc_txq[WME_AC_VO].axq_qnum != (u_int) -1) {
 		ATH_TXQ_LOCK_DESTROY(&sc->sc_txq[WME_AC_VO]);
 	}
-	ath_tx_cleanup(sc);
-	ath_desc_free(sc);
 #endif
+err_queues:
+	ath_tx_cleanup(sc);
+err_desc:
+	ath_desc_free(sc, pdev);
 err:
 	return ret;
 }
@@ -938,7 +1022,7 @@ static void ath_detach(struct pci_dev *pdev, struct ieee80211_hw *hw)
 //	ieee80211_ifdetach(ic);
 //	ath_rate_detach(sc->sc_rc);
 	ath_desc_free(sc, pdev);
-//	ath_tx_cleanup(sc);
+	ath_tx_cleanup(sc);
 
 	/*
 	 * NB: can't reclaim these until after ieee80211_ifdetach
diff --git a/ath/if_athvar.h b/ath/if_athvar.h
index 0b85915..fce6a97 100644
--- a/ath/if_athvar.h
+++ b/ath/if_athvar.h
@@ -290,25 +290,23 @@ struct ath_softc {
 #endif
 	struct list_head txbuf;		/* transmit buffer */
 	spinlock_t txbuflock;		/* txbuf lock */
+	struct ath_txq txq[AR5K_NUM_TX_QUEUES];
+	DECLARE_BITMAP(txqsetup, AR5K_NUM_TX_QUEUES);	/* h/w queues setup */
 #ifdef BLE
 	int sc_tx_timer;		/* transmit timeout */
-	u_int sc_txqsetup;		/* h/w queues setup */
 	u_int sc_txintrperiod;/* tx interrupt batching */
-	struct ath_txq sc_txq[AR5K_NUM_TX_QUEUES];
 	struct ath_txq *sc_ac2q[5];	/* WME AC -> h/w q map */
 #endif
 	struct tasklet_struct txtq;	/* tx intr tasklet */
 
 	struct list_head bbuf;		/* beacon buffers */
+	unsigned int bhalq;		/* HAL q for outgoing beacons */
 #ifdef BLE
-	u_int sc_bhalq;			/* HAL q for outgoing beacons */
 	u_int sc_bmisscount;		/* missed beacon transmits */
 	u32 sc_ant_tx[8];		/* recent tx frames/antenna */
-	struct ath_txq *sc_cabq;	/* tx q for cab frames */
-#ifdef BLE
 	struct ieee80211_beacon_offsets boff; /* dynamic update state */
 #endif
-#endif
+	struct ath_txq *cabq;		/* tx q for cab frames */
 	struct tasklet_struct bmisstq;	/* bmiss intr tasklet */
 #ifdef BLE
 	struct tasklet_struct sc_bstuckq; /* stuck beacon processing */
@@ -326,12 +324,10 @@ struct ath_softc {
 #endif
 };
 
-#define ATH_TXQ_SETUP(sc, i)	((sc)->sc_txqsetup & (1<<i))
-
-#define ATH_TXBUF_LOCK(_sc)	spin_lock(&(_sc)->sc_txbuflock)
+/*#define ATH_TXBUF_LOCK(_sc)	spin_lock(&(_sc)->sc_txbuflock)
 #define ATH_TXBUF_UNLOCK(_sc)	spin_unlock(&(_sc)->sc_txbuflock)
 #define ATH_TXBUF_LOCK_BH(_sc)	spin_lock_bh(&(_sc)->sc_txbuflock)
-#define ATH_TXBUF_UNLOCK_BH(_sc)	spin_unlock_bh(&(_sc)->sc_txbuflock)
+#define ATH_TXBUF_UNLOCK_BH(_sc)	spin_unlock_bh(&(_sc)->sc_txbuflock)*/
 
 #define ATH_TXBUF_LOCK_ASSERT(_sc) \
 	KASSERT(spin_is_locked(&(_sc)->sc_txbuflock), "txbuf not locked!")
diff --git a/openhal/ath5k.h b/openhal/ath5k.h
index 54c1a78..4364949 100644
--- a/openhal/ath5k.h
+++ b/openhal/ath5k.h
@@ -1068,9 +1068,9 @@ bool ath5k_hw_set_key(struct ath_hw *hal, u16 entry, const struct ath5k_keyval *
 bool ath5k_hw_set_key_lladdr(struct ath_hw *hal, u16 entry, const u8 *mac);
 /* Queue Control Unit, DFS Control Unit Functions */
 int ath5k_hw_setup_tx_queue(struct ath_hw *hal, enum ath5k_tx_queue queue_type, struct ath5k_txq_info *queue_info);
-bool ath5k_hw_setup_tx_queueprops(struct ath_hw *hal, int queue, const struct ath5k_txq_info *queue_info);
+int ath5k_hw_setup_tx_queueprops(struct ath_hw *hal, int queue, const struct ath5k_txq_info *queue_info);
 bool ath5k_hw_get_tx_queueprops(struct ath_hw *hal, int queue, struct ath5k_txq_info *queue_info);
-bool ath5k_hw_release_tx_queue(struct ath_hw *hal, unsigned int queue);
+void ath5k_hw_release_tx_queue(struct ath_hw *hal, unsigned int queue);
 bool ath5k_hw_reset_tx_queue(struct ath_hw *hal, unsigned int queue);
 u32 ath5k_hw_num_tx_pending(struct ath_hw *hal, unsigned int queue);
 bool ath5k_hw_set_slot_time(struct ath_hw *hal, unsigned int slot_time);
diff --git a/openhal/ath5k_hw.c b/openhal/ath5k_hw.c
index 660c295..56761ca 100644
--- a/openhal/ath5k_hw.c
+++ b/openhal/ath5k_hw.c
@@ -3326,11 +3326,12 @@ Queue Control Unit, DFS Control Unit Functions
 /*
  * Initialize a transmit queue
  */
-int
-ath5k_hw_setup_tx_queue(struct ath_hw *hal, enum ath5k_tx_queue queue_type,
-		struct ath5k_txq_info *queue_info)
+int ath5k_hw_setup_tx_queue(struct ath_hw *hal, enum ath5k_tx_queue queue_type,
+		struct ath5k_txq_info *queue_info)
 {
 	unsigned int queue;
+	int ret;
+
 	AR5K_TRACE;
 
 	/*
@@ -3347,7 +3348,7 @@ ath5k_hw_setup_tx_queue(struct ath_hw *hal, enum ath5k_tx_queue queue_type,
 			queue = AR5K_TX_QUEUE_ID_NOQCU_BEACON;
 			break;
 		default:
-			return -1;
+			return -EINVAL;
 		}
 	} else {
 		switch (queue_type) {
@@ -3357,7 +3358,7 @@ ath5k_hw_setup_tx_queue(struct ath_hw *hal, enum ath5k_tx_queue queue_type,
 					AR5K_TX_QUEUE_INACTIVE; queue++) {
 
 				if (queue > AR5K_TX_QUEUE_ID_DATA_MAX)
-					return -1;
+					return -EINVAL;
 			}
 			break;
 		case AR5K_TX_QUEUE_UAPSD:
@@ -3371,11 +3372,12 @@ ath5k_hw_setup_tx_queue(struct ath_hw *hal, enum ath5k_tx_queue queue_type,
 			break;
 		case AR5K_TX_QUEUE_XR_DATA:
 			if (hal->ah_version != AR5K_AR5212)
-				AR5K_PRINTF("XR data queues only supported in 5212!");
+				AR5K_PRINTF("XR data queues only "
+						"supported in 5212!");
 			queue = AR5K_TX_QUEUE_ID_XR_DATA;
 			break;
 		default:
-			return -1;
+			return -EINVAL;
 		}
 	}
 
@@ -3387,9 +3389,9 @@ ath5k_hw_setup_tx_queue(struct ath_hw *hal, enum ath5k_tx_queue queue_type,
 
 	if (queue_info != NULL) {
 		queue_info->tqi_type = queue_type;
-		if (ath5k_hw_setup_tx_queueprops(hal, queue, queue_info)
-				!= true)
-			return -1;
+		ret = ath5k_hw_setup_tx_queueprops(hal, queue, queue_info);
+		if (ret)
+			return ret;
 	}
 	/*
 	 * We use ah_txq_interrupts to hold a temp value for
@@ -3404,27 +3406,25 @@ ath5k_hw_setup_tx_queue(struct ath_hw *hal, enum ath5k_tx_queue queue_type,
 /*
  * Setup a transmit queue
  */
-bool
-ath5k_hw_setup_tx_queueprops(struct ath_hw *hal, int queue,
+int ath5k_hw_setup_tx_queueprops(struct ath_hw *hal, int queue,
 		const struct ath5k_txq_info *queue_info)
 {
 	AR5K_TRACE;
 	AR5K_ASSERT_ENTRY(queue, hal->ah_capabilities.cap_queues.q_tx_num);
 
 	if (hal->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
-		return false;
+		return -EIO;
 
 	memcpy(&hal->ah_txq[queue], queue_info, sizeof(struct ath5k_txq_info));
 
 	/*XXX: Is this supported on 5210 ?*/
 	if ((queue_info->tqi_type == AR5K_TX_QUEUE_DATA &&
-		((queue_info->tqi_subtype == AR5K_WME_AC_VI) ||
-		(queue_info->tqi_subtype == AR5K_WME_AC_VO))) ||
-		queue_info->tqi_type == AR5K_TX_QUEUE_UAPSD)
-		hal->ah_txq[queue].tqi_flags |=
-			AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS;
+			((queue_info->tqi_subtype == AR5K_WME_AC_VI) ||
+			(queue_info->tqi_subtype == AR5K_WME_AC_VO))) ||
+			queue_info->tqi_type == AR5K_TX_QUEUE_UAPSD)
+		hal->ah_txq[queue].tqi_flags |= AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS;
 
-	return true;
+	return 0;
 }
 
 /*
@@ -3441,18 +3441,18 @@ ath5k_hw_get_tx_queueprops(struct ath_hw *hal, int queue, struct ath5k_txq_info
 /*
  * Set a transmit queue inactive
  */
-bool
-ath5k_hw_release_tx_queue(struct ath_hw *hal, unsigned int queue)
+void ath5k_hw_release_tx_queue(struct ath_hw *hal, unsigned int queue)
 {
 	AR5K_TRACE;
-	AR5K_ASSERT_ENTRY(queue, hal->ah_capabilities.cap_queues.q_tx_num);
+	if (queue >= hal->ah_capabilities.cap_queues.q_tx_num) {
+		WARN_ON(1);
+		return;
+	}
 
 	/* This queue will be skipped in further operations */
 	hal->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE;
 	/*For SIMR setup*/
 	AR5K_Q_DISABLE_BITS(hal->ah_txq_interrupts, queue);
-
-	return false; /*???*/
 }
 
 /*
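Note (not part of the commit): a minimal caller sketch of the error-handling convention this patch introduces -- ath_beaconq_setup() now returns a negative errno or a beacon queue number, and ath_txq_setup() returns a valid pointer or ERR_PTR() instead of NULL, so callers test with IS_ERR()/PTR_ERR() from <linux/err.h>. The helper name ath_setup_cab_queue() below is hypothetical; the real caller is ath_attach() in ath/if_ath_pci.c above.

/* Illustrative sketch only, assuming the ath_softc fields added above
 * (bhalq, cabq, txq[], txqsetup) and the helpers introduced by this patch. */
static int ath_setup_cab_queue(struct ath_softc *sc, struct ath_hw *ah)
{
	struct ath_txq *txq;
	int ret;

	ret = ath_beaconq_setup(ah);	/* < 0 on error, else h/w queue number */
	if (ret < 0)
		return ret;
	sc->bhalq = ret;

	txq = ath_txq_setup(sc, AR5K_TX_QUEUE_CAB, 0);
	if (IS_ERR(txq)) {		/* never NULL: valid pointer or ERR_PTR() */
		ath_tx_cleanup(sc);	/* releases every queue marked in sc->txqsetup */
		return PTR_ERR(txq);
	}
	sc->cabq = txq;

	return 0;
}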