commit 45551c33f749ad1a0bbb075ac661a66efae07090
Author: Jiri Slaby
Date:   Fri Aug 3 23:05:14 2007 +0200

    ath.c -> ath5k_base.c

diff --git a/Makefile b/Makefile
index 5d941fe..ace1a37 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 KDIR=/lib/modules/$(shell uname -r)/build
 KBUILD=$(MAKE) -C $(KDIR) M=$(PWD)
 
-ath5k-objs := ath.o ath5k_hw.o ath5k_regdom.o
+ath5k-objs := ath5k_base.o ath5k_hw.o ath5k_regdom.o
 
 obj-m += ath5k.o
 
diff --git a/ath.c b/ath.c
deleted file mode 100644
index cc4c913..0000000
--- a/ath.c
+++ /dev/null
@@ -1,2567 +0,0 @@
-/*-
- * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
- * Copyright (c) 2004-2005 Atheros Communications, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
- *    redistribution must be conditioned upon including a substantially
- *    similar Disclaimer requirement for further binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- */
-#define ATH_PCI_VERSION "0.9.5.0-BSD"
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/if.h>
-#include <linux/netdevice.h>
-#include <linux/cache.h>
-#include <linux/pci.h>
-#include <linux/ethtool.h>
-
-#include <net/ieee80211_radiotap.h>
-
-#include <asm/uaccess.h>
-#include <asm/unaligned.h>
-
-#include "ath.h"
-#include "ath5k_reg.h"
-
-#define ATH_DEBUG_MODES	0	/* Show found modes in the log? */
-#define ATH_DUMP_SKB	0	/* show skb contents */
-#define AR_DEBUG	1
-
-/* unaligned little endian access */
-#define LE_READ_2(_p) (le16_to_cpu(get_unaligned((__le16 *)(_p))))
-#define LE_READ_4(_p) (le32_to_cpu(get_unaligned((__le32 *)(_p))))
-
-#if AR_DEBUG
-#define DPRINTF(sc, _m, _fmt...) do { \
-	if (unlikely(((sc)->debug & (_m)) && net_ratelimit())) \
-		printk(KERN_DEBUG _fmt); \
-} while (0)
-#else
-static inline int __attribute__ ((format (printf, 3, 4)))
-DPRINTF(struct ath_softc *sc, unsigned int m, const char *fmt, ...)
-{ - return 0; -} -#endif -enum { - ATH_DEBUG_XMIT = 0x00000001, /* basic xmit operation */ - ATH_DEBUG_RESET = 0x00000020, /* reset processing */ - ATH_DEBUG_MODE = 0x00000040, /* mode init/setup */ - ATH_DEBUG_BEACON = 0x00000080, /* beacon handling */ - ATH_DEBUG_INTR = 0x00001000, /* ISR */ - ATH_DEBUG_BEACON_PROC = 0x00008000, /* beacon ISR proc */ - ATH_DEBUG_CALIBRATE = 0x00010000, /* periodic calibration */ - ATH_DEBUG_LED = 0x00100000, /* led management */ - ATH_DEBUG_FATAL = 0x80000000, /* fatal errors */ - ATH_DEBUG_ANY = 0xffffffff -}; - -enum { - ATH_LED_TX, - ATH_LED_RX, -}; - -static int ath_calinterval = ATH_SHORT_CALIB; - -static int countrycode = CTRY_DEFAULT; -static int outdoor = true; -static int xchanmode = true; -module_param(countrycode, int, 0); -MODULE_PARM_DESC(countrycode, "Override default country code"); -module_param(outdoor, int, 0); -MODULE_PARM_DESC(outdoor, "Enable/disable outdoor use"); -module_param(xchanmode, int, 0); -MODULE_PARM_DESC(xchanmode, "Enable/disable extended channel mode"); - -#if AR_DEBUG -static unsigned int ath_debug; -module_param_named(debug, ath_debug, uint, 0); -#endif - -/* - * User a static table of PCI id's for now. While this is the - * "new way" to do things, we may want to switch back to having - * the HAL check them by defining a probe method. - */ -static struct pci_device_id ath_pci_id_table[] __devinitdata = { - { PCI_VDEVICE(ATHEROS, 0x0207), .driver_data = AR5K_AR5210 }, /* 5210 early */ - { PCI_VDEVICE(ATHEROS, 0x0007), .driver_data = AR5K_AR5210 }, /* 5210 */ - { PCI_VDEVICE(ATHEROS, 0x0011), .driver_data = AR5K_AR5211 }, /* 5311 */ - { PCI_VDEVICE(ATHEROS, 0x0012), .driver_data = AR5K_AR5211 }, /* 5211 */ - { PCI_VDEVICE(ATHEROS, 0x0013), .driver_data = AR5K_AR5212 }, /* 5212 */ - { PCI_VDEVICE(3COM_2, 0x0013), .driver_data = AR5K_AR5212 }, /* 3com 5212 */ - { PCI_VDEVICE(3COM, 0x0013), .driver_data = AR5K_AR5212 }, /* 3com 3CRDAG675 5212 */ - { PCI_VDEVICE(ATHEROS, 0x1014), .driver_data = AR5K_AR5212 }, /* IBM minipci 5212 */ - { PCI_VDEVICE(ATHEROS, 0x0014), .driver_data = AR5K_AR5212 }, - { PCI_VDEVICE(ATHEROS, 0x0015), .driver_data = AR5K_AR5212 }, - { PCI_VDEVICE(ATHEROS, 0x0016), .driver_data = AR5K_AR5212 }, - { PCI_VDEVICE(ATHEROS, 0x0017), .driver_data = AR5K_AR5212 }, - { PCI_VDEVICE(ATHEROS, 0x0018), .driver_data = AR5K_AR5212 }, - { PCI_VDEVICE(ATHEROS, 0x0019), .driver_data = AR5K_AR5212 }, - { PCI_VDEVICE(ATHEROS, 0x001a), .driver_data = AR5K_AR5212 }, /* 2413 Griffin-lite */ - { PCI_VDEVICE(ATHEROS, 0x001b), .driver_data = AR5K_AR5212 }, /* 5413 Eagle */ - { PCI_VDEVICE(ATHEROS, 0x001c), .driver_data = AR5K_AR5212 }, /* 5424 Condor (PCI-E)*/ - { 0 } -}; -MODULE_DEVICE_TABLE(pci, ath_pci_id_table); - -static void ath_led_event(struct ath_softc *, int); -static int ath_reset(struct ieee80211_hw *); - -#if AR_DEBUG -static void ath_printrxbuf(struct ath_buf *bf, int done) -{ - struct ath_desc *ds = bf->desc; - - printk(KERN_DEBUG "R (%p %llx) %08x %08x %08x %08x %08x %08x %c\n", - ds, (unsigned long long)bf->daddr, - ds->ds_link, ds->ds_data, ds->ds_ctl0, ds->ds_ctl1, - ds->ds_hw[0], ds->ds_hw[1], - !done ? ' ' : (ds->ds_rxstat.rs_status == 0) ? '*' : '!'); -} - -static void ath_printtxbuf(struct ath_buf *bf, int done) -{ - struct ath_desc *ds = bf->desc; - - printk(KERN_DEBUG "T (%p %llx) %08x %08x %08x %08x %08x %08x %08x " - "%08x %c\n", - ds, (unsigned long long)bf->daddr, - ds->ds_link, ds->ds_data, ds->ds_ctl0, ds->ds_ctl1, - ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3], - !done ? 
' ' : (ds->ds_txstat.ts_status == 0) ? '*' : '!'); -} -#endif - -#if ATH_DUMP_SKB -static inline void ath_dump_skb(struct sk_buff *skb, const char *prefix) -{ - print_hex_dump_bytes(prefix, DUMP_PREFIX_NONE, skb->data, - min(200U, skb->len)); -} -#else -static inline void ath_dump_skb(struct sk_buff *skb, const char *prefix) {} -#endif - -static inline void ath_cleanup_txbuf(struct ath_softc *sc, struct ath_buf *bf) -{ - BUG_ON(!bf); - if (!bf->skb) - return; - pci_unmap_single(sc->pdev, bf->skbaddr, bf->skb->len, - PCI_DMA_TODEVICE); - dev_kfree_skb(bf->skb); - bf->skb = NULL; -} - -static void ath_tasklet_reset(unsigned long data) -{ - struct ath_softc *sc = (void *)data; - - ath_reset(sc->hw); -} - -static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) -{ - struct ieee80211_tx_status txs = {}; - struct ath_buf *bf, *bf0; - struct ath_desc *ds; - struct sk_buff *skb; - int ret; - - spin_lock(&txq->lock); - list_for_each_entry_safe(bf, bf0, &txq->q, list) { - ds = bf->desc; - - /* TODO only one segment */ - pci_dma_sync_single_for_cpu(sc->pdev, sc->desc_daddr, - sc->desc_len, PCI_DMA_FROMDEVICE); - ret = sc->ah->ah_proc_tx_desc(sc->ah, ds); - if (unlikely(ret == -EINPROGRESS)) - break; - else if (unlikely(ret)) { - printk(KERN_ERR "ath: error %d while processing " - "queue %u\n", ret, txq->qnum); - break; - } - - skb = bf->skb; - bf->skb = NULL; - pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, - PCI_DMA_TODEVICE); - - txs.control = bf->ctl; - txs.retry_count = ds->ds_txstat.ts_shortretry + - ds->ds_txstat.ts_longretry / 6; - if (unlikely(ds->ds_txstat.ts_status)) { - sc->ll_stats.dot11ACKFailureCount++; - if (ds->ds_txstat.ts_status & AR5K_TXERR_XRETRY) { - txs.excessive_retries = 1; - } else if (ds->ds_txstat.ts_status & AR5K_TXERR_FILT) { - txs.flags |= IEEE80211_TX_STATUS_TX_FILTERED; - } - } else { - txs.flags |= IEEE80211_TX_STATUS_ACK; - txs.ack_signal = ds->ds_txstat.ts_rssi; - } - - ieee80211_tx_status(sc->hw, skb, &txs); - sc->tx_stats.data[txq->qnum].count++; - -// printk(KERN_DEBUG "DONE skb: %p, rssi: %d, stat: %x, seq: %u, stamp: %u\n", skb, ds->ds_txstat.ts_rssi, ds->ds_txstat.ts_status, ds->ds_txstat.ts_seqnum, ds->ds_txstat.ts_tstamp); - - spin_lock(&sc->txbuflock); - sc->tx_stats.data[txq->qnum].len--; - list_move_tail(&bf->list, &sc->txbuf); - sc->txbuf_len++; - spin_unlock(&sc->txbuflock); - } - if (likely(list_empty(&txq->q))) - txq->link = NULL; - spin_unlock(&txq->lock); - if (sc->txbuf_len > ATH_TXBUF / 5) - ieee80211_wake_queues(sc->hw); -} - -static void ath_tasklet_tx(unsigned long data) -{ - struct ath_softc *sc = (void *)data; - - ath_tx_processq(sc, sc->txq); - - ath_led_event(sc, ATH_LED_TX); -} - -static int ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf) -{ - struct ath_hw *ah = sc->ah; - struct sk_buff *skb = bf->skb; - struct ath_desc *ds; - - if (likely(skb == NULL)) { - unsigned int off; - - /* - * Allocate buffer with headroom_needed space for the - * fake physical layer header at the start. - */ - skb = dev_alloc_skb(sc->rxbufsize + sc->cachelsz - 1); - if (unlikely(skb == NULL)) { - DPRINTF(sc, ATH_DEBUG_ANY, "%s: skbuff alloc of " - "size %u failed\n", __func__, - sc->rxbufsize + sc->cachelsz - 1); - sc->stats.ast_rx_nobuf++; - return -ENOMEM; - } - /* - * Cache-line-align. This is important (for the - * 5210 at least) as not doing so causes bogus data - * in rx'd frames. 
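 *
 * (Illustrative numbers, not part of the original comment: with a
 * 32-byte cache line, a buffer starting at an address ending in
 * 0x0c gives off = 12 below, so skb_reserve(skb, 32 - 12) advances
 * skb->data by 20 bytes to the next cache-line boundary.)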
- */ - off = ((unsigned long)skb->data) % sc->cachelsz; - if (off != 0) - skb_reserve(skb, sc->cachelsz - off); - - bf->skb = skb; - bf->skbaddr = pci_map_single(sc->pdev, - skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE); - if (unlikely(pci_dma_mapping_error(bf->skbaddr))) { - printk(KERN_ERR "%s: DMA mapping failed\n", __func__); - dev_kfree_skb(skb); - bf->skb = NULL; - sc->stats.ast_rx_busdma++; - return -ENOMEM; - } - } - - /* - * Setup descriptors. For receive we always terminate - * the descriptor list with a self-linked entry so we'll - * not get overrun under high load (as can happen with a - * 5212 when ANI processing enables PHY error frames). - * - * To insure the last descriptor is self-linked we create - * each descriptor as self-linked and add it to the end. As - * each additional descriptor is added the previous self-linked - * entry is ``fixed'' naturally. This should be safe even - * if DMA is happening. When processing RX interrupts we - * never remove/process the last, self-linked, entry on the - * descriptor list. This insures the hardware always has - * someplace to write a new frame. - */ - ds = bf->desc; - ds->ds_link = bf->daddr; /* link to self */ - ds->ds_data = bf->skbaddr; - ath5k_hw_setup_rx_desc(ah, ds, - skb_tailroom(skb), /* buffer size */ - 0); - - if (sc->rxlink != NULL) - *sc->rxlink = bf->daddr; - sc->rxlink = &ds->ds_link; - return 0; -} - -static unsigned int ath_rx_decrypted(struct ath_softc *sc, - struct ath_desc *ds, struct sk_buff *skb) -{ - struct ieee80211_hdr *hdr = (void *)skb->data; - unsigned int keyix, hlen = ieee80211_get_hdrlen_from_skb(skb); - - if (!(ds->ds_rxstat.rs_status & AR5K_RXERR_DECRYPT) && - ds->ds_rxstat.rs_keyix != AR5K_RXKEYIX_INVALID) - return RX_FLAG_DECRYPTED; - - /* Apparently when a default key is used to decrypt the packet - the hal does not set the index used to decrypt. In such cases - get the index from the packet. 
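
   (Sketch of the frame layout assumed by the code below, added for
   clarity and not part of the original: for WEP/TKIP the 4-byte IV
   follows the 802.11 header, its last byte is the KeyID octet, and
   the key index lives in that octet's top two bits -- hence the
   skb->data[hlen + 3] >> 6 expression.)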
*/ - if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_PROTECTED) && - !(ds->ds_rxstat.rs_status & AR5K_RXERR_DECRYPT) && - skb->len >= hlen + 4) { - keyix = skb->data[hlen + 3] >> 6; - - if (test_bit(keyix, sc->keymap)) - return RX_FLAG_DECRYPTED; - } - - return 0; -} - -static inline u64 ath_extend_tsf(struct ath_hw *ah, u32 rstamp) -{ - u64 tsf = ath5k_hw_get_tsf64(ah); - - if ((tsf & 0x7fff) < rstamp) - tsf -= 0x8000; - - return (tsf &~ 0x7fff) | rstamp; -} - -static void ath_tasklet_rx(unsigned long data) -{ - struct ieee80211_rx_status rxs = {}; - struct sk_buff *skb; - struct ath_softc *sc = (void *)data; - struct ath_buf *bf; - struct ath_desc *ds; - u16 len; - u8 stat; - int ret; - - spin_lock(&sc->rxbuflock); - do { - if (unlikely(list_empty(&sc->rxbuf))) { - if (net_ratelimit()) - printk(KERN_WARNING "ath: empty rx buf pool\n"); - break; - } - bf = list_first_entry(&sc->rxbuf, struct ath_buf, list); - BUG_ON(bf->skb == NULL); - skb = bf->skb; - ds = bf->desc; - - /* TODO only one segment */ - pci_dma_sync_single_for_cpu(sc->pdev, sc->desc_daddr, - sc->desc_len, PCI_DMA_FROMDEVICE); - - if (unlikely(ds->ds_link == bf->daddr)) /* this is the end */ - break; - - ret = sc->ah->ah_proc_rx_desc(sc->ah, ds); - if (unlikely(ret == -EINPROGRESS)) - break; - else if (unlikely(ret)) { - if (net_ratelimit()) - printk(KERN_ERR "ath: error in processing rx " - "descriptor\n"); - return; - } - - if (unlikely(ds->ds_rxstat.rs_more)) { - if (net_ratelimit()) - printk(KERN_INFO "ath: unsupported jumbo\n"); - goto next; - } - - stat = ds->ds_rxstat.rs_status; - if (unlikely(stat)) { - if (stat & AR5K_RXERR_CRC) - sc->stats.ast_rx_crcerr++; - if (stat & AR5K_RXERR_FIFO) - sc->stats.ast_rx_fifoerr++; - if (stat & AR5K_RXERR_PHY) { - sc->stats.ast_rx_phyerr++; - sc->stats.ast_rx_phy - [ds->ds_rxstat.rs_phyerr & 0x1f]++; - goto next; - } - if (stat & AR5K_RXERR_DECRYPT) { - /* - * Decrypt error. If the error occurred - * because there was no hardware key, then - * let the frame through so the upper layers - * can process it. This is necessary for 5210 - * parts which have no way to setup a ``clear'' - * key cache entry. 
- * - * XXX do key cache faulting - */ - if (ds->ds_rxstat.rs_keyix == - AR5K_RXKEYIX_INVALID && - !(stat & AR5K_RXERR_CRC)) - goto accept; - sc->stats.ast_rx_badcrypt++; - } - if (stat & AR5K_RXERR_MIC) { - rxs.flag |= RX_FLAG_MMIC_ERROR; - sc->stats.ast_rx_badmic++; - goto accept; - } - - /* let crypto-error packets fall through in MNTR */ - if ((stat &~ (AR5K_RXERR_DECRYPT|AR5K_RXERR_MIC)) || - sc->opmode != IEEE80211_IF_TYPE_MNTR) - goto next; - } -accept: - len = ds->ds_rxstat.rs_datalen; - pci_dma_sync_single_for_cpu(sc->pdev, bf->skbaddr, len, - PCI_DMA_FROMDEVICE); - pci_unmap_single(sc->pdev, bf->skbaddr, sc->rxbufsize, - PCI_DMA_FROMDEVICE); - bf->skb = NULL; - - if (unlikely((ieee80211_get_hdrlen_from_skb(skb) & 3) && net_ratelimit())) - printk(KERN_DEBUG "rx len is not %%4: %u\n", ieee80211_get_hdrlen_from_skb(skb)); - - skb_put(skb, len); - - sc->stats.ast_ant_rx[ds->ds_rxstat.rs_antenna]++; - - if (sc->opmode == IEEE80211_IF_TYPE_MNTR) - rxs.mactime = ath_extend_tsf(sc->ah, - ds->ds_rxstat.rs_tstamp); - else - rxs.mactime = ds->ds_rxstat.rs_tstamp; - rxs.freq = sc->curchan->freq; - rxs.channel = sc->curchan->chan; - rxs.phymode = sc->curmode; - rxs.ssi = ds->ds_rxstat.rs_rssi; - rxs.antenna = ds->ds_rxstat.rs_antenna; - rxs.rate = ds->ds_rxstat.rs_rate; - rxs.flag |= ath_rx_decrypted(sc, ds, skb); - -// printk(KERN_DEBUG "stat: %x, dlen: %u (hdr: %u), rssi: %d, rate: %u\n", ds->ds_rxstat.rs_status, len, ieee80211_get_hdrlen_from_skb(skb), ds->ds_rxstat.rs_rssi, ds->ds_rxstat.rs_rate); - ath_dump_skb(skb, "r"); - - __ieee80211_rx(sc->hw, skb, &rxs); - sc->led_rxrate = ds->ds_rxstat.rs_rate; - ath_led_event(sc, ATH_LED_RX); -next: - list_move_tail(&bf->list, &sc->rxbuf); - } while (ath_rxbuf_init(sc, bf) == 0); - spin_unlock(&sc->rxbuflock); -} - -/* - * Setup the beacon frame for transmit. - */ -static int ath_beacon_setup(struct ath_softc *sc, struct ath_buf *bf, - struct ieee80211_tx_control *ctl) -{ - struct sk_buff *skb = bf->skb; - struct ath_hw *ah = sc->ah; - struct ath_desc *ds; - int ret, antenna = 0; - u32 flags; - - bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len, - PCI_DMA_TODEVICE); - DPRINTF(sc, ATH_DEBUG_BEACON, "%s: skb %p [data %p len %u] " - "skbaddr %llx\n", __func__, skb, skb->data, skb->len, - (unsigned long long)bf->skbaddr); - if (pci_dma_mapping_error(bf->skbaddr)) { - printk(KERN_ERR "ath: beacon DMA mapping failed\n"); - return -EIO; - } - - ds = bf->desc; - - flags = AR5K_TXDESC_NOACK; - if (sc->opmode == IEEE80211_IF_TYPE_IBSS && ath5k_hw_hasveol(ah)) { - ds->ds_link = bf->daddr; /* self-linked */ - flags |= AR5K_TXDESC_VEOL; - /* - * Let hardware handle antenna switching if txantenna is not set - */ - } else { - ds->ds_link = 0; - /* - * Switch antenna every 4 beacons if txantenna is not set - * XXX assumes two antenna - */ - if (antenna == 0) { - antenna = (sc->stats.ast_be_xmit & 4 ? 2 : 1); - } - } - - ds->ds_data = bf->skbaddr; - ret = ah->ah_setup_tx_desc(ah, ds, skb->len + FCS_LEN, - ieee80211_get_hdrlen_from_skb(skb), - AR5K_PKT_TYPE_BEACON, 0xffff, ctl->tx_rate, 1, - AR5K_TXKEYIX_INVALID, antenna, flags, 0, 0); - if (ret) - goto err_unmap; - /* NB: beacon's BufLen must be a multiple of 4 bytes */ - ret = ah->ah_fill_tx_desc(ah, ds, roundup(skb->len, 4), true, true); - if (ret) - goto err_unmap; - - return 0; -err_unmap: - pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, PCI_DMA_TODEVICE); - return ret; -} - -/* - * Transmit a beacon frame at SWBA. 
Dynamic updates to the - * frame contents are done as needed and the slot time is - * also adjusted based on current state. - * - * this is usually called from interrupt context (ath_intr()) - * but also from ath_beacon_config() in IBSS mode which in turn - * can be called from a tasklet and user context - */ -static void ath_beacon_send(struct ath_softc *sc) -{ - struct ath_buf *bf = sc->bbuf; - struct ath_hw *ah = sc->ah; - - DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s\n", __func__); - - if (unlikely(bf->skb == NULL || sc->opmode == IEEE80211_IF_TYPE_STA || - sc->opmode == IEEE80211_IF_TYPE_MNTR)) { - DPRINTF(sc, ATH_DEBUG_ANY, "%s: bf=%p bf_skb=%p\n", - __func__, bf, bf ? bf->skb : NULL); - return; - } - /* - * Check if the previous beacon has gone out. If - * not don't don't try to post another, skip this - * period and wait for the next. Missed beacons - * indicate a problem and should not occur. If we - * miss too many consecutive beacons reset the device. - */ - if (unlikely(ath5k_hw_num_tx_pending(ah, sc->bhalq) != 0)) { - sc->bmisscount++; - DPRINTF(sc, ATH_DEBUG_BEACON_PROC, - "%s: missed %u consecutive beacons\n", - __func__, sc->bmisscount); - if (sc->bmisscount > 3) { /* NB: 3 is a guess */ - DPRINTF(sc, ATH_DEBUG_BEACON_PROC, - "%s: stuck beacon time (%u missed)\n", - __func__, sc->bmisscount); - tasklet_schedule(&sc->restq); - } - return; - } - if (unlikely(sc->bmisscount != 0)) { - DPRINTF(sc, ATH_DEBUG_BEACON_PROC, - "%s: resume beacon xmit after %u misses\n", - __func__, sc->bmisscount); - sc->bmisscount = 0; - } - - /* - * Stop any current dma and put the new frame on the queue. - * This should never fail since we check above that no frames - * are still pending on the queue. - */ - if (unlikely(!ath5k_hw_stop_tx_dma(ah, sc->bhalq))) { - DPRINTF(sc, ATH_DEBUG_ANY, "%s: beacon queue %u didn't stop?\n", - __func__, sc->bhalq); - /* NB: the HAL still stops DMA, so proceed */ - } - pci_dma_sync_single_for_cpu(sc->pdev, bf->skbaddr, bf->skb->len, - PCI_DMA_TODEVICE); - - ath5k_hw_put_tx_buf(ah, sc->bhalq, bf->daddr); - ath5k_hw_tx_start(ah, sc->bhalq); - DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: TXDP[%u] = %llx (%p)\n", - __func__, sc->bhalq, (unsigned long long)bf->daddr, bf->desc); - - sc->stats.ast_be_xmit++; -} - -static int ath_beaconq_config(struct ath_softc *sc) -{ - struct ath_hw *ah = sc->ah; - struct ath5k_txq_info qi; - int ret; - - ret = ath5k_hw_get_tx_queueprops(ah, sc->bhalq, &qi); - if (ret) - return ret; - if (sc->opmode == IEEE80211_IF_TYPE_AP || - sc->opmode == IEEE80211_IF_TYPE_IBSS) { - /* - * Always burst out beacon and CAB traffic. - */ - qi.tqi_aifs = ATH_BEACON_AIFS_DEFAULT; - qi.tqi_cw_min = ATH_BEACON_CWMIN_DEFAULT; - qi.tqi_cw_max = ATH_BEACON_CWMAX_DEFAULT; - } - - ret = ath5k_hw_setup_tx_queueprops(ah, sc->bhalq, &qi); - if (ret) { - printk(KERN_ERR "%s: unable to update parameters for beacon " - "hardware queue!\n", __func__); - return ret; - } - - return ath5k_hw_reset_tx_queue(ah, sc->bhalq); /* push to h/w */; -} - -/* - * Configure the beacon and sleep timers. - * - * When operating as an AP this resets the TSF and sets - * up the hardware to notify us when we need to issue beacons. 
- * - * When operating in station mode this sets up the beacon - * timers according to the timestamp of the last received - * beacon and the current TSF, configures PCF and DTIM - * handling, programs the sleep registers so the hardware - * will wakeup in time to receive beacons, and configures - * the beacon miss handling so we'll receive a BMISS - * interrupt when we stop seeing beacons from the AP - * we've associated with. - */ -static void ath_beacon_config(struct ath_softc *sc) -{ -#define TSF_TO_TU(_h,_l) (((_h) << 22) | ((_l) >> 10)) - struct ath_hw *ah = sc->ah; - u32 uninitialized_var(nexttbtt), intval, tsftu; - u64 tsf; - - intval = sc->bintval & AR5K_BEACON_PERIOD; - if (WARN_ON(!intval)) - return; - - /* current TSF converted to TU */ - tsf = ath5k_hw_get_tsf64(ah); - tsftu = TSF_TO_TU((u32)(tsf >> 32), (u32)tsf); - - DPRINTF(sc, ATH_DEBUG_BEACON, "%s: intval %u hw tsftu %u\n", __func__, - intval, tsftu); - - if (sc->opmode == IEEE80211_IF_TYPE_STA) { - ath5k_hw_set_intr(ah, 0); - sc->imask |= AR5K_INT_BMISS; - sc->bmisscount = 0; - ath5k_hw_set_intr(ah, sc->imask); - } else if (sc->opmode == IEEE80211_IF_TYPE_IBSS /* TODO || AP */) { - ath5k_hw_set_intr(ah, 0); - if (sc->opmode == IEEE80211_IF_TYPE_IBSS) { - /* - * Pull nexttbtt forward to reflect the current - * TSF. Add one intval otherwise the timespan - * can be too short for ibss merges. - */ - nexttbtt = tsftu + 2 * intval; - - DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt %u " - "intval %u\n", __func__, nexttbtt, intval); - - /* - * In IBSS mode enable the beacon timers but only - * enable SWBA interrupts if we need to manually - * prepare beacon frames. Otherwise we use a - * self-linked tx descriptor and let the hardware - * deal with things. - */ - if (!ath5k_hw_hasveol(ah)) - sc->imask |= AR5K_INT_SWBA; - } /* TODO else AP */ - - intval |= AR5K_BEACON_ENA; - - ath_beaconq_config(sc); - ath5k_hw_init_beacon(ah, nexttbtt, intval); - - sc->bmisscount = 0; - ath5k_hw_set_intr(ah, sc->imask); - /* - * When using a self-linked beacon descriptor in - * ibss mode load it once here. 
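 *
 * (Aside, an illustrative sketch rather than original code: since
 * one TU is 1024 usec, the TSF_TO_TU() conversion used above is a
 * 64-bit shift split across two 32-bit halves, i.e.
 *
 *	tsftu = (u32)(ath5k_hw_get_tsf64(ah) >> 10);
 *
 * yields the same value as TSF_TO_TU((u32)(tsf >> 32), (u32)tsf).)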
- */ - if (sc->opmode == IEEE80211_IF_TYPE_IBSS && - ath5k_hw_hasveol(ah)) - ath_beacon_send(sc); - } -#undef TSF_TO_TU -} - -/* - * Calculate the receive filter according to the - * operating mode and state: - * - * o always accept unicast, broadcast, and multicast traffic - * o maintain current state of phy error reception (the hal - * may enable phy error frames for noise immunity work) - * o probe request frames are accepted only when operating in - * hostap, adhoc, or monitor modes - * o enable promiscuous mode according to the interface state - * o accept beacons: - * - when operating in adhoc mode so the 802.11 layer creates - * node table entries for peers, - * - when operating in station mode for collecting rssi data when - * the station is otherwise quiet, or - * - when scanning - * o accept any additional packets specified by sc_rxfilter - */ -static u32 ath_calcrxfilter(struct ath_softc *sc) -{ - struct ath_hw *ah = sc->ah; - unsigned int opmode = sc->opmode; - u32 rfilt; - - rfilt = (ath5k_hw_get_rx_filter(ah) & AR5K_RX_FILTER_PHYERR) | - AR5K_RX_FILTER_UCAST | AR5K_RX_FILTER_BCAST | - AR5K_RX_FILTER_MCAST | AR5K_RX_FILTER_RADARERR; - - if (sc->opmode == IEEE80211_IF_TYPE_MNTR) - rfilt |= AR5K_RX_FILTER_CONTROL | AR5K_RX_FILTER_BEACON | - AR5K_RX_FILTER_PROBEREQ | AR5K_RX_FILTER_PROM; - if (opmode != IEEE80211_IF_TYPE_STA) - rfilt |= AR5K_RX_FILTER_PROBEREQ; - if (opmode != IEEE80211_IF_TYPE_AP && sc->promisc) - rfilt |= AR5K_RX_FILTER_PROM; - if (opmode == IEEE80211_IF_TYPE_STA || opmode == IEEE80211_IF_TYPE_IBSS) - rfilt |= AR5K_RX_FILTER_BEACON; - - return rfilt; -} - -static void ath_mode_init(struct ath_softc *sc) -{ - struct ath_hw *ah = sc->ah; - u32 rfilt; - - /* configure rx filter */ - rfilt = ath_calcrxfilter(sc); - ath5k_hw_set_rx_filter(ah, rfilt); - - if (ath5k_hw_hasbssidmask(ah)) - ath5k_hw_set_bssid_mask(ah, sc->bssidmask); - - /* configure operational mode */ - ath5k_hw_set_opmode(ah); - - ath5k_hw_set_mcast_filter(ah, 0, 0); - DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt); -} - -/* - * Enable the receive h/w following a reset. - */ -static int ath_startrecv(struct ath_softc *sc) -{ - struct ath_hw *ah = sc->ah; - struct ath_buf *bf; - int ret; - - sc->rxbufsize = roundup(IEEE80211_MAX_LEN, sc->cachelsz); - - DPRINTF(sc, ATH_DEBUG_RESET, "%s: cachelsz %u rxbufsize %u\n", - __func__, sc->cachelsz, sc->rxbufsize); - - sc->rxlink = NULL; - - spin_lock_bh(&sc->rxbuflock); - list_for_each_entry(bf, &sc->rxbuf, list) { - ret = ath_rxbuf_init(sc, bf); - if (ret != 0) { - spin_unlock_bh(&sc->rxbuflock); - goto err; - } - } - bf = list_first_entry(&sc->rxbuf, struct ath_buf, list); - spin_unlock_bh(&sc->rxbuflock); - - ath5k_hw_put_rx_buf(ah, bf->daddr); - ath5k_hw_start_rx(ah); /* enable recv descriptors */ - ath_mode_init(sc); /* set filters, etc. */ - ath5k_hw_start_rx_pcu(ah); /* re-enable PCU/DMA engine */ - - return 0; -err: - return ret; -} - -static inline void ath_update_txpow(struct ath_softc *sc) -{ - ath5k_hw_set_txpower_limit(sc->ah, 0); -} - -static int ath_stop_locked(struct ath_softc *); - -static int ath_init(struct ath_softc *sc) -{ - int ret; - - mutex_lock(&sc->lock); - - DPRINTF(sc, ATH_DEBUG_RESET, "%s: mode %d\n", __func__, sc->opmode); - - /* - * Stop anything previously setup. This is safe - * no matter this is the first time through or not. - */ - ath_stop_locked(sc); - - /* - * The basic interface to setting the hardware in a good - * state is ``reset''. 
On return the hardware is known to - * be powered up and with interrupts disabled. This must - * be followed by initialization of the appropriate bits - * and then setup of the interrupt mask. - */ - sc->curchan = sc->hw->conf.chan; - ret = ath5k_hw_reset(sc->ah, sc->opmode, sc->curchan, false); - if (ret) { - printk(KERN_ERR "unable to reset hardware: %d\n", ret); - goto done; - } - /* - * This is needed only to setup initial state - * but it's best done after a reset. - */ - ath_update_txpow(sc); - - /* - * Setup the hardware after reset: the key cache - * is filled as needed and the receive engine is - * set going. Frame transmit is handled entirely - * in the frame output path; there's nothing to do - * here except setup the interrupt mask. - */ - ret = ath_startrecv(sc); - if (ret) - goto done; - - /* - * Enable interrupts. - */ - sc->imask = AR5K_INT_RX | AR5K_INT_TX | AR5K_INT_RXEOL | AR5K_INT_RXORN - | AR5K_INT_FATAL | AR5K_INT_GLOBAL; - - ath5k_hw_set_intr(sc->ah, sc->imask); - - mod_timer(&sc->calib_tim, round_jiffies(jiffies + - msecs_to_jiffies(ath_calinterval * 1000))); - - ret = 0; -done: - mutex_unlock(&sc->lock); - return ret; -} - -/* - * Disable the receive h/w in preparation for a reset. - */ -static void ath_stoprecv(struct ath_softc *sc) -{ - struct ath_hw *ah = sc->ah; - - ath5k_hw_stop_pcu_recv(ah); /* disable PCU */ - ath5k_hw_set_rx_filter(ah, 0); /* clear recv filter */ - ath5k_hw_stop_rx_dma(ah); /* disable DMA engine */ - mdelay(3); /* 3ms is long enough for 1 frame */ -#if AR_DEBUG - if (unlikely(sc->debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL))) { - struct ath_desc *ds; - struct ath_buf *bf; - int status; - - printk(KERN_DEBUG "%s: rx queue %x, link %p\n", __func__, - ath5k_hw_get_rx_buf(ah), sc->rxlink); - - spin_lock_bh(&sc->rxbuflock); - list_for_each_entry(bf, &sc->rxbuf, list) { - ds = bf->desc; - status = ah->ah_proc_rx_desc(ah, ds); - if (!status || (sc->debug & ATH_DEBUG_FATAL)) - ath_printrxbuf(bf, status == 0); - } - spin_unlock_bh(&sc->rxbuflock); - } -#endif - sc->rxlink = NULL; /* just in case */ -} - -static void ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq) -{ - struct ath_buf *bf, *bf0; - - /* - * NB: this assumes output has been stopped and - * we do not need to block ath_tx_tasklet - */ - spin_lock_bh(&txq->lock); - list_for_each_entry_safe(bf, bf0, &txq->q, list) { -#if AR_DEBUG - if (sc->debug & ATH_DEBUG_RESET) - ath_printtxbuf(bf, !sc->ah->ah_proc_tx_desc(sc->ah, - bf->desc)); -#endif - ath_cleanup_txbuf(sc, bf); - - spin_lock_bh(&sc->txbuflock); - sc->tx_stats.data[txq->qnum].len--; - list_move_tail(&bf->list, &sc->txbuf); - sc->txbuf_len++; - spin_unlock_bh(&sc->txbuflock); - } - txq->link = NULL; - spin_unlock_bh(&txq->lock); -} - -/* - * Drain the transmit queues and reclaim resources. 
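 *
 * (Descriptive note inferred from the code below: the beacon
 * queue's DMA is stopped first, then each configured data queue's
 * DMA, and only afterwards are queued buffers reclaimed via
 * ath_tx_draintxq(), so no descriptor is recycled while the
 * hardware might still be reading it.)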
- */ -static void ath_draintxq(struct ath_softc *sc) -{ - struct ath_hw *ah = sc->ah; - int i; - - /* XXX return value */ - if (likely(!sc->invalid)) { - /* don't touch the hardware if marked invalid */ - (void)ath5k_hw_stop_tx_dma(ah, sc->bhalq); - DPRINTF(sc, ATH_DEBUG_RESET, "%s: beacon queue %x\n", __func__, - ath5k_hw_get_tx_buf(ah, sc->bhalq)); - for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) - if (sc->txqs[i].setup) { - ath5k_hw_stop_tx_dma(ah, sc->txqs[i].qnum); - DPRINTF(sc, ATH_DEBUG_RESET, "%s: txq [%u] %x, " - "link %p\n", __func__, - sc->txqs[i].qnum, - ath5k_hw_get_tx_buf(ah, - sc->txqs[i].qnum), - sc->txqs[i].link); - } - } - ieee80211_start_queues(sc->hw); /* XXX move to callers */ - - for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) - if (sc->txqs[i].setup) - ath_tx_draintxq(sc, &sc->txqs[i]); -} - -static int ath_stop_locked(struct ath_softc *sc) -{ - struct ath_hw *ah = sc->ah; - - DPRINTF(sc, ATH_DEBUG_RESET, "%s: invalid %u\n", __func__, sc->invalid); - - /* - * Shutdown the hardware and driver: - * stop output from above - * disable interrupts - * turn off timers - * turn off the radio - * clear transmit machinery - * clear receive machinery - * drain and release tx queues - * reclaim beacon resources - * power down hardware - * - * Note that some of this work is not possible if the - * hardware is gone (invalid). - */ - ieee80211_stop_queues(sc->hw); - - if (!sc->invalid) { - if (sc->led_soft) { - del_timer_sync(&sc->led_tim); - ath5k_hw_set_gpio(ah, sc->led_pin, !sc->led_on); - sc->led_blinking = 0; - } - ath5k_hw_set_intr(ah, 0); - } - ath_draintxq(sc); - if (!sc->invalid) { - ath_stoprecv(sc); - ath5k_hw_phy_disable(ah); - } else - sc->rxlink = NULL; -// ath_beacon_free(sc); - - return 0; -} - -/* - * Stop the device, grabbing the top-level lock to protect - * against concurrent entry through ath_init (which can happen - * if another thread does a system call and the thread doing the - * stop is preempted). - */ -static int ath_stop_hw(struct ath_softc *sc) -{ - int ret; - - mutex_lock(&sc->lock); - ret = ath_stop_locked(sc); - if (ret == 0 && !sc->invalid) { - /* - * Set the chip in full sleep mode. Note that we are - * careful to do this only when bringing the interface - * completely to a stop. When the chip is in this state - * it must be carefully woken up or references to - * registers in the PCI clock domain may freeze the bus - * (and system). This varies by chip and is mostly an - * issue with newer parts that go to sleep more quickly. 
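 *
 * (NB, an illustrative sketch and not original code: the test below
 * skips sleep only for MAC x.8 and later with x >= 7, so e.g. a
 * hypothetical MAC 8.0 would still be put to sleep; a strict
 * "newer than 7.8" comparison would read
 *
 *	newer = mac_version > 7 ||
 *		(mac_version == 7 && mac_revision >= 8);
 * )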
- */ - if (sc->ah->ah_mac_version >= 7 && sc->ah->ah_mac_revision >= 8) { - /* - * XXX - * don't put newer MAC revisions > 7.8 to sleep because - * of the above mentioned problems - */ - DPRINTF(sc, ATH_DEBUG_RESET, "%s: mac version > 7.8, " - "not putting device to sleep\n", __func__); - } - else { - DPRINTF(sc, ATH_DEBUG_RESET, - "%s: putting device to full sleep\n", __func__); - ath5k_hw_set_power(sc->ah, AR5K_PM_FULL_SLEEP, true, 0); - } - } - ath_cleanup_txbuf(sc, sc->bbuf); - mutex_unlock(&sc->lock); - - del_timer_sync(&sc->calib_tim); - - return ret; -} - -static void ath_setcurmode(struct ath_softc *sc, unsigned int mode) -{ - if (unlikely(sc->led_soft)) { - /* from Atheros NDIS driver, w/ permission */ - static const struct { - u16 rate; /* tx/rx 802.11 rate */ - u16 timeOn; /* LED on time (ms) */ - u16 timeOff; /* LED off time (ms) */ - } blinkrates[] = { - { 108, 40, 10 }, - { 96, 44, 11 }, - { 72, 50, 13 }, - { 48, 57, 14 }, - { 36, 67, 16 }, - { 24, 80, 20 }, - { 22, 100, 25 }, - { 18, 133, 34 }, - { 12, 160, 40 }, - { 10, 200, 50 }, - { 6, 240, 58 }, - { 4, 267, 66 }, - { 2, 400, 100 }, - { 0, 500, 130 } - }; - const struct ath5k_rate_table* rt = - ath5k_hw_get_rate_table(sc->ah, mode); - unsigned int i, j; - - BUG_ON(rt == NULL); - - memset(sc->hwmap, 0, sizeof(sc->hwmap)); - for (i = 0; i < 32; i++) { - u8 ix = rt->rate_code_to_index[i]; - if (ix == 0xff) { - sc->hwmap[i].ledon = msecs_to_jiffies(500); - sc->hwmap[i].ledoff = msecs_to_jiffies(130); - continue; - } - sc->hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD; - if (SHPREAMBLE_FLAG(ix) || rt->rates[ix].modulation == - MODULATION_OFDM) - sc->hwmap[i].txflags |= - IEEE80211_RADIOTAP_F_SHORTPRE; - /* receive frames include FCS */ - sc->hwmap[i].rxflags = sc->hwmap[i].txflags | - IEEE80211_RADIOTAP_F_FCS; - /* setup blink rate table to avoid per-packet lookup */ - for (j = 0; j < ARRAY_SIZE(blinkrates) - 1; j++) - if (blinkrates[j].rate == /* XXX why 7f? */ - (rt->rates[ix].dot11_rate&0x7f)) - break; - - sc->hwmap[i].ledon = msecs_to_jiffies(blinkrates[j]. - timeOn); - sc->hwmap[i].ledoff = msecs_to_jiffies(blinkrates[j]. - timeOff); - } - } - - sc->curmode = mode; -} - -/* - * Set/change channels. If the channel is really being changed, - * it's done by reseting the chip. To accomplish this we must - * first cleanup any pending DMA, then restart stuff after a la - * ath_init. - */ -static int ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan) -{ - struct ath_hw *ah = sc->ah; - int ret; - - DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz) -> %u (%u MHz)\n", - __func__, sc->curchan->chan, sc->curchan->freq, - chan->chan, chan->freq); - - if (chan->freq != sc->curchan->freq || chan->val != sc->curchan->val) { - /* - * To switch channels clear any pending DMA operations; - * wait long enough for the RX fifo to drain, reset the - * hardware at the new frequency, and then re-enable - * the relevant bits of the h/w. - */ - ath5k_hw_set_intr(ah, 0); /* disable interrupts */ - ath_draintxq(sc); /* clear pending tx frames */ - ath_stoprecv(sc); /* turn off frame recv */ - ret = ath5k_hw_reset(ah, sc->opmode, chan, true); - if (ret) { - printk(KERN_ERR "%s: unable to reset channel %u " - "(%u Mhz)\n", __func__, chan->chan, chan->freq); - return ret; - } - sc->curchan = chan; - ath_update_txpow(sc); - - /* - * Re-enable rx framework. 
- */ - ret = ath_startrecv(sc); - if (ret) { - printk(KERN_ERR "%s: unable to restart recv logic\n", - __func__); - return ret; - } - - /* - * Change channels and update the h/w rate map - * if we're switching; e.g. 11a to 11b/g. - */ -// ath_chan_change(sc, chan); - - /* - * Re-enable interrupts. - */ - ath5k_hw_set_intr(ah, sc->imask); - } - - return 0; -} - -static int ath_tx_bf(struct ath_softc *sc, struct ath_buf *bf, - struct ieee80211_tx_control *ctl) -{ - struct ath_hw *ah = sc->ah; - struct ath_txq *txq = sc->txq; - struct ath_desc *ds = bf->desc; - struct sk_buff *skb = bf->skb; - unsigned int hdrpad, pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID; - int ret; - - flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK; - bf->ctl = *ctl; - /* XXX endianness */ - bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len, - PCI_DMA_TODEVICE); - - if (ctl->flags & IEEE80211_TXCTL_NO_ACK) - flags |= AR5K_TXDESC_NOACK; - - if ((ieee80211_get_hdrlen_from_skb(skb) & 3) && net_ratelimit()) - printk(KERN_DEBUG "tx len is not %%4: %u\n", ieee80211_get_hdrlen_from_skb(skb)); - - hdrpad = 0; - pktlen = skb->len - hdrpad + FCS_LEN; - - if (ctl->key_idx != HW_KEY_IDX_INVALID) { - keyidx = ctl->key_idx; - pktlen += ctl->icv_len; - } - - ret = ah->ah_setup_tx_desc(ah, ds, pktlen, - ieee80211_get_hdrlen_from_skb(skb), AR5K_PKT_TYPE_NORMAL, - 0xffff, ctl->tx_rate, ctl->retry_limit, keyidx, 0, flags, 0, 0); - if (ret) - goto err_unmap; - - ds->ds_link = 0; - ds->ds_data = bf->skbaddr; - - ret = ah->ah_fill_tx_desc(ah, ds, skb->len, true, true); - if (ret) - goto err_unmap; - - spin_lock_bh(&txq->lock); - list_add_tail(&bf->list, &txq->q); - sc->tx_stats.data[txq->qnum].len++; - if (txq->link == NULL) /* is this first packet? */ - ath5k_hw_put_tx_buf(ah, txq->qnum, bf->daddr); - else /* no, so only link it */ - *txq->link = bf->daddr; - - txq->link = &ds->ds_link; - ath5k_hw_tx_start(ah, txq->qnum); - spin_unlock_bh(&txq->lock); - -// printk(KERN_DEBUG "bf: %p, skb: %p, flags: %x, daddr: %x, dlink: %x, tlink: %x\n", bf, skb, flags, bf->daddr, ds->ds_link, *txq->link); - - return 0; -err_unmap: - pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, PCI_DMA_TODEVICE); - return ret; -} - -static int ath_tx(struct ieee80211_hw *hw, struct sk_buff *skb, - struct ieee80211_tx_control *ctl) -{ - struct ath_softc *sc = hw->priv; - struct ath_buf *bf; - unsigned long flags; - - ath_dump_skb(skb, "t"); - - if (sc->opmode == IEEE80211_IF_TYPE_MNTR) - DPRINTF(sc, ATH_DEBUG_XMIT, "tx in monitor (scan?)\n"); - - sc->led_txrate = ctl->tx_rate; - - spin_lock_irqsave(&sc->txbuflock, flags); - if (list_empty(&sc->txbuf)) { - if (net_ratelimit()) - printk(KERN_ERR "ath: no further txbuf available, " - "dropping packet\n"); - sc->stats.ast_tx_nobuf++; - spin_unlock_irqrestore(&sc->txbuflock, flags); - ieee80211_stop_queue(hw, ctl->queue); - return -1; - } - bf = list_first_entry(&sc->txbuf, struct ath_buf, list); - list_del(&bf->list); - sc->txbuf_len--; - if (list_empty(&sc->txbuf)) { - sc->stats.ast_tx_qstop++; - ieee80211_stop_queues(hw); - } - spin_unlock_irqrestore(&sc->txbuflock, flags); - - bf->skb = skb; - - if (ath_tx_bf(sc, bf, ctl)) { - bf->skb = NULL; - spin_lock_irqsave(&sc->txbuflock, flags); - list_add_tail(&bf->list, &sc->txbuf); - sc->txbuf_len++; - spin_unlock_irqrestore(&sc->txbuflock, flags); - dev_kfree_skb_any(skb); - return 0; - } - - return 0; -} - -static int ath_reset(struct ieee80211_hw *hw) -{ - struct ath_softc *sc = hw->priv; - struct ath_hw *ah = sc->ah; - int ret; - - DPRINTF(sc, ATH_DEBUG_RESET, 
"resetting\n"); - /* - * Convert to a HAL channel description with the flags - * constrained to reflect the current operating mode. - */ - sc->curchan = hw->conf.chan; - - ath5k_hw_set_intr(ah, 0); - ath_draintxq(sc); - ath_stoprecv(sc); - - ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, true); - if (unlikely(ret)) { - printk(KERN_ERR "ath: can't reset hardware (%d)\n", ret); - goto err; - } - ath_update_txpow(sc); - - ret = ath_startrecv(sc); - if (unlikely(ret)) { - printk(KERN_ERR "ath: can't start recv logic\n"); - goto err; - } - /* - * We may be doing a reset in response to an ioctl - * that changes the channel so update any state that - * might change as a result. - */ -// ath_chan_change(sc, c); - ath_beacon_config(sc); - /* intrs are started by ath_beacon_config */ - - ieee80211_wake_queues(hw); - - return 0; -err: - return ret; -} - -static int ath_open(struct ieee80211_hw *hw) -{ - return ath_init(hw->priv); -} - -static int ath_stop(struct ieee80211_hw *hw) -{ - return ath_stop_hw(hw->priv); -} - -static int ath_add_interface(struct ieee80211_hw *hw, - struct ieee80211_if_init_conf *conf) -{ - struct ath_softc *sc = hw->priv; - int ret; - - mutex_lock(&sc->lock); - if (sc->iface_id) { - ret = 0; - goto end; - } - - sc->iface_id = conf->if_id; - - switch (conf->type) { - case IEEE80211_IF_TYPE_STA: - case IEEE80211_IF_TYPE_IBSS: - case IEEE80211_IF_TYPE_MNTR: - sc->opmode = conf->type; - break; - default: - ret = -EOPNOTSUPP; - goto end; - } - ret = 0; -end: - mutex_unlock(&sc->lock); - return ret; -} - -static void ath_remove_interface(struct ieee80211_hw *hw, - struct ieee80211_if_init_conf *conf) -{ - struct ath_softc *sc = hw->priv; - - mutex_lock(&sc->lock); - if (sc->iface_id != conf->if_id) { - goto end; - } - - sc->iface_id = 0; -end: - mutex_unlock(&sc->lock); -} - -static int ath_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf) -{ - struct ath_softc *sc = hw->priv; - - sc->bintval = conf->beacon_int * 1000 / 1024; - ath_setcurmode(sc, conf->phymode); - - return ath_chan_set(sc, conf->chan); -} - -static int ath_config_interface(struct ieee80211_hw *hw, int if_id, - struct ieee80211_if_conf *conf) -{ - struct ath_softc *sc = hw->priv; - int ret; - - mutex_lock(&sc->lock); - if (sc->iface_id != if_id) { - ret = -EIO; - goto unlock; - } - if (conf->bssid) - ath5k_hw_set_associd(sc->ah, conf->bssid, 0 /* FIXME: aid */); - mutex_unlock(&sc->lock); - - return ath_reset(hw); -unlock: - mutex_unlock(&sc->lock); - return ret; -} - -static void ath_set_multicast_list(struct ieee80211_hw *hw, - unsigned short flags, int mc_count) -{ - struct ath_softc *sc = hw->priv; - unsigned int prom = !!(flags & IFF_PROMISC); - u32 rfilt; - - if (sc->promisc != prom) { - sc->promisc = prom; - rfilt = ath_calcrxfilter(sc); - ath5k_hw_set_rx_filter(sc->ah, rfilt); - } -} - -static int ath_set_key(struct ieee80211_hw *hw, set_key_cmd cmd, - u8 *addr, struct ieee80211_key_conf *key, int aid) -{ - struct ath_softc *sc = hw->priv; - int ret = 0; - - mutex_lock(&sc->lock); - - switch (cmd) { - case SET_KEY: - if (key->alg != ALG_WEP && key->alg != ALG_NONE && - key->alg != ALG_NULL) { - ret = -EINVAL; - goto unlock; - } - - ret = ath5k_hw_set_key(sc->ah, key->keyidx, key, addr); - if (ret) { - printk(KERN_ERR "ath: can't set the key\n"); - goto unlock; - } - - set_bit(key->keyidx, sc->keymap); - key->hw_key_idx = key->keyidx; - key->flags &= ~IEEE80211_KEY_FORCE_SW_ENCRYPT; - break; - case DISABLE_KEY: - ath5k_hw_reset_key(sc->ah, key->keyidx); - clear_bit(key->keyidx, sc->keymap); - 
break; - case REMOVE_ALL_KEYS: { - unsigned int i; - for (i = 0; i < AR5K_KEYCACHE_SIZE; i++) { - ath5k_hw_reset_key(sc->ah, i); - clear_bit(i, sc->keymap); - } - break; - } - default: - ret = -EINVAL; - goto unlock; - } - -unlock: - mutex_unlock(&sc->lock); - return ret; -} - -static int ath_get_stats(struct ieee80211_hw *hw, - struct ieee80211_low_level_stats *stats) -{ - struct ath_softc *sc = hw->priv; - - memcpy(stats, &sc->ll_stats, sizeof(sc->ll_stats)); - - return 0; -} - -static int ath_get_tx_stats(struct ieee80211_hw *hw, - struct ieee80211_tx_queue_stats *stats) -{ - struct ath_softc *sc = hw->priv; - - memcpy(stats, &sc->tx_stats, sizeof(sc->tx_stats)); - - return 0; -} - -static u64 ath_get_tsf(struct ieee80211_hw *hw) -{ - struct ath_softc *sc = hw->priv; - - return ath5k_hw_get_tsf64(sc->ah); -} - -static void ath_reset_tsf(struct ieee80211_hw *hw) -{ - struct ath_softc *sc = hw->priv; - - ath5k_hw_reset_tsf(sc->ah); -} - -static int ath_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, - struct ieee80211_tx_control *ctl) -{ - struct ath_softc *sc = hw->priv; - int ret; - - ath_dump_skb(skb, "b"); - - mutex_lock(&sc->lock); - - if (sc->opmode != IEEE80211_IF_TYPE_IBSS) { - ret = -EIO; - goto end; - } - - ath_cleanup_txbuf(sc, sc->bbuf); - sc->bbuf->skb = skb; - ret = ath_beacon_setup(sc, sc->bbuf, ctl); - if (ret) - sc->bbuf->skb = NULL; - -end: - mutex_unlock(&sc->lock); - return ret; -} - -static struct ieee80211_ops ath_hw_ops = { - .tx = ath_tx, - .reset = ath_reset, - .open = ath_open, - .stop = ath_stop, - .add_interface = ath_add_interface, - .remove_interface = ath_remove_interface, - .config = ath_config, - .config_interface = ath_config_interface, - .set_multicast_list = ath_set_multicast_list, - .set_key = ath_set_key, - .get_stats = ath_get_stats, - .conf_tx = NULL, - .get_tx_stats = ath_get_tx_stats, - .get_tsf = ath_get_tsf, - .reset_tsf = ath_reset_tsf, - .beacon_update = ath_beacon_update, -}; - -/* - * Periodically recalibrate the PHY to account - * for temperature/environment changes. - */ -static void ath_calibrate(unsigned long data) -{ - struct ath_softc *sc = (void *)data; - struct ath_hw *ah = sc->ah; - - sc->stats.ast_per_cal++; - - DPRINTF(sc, ATH_DEBUG_CALIBRATE, "ath: channel %u/%x\n", - sc->curchan->chan, sc->curchan->val); - - if (ath5k_hw_get_rf_gain(ah) == AR5K_RFGAIN_NEED_CHANGE) { - /* - * Rfgain is out of bounds, reset the chip - * to load new gain values. - */ - sc->stats.ast_per_rfgain++; - DPRINTF(sc, ATH_DEBUG_RESET, "calibration, resetting\n"); - ath_reset(sc->hw); - } - if (ath5k_hw_phy_calibrate(ah, sc->curchan)) { - DPRINTF(sc, ATH_DEBUG_ANY, "ath: calibration of channel %u " - "failed\n", sc->curchan->chan); - sc->stats.ast_per_calfail++; - } - - mod_timer(&sc->calib_tim, round_jiffies(jiffies + - msecs_to_jiffies(ath_calinterval * 1000))); -} - -static void ath_led_off(unsigned long data) -{ - struct ath_softc *sc = (void *)data; - - if (sc->led_endblink) - sc->led_blinking = 0; - else { - sc->led_endblink = 1; - ath5k_hw_set_gpio(sc->ah, sc->led_pin, !sc->led_on); - mod_timer(&sc->led_tim, jiffies + sc->led_off); - } -} - -/* - * Blink the LED according to the specified on/off times. 
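 *
 * (Usage example, illustrative only, with values taken from the
 * blinkrates[] table in ath_setcurmode(): a 54 Mbit rate maps to
 * 40 ms on / 10 ms off, i.e.
 *
 *	ath_led_blink(sc, msecs_to_jiffies(40), msecs_to_jiffies(10));
 * )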
- */ -static void ath_led_blink(struct ath_softc *sc, unsigned int on, - unsigned int off) -{ - DPRINTF(sc, ATH_DEBUG_LED, "%s: on %u off %u\n", __func__, on, off); - ath5k_hw_set_gpio(sc->ah, sc->led_pin, sc->led_on); - sc->led_blinking = 1; - sc->led_endblink = 0; - sc->led_off = off; - mod_timer(&sc->led_tim, jiffies + on); -} - -static void ath_led_event(struct ath_softc *sc, int event) -{ - if (likely(!sc->led_soft)) - return; - if (unlikely(sc->led_blinking)) /* don't interrupt active blink */ - return; - switch (event) { - case ATH_LED_TX: - ath_led_blink(sc, sc->hwmap[sc->led_txrate].ledon, - sc->hwmap[sc->led_txrate].ledoff); - break; - case ATH_LED_RX: - ath_led_blink(sc, sc->hwmap[sc->led_rxrate].ledon, - sc->hwmap[sc->led_rxrate].ledoff); - break; - } -} - -static irqreturn_t ath_intr(int irq, void *dev_id) -{ - struct ath_softc *sc = dev_id; - struct ath_hw *ah = sc->ah; - enum ath5k_int status; - unsigned int counter = 1000; - - if (unlikely(sc->invalid || !ath5k_hw_is_intr_pending(ah))) - return IRQ_NONE; - - do { - /* - * Figure out the reason(s) for the interrupt. Note - * that the hal returns a pseudo-ISR that may include - * bits we haven't explicitly enabled so we mask the - * value to insure we only process bits we requested. - */ - ath5k_hw_get_isr(ah, &status); /* NB: clears IRQ too */ - DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x/0x%x\n", __func__, - status, sc->imask); - status &= sc->imask; /* discard unasked for bits */ - if (unlikely(status & AR5K_INT_FATAL)) { - /* - * Fatal errors are unrecoverable. Typically - * these are caused by DMA errors. Unfortunately - * the exact reason is not (presently) returned - * by the hal. - */ - sc->stats.ast_hardware++; - tasklet_schedule(&sc->restq); - } else if (unlikely(status & AR5K_INT_RXORN)) { - sc->stats.ast_rxorn++; - tasklet_schedule(&sc->restq); - } else { - if (status & AR5K_INT_SWBA) { - /* - * Software beacon alert--time to send a beacon. - * Handle beacon transmission directly; deferring - * this is too slow to meet timing constraints - * under load. - */ - ath_beacon_send(sc); - } - if (status & AR5K_INT_RXEOL) { - /* - * NB: the hardware should re-read the link when - * RXE bit is written, but it doesn't work at - * least on older hardware revs. - */ - sc->stats.ast_rxeol++; - sc->rxlink = NULL; - } - if (status & AR5K_INT_TXURN) { - sc->stats.ast_txurn++; - /* bump tx trigger level */ - ath5k_hw_update_tx_triglevel(ah, true); - } - if (status & AR5K_INT_RX) - tasklet_schedule(&sc->rxtq); - if (status & AR5K_INT_TX) - tasklet_schedule(&sc->txtq); - if (status & AR5K_INT_BMISS) { - sc->stats.ast_bmiss++; -/* tasklet_schedule(&sc->bmisstq);*/ - } - if (status & AR5K_INT_MIB) { - sc->stats.ast_mib++; - /* TODO */ - } - } - } while (ath5k_hw_is_intr_pending(ah) && counter-- > 0); - - if (unlikely(!counter && net_ratelimit())) - printk(KERN_WARNING "ath: too many interrupts, giving up for " - "now\n"); - - return IRQ_HANDLED; -} - -/* - * Convert IEEE channel number to MHz frequency. 
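 *
 * (Spot checks, illustrative and assuming the usual
 * ieee80211chan2mhz() mapping: channel 6 -> 2437 MHz and channel
 * 36 -> 5180 MHz; the in-between channels 15-26 use the
 * 2212 + chan * 20 formula below, e.g. channel 15 -> 2512 MHz.)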
- */ -static inline short ath_ieee2mhz(short chan) -{ - if (chan <= 14 || chan >= 27) - return ieee80211chan2mhz(chan); - else - return 2212 + chan * 20; -} - -static unsigned int ath_copy_rates(struct ieee80211_rate *rates, - const struct ath5k_rate_table *rt, unsigned int max) -{ - unsigned int i, count; - - if (rt == NULL) - return 0; - - for (i = 0, count = 0; i < rt->rate_count && max > 0; i++) { - if (!rt->rates[i].valid) - continue; - rates->rate = rt->rates[i].rate_kbps / 100; - rates->val = rt->rates[i].rate_code; - rates->flags = rt->rates[i].modulation; - rates++; - count++; - max--; - } - - return count; -} - -static unsigned int ath_copy_channels(struct ath_hw *ah, - struct ieee80211_channel *channels, unsigned int mode, - unsigned int max) -{ - static const struct { unsigned int mode, mask, chan; } map[] = { - [MODE_IEEE80211A] = { CHANNEL_OFDM, CHANNEL_OFDM | CHANNEL_TURBO, CHANNEL_A }, - [MODE_ATHEROS_TURBO] = { CHANNEL_OFDM|CHANNEL_TURBO, CHANNEL_OFDM | CHANNEL_TURBO, CHANNEL_T }, - [MODE_IEEE80211B] = { CHANNEL_CCK, CHANNEL_CCK, CHANNEL_B }, - [MODE_IEEE80211G] = { CHANNEL_OFDM, CHANNEL_OFDM, CHANNEL_G }, - [MODE_ATHEROS_TURBOG] = { CHANNEL_OFDM | CHANNEL_TURBO, CHANNEL_OFDM | CHANNEL_TURBO, CHANNEL_TG }, - }; - static const struct ath5k_regchannel chans_2ghz[] = - IEEE80211_CHANNELS_2GHZ; - static const struct ath5k_regchannel chans_5ghz[] = - IEEE80211_CHANNELS_5GHZ; - const struct ath5k_regchannel *chans; - enum ath5k_regdom dmn; - unsigned int i, count, size, chfreq, all, f, ch; - - if (!test_bit(mode, ah->ah_modes)) - return 0; - - all = ah->ah_regdomain == DMN_DEFAULT || CHAN_DEBUG == 1; - - switch (mode) { - case MODE_IEEE80211A: - case MODE_ATHEROS_TURBO: - /* 1..220, but 2GHz frequencies are filtered by check_channel */ - size = all ? 220 : ARRAY_SIZE(chans_5ghz); - chans = chans_5ghz; - dmn = ath5k_regdom2flag(ah->ah_regdomain, - IEEE80211_CHANNELS_5GHZ_MIN); - chfreq = CHANNEL_5GHZ; - break; - case MODE_IEEE80211B: - case MODE_IEEE80211G: - case MODE_ATHEROS_TURBOG: - size = all ? 26 : ARRAY_SIZE(chans_2ghz); - chans = chans_2ghz; - dmn = ath5k_regdom2flag(ah->ah_regdomain, - IEEE80211_CHANNELS_2GHZ_MIN); - chfreq = CHANNEL_2GHZ; - break; - default: - printk(KERN_WARNING "bad mode, not copying channels\n"); - return 0; - } - - for (i = 0, count = 0; i < size && max > 0; i++) { - ch = all ? 
i + 1 : chans[i].chan; - f = ath_ieee2mhz(ch); - /* Check if channel is supported by the chipset */ - if (!ath5k_channel_ok(ah, f, chfreq)) - continue; - - /* Match regulation domain */ - if (!all && !(IEEE80211_DMN(chans[i].domain) & - IEEE80211_DMN(dmn))) - continue; - - if (!all && (chans[i].mode & map[mode].mask) != map[mode].mode) - continue; - - /* Write channel and increment counter */ - channels->chan = ch; - channels->freq = f; - channels->val = map[mode].chan; - channels++; - count++; - max--; - } - - return count; -} - -#if ATH_DEBUG_MODES -static void ath_dump_modes(struct ieee80211_hw_mode *modes) -{ - unsigned int m, i; - - for (m = 0; m < NUM_IEEE80211_MODES; m++) { - printk(KERN_DEBUG "Mode %u: channels %d, rates %d\n", m, - modes[m].num_channels, modes[m].num_rates); - printk(KERN_DEBUG " channels:\n"); - for (i = 0; i < modes[m].num_channels; i++) { - printk(KERN_DEBUG " %3d %d %.4x %.4x\n", - modes[m].channels[i].chan, - modes[m].channels[i].freq, - modes[m].channels[i].val, - modes[m].channels[i].flag); - } - printk(KERN_DEBUG " rates:\n"); - for (i = 0; i < modes[m].num_rates; i++) { - printk(KERN_DEBUG " %4d %.4x %.4x %.4x\n", - modes[m].rates[i].rate, - modes[m].rates[i].val, - modes[m].rates[i].flags, - modes[m].rates[i].val2); - } - } -} -#else -static inline void ath_dump_modes(struct ieee80211_hw_mode *modes) {} -#endif - -static int ath_getchannels(struct ieee80211_hw *hw) -{ - struct ath_softc *sc = hw->priv; - struct ath_hw *ah = sc->ah; - struct ieee80211_hw_mode *modes = sc->modes; - unsigned int i, max; - int ret; - enum { - A = MODE_IEEE80211A, - B = MODE_IEEE80211G, /* this is not a typo, but workaround */ - G = MODE_IEEE80211B, /* to prefer g over b */ - T = MODE_ATHEROS_TURBO, - TG = MODE_ATHEROS_TURBOG, - }; - - BUILD_BUG_ON(ARRAY_SIZE(sc->modes) < 5); - - ah->ah_country_code = countrycode; - - modes[A].mode = MODE_IEEE80211A; - modes[B].mode = MODE_IEEE80211B; - modes[G].mode = MODE_IEEE80211G; - - max = ARRAY_SIZE(sc->rates); - modes[A].rates = sc->rates; - max -= modes[A].num_rates = ath_copy_rates(modes[A].rates, - ath5k_hw_get_rate_table(ah, MODE_IEEE80211A), max); - modes[B].rates = &modes[A].rates[modes[A].num_rates]; - max -= modes[B].num_rates = ath_copy_rates(modes[B].rates, - ath5k_hw_get_rate_table(ah, MODE_IEEE80211B), max); - modes[G].rates = &modes[B].rates[modes[B].num_rates]; - max -= modes[G].num_rates = ath_copy_rates(modes[G].rates, - ath5k_hw_get_rate_table(ah, MODE_IEEE80211G), max); - - if (!max) - printk(KERN_WARNING "yet another rates found, but there is not " - "sufficient space to store them\n"); - - max = ARRAY_SIZE(sc->channels); - modes[A].channels = sc->channels; - max -= modes[A].num_channels = ath_copy_channels(ah, modes[A].channels, - MODE_IEEE80211A, max); - modes[B].channels = &modes[A].channels[modes[A].num_channels]; - max -= modes[B].num_channels = ath_copy_channels(ah, modes[B].channels, - MODE_IEEE80211B, max); - modes[G].channels = &modes[B].channels[modes[B].num_channels]; - max -= modes[G].num_channels = ath_copy_channels(ah, modes[G].channels, - MODE_IEEE80211G, max); - - if (!max) - printk(KERN_WARNING "yet another modes found, but there is not " - "sufficient space to store them\n"); - - for (i = 0; i < ARRAY_SIZE(sc->modes); i++) - if (modes[i].num_channels) { - ret = ieee80211_register_hwmode(hw, &modes[i]); - if (ret) { - printk(KERN_ERR "can't register hwmode %u\n",i); - goto err; - } - } - ath_dump_modes(modes); - - return 0; -err: - return ret; -} - -static int ath_desc_alloc(struct ath_softc *sc, 
struct pci_dev *pdev) -{ - struct ath_desc *ds; - struct ath_buf *bf; - dma_addr_t da; - unsigned int i; - int ret; - - /* allocate descriptors */ - sc->desc_len = sizeof(struct ath_desc) * - (ATH_TXBUF * ATH_TXDESC + ATH_RXBUF + ATH_BCBUF + 1); - sc->desc = pci_alloc_consistent(pdev, sc->desc_len, &sc->desc_daddr); - if (sc->desc == NULL) { - dev_err(&pdev->dev, "can't allocate descriptors\n"); - ret = -ENOMEM; - goto err; - } - ds = sc->desc; - da = sc->desc_daddr; - DPRINTF(sc, ATH_DEBUG_ANY, "%s: DMA map: %p (%zu) -> %llx\n", - __func__, ds, sc->desc_len, (unsigned long long)sc->desc_daddr); - - bf = kcalloc(1 + ATH_TXBUF + ATH_RXBUF + ATH_BCBUF, - sizeof(struct ath_buf), GFP_KERNEL); - if (bf == NULL) { - dev_err(&pdev->dev, "can't allocate bufptr\n"); - ret = -ENOMEM; - goto err_free; - } - sc->bufptr = bf; - - INIT_LIST_HEAD(&sc->rxbuf); - for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) { - bf->desc = ds; - bf->daddr = da; - list_add_tail(&bf->list, &sc->rxbuf); - } - - INIT_LIST_HEAD(&sc->txbuf); - sc->txbuf_len = ATH_TXBUF; - for (i = 0; i < ATH_TXBUF; i++, bf++, ds += ATH_TXDESC, - da += ATH_TXDESC * sizeof(*ds)) { - bf->desc = ds; - bf->daddr = da; - list_add_tail(&bf->list, &sc->txbuf); - } - - /* beacon buffer */ - bf->desc = ds; - bf->daddr = da; - sc->bbuf = bf; - - return 0; -err_free: - pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr); -err: - sc->desc = NULL; - return ret; -} - -static void ath_desc_free(struct ath_softc *sc, struct pci_dev *pdev) -{ - struct ath_buf *bf; - - ath_cleanup_txbuf(sc, sc->bbuf); - list_for_each_entry(bf, &sc->txbuf, list) - ath_cleanup_txbuf(sc, bf); - list_for_each_entry(bf, &sc->rxbuf, list) - ath_cleanup_txbuf(sc, bf); - - /* Free memory associated with all descriptors */ - pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr); - - kfree(sc->bufptr); - sc->bufptr = NULL; -} - -static int ath_beaconq_setup(struct ath_hw *ah) -{ - struct ath5k_txq_info qi = { - .tqi_aifs = AR5K_TXQ_USEDEFAULT, - .tqi_cw_min = AR5K_TXQ_USEDEFAULT, - .tqi_cw_max = AR5K_TXQ_USEDEFAULT, - /* NB: for dynamic turbo, don't enable any other interrupts */ - .tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE - }; - - return ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_BEACON, &qi); -} - -static struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, - int subtype) -{ - struct ath_hw *ah = sc->ah; - struct ath_txq *txq; - struct ath5k_txq_info qi = { - .tqi_subtype = subtype, - .tqi_aifs = AR5K_TXQ_USEDEFAULT, - .tqi_cw_min = AR5K_TXQ_USEDEFAULT, - .tqi_cw_max = AR5K_TXQ_USEDEFAULT - }; - int qnum; - - /* - * Enable interrupts only for EOL and DESC conditions. - * We mark tx descriptors to receive a DESC interrupt - * when a tx queue gets deep; otherwise waiting for the - * EOL to reap descriptors. Note that this is done to - * reduce interrupt load and this only defers reaping - * descriptors, never transmitting frames. Aside from - * reducing interrupts this also permits more concurrency. - * The only potential downside is if the tx queue backs - * up in which case the top half of the kernel may backup - * due to a lack of tx descriptors. 
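 *
 * (Clarifying assumption drawn from the flag names below: TXEOLINT
 * raises an interrupt when the queue runs out of linked
 * descriptors, while TXDESCINT fires only for descriptors
 * explicitly marked with AR5K_TXDESC_INTREQ, as ath_tx_bf() does.)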
- */ - qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE | - AR5K_TXQ_FLAG_TXDESCINT_ENABLE; - qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi); - if (qnum < 0) { - /* - * NB: don't print a message, this happens - * normally on parts with too few tx queues - */ - return ERR_PTR(qnum); - } - if (qnum >= ARRAY_SIZE(sc->txqs)) { - printk(KERN_ERR "hal qnum %u out of range, max %u!\n", - qnum, ARRAY_SIZE(sc->txqs)); - ath5k_hw_release_tx_queue(ah, qnum); - return ERR_PTR(-EINVAL); - } - txq = &sc->txqs[qnum]; - if (!txq->setup) { - txq->qnum = qnum; - txq->link = NULL; - INIT_LIST_HEAD(&txq->q); - spin_lock_init(&txq->lock); - txq->setup = true; - } - return &sc->txqs[qnum]; -} - -static void ath_tx_cleanup(struct ath_softc *sc) -{ - struct ath_txq *txq = sc->txqs; - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(sc->txqs); i++, txq++) - if (txq->setup) { - ath5k_hw_release_tx_queue(sc->ah, txq->qnum); - txq->setup = false; - } -} - -static int ath_attach(struct pci_dev *pdev, struct ieee80211_hw *hw) -{ - struct ath_softc *sc = hw->priv; - struct ath_hw *ah = sc->ah; - u8 mac[ETH_ALEN]; - unsigned int i; - int ret; - - DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, pdev->device); - - /* - * Check if the MAC has multi-rate retry support. - * We do this by trying to setup a fake extended - * descriptor. MAC's that don't have support will - * return false w/o doing anything. MAC's that do - * support it will return true w/o doing anything. - */ - sc->mrretry = ah->ah_setup_xtx_desc(ah, NULL, 0, 0, 0, 0, 0, 0); - - /* - * Reset the key cache since some parts do not - * reset the contents on initial power up. - */ - for (i = 0; i < AR5K_KEYCACHE_SIZE; i++) - ath5k_hw_reset_key(ah, i); - - /* - * Collect the channel list using the default country - * code and including outdoor channels. The 802.11 layer - * is resposible for filtering this list based on settings - * like the phy mode. - */ - ret = ath_getchannels(hw); - if (ret) { - dev_err(&pdev->dev, "can't get channels\n"); - goto err; - } - - /* NB: setup here so ath_rate_update is happy */ - if (test_bit(MODE_IEEE80211A, ah->ah_modes)) - ath_setcurmode(sc, MODE_IEEE80211A); - else - ath_setcurmode(sc, MODE_IEEE80211B); - - /* - * Allocate tx+rx descriptors and populate the lists. - */ - ret = ath_desc_alloc(sc, pdev); - if (ret) { - dev_err(&pdev->dev, "can't allocate descriptors\n"); - goto err; - } - - /* - * Allocate hardware transmit queues: one queue for - * beacon frames and one data queue for each QoS - * priority. Note that the hal handles reseting - * these queues at the needed time. - */ - ret = ath_beaconq_setup(ah); - if (ret < 0) { - dev_err(&pdev->dev, "can't setup a beacon xmit queue\n"); - goto err_desc; - } - sc->bhalq = ret; - - sc->txq = ath_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK); - if (IS_ERR(sc->txq)) { - dev_err(&pdev->dev, "can't setup xmit queue\n"); - ret = PTR_ERR(sc->txq); - goto err_bhal; - } - - tasklet_init(&sc->rxtq, ath_tasklet_rx, (unsigned long)sc); - tasklet_init(&sc->txtq, ath_tasklet_tx, (unsigned long)sc); - tasklet_init(&sc->restq, ath_tasklet_reset, (unsigned long)sc); - setup_timer(&sc->calib_tim, ath_calibrate, (unsigned long)sc); - setup_timer(&sc->led_tim, ath_led_off, (unsigned long)sc); - - sc->led_blinking = 0; - sc->led_on = 0; /* low true */ - /* - * Auto-enable soft led processing for IBM cards and for - * 5211 minipci cards. Users can also manually enable/disable - * support with a sysctl. 
- */ - if (pdev->device == PCI_DEVICE_ID_ATHEROS_AR5212_IBM || - pdev->device == PCI_DEVICE_ID_ATHEROS_AR5211) { - sc->led_soft = 1; - sc->led_pin = 0; - } - /* Enable softled on PIN1 on HP Compaq nc6xx, nc4000 & nx5000 laptops */ - if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ) { - sc->led_soft = 1; - sc->led_pin = 0; - } - if (sc->led_soft) { - ath5k_hw_set_gpio_output(ah, sc->led_pin); - ath5k_hw_set_gpio(ah, sc->led_pin, !sc->led_on); - } - - ath5k_hw_get_lladdr(ah, mac); - SET_IEEE80211_PERM_ADDR(hw, mac); - if (ath5k_hw_hasbssidmask(ah)) { - memset(sc->bssidmask, 0xff, ETH_ALEN); - ath5k_hw_set_bssid_mask(ah, sc->bssidmask); - } - - ret = ieee80211_register_hw(hw); - if (ret) { - dev_err(&pdev->dev, "can't register ieee80211 hw\n"); - goto err_queues; - } - - return 0; -err_queues: - ath_tx_cleanup(sc); -err_bhal: - ath5k_hw_release_tx_queue(ah, sc->bhalq); -err_desc: - ath_desc_free(sc, pdev); -err: - return ret; -} - -static void ath_detach(struct pci_dev *pdev, struct ieee80211_hw *hw) -{ - struct ath_softc *sc = hw->priv; - - /* - * NB: the order of these is important: - * o call the 802.11 layer before detaching the hal to - * insure callbacks into the driver to delete global - * key cache entries can be handled - * o reclaim the tx queue data structures after calling - * the 802.11 layer as we'll get called back to reclaim - * node state and potentially want to use them - * o to cleanup the tx queues the hal is called, so detach - * it last - * Other than that, it's straightforward... - */ - ieee80211_unregister_hw(hw); - ath_desc_free(sc, pdev); - ath_tx_cleanup(sc); - ath5k_hw_release_tx_queue(sc->ah, sc->bhalq); - - /* - * NB: can't reclaim these until after ieee80211_ifdetach - * returns because we'll get called back to reclaim node - * state and potentially want to use them. - */ -// ath_dynamic_sysctl_unregister(sc); -} - -static const char *ath_chip_name(u8 mac_version) -{ - switch (mac_version) { - case AR5K_AR5210: - return "AR5210"; - case AR5K_AR5211: - return "AR5211"; - case AR5K_AR5212: - return "AR5212"; - } - return "Unknown"; -} - -static int __devinit ath_pci_probe(struct pci_dev *pdev, - const struct pci_device_id *id) -{ - void __iomem *mem; - struct ath_softc *sc; - struct ieee80211_hw *hw; - int ret; - u8 csz; - - ret = pci_enable_device(pdev); - if (ret) { - dev_err(&pdev->dev, "can't enable device\n"); - goto err; - } - - /* XXX 32-bit addressing only */ - ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK); - if (ret) { - dev_err(&pdev->dev, "32-bit DMA not available\n"); - goto err_dis; - } - - /* - * Cache line size is used to size and align various - * structures used to communicate with the hardware. - */ - pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz); - if (csz == 0) { - /* - * Linux 2.4.18 (at least) writes the cache line size - * register as a 16-bit wide register which is wrong. - * We must have this setup properly for rx buffer - * DMA to work so force a reasonable value here if it - * comes up zero. - */ - csz = L1_CACHE_BYTES / sizeof(u32); - pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz); - } - /* - * The default setting of latency timer yields poor results, - * set it to the value used by other systems. It may be worth - * tweaking this setting more. - */ - pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8); - - pci_set_master(pdev); - - /* - * Disable the RETRY_TIMEOUT register (0x41) to keep - * PCI Tx retries from interfering with C3 CPU state. 
- */
- pci_write_config_byte(pdev, 0x41, 0);
-
- ret = pci_request_region(pdev, 0, "ath");
- if (ret) {
- dev_err(&pdev->dev, "cannot reserve PCI memory region\n");
- goto err_dis;
- }
-
- mem = pci_iomap(pdev, 0, 0);
- if (!mem) {
- dev_err(&pdev->dev, "cannot remap PCI memory region\n");
- ret = -EIO;
- goto err_reg;
- }
-
- hw = ieee80211_alloc_hw(sizeof(*sc), &ath_hw_ops);
- if (hw == NULL) {
- dev_err(&pdev->dev, "cannot allocate ieee80211_hw\n");
- ret = -ENOMEM;
- goto err_map;
- }
-
- SET_IEEE80211_DEV(hw, &pdev->dev);
- hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | IEEE80211_HW_WEP_INCLUDE_IV |
- IEEE80211_HW_DATA_NULLFUNC_ACK;
- hw->extra_tx_headroom = 2;
- hw->channel_change_time = 5000;
- hw->max_rssi = 127; /* FIXME: get a real value for this. */
- sc = hw->priv;
- sc->hw = hw;
-
- /*
- * Mark the device as detached to avoid processing
- * interrupts until setup is complete.
- */
-#if AR_DEBUG
- sc->debug = ath_debug;
-#endif
- sc->invalid = 1;
- sc->iobase = mem;
- sc->cachelsz = csz * sizeof(u32); /* convert to bytes */
- sc->opmode = IEEE80211_IF_TYPE_STA;
- mutex_init(&sc->lock);
- spin_lock_init(&sc->rxbuflock);
- spin_lock_init(&sc->txbuflock);
-
- pci_set_drvdata(pdev, hw);
-
- ret = request_irq(pdev->irq, ath_intr, IRQF_SHARED, "ath", sc);
- if (ret) {
- dev_err(&pdev->dev, "request_irq failed\n");
- goto err_free;
- }
-
- sc->ah = ath5k_hw_attach(pdev->device, id->driver_data, sc, sc->iobase);
- if (IS_ERR(sc->ah)) {
- ret = PTR_ERR(sc->ah);
- goto err_irq;
- }
-
- ret = ath_attach(pdev, hw);
- if (ret)
- goto err_ah;
-
- dev_info(&pdev->dev, "%s chip found: mac %d.%d phy %d.%d\n",
- ath_chip_name(id->driver_data), sc->ah->ah_mac_version,
- sc->ah->ah_mac_revision, sc->ah->ah_phy_revision >> 4,
- sc->ah->ah_phy_revision & 0xf);
-
- /* ready to process interrupts */
- sc->invalid = 0;
-
- return 0;
-err_ah:
- ath5k_hw_detach(sc->ah);
-err_irq:
- free_irq(pdev->irq, sc);
-err_free:
- ieee80211_free_hw(hw);
-err_map:
- pci_iounmap(pdev, mem);
-err_reg:
- pci_release_region(pdev, 0);
-err_dis:
- pci_disable_device(pdev);
-err:
- return ret;
-}
-
-static void __devexit ath_pci_remove(struct pci_dev *pdev)
-{
- struct ieee80211_hw *hw = pci_get_drvdata(pdev);
- struct ath_softc *sc = hw->priv;
-
- ath_detach(pdev, hw);
- ath5k_hw_detach(sc->ah);
- free_irq(pdev->irq, sc);
- pci_iounmap(pdev, sc->iobase);
- pci_release_region(pdev, 0);
- pci_disable_device(pdev);
- ieee80211_free_hw(hw);
-}
-
-#ifdef CONFIG_PM
-static int ath_pci_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- struct ieee80211_hw *hw = pci_get_drvdata(pdev);
- struct ath_softc *sc = hw->priv;
-
- if (sc->led_soft)
- ath5k_hw_set_gpio(sc->ah, sc->led_pin, 1);
-
- ath_stop_hw(sc);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
-
- return 0;
-}
-
-static int ath_pci_resume(struct pci_dev *pdev)
-{
- struct ieee80211_hw *hw = pci_get_drvdata(pdev);
- struct ath_softc *sc = hw->priv;
- int err;
-
- err = pci_set_power_state(pdev, PCI_D0);
- if (err)
- return err;
-
- err = pci_enable_device(pdev);
- if (err)
- return err;
-
- pci_restore_state(pdev);
- /*
- * Suspend/Resume resets the PCI configuration space, so we have to
- * re-disable the RETRY_TIMEOUT register (0x41) to keep
- * PCI Tx retries from interfering with C3 CPU state
- */
- pci_write_config_byte(pdev, 0x41, 0);
-
- ath_init(sc);
- if (sc->led_soft) {
- ath5k_hw_set_gpio_output(sc->ah, sc->led_pin);
- ath5k_hw_set_gpio(sc->ah, sc->led_pin, 0);
- }
-
- return 0;
-}
-#else
-#define ath_pci_suspend NULL
-#define ath_pci_resume NULL -#endif /* CONFIG_PM */ - -static struct pci_driver ath_pci_drv_id = { - .name = "ath_pci", - .id_table = ath_pci_id_table, - .probe = ath_pci_probe, - .remove = __devexit_p(ath_pci_remove), - .suspend = ath_pci_suspend, - .resume = ath_pci_resume, -}; - -/* - * Static (i.e. global) sysctls. Note that the hal sysctls - * are located under ours by sharing the setting for DEV_ATH. - */ -enum { - DEV_ATH = 9, /* XXX known by hal */ -}; - -static int mincalibrate = 1; -static int maxint = 0x7ffffff / 1000; -#define CTL_AUTO -2 /* cannot be CTL_ANY or CTL_NONE */ - -static ctl_table ath_static_sysctls[] = { -#if AR_DEBUG - { .ctl_name = CTL_AUTO, - .procname = "debug", - .mode = 0644, - .data = &ath_debug, - .maxlen = sizeof(ath_debug), - .proc_handler = proc_dointvec - }, -#endif - { .ctl_name = CTL_AUTO, - .procname = "countrycode", - .mode = 0444, - .data = &countrycode, - .maxlen = sizeof(countrycode), - .proc_handler = proc_dointvec - }, - { .ctl_name = CTL_AUTO, - .procname = "outdoor", - .mode = 0444, - .data = &outdoor, - .maxlen = sizeof(outdoor), - .proc_handler = proc_dointvec - }, - { .ctl_name = CTL_AUTO, - .procname = "xchanmode", - .mode = 0444, - .data = &xchanmode, - .maxlen = sizeof(xchanmode), - .proc_handler = proc_dointvec - }, - { .ctl_name = CTL_AUTO, - .procname = "calibrate", - .mode = 0644, - .data = &ath_calinterval, - .maxlen = sizeof(ath_calinterval), - .extra1 = &mincalibrate, - .extra2 = &maxint, - .proc_handler = proc_dointvec_minmax - }, - { 0 } -}; -static ctl_table ath_ath_table[] = { - { .ctl_name = DEV_ATH, - .procname = "ath", - .mode = 0555, - .child = ath_static_sysctls - }, { 0 } -}; -static ctl_table ath_root_table[] = { - { .ctl_name = CTL_DEV, - .procname = "dev", - .mode = 0555, - .child = ath_ath_table - }, { 0 } -}; -static struct ctl_table_header *ath_sysctl_header; - -static int __init init_ath_pci(void) -{ - int ret; - - ret = pci_register_driver(&ath_pci_drv_id); - if (ret) { - printk(KERN_ERR "ath_pci: can't register pci driver\n"); - return ret; - } - ath_sysctl_header = register_sysctl_table(ath_root_table); - - return 0; -} - -static void __exit exit_ath_pci(void) -{ - if (ath_sysctl_header) - unregister_sysctl_table(ath_sysctl_header); - pci_unregister_driver(&ath_pci_drv_id); -} - -module_init(init_ath_pci); -module_exit(exit_ath_pci); - -MODULE_AUTHOR("Jiri Slaby"); -MODULE_DESCRIPTION("Support for Atheros 802.11 wireless LAN cards."); -MODULE_SUPPORTED_DEVICE("Atheros WLAN cards"); -MODULE_LICENSE("Dual BSD/GPL"); -MODULE_VERSION(ATH_PCI_VERSION " (EXPERIMENTAL)"); diff --git a/ath.h b/ath.h deleted file mode 100644 index 26a8126..0000000 --- a/ath.h +++ /dev/null @@ -1,301 +0,0 @@ -/*- - * Copyright (c) 2002-2007 Sam Leffler, Errno Consulting - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer, - * without modification. - * 2. Redistributions in binary form must reproduce at minimum a disclaimer - * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any - * redistribution must be conditioned upon including a substantially - * similar Disclaimer requirement for further binary redistribution. - * 3. 
Neither the names of the above-listed copyright holders nor the names
- * of any contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
- * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
- * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGES.
- *
- * $FreeBSD: src/sys/dev/ath/if_athvar.h,v 1.20 2005/01/24 20:31:24 sam Exp $
- */
-
-/*
- * Definitions for the Atheros Wireless LAN controller driver.
- */
-#ifndef _DEV_ATH_ATHVAR_H
-#define _DEV_ATH_ATHVAR_H
-
-#include 
-#include 
-#include 
-#include 
-
-#include "ath5k_hw.h"
-
-/* Set this to 1 to disable regulatory domain restrictions for channel tests.
- * WARNING: This is for debugging only and has side effects (eg. scan takes too
- * long and results in timeouts). It's also illegal to tune to some of the
- * supported frequencies in some countries, so use this at your own risk,
- * you've been warned. */
-#define CHAN_DEBUG 0
-
-#define ATH_TIMEOUT 1000
-
-#define ATH_LONG_CALIB 30 /* seconds */
-#define ATH_SHORT_CALIB 1
-
-/*
- * Maximum acceptable MTU
- * MAXFRAMEBODY - WEP - QOS - RSN/WPA:
- * 2312 - 8 - 2 - 12 = 2290
- */
-#define ATH_MAX_MTU 2290
-#define ATH_MIN_MTU 32
-
-#define ATH_RXBUF 40 /* number of RX buffers */
-#define ATH_TXBUF 200 /* number of TX buffers */
-#define ATH_TXDESC 1 /* number of descriptors per buffer */
-#define ATH_BCBUF 1 /* number of beacon buffers */
-#define ATH_TXMAXTRY 11 /* max number of transmit attempts */
-#define ATH_TXINTR_PERIOD 5 /* max number of batched tx descriptors */
-
-#define ATH_BEACON_AIFS_DEFAULT 0 /* default aifs for ap beacon q */
-#define ATH_BEACON_CWMIN_DEFAULT 0 /* default cwmin for ap beacon q */
-#define ATH_BEACON_CWMAX_DEFAULT 0 /* default cwmax for ap beacon q */
-
-#define ATH_RSSI_LPF_LEN 10
-#define ATH_RSSI_DUMMY_MARKER 0x127
-#define ATH_EP_MUL(x, mul) ((x) * (mul))
-#define ATH_RSSI_IN(x) (ATH_EP_MUL((x), AR5K_RSSI_EP_MULTIPLIER))
-#define ATH_LPF_RSSI(x, y, len) \
- ((x != ATH_RSSI_DUMMY_MARKER) ? (((x) * ((len) - 1) + (y)) / (len)) : (y))
-#define ATH_RSSI_LPF(x, y) do { \
- if ((y) >= -20) \
- x = ATH_LPF_RSSI((x), ATH_RSSI_IN((y)), ATH_RSSI_LPF_LEN); \
-} while (0)
-
-struct ath_buf {
- struct list_head list;
- unsigned int flags; /* tx descriptor flags */
- struct ath_desc *desc; /* virtual addr of desc */
- dma_addr_t daddr; /* physical addr of desc */
- struct sk_buff *skb; /* skbuff for buf */
- dma_addr_t skbaddr;/* physical addr of skb data */
- struct ieee80211_tx_control ctl;
-};
-
-/*
- * Data transmit queue state. One of these exists for each
- * hardware transmit queue.
Packets sent to us from above - * are assigned to queues based on their priority. Not all - * devices support a complete set of hardware transmit queues. - * For those devices the array sc_ac2q will map multiple - * priorities to fewer hardware queues (typically all to one - * hardware queue). - */ -struct ath_txq { - unsigned int qnum; /* hardware q number */ - u32 *link; /* link ptr in last TX desc */ - struct list_head q; /* transmit queue */ - spinlock_t lock; /* lock on q and link */ - bool setup; -}; - -struct ath_stats { - __u32 ast_watchdog; /* device reset by watchdog */ - __u32 ast_hardware; /* fatal hardware error interrupts */ - __u32 ast_bmiss; /* beacon miss interrupts */ - __u32 ast_bstuck; /* beacon stuck interrupts */ - __u32 ast_rxorn; /* rx overrun interrupts */ - __u32 ast_rxeol; /* rx eol interrupts */ - __u32 ast_txurn; /* tx underrun interrupts */ - __u32 ast_mib; /* mib interrupts */ - __u32 ast_intrcoal; /* interrupts coalesced */ - __u32 ast_tx_packets; /* packet sent on the interface */ - __u32 ast_tx_mgmt; /* management frames transmitted */ - __u32 ast_tx_discard; /* frames discarded prior to assoc */ - __u32 ast_tx_invalid; /* frames discarded 'cuz device gone */ - __u32 ast_tx_qstop; /* output stopped 'cuz no buffer */ - __u32 ast_tx_encap; /* tx encapsulation failed */ - __u32 ast_tx_nonode; /* tx failed 'cuz no node */ - __u32 ast_tx_nobuf; /* tx failed 'cuz no tx buffer (data) */ - __u32 ast_tx_nobufmgt;/* tx failed 'cuz no tx buffer (mgmt)*/ - __u32 ast_tx_linear; /* tx linearized to cluster */ - __u32 ast_tx_nodata; /* tx discarded empty frame */ - __u32 ast_tx_busdma; /* tx failed for dma resrcs */ - __u32 ast_tx_xretries;/* tx failed 'cuz too many retries */ - __u32 ast_tx_fifoerr; /* tx failed 'cuz FIFO underrun */ - __u32 ast_tx_filtered;/* tx failed 'cuz xmit filtered */ - __u32 ast_tx_shortretry;/* tx on-chip retries (short) */ - __u32 ast_tx_longretry;/* tx on-chip retries (long) */ - __u32 ast_tx_badrate; /* tx failed 'cuz bogus xmit rate */ - __u32 ast_tx_noack; /* tx frames with no ack marked */ - __u32 ast_tx_rts; /* tx frames with rts enabled */ - __u32 ast_tx_cts; /* tx frames with cts enabled */ - __u32 ast_tx_shortpre;/* tx frames with short preamble */ - __u32 ast_tx_altrate; /* tx frames with alternate rate */ - __u32 ast_tx_protect; /* tx frames with protection */ - __u32 ast_tx_ctsburst;/* tx frames with cts and bursting */ - __u32 ast_tx_ctsext; /* tx frames with cts extension */ - __u32 ast_rx_nobuf; /* rx setup failed 'cuz no skb */ - __u32 ast_rx_busdma; /* rx setup failed for dma resrcs */ - __u32 ast_rx_orn; /* rx failed 'cuz of desc overrun */ - __u32 ast_rx_crcerr; /* rx failed 'cuz of bad CRC */ - __u32 ast_rx_fifoerr; /* rx failed 'cuz of FIFO overrun */ - __u32 ast_rx_badcrypt;/* rx failed 'cuz decryption */ - __u32 ast_rx_badmic; /* rx failed 'cuz MIC failure */ - __u32 ast_rx_phyerr; /* rx failed 'cuz of PHY err */ - __u32 ast_rx_phy[32]; /* rx PHY error per-code counts */ - __u32 ast_rx_tooshort;/* rx discarded 'cuz frame too short */ - __u32 ast_rx_toobig; /* rx discarded 'cuz frame too large */ - __u32 ast_rx_packets; /* packet recv on the interface */ - __u32 ast_rx_mgt; /* management frames received */ - __u32 ast_rx_ctl; /* rx discarded 'cuz ctl frame */ - __s8 ast_tx_rssi; /* tx rssi of last ack */ - __s8 ast_rx_rssi; /* rx rssi from histogram */ - __u32 ast_be_xmit; /* beacons transmitted */ - __u32 ast_be_nobuf; /* beacon setup failed 'cuz no skb */ - __u32 ast_per_cal; /* periodic calibration calls */ - __u32 
ast_per_calfail;/* periodic calibration failed */ - __u32 ast_per_rfgain; /* periodic calibration rfgain reset */ - __u32 ast_rate_calls; /* rate control checks */ - __u32 ast_rate_raise; /* rate control raised xmit rate */ - __u32 ast_rate_drop; /* rate control dropped xmit rate */ - __u32 ast_ant_defswitch;/* rx/default antenna switches */ - __u32 ast_ant_txswitch;/* tx antenna switches */ - __u32 ast_ant_rx[8]; /* rx frames with antenna */ - __u32 ast_ant_tx[8]; /* tx frames with antenna */ -}; - -#if CHAN_DEBUG -#define ATH_CHAN_MAX (26+26+26+200+200) -#else -#define ATH_CHAN_MAX (14+14+14+252+20) /* XXX what's the max? */ -#endif - -struct ath_softc { - struct pci_dev *pdev; /* for dma mapping */ - void __iomem *iobase; /* address of the device */ - struct mutex lock; /* dev-level lock */ - struct ath_stats stats; /* private statistics */ - struct ieee80211_tx_queue_stats tx_stats; - struct ieee80211_low_level_stats ll_stats; - struct ieee80211_hw *hw; /* IEEE 802.11 common */ - struct ieee80211_hw_mode modes[NUM_IEEE80211_MODES]; - struct ieee80211_channel channels[ATH_CHAN_MAX]; - struct ieee80211_rate rates[AR5K_MAX_RATES * NUM_IEEE80211_MODES]; - enum ieee80211_if_types opmode; - struct ath_hw *ah; /* Atheros HW */ - - int debug; - - struct ath_buf *bufptr; /* allocated buffer ptr */ - struct ath_desc *desc; /* TX/RX descriptors */ - dma_addr_t desc_daddr; /* DMA (physical) address */ - size_t desc_len; /* size of TX/RX descriptors */ - u16 cachelsz; /* cache line size */ -#ifdef UNUSED - void (*sc_setdefantenna)(struct ath_softc *, u_int); -#endif - unsigned int invalid : 1, /* disable hardware accesses */ - mrretry : 1, /* multi-rate retry support */ - promisc : 1; -#ifdef UNUSED - sc_diversity : 1,/* enable rx diversity */ - sc_hasveol : 1, /* tx VEOL support */ - sc_mcastkey: 1, /* mcast key cache search */ - sc_hasclrkey:1; /* CLR key supported */ - /* rate tables */ -#endif - unsigned int curmode; /* current phy mode */ - struct ieee80211_channel *curchan; /* current h/w channel */ - - int iface_id; /* add/remove_interface id */ - - struct { - u8 rxflags; /* radiotap rx flags */ - u8 txflags; /* radiotap tx flags */ - u16 ledon; /* softled on time */ - u16 ledoff; /* softled off time */ - } hwmap[32]; /* h/w rate ix mappings */ -#ifdef UNUSED - u8 sc_protrix; /* protection rate index */ - u_int sc_txantenna; /* tx antenna (fixed or auto) */ -#endif - enum ath5k_int imask; /* interrupt mask copy */ - - DECLARE_BITMAP(keymap, AR5K_KEYCACHE_SIZE); /* key use bit map */ - - u8 bssidmask[ETH_ALEN]; - - unsigned int led_pin, /* GPIO pin for driving LED */ - led_on, /* pin setting for LED on */ - led_off, /* off time for current blink */ - led_blinking: 1,/* LED blink operation active */ - led_endblink: 1,/* finish LED blink operation */ - led_soft: 1; /* enable LED gpio status */ - struct timer_list led_tim; /* led off timer */ - u8 led_rxrate; /* current rx rate for LED */ - u8 led_txrate; /* current tx rate for LED */ - - struct tasklet_struct restq; /* reset tasklet */ - - unsigned int rxbufsize; /* rx size based on mtu */ - struct list_head rxbuf; /* receive buffer */ - spinlock_t rxbuflock; - u32 *rxlink; /* link ptr in last RX desc */ - struct tasklet_struct rxtq; /* rx intr tasklet */ -#ifdef UNUSED - u8 sc_defant; /* current default antenna */ - u8 sc_rxotherant; /* rx's on non-default antenna*/ -#endif - struct list_head txbuf; /* transmit buffer */ - spinlock_t txbuflock; - unsigned int txbuf_len; /* buf count in txbuf list */ - struct ath_txq txqs[2]; /* beacon and tx 
*/ -#ifdef UNUSED - struct ath_txq *sc_ac2q[5]; /* WME AC -> h/w q map */ -#endif - struct ath_txq *txq; /* beacon and tx*/ - struct tasklet_struct txtq; /* tx intr tasklet */ - - struct ath_buf *bbuf; /* beacon buffer */ - unsigned int bhalq, /* HAL q for outgoing beacons */ - bmisscount, /* missed beacon transmits */ - bintval; /* beacon interval */ -#ifdef BEACON - u32 sc_ant_tx[8]; /* recent tx frames/antenna */ - struct ath_txq *cabq; /* tx q for cab frames */ - - struct tasklet_struct bmisstq; /* bmiss intr tasklet */ -#endif -#ifdef UNUSED - struct ctl_table_header *sc_sysctl_header; - struct ctl_table *sc_sysctls; -#endif - struct timer_list calib_tim; /* calibration timer */ -}; - -#define ath5k_hw_hasbssidmask(_ah) \ - (ath5k_hw_get_capability(_ah, AR5K_CAP_BSSIDMASK, 0, NULL) == 0) -#define ath5k_hw_hasveol(_ah) \ - (ath5k_hw_get_capability(_ah, AR5K_CAP_VEOL, 0, NULL) == 0) - -#endif diff --git a/ath5k_base.c b/ath5k_base.c new file mode 100644 index 0000000..10aa888 --- /dev/null +++ b/ath5k_base.c @@ -0,0 +1,2567 @@ +/*- + * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting + * Copyright (c) 2004-2005 Atheros Communications, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any + * redistribution must be conditioned upon including a substantially + * similar Disclaimer requirement for further binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + */ +#define ATH_PCI_VERSION "0.9.5.0-BSD" + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include "ath5k_base.h" +#include "ath5k_reg.h" + +#define ATH_DEBUG_MODES 0 /* Show found modes in the log? */ +#define ATH_DUMP_SKB 0 /* show skb contents */ +#define AR_DEBUG 1 + +/* unaligned little endian access */ +#define LE_READ_2(_p) (le16_to_cpu(get_unaligned((__le16 *)(_p)))) +#define LE_READ_4(_p) (le32_to_cpu(get_unaligned((__le32 *)(_p)))) + +#if AR_DEBUG +#define DPRINTF(sc, _m, _fmt...) do { \ + if (unlikely(((sc)->debug & (_m)) && net_ratelimit())) \ + printk(KERN_DEBUG _fmt); \ +} while (0) +#else +static inline int __attribute__ ((format (printf, 3, 4))) +DPRINTF(struct ath_softc *sc, unsigned int m, const char *fmt, ...) 
+{ + return 0; +} +#endif +enum { + ATH_DEBUG_XMIT = 0x00000001, /* basic xmit operation */ + ATH_DEBUG_RESET = 0x00000020, /* reset processing */ + ATH_DEBUG_MODE = 0x00000040, /* mode init/setup */ + ATH_DEBUG_BEACON = 0x00000080, /* beacon handling */ + ATH_DEBUG_INTR = 0x00001000, /* ISR */ + ATH_DEBUG_BEACON_PROC = 0x00008000, /* beacon ISR proc */ + ATH_DEBUG_CALIBRATE = 0x00010000, /* periodic calibration */ + ATH_DEBUG_LED = 0x00100000, /* led management */ + ATH_DEBUG_FATAL = 0x80000000, /* fatal errors */ + ATH_DEBUG_ANY = 0xffffffff +}; + +enum { + ATH_LED_TX, + ATH_LED_RX, +}; + +static int ath_calinterval = ATH_SHORT_CALIB; + +static int countrycode = CTRY_DEFAULT; +static int outdoor = true; +static int xchanmode = true; +module_param(countrycode, int, 0); +MODULE_PARM_DESC(countrycode, "Override default country code"); +module_param(outdoor, int, 0); +MODULE_PARM_DESC(outdoor, "Enable/disable outdoor use"); +module_param(xchanmode, int, 0); +MODULE_PARM_DESC(xchanmode, "Enable/disable extended channel mode"); + +#if AR_DEBUG +static unsigned int ath_debug; +module_param_named(debug, ath_debug, uint, 0); +#endif + +/* + * User a static table of PCI id's for now. While this is the + * "new way" to do things, we may want to switch back to having + * the HAL check them by defining a probe method. + */ +static struct pci_device_id ath_pci_id_table[] __devinitdata = { + { PCI_VDEVICE(ATHEROS, 0x0207), .driver_data = AR5K_AR5210 }, /* 5210 early */ + { PCI_VDEVICE(ATHEROS, 0x0007), .driver_data = AR5K_AR5210 }, /* 5210 */ + { PCI_VDEVICE(ATHEROS, 0x0011), .driver_data = AR5K_AR5211 }, /* 5311 */ + { PCI_VDEVICE(ATHEROS, 0x0012), .driver_data = AR5K_AR5211 }, /* 5211 */ + { PCI_VDEVICE(ATHEROS, 0x0013), .driver_data = AR5K_AR5212 }, /* 5212 */ + { PCI_VDEVICE(3COM_2, 0x0013), .driver_data = AR5K_AR5212 }, /* 3com 5212 */ + { PCI_VDEVICE(3COM, 0x0013), .driver_data = AR5K_AR5212 }, /* 3com 3CRDAG675 5212 */ + { PCI_VDEVICE(ATHEROS, 0x1014), .driver_data = AR5K_AR5212 }, /* IBM minipci 5212 */ + { PCI_VDEVICE(ATHEROS, 0x0014), .driver_data = AR5K_AR5212 }, + { PCI_VDEVICE(ATHEROS, 0x0015), .driver_data = AR5K_AR5212 }, + { PCI_VDEVICE(ATHEROS, 0x0016), .driver_data = AR5K_AR5212 }, + { PCI_VDEVICE(ATHEROS, 0x0017), .driver_data = AR5K_AR5212 }, + { PCI_VDEVICE(ATHEROS, 0x0018), .driver_data = AR5K_AR5212 }, + { PCI_VDEVICE(ATHEROS, 0x0019), .driver_data = AR5K_AR5212 }, + { PCI_VDEVICE(ATHEROS, 0x001a), .driver_data = AR5K_AR5212 }, /* 2413 Griffin-lite */ + { PCI_VDEVICE(ATHEROS, 0x001b), .driver_data = AR5K_AR5212 }, /* 5413 Eagle */ + { PCI_VDEVICE(ATHEROS, 0x001c), .driver_data = AR5K_AR5212 }, /* 5424 Condor (PCI-E)*/ + { 0 } +}; +MODULE_DEVICE_TABLE(pci, ath_pci_id_table); + +static void ath_led_event(struct ath_softc *, int); +static int ath_reset(struct ieee80211_hw *); + +#if AR_DEBUG +static void ath_printrxbuf(struct ath_buf *bf, int done) +{ + struct ath_desc *ds = bf->desc; + + printk(KERN_DEBUG "R (%p %llx) %08x %08x %08x %08x %08x %08x %c\n", + ds, (unsigned long long)bf->daddr, + ds->ds_link, ds->ds_data, ds->ds_ctl0, ds->ds_ctl1, + ds->ds_hw[0], ds->ds_hw[1], + !done ? ' ' : (ds->ds_rxstat.rs_status == 0) ? '*' : '!'); +} + +static void ath_printtxbuf(struct ath_buf *bf, int done) +{ + struct ath_desc *ds = bf->desc; + + printk(KERN_DEBUG "T (%p %llx) %08x %08x %08x %08x %08x %08x %08x " + "%08x %c\n", + ds, (unsigned long long)bf->daddr, + ds->ds_link, ds->ds_data, ds->ds_ctl0, ds->ds_ctl1, + ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3], + !done ? 
' ' : (ds->ds_txstat.ts_status == 0) ? '*' : '!'); +} +#endif + +#if ATH_DUMP_SKB +static inline void ath_dump_skb(struct sk_buff *skb, const char *prefix) +{ + print_hex_dump_bytes(prefix, DUMP_PREFIX_NONE, skb->data, + min(200U, skb->len)); +} +#else +static inline void ath_dump_skb(struct sk_buff *skb, const char *prefix) {} +#endif + +static inline void ath_cleanup_txbuf(struct ath_softc *sc, struct ath_buf *bf) +{ + BUG_ON(!bf); + if (!bf->skb) + return; + pci_unmap_single(sc->pdev, bf->skbaddr, bf->skb->len, + PCI_DMA_TODEVICE); + dev_kfree_skb(bf->skb); + bf->skb = NULL; +} + +static void ath_tasklet_reset(unsigned long data) +{ + struct ath_softc *sc = (void *)data; + + ath_reset(sc->hw); +} + +static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) +{ + struct ieee80211_tx_status txs = {}; + struct ath_buf *bf, *bf0; + struct ath_desc *ds; + struct sk_buff *skb; + int ret; + + spin_lock(&txq->lock); + list_for_each_entry_safe(bf, bf0, &txq->q, list) { + ds = bf->desc; + + /* TODO only one segment */ + pci_dma_sync_single_for_cpu(sc->pdev, sc->desc_daddr, + sc->desc_len, PCI_DMA_FROMDEVICE); + ret = sc->ah->ah_proc_tx_desc(sc->ah, ds); + if (unlikely(ret == -EINPROGRESS)) + break; + else if (unlikely(ret)) { + printk(KERN_ERR "ath: error %d while processing " + "queue %u\n", ret, txq->qnum); + break; + } + + skb = bf->skb; + bf->skb = NULL; + pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, + PCI_DMA_TODEVICE); + + txs.control = bf->ctl; + txs.retry_count = ds->ds_txstat.ts_shortretry + + ds->ds_txstat.ts_longretry / 6; + if (unlikely(ds->ds_txstat.ts_status)) { + sc->ll_stats.dot11ACKFailureCount++; + if (ds->ds_txstat.ts_status & AR5K_TXERR_XRETRY) { + txs.excessive_retries = 1; + } else if (ds->ds_txstat.ts_status & AR5K_TXERR_FILT) { + txs.flags |= IEEE80211_TX_STATUS_TX_FILTERED; + } + } else { + txs.flags |= IEEE80211_TX_STATUS_ACK; + txs.ack_signal = ds->ds_txstat.ts_rssi; + } + + ieee80211_tx_status(sc->hw, skb, &txs); + sc->tx_stats.data[txq->qnum].count++; + +// printk(KERN_DEBUG "DONE skb: %p, rssi: %d, stat: %x, seq: %u, stamp: %u\n", skb, ds->ds_txstat.ts_rssi, ds->ds_txstat.ts_status, ds->ds_txstat.ts_seqnum, ds->ds_txstat.ts_tstamp); + + spin_lock(&sc->txbuflock); + sc->tx_stats.data[txq->qnum].len--; + list_move_tail(&bf->list, &sc->txbuf); + sc->txbuf_len++; + spin_unlock(&sc->txbuflock); + } + if (likely(list_empty(&txq->q))) + txq->link = NULL; + spin_unlock(&txq->lock); + if (sc->txbuf_len > ATH_TXBUF / 5) + ieee80211_wake_queues(sc->hw); +} + +static void ath_tasklet_tx(unsigned long data) +{ + struct ath_softc *sc = (void *)data; + + ath_tx_processq(sc, sc->txq); + + ath_led_event(sc, ATH_LED_TX); +} + +static int ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf) +{ + struct ath_hw *ah = sc->ah; + struct sk_buff *skb = bf->skb; + struct ath_desc *ds; + + if (likely(skb == NULL)) { + unsigned int off; + + /* + * Allocate buffer with headroom_needed space for the + * fake physical layer header at the start. + */ + skb = dev_alloc_skb(sc->rxbufsize + sc->cachelsz - 1); + if (unlikely(skb == NULL)) { + DPRINTF(sc, ATH_DEBUG_ANY, "%s: skbuff alloc of " + "size %u failed\n", __func__, + sc->rxbufsize + sc->cachelsz - 1); + sc->stats.ast_rx_nobuf++; + return -ENOMEM; + } + /* + * Cache-line-align. This is important (for the + * 5210 at least) as not doing so causes bogus data + * in rx'd frames. 
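+ *
+ * For example, with a 32-byte cache line (sc->cachelsz == 32)
+ * and skb->data at an address ending in 0x04, off is 4 and
+ * skb_reserve() advances the data pointer by 28 bytes to the
+ * next cache-line boundary.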
+ */ + off = ((unsigned long)skb->data) % sc->cachelsz; + if (off != 0) + skb_reserve(skb, sc->cachelsz - off); + + bf->skb = skb; + bf->skbaddr = pci_map_single(sc->pdev, + skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE); + if (unlikely(pci_dma_mapping_error(bf->skbaddr))) { + printk(KERN_ERR "%s: DMA mapping failed\n", __func__); + dev_kfree_skb(skb); + bf->skb = NULL; + sc->stats.ast_rx_busdma++; + return -ENOMEM; + } + } + + /* + * Setup descriptors. For receive we always terminate + * the descriptor list with a self-linked entry so we'll + * not get overrun under high load (as can happen with a + * 5212 when ANI processing enables PHY error frames). + * + * To insure the last descriptor is self-linked we create + * each descriptor as self-linked and add it to the end. As + * each additional descriptor is added the previous self-linked + * entry is ``fixed'' naturally. This should be safe even + * if DMA is happening. When processing RX interrupts we + * never remove/process the last, self-linked, entry on the + * descriptor list. This insures the hardware always has + * someplace to write a new frame. + */ + ds = bf->desc; + ds->ds_link = bf->daddr; /* link to self */ + ds->ds_data = bf->skbaddr; + ath5k_hw_setup_rx_desc(ah, ds, + skb_tailroom(skb), /* buffer size */ + 0); + + if (sc->rxlink != NULL) + *sc->rxlink = bf->daddr; + sc->rxlink = &ds->ds_link; + return 0; +} + +static unsigned int ath_rx_decrypted(struct ath_softc *sc, + struct ath_desc *ds, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (void *)skb->data; + unsigned int keyix, hlen = ieee80211_get_hdrlen_from_skb(skb); + + if (!(ds->ds_rxstat.rs_status & AR5K_RXERR_DECRYPT) && + ds->ds_rxstat.rs_keyix != AR5K_RXKEYIX_INVALID) + return RX_FLAG_DECRYPTED; + + /* Apparently when a default key is used to decrypt the packet + the hal does not set the index used to decrypt. In such cases + get the index from the packet. 
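+ (The key index is carried in the top two bits of the fourth IV
+ octet following the 802.11 header, which is why the code below
+ reads skb->data[hlen + 3] >> 6.)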
*/ + if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_PROTECTED) && + !(ds->ds_rxstat.rs_status & AR5K_RXERR_DECRYPT) && + skb->len >= hlen + 4) { + keyix = skb->data[hlen + 3] >> 6; + + if (test_bit(keyix, sc->keymap)) + return RX_FLAG_DECRYPTED; + } + + return 0; +} + +static inline u64 ath_extend_tsf(struct ath_hw *ah, u32 rstamp) +{ + u64 tsf = ath5k_hw_get_tsf64(ah); + + if ((tsf & 0x7fff) < rstamp) + tsf -= 0x8000; + + return (tsf &~ 0x7fff) | rstamp; +} + +static void ath_tasklet_rx(unsigned long data) +{ + struct ieee80211_rx_status rxs = {}; + struct sk_buff *skb; + struct ath_softc *sc = (void *)data; + struct ath_buf *bf; + struct ath_desc *ds; + u16 len; + u8 stat; + int ret; + + spin_lock(&sc->rxbuflock); + do { + if (unlikely(list_empty(&sc->rxbuf))) { + if (net_ratelimit()) + printk(KERN_WARNING "ath: empty rx buf pool\n"); + break; + } + bf = list_first_entry(&sc->rxbuf, struct ath_buf, list); + BUG_ON(bf->skb == NULL); + skb = bf->skb; + ds = bf->desc; + + /* TODO only one segment */ + pci_dma_sync_single_for_cpu(sc->pdev, sc->desc_daddr, + sc->desc_len, PCI_DMA_FROMDEVICE); + + if (unlikely(ds->ds_link == bf->daddr)) /* this is the end */ + break; + + ret = sc->ah->ah_proc_rx_desc(sc->ah, ds); + if (unlikely(ret == -EINPROGRESS)) + break; + else if (unlikely(ret)) { + if (net_ratelimit()) + printk(KERN_ERR "ath: error in processing rx " + "descriptor\n"); + return; + } + + if (unlikely(ds->ds_rxstat.rs_more)) { + if (net_ratelimit()) + printk(KERN_INFO "ath: unsupported jumbo\n"); + goto next; + } + + stat = ds->ds_rxstat.rs_status; + if (unlikely(stat)) { + if (stat & AR5K_RXERR_CRC) + sc->stats.ast_rx_crcerr++; + if (stat & AR5K_RXERR_FIFO) + sc->stats.ast_rx_fifoerr++; + if (stat & AR5K_RXERR_PHY) { + sc->stats.ast_rx_phyerr++; + sc->stats.ast_rx_phy + [ds->ds_rxstat.rs_phyerr & 0x1f]++; + goto next; + } + if (stat & AR5K_RXERR_DECRYPT) { + /* + * Decrypt error. If the error occurred + * because there was no hardware key, then + * let the frame through so the upper layers + * can process it. This is necessary for 5210 + * parts which have no way to setup a ``clear'' + * key cache entry. 
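+ * (A ``clear'' entry here means a key cache slot programmed
+ * with a null cipher, so that matching frames pass through
+ * the MAC undecrypted.)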
+ * + * XXX do key cache faulting + */ + if (ds->ds_rxstat.rs_keyix == + AR5K_RXKEYIX_INVALID && + !(stat & AR5K_RXERR_CRC)) + goto accept; + sc->stats.ast_rx_badcrypt++; + } + if (stat & AR5K_RXERR_MIC) { + rxs.flag |= RX_FLAG_MMIC_ERROR; + sc->stats.ast_rx_badmic++; + goto accept; + } + + /* let crypto-error packets fall through in MNTR */ + if ((stat &~ (AR5K_RXERR_DECRYPT|AR5K_RXERR_MIC)) || + sc->opmode != IEEE80211_IF_TYPE_MNTR) + goto next; + } +accept: + len = ds->ds_rxstat.rs_datalen; + pci_dma_sync_single_for_cpu(sc->pdev, bf->skbaddr, len, + PCI_DMA_FROMDEVICE); + pci_unmap_single(sc->pdev, bf->skbaddr, sc->rxbufsize, + PCI_DMA_FROMDEVICE); + bf->skb = NULL; + + if (unlikely((ieee80211_get_hdrlen_from_skb(skb) & 3) && net_ratelimit())) + printk(KERN_DEBUG "rx len is not %%4: %u\n", ieee80211_get_hdrlen_from_skb(skb)); + + skb_put(skb, len); + + sc->stats.ast_ant_rx[ds->ds_rxstat.rs_antenna]++; + + if (sc->opmode == IEEE80211_IF_TYPE_MNTR) + rxs.mactime = ath_extend_tsf(sc->ah, + ds->ds_rxstat.rs_tstamp); + else + rxs.mactime = ds->ds_rxstat.rs_tstamp; + rxs.freq = sc->curchan->freq; + rxs.channel = sc->curchan->chan; + rxs.phymode = sc->curmode; + rxs.ssi = ds->ds_rxstat.rs_rssi; + rxs.antenna = ds->ds_rxstat.rs_antenna; + rxs.rate = ds->ds_rxstat.rs_rate; + rxs.flag |= ath_rx_decrypted(sc, ds, skb); + +// printk(KERN_DEBUG "stat: %x, dlen: %u (hdr: %u), rssi: %d, rate: %u\n", ds->ds_rxstat.rs_status, len, ieee80211_get_hdrlen_from_skb(skb), ds->ds_rxstat.rs_rssi, ds->ds_rxstat.rs_rate); + ath_dump_skb(skb, "r"); + + __ieee80211_rx(sc->hw, skb, &rxs); + sc->led_rxrate = ds->ds_rxstat.rs_rate; + ath_led_event(sc, ATH_LED_RX); +next: + list_move_tail(&bf->list, &sc->rxbuf); + } while (ath_rxbuf_init(sc, bf) == 0); + spin_unlock(&sc->rxbuflock); +} + +/* + * Setup the beacon frame for transmit. + */ +static int ath_beacon_setup(struct ath_softc *sc, struct ath_buf *bf, + struct ieee80211_tx_control *ctl) +{ + struct sk_buff *skb = bf->skb; + struct ath_hw *ah = sc->ah; + struct ath_desc *ds; + int ret, antenna = 0; + u32 flags; + + bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len, + PCI_DMA_TODEVICE); + DPRINTF(sc, ATH_DEBUG_BEACON, "%s: skb %p [data %p len %u] " + "skbaddr %llx\n", __func__, skb, skb->data, skb->len, + (unsigned long long)bf->skbaddr); + if (pci_dma_mapping_error(bf->skbaddr)) { + printk(KERN_ERR "ath: beacon DMA mapping failed\n"); + return -EIO; + } + + ds = bf->desc; + + flags = AR5K_TXDESC_NOACK; + if (sc->opmode == IEEE80211_IF_TYPE_IBSS && ath5k_hw_hasveol(ah)) { + ds->ds_link = bf->daddr; /* self-linked */ + flags |= AR5K_TXDESC_VEOL; + /* + * Let hardware handle antenna switching if txantenna is not set + */ + } else { + ds->ds_link = 0; + /* + * Switch antenna every 4 beacons if txantenna is not set + * XXX assumes two antenna + */ + if (antenna == 0) { + antenna = (sc->stats.ast_be_xmit & 4 ? 2 : 1); + } + } + + ds->ds_data = bf->skbaddr; + ret = ah->ah_setup_tx_desc(ah, ds, skb->len + FCS_LEN, + ieee80211_get_hdrlen_from_skb(skb), + AR5K_PKT_TYPE_BEACON, 0xffff, ctl->tx_rate, 1, + AR5K_TXKEYIX_INVALID, antenna, flags, 0, 0); + if (ret) + goto err_unmap; + /* NB: beacon's BufLen must be a multiple of 4 bytes */ + ret = ah->ah_fill_tx_desc(ah, ds, roundup(skb->len, 4), true, true); + if (ret) + goto err_unmap; + + return 0; +err_unmap: + pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, PCI_DMA_TODEVICE); + return ret; +} + +/* + * Transmit a beacon frame at SWBA. 
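+ * (SWBA is the software beacon alert interrupt the hardware
+ * raises shortly before each target beacon transmission time.)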
+ * Dynamic updates to the
+ * frame contents are done as needed and the slot time is
+ * also adjusted based on current state.
+ *
+ * This is usually called from interrupt context (ath_intr())
+ * but also from ath_beacon_config() in IBSS mode, which in turn
+ * can be called from a tasklet and user context.
+ */
+static void ath_beacon_send(struct ath_softc *sc)
+{
+ struct ath_buf *bf = sc->bbuf;
+ struct ath_hw *ah = sc->ah;
+
+ DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s\n", __func__);
+
+ if (unlikely(bf->skb == NULL || sc->opmode == IEEE80211_IF_TYPE_STA ||
+ sc->opmode == IEEE80211_IF_TYPE_MNTR)) {
+ DPRINTF(sc, ATH_DEBUG_ANY, "%s: bf=%p bf_skb=%p\n",
+ __func__, bf, bf ? bf->skb : NULL);
+ return;
+ }
+ /*
+ * Check if the previous beacon has gone out. If
+ * not, don't try to post another; skip this
+ * period and wait for the next. Missed beacons
+ * indicate a problem and should not occur. If we
+ * miss too many consecutive beacons reset the device.
+ */
+ if (unlikely(ath5k_hw_num_tx_pending(ah, sc->bhalq) != 0)) {
+ sc->bmisscount++;
+ DPRINTF(sc, ATH_DEBUG_BEACON_PROC,
+ "%s: missed %u consecutive beacons\n",
+ __func__, sc->bmisscount);
+ if (sc->bmisscount > 3) { /* NB: 3 is a guess */
+ DPRINTF(sc, ATH_DEBUG_BEACON_PROC,
+ "%s: stuck beacon time (%u missed)\n",
+ __func__, sc->bmisscount);
+ tasklet_schedule(&sc->restq);
+ }
+ return;
+ }
+ if (unlikely(sc->bmisscount != 0)) {
+ DPRINTF(sc, ATH_DEBUG_BEACON_PROC,
+ "%s: resume beacon xmit after %u misses\n",
+ __func__, sc->bmisscount);
+ sc->bmisscount = 0;
+ }
+
+ /*
+ * Stop any current dma and put the new frame on the queue.
+ * This should never fail since we check above that no frames
+ * are still pending on the queue.
+ */
+ if (unlikely(!ath5k_hw_stop_tx_dma(ah, sc->bhalq))) {
+ DPRINTF(sc, ATH_DEBUG_ANY, "%s: beacon queue %u didn't stop?\n",
+ __func__, sc->bhalq);
+ /* NB: the HAL still stops DMA, so proceed */
+ }
+ pci_dma_sync_single_for_cpu(sc->pdev, bf->skbaddr, bf->skb->len,
+ PCI_DMA_TODEVICE);
+
+ ath5k_hw_put_tx_buf(ah, sc->bhalq, bf->daddr);
+ ath5k_hw_tx_start(ah, sc->bhalq);
+ DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: TXDP[%u] = %llx (%p)\n",
+ __func__, sc->bhalq, (unsigned long long)bf->daddr, bf->desc);
+
+ sc->stats.ast_be_xmit++;
+}
+
+static int ath_beaconq_config(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->ah;
+ struct ath5k_txq_info qi;
+ int ret;
+
+ ret = ath5k_hw_get_tx_queueprops(ah, sc->bhalq, &qi);
+ if (ret)
+ return ret;
+ if (sc->opmode == IEEE80211_IF_TYPE_AP ||
+ sc->opmode == IEEE80211_IF_TYPE_IBSS) {
+ /*
+ * Always burst out beacon and CAB traffic.
+ */
+ qi.tqi_aifs = ATH_BEACON_AIFS_DEFAULT;
+ qi.tqi_cw_min = ATH_BEACON_CWMIN_DEFAULT;
+ qi.tqi_cw_max = ATH_BEACON_CWMAX_DEFAULT;
+ }
+
+ ret = ath5k_hw_setup_tx_queueprops(ah, sc->bhalq, &qi);
+ if (ret) {
+ printk(KERN_ERR "%s: unable to update parameters for beacon "
+ "hardware queue!\n", __func__);
+ return ret;
+ }
+
+ return ath5k_hw_reset_tx_queue(ah, sc->bhalq); /* push to h/w */
+}
+
+/*
+ * Configure the beacon and sleep timers.
+ *
+ * When operating as an AP this resets the TSF and sets
+ * up the hardware to notify us when we need to issue beacons.
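+ * (Beacon timers are programmed in TU, where 1 TU = 1024us;
+ * the TSF_TO_TU() macro below does exactly that conversion:
+ * the low TSF word is shifted right by 10 to divide by 1024
+ * and the high word contributes its bits shifted left by
+ * 32 - 10 = 22.)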
+ * + * When operating in station mode this sets up the beacon + * timers according to the timestamp of the last received + * beacon and the current TSF, configures PCF and DTIM + * handling, programs the sleep registers so the hardware + * will wakeup in time to receive beacons, and configures + * the beacon miss handling so we'll receive a BMISS + * interrupt when we stop seeing beacons from the AP + * we've associated with. + */ +static void ath_beacon_config(struct ath_softc *sc) +{ +#define TSF_TO_TU(_h,_l) (((_h) << 22) | ((_l) >> 10)) + struct ath_hw *ah = sc->ah; + u32 uninitialized_var(nexttbtt), intval, tsftu; + u64 tsf; + + intval = sc->bintval & AR5K_BEACON_PERIOD; + if (WARN_ON(!intval)) + return; + + /* current TSF converted to TU */ + tsf = ath5k_hw_get_tsf64(ah); + tsftu = TSF_TO_TU((u32)(tsf >> 32), (u32)tsf); + + DPRINTF(sc, ATH_DEBUG_BEACON, "%s: intval %u hw tsftu %u\n", __func__, + intval, tsftu); + + if (sc->opmode == IEEE80211_IF_TYPE_STA) { + ath5k_hw_set_intr(ah, 0); + sc->imask |= AR5K_INT_BMISS; + sc->bmisscount = 0; + ath5k_hw_set_intr(ah, sc->imask); + } else if (sc->opmode == IEEE80211_IF_TYPE_IBSS /* TODO || AP */) { + ath5k_hw_set_intr(ah, 0); + if (sc->opmode == IEEE80211_IF_TYPE_IBSS) { + /* + * Pull nexttbtt forward to reflect the current + * TSF. Add one intval otherwise the timespan + * can be too short for ibss merges. + */ + nexttbtt = tsftu + 2 * intval; + + DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt %u " + "intval %u\n", __func__, nexttbtt, intval); + + /* + * In IBSS mode enable the beacon timers but only + * enable SWBA interrupts if we need to manually + * prepare beacon frames. Otherwise we use a + * self-linked tx descriptor and let the hardware + * deal with things. + */ + if (!ath5k_hw_hasveol(ah)) + sc->imask |= AR5K_INT_SWBA; + } /* TODO else AP */ + + intval |= AR5K_BEACON_ENA; + + ath_beaconq_config(sc); + ath5k_hw_init_beacon(ah, nexttbtt, intval); + + sc->bmisscount = 0; + ath5k_hw_set_intr(ah, sc->imask); + /* + * When using a self-linked beacon descriptor in + * ibss mode load it once here. 
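+ * With VEOL (``virtual end-of-list'') the hardware keeps
+ * re-running that self-linked descriptor at each beacon
+ * interval, so no SWBA-driven reload by software is needed.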
+ */ + if (sc->opmode == IEEE80211_IF_TYPE_IBSS && + ath5k_hw_hasveol(ah)) + ath_beacon_send(sc); + } +#undef TSF_TO_TU +} + +/* + * Calculate the receive filter according to the + * operating mode and state: + * + * o always accept unicast, broadcast, and multicast traffic + * o maintain current state of phy error reception (the hal + * may enable phy error frames for noise immunity work) + * o probe request frames are accepted only when operating in + * hostap, adhoc, or monitor modes + * o enable promiscuous mode according to the interface state + * o accept beacons: + * - when operating in adhoc mode so the 802.11 layer creates + * node table entries for peers, + * - when operating in station mode for collecting rssi data when + * the station is otherwise quiet, or + * - when scanning + * o accept any additional packets specified by sc_rxfilter + */ +static u32 ath_calcrxfilter(struct ath_softc *sc) +{ + struct ath_hw *ah = sc->ah; + unsigned int opmode = sc->opmode; + u32 rfilt; + + rfilt = (ath5k_hw_get_rx_filter(ah) & AR5K_RX_FILTER_PHYERR) | + AR5K_RX_FILTER_UCAST | AR5K_RX_FILTER_BCAST | + AR5K_RX_FILTER_MCAST | AR5K_RX_FILTER_RADARERR; + + if (sc->opmode == IEEE80211_IF_TYPE_MNTR) + rfilt |= AR5K_RX_FILTER_CONTROL | AR5K_RX_FILTER_BEACON | + AR5K_RX_FILTER_PROBEREQ | AR5K_RX_FILTER_PROM; + if (opmode != IEEE80211_IF_TYPE_STA) + rfilt |= AR5K_RX_FILTER_PROBEREQ; + if (opmode != IEEE80211_IF_TYPE_AP && sc->promisc) + rfilt |= AR5K_RX_FILTER_PROM; + if (opmode == IEEE80211_IF_TYPE_STA || opmode == IEEE80211_IF_TYPE_IBSS) + rfilt |= AR5K_RX_FILTER_BEACON; + + return rfilt; +} + +static void ath_mode_init(struct ath_softc *sc) +{ + struct ath_hw *ah = sc->ah; + u32 rfilt; + + /* configure rx filter */ + rfilt = ath_calcrxfilter(sc); + ath5k_hw_set_rx_filter(ah, rfilt); + + if (ath5k_hw_hasbssidmask(ah)) + ath5k_hw_set_bssid_mask(ah, sc->bssidmask); + + /* configure operational mode */ + ath5k_hw_set_opmode(ah); + + ath5k_hw_set_mcast_filter(ah, 0, 0); + DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt); +} + +/* + * Enable the receive h/w following a reset. + */ +static int ath_startrecv(struct ath_softc *sc) +{ + struct ath_hw *ah = sc->ah; + struct ath_buf *bf; + int ret; + + sc->rxbufsize = roundup(IEEE80211_MAX_LEN, sc->cachelsz); + + DPRINTF(sc, ATH_DEBUG_RESET, "%s: cachelsz %u rxbufsize %u\n", + __func__, sc->cachelsz, sc->rxbufsize); + + sc->rxlink = NULL; + + spin_lock_bh(&sc->rxbuflock); + list_for_each_entry(bf, &sc->rxbuf, list) { + ret = ath_rxbuf_init(sc, bf); + if (ret != 0) { + spin_unlock_bh(&sc->rxbuflock); + goto err; + } + } + bf = list_first_entry(&sc->rxbuf, struct ath_buf, list); + spin_unlock_bh(&sc->rxbuflock); + + ath5k_hw_put_rx_buf(ah, bf->daddr); + ath5k_hw_start_rx(ah); /* enable recv descriptors */ + ath_mode_init(sc); /* set filters, etc. */ + ath5k_hw_start_rx_pcu(ah); /* re-enable PCU/DMA engine */ + + return 0; +err: + return ret; +} + +static inline void ath_update_txpow(struct ath_softc *sc) +{ + ath5k_hw_set_txpower_limit(sc->ah, 0); +} + +static int ath_stop_locked(struct ath_softc *); + +static int ath_init(struct ath_softc *sc) +{ + int ret; + + mutex_lock(&sc->lock); + + DPRINTF(sc, ATH_DEBUG_RESET, "%s: mode %d\n", __func__, sc->opmode); + + /* + * Stop anything previously setup. This is safe + * no matter this is the first time through or not. + */ + ath_stop_locked(sc); + + /* + * The basic interface to setting the hardware in a good + * state is ``reset''. 
On return the hardware is known to + * be powered up and with interrupts disabled. This must + * be followed by initialization of the appropriate bits + * and then setup of the interrupt mask. + */ + sc->curchan = sc->hw->conf.chan; + ret = ath5k_hw_reset(sc->ah, sc->opmode, sc->curchan, false); + if (ret) { + printk(KERN_ERR "unable to reset hardware: %d\n", ret); + goto done; + } + /* + * This is needed only to setup initial state + * but it's best done after a reset. + */ + ath_update_txpow(sc); + + /* + * Setup the hardware after reset: the key cache + * is filled as needed and the receive engine is + * set going. Frame transmit is handled entirely + * in the frame output path; there's nothing to do + * here except setup the interrupt mask. + */ + ret = ath_startrecv(sc); + if (ret) + goto done; + + /* + * Enable interrupts. + */ + sc->imask = AR5K_INT_RX | AR5K_INT_TX | AR5K_INT_RXEOL | AR5K_INT_RXORN + | AR5K_INT_FATAL | AR5K_INT_GLOBAL; + + ath5k_hw_set_intr(sc->ah, sc->imask); + + mod_timer(&sc->calib_tim, round_jiffies(jiffies + + msecs_to_jiffies(ath_calinterval * 1000))); + + ret = 0; +done: + mutex_unlock(&sc->lock); + return ret; +} + +/* + * Disable the receive h/w in preparation for a reset. + */ +static void ath_stoprecv(struct ath_softc *sc) +{ + struct ath_hw *ah = sc->ah; + + ath5k_hw_stop_pcu_recv(ah); /* disable PCU */ + ath5k_hw_set_rx_filter(ah, 0); /* clear recv filter */ + ath5k_hw_stop_rx_dma(ah); /* disable DMA engine */ + mdelay(3); /* 3ms is long enough for 1 frame */ +#if AR_DEBUG + if (unlikely(sc->debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL))) { + struct ath_desc *ds; + struct ath_buf *bf; + int status; + + printk(KERN_DEBUG "%s: rx queue %x, link %p\n", __func__, + ath5k_hw_get_rx_buf(ah), sc->rxlink); + + spin_lock_bh(&sc->rxbuflock); + list_for_each_entry(bf, &sc->rxbuf, list) { + ds = bf->desc; + status = ah->ah_proc_rx_desc(ah, ds); + if (!status || (sc->debug & ATH_DEBUG_FATAL)) + ath_printrxbuf(bf, status == 0); + } + spin_unlock_bh(&sc->rxbuflock); + } +#endif + sc->rxlink = NULL; /* just in case */ +} + +static void ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq) +{ + struct ath_buf *bf, *bf0; + + /* + * NB: this assumes output has been stopped and + * we do not need to block ath_tx_tasklet + */ + spin_lock_bh(&txq->lock); + list_for_each_entry_safe(bf, bf0, &txq->q, list) { +#if AR_DEBUG + if (sc->debug & ATH_DEBUG_RESET) + ath_printtxbuf(bf, !sc->ah->ah_proc_tx_desc(sc->ah, + bf->desc)); +#endif + ath_cleanup_txbuf(sc, bf); + + spin_lock_bh(&sc->txbuflock); + sc->tx_stats.data[txq->qnum].len--; + list_move_tail(&bf->list, &sc->txbuf); + sc->txbuf_len++; + spin_unlock_bh(&sc->txbuflock); + } + txq->link = NULL; + spin_unlock_bh(&txq->lock); +} + +/* + * Drain the transmit queues and reclaim resources. 
+ */ +static void ath_draintxq(struct ath_softc *sc) +{ + struct ath_hw *ah = sc->ah; + int i; + + /* XXX return value */ + if (likely(!sc->invalid)) { + /* don't touch the hardware if marked invalid */ + (void)ath5k_hw_stop_tx_dma(ah, sc->bhalq); + DPRINTF(sc, ATH_DEBUG_RESET, "%s: beacon queue %x\n", __func__, + ath5k_hw_get_tx_buf(ah, sc->bhalq)); + for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) + if (sc->txqs[i].setup) { + ath5k_hw_stop_tx_dma(ah, sc->txqs[i].qnum); + DPRINTF(sc, ATH_DEBUG_RESET, "%s: txq [%u] %x, " + "link %p\n", __func__, + sc->txqs[i].qnum, + ath5k_hw_get_tx_buf(ah, + sc->txqs[i].qnum), + sc->txqs[i].link); + } + } + ieee80211_start_queues(sc->hw); /* XXX move to callers */ + + for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) + if (sc->txqs[i].setup) + ath_tx_draintxq(sc, &sc->txqs[i]); +} + +static int ath_stop_locked(struct ath_softc *sc) +{ + struct ath_hw *ah = sc->ah; + + DPRINTF(sc, ATH_DEBUG_RESET, "%s: invalid %u\n", __func__, sc->invalid); + + /* + * Shutdown the hardware and driver: + * stop output from above + * disable interrupts + * turn off timers + * turn off the radio + * clear transmit machinery + * clear receive machinery + * drain and release tx queues + * reclaim beacon resources + * power down hardware + * + * Note that some of this work is not possible if the + * hardware is gone (invalid). + */ + ieee80211_stop_queues(sc->hw); + + if (!sc->invalid) { + if (sc->led_soft) { + del_timer_sync(&sc->led_tim); + ath5k_hw_set_gpio(ah, sc->led_pin, !sc->led_on); + sc->led_blinking = 0; + } + ath5k_hw_set_intr(ah, 0); + } + ath_draintxq(sc); + if (!sc->invalid) { + ath_stoprecv(sc); + ath5k_hw_phy_disable(ah); + } else + sc->rxlink = NULL; +// ath_beacon_free(sc); + + return 0; +} + +/* + * Stop the device, grabbing the top-level lock to protect + * against concurrent entry through ath_init (which can happen + * if another thread does a system call and the thread doing the + * stop is preempted). + */ +static int ath_stop_hw(struct ath_softc *sc) +{ + int ret; + + mutex_lock(&sc->lock); + ret = ath_stop_locked(sc); + if (ret == 0 && !sc->invalid) { + /* + * Set the chip in full sleep mode. Note that we are + * careful to do this only when bringing the interface + * completely to a stop. When the chip is in this state + * it must be carefully woken up or references to + * registers in the PCI clock domain may freeze the bus + * (and system). This varies by chip and is mostly an + * issue with newer parts that go to sleep more quickly. 
+ */
+ if (sc->ah->ah_mac_version > 7 ||
+ (sc->ah->ah_mac_version == 7 && sc->ah->ah_mac_revision >= 8)) {
+ /*
+ * XXX
+ * don't put MAC revisions >= 7.8 to sleep because
+ * of the above mentioned problems
+ */
+ DPRINTF(sc, ATH_DEBUG_RESET, "%s: mac version >= 7.8, "
+ "not putting device to sleep\n", __func__);
+ } else {
+ DPRINTF(sc, ATH_DEBUG_RESET,
+ "%s: putting device to full sleep\n", __func__);
+ ath5k_hw_set_power(sc->ah, AR5K_PM_FULL_SLEEP, true, 0);
+ }
+ }
+ ath_cleanup_txbuf(sc, sc->bbuf);
+ mutex_unlock(&sc->lock);
+
+ del_timer_sync(&sc->calib_tim);
+
+ return ret;
+}
+
+static void ath_setcurmode(struct ath_softc *sc, unsigned int mode)
+{
+ if (unlikely(sc->led_soft)) {
+ /* from Atheros NDIS driver, w/ permission */
+ static const struct {
+ u16 rate; /* tx/rx 802.11 rate */
+ u16 timeOn; /* LED on time (ms) */
+ u16 timeOff; /* LED off time (ms) */
+ } blinkrates[] = {
+ { 108, 40, 10 },
+ { 96, 44, 11 },
+ { 72, 50, 13 },
+ { 48, 57, 14 },
+ { 36, 67, 16 },
+ { 24, 80, 20 },
+ { 22, 100, 25 },
+ { 18, 133, 34 },
+ { 12, 160, 40 },
+ { 10, 200, 50 },
+ { 6, 240, 58 },
+ { 4, 267, 66 },
+ { 2, 400, 100 },
+ { 0, 500, 130 }
+ };
+ const struct ath5k_rate_table *rt =
+ ath5k_hw_get_rate_table(sc->ah, mode);
+ unsigned int i, j;
+
+ BUG_ON(rt == NULL);
+
+ memset(sc->hwmap, 0, sizeof(sc->hwmap));
+ for (i = 0; i < 32; i++) {
+ u8 ix = rt->rate_code_to_index[i];
+ if (ix == 0xff) {
+ sc->hwmap[i].ledon = msecs_to_jiffies(500);
+ sc->hwmap[i].ledoff = msecs_to_jiffies(130);
+ continue;
+ }
+ sc->hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD;
+ if (SHPREAMBLE_FLAG(ix) || rt->rates[ix].modulation ==
+ MODULATION_OFDM)
+ sc->hwmap[i].txflags |=
+ IEEE80211_RADIOTAP_F_SHORTPRE;
+ /* receive frames include FCS */
+ sc->hwmap[i].rxflags = sc->hwmap[i].txflags |
+ IEEE80211_RADIOTAP_F_FCS;
+ /* setup blink rate table to avoid per-packet lookup */
+ for (j = 0; j < ARRAY_SIZE(blinkrates) - 1; j++)
+ if (blinkrates[j].rate == /* XXX why 7f? */
+ (rt->rates[ix].dot11_rate&0x7f))
+ break;
+
+ sc->hwmap[i].ledon = msecs_to_jiffies(blinkrates[j].
+ timeOn);
+ sc->hwmap[i].ledoff = msecs_to_jiffies(blinkrates[j].
+ timeOff);
+ }
+ }
+
+ sc->curmode = mode;
+}
+
+/*
+ * Set/change channels. If the channel is really being changed,
+ * it's done by resetting the chip. To accomplish this we must
+ * first clean up any pending DMA, then restart things a la
+ * ath_init.
+ */
+static int ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan)
+{
+ struct ath_hw *ah = sc->ah;
+ int ret;
+
+ DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz) -> %u (%u MHz)\n",
+ __func__, sc->curchan->chan, sc->curchan->freq,
+ chan->chan, chan->freq);
+
+ if (chan->freq != sc->curchan->freq || chan->val != sc->curchan->val) {
+ /*
+ * To switch channels clear any pending DMA operations;
+ * wait long enough for the RX fifo to drain, reset the
+ * hardware at the new frequency, and then re-enable
+ * the relevant bits of the h/w.
+ */
+ ath5k_hw_set_intr(ah, 0); /* disable interrupts */
+ ath_draintxq(sc); /* clear pending tx frames */
+ ath_stoprecv(sc); /* turn off frame recv */
+ ret = ath5k_hw_reset(ah, sc->opmode, chan, true);
+ if (ret) {
+ printk(KERN_ERR "%s: unable to reset channel %u "
+ "(%u MHz)\n", __func__, chan->chan, chan->freq);
+ return ret;
+ }
+ sc->curchan = chan;
+ ath_update_txpow(sc);
+
+ /*
+ * Re-enable rx framework.
+ */ + ret = ath_startrecv(sc); + if (ret) { + printk(KERN_ERR "%s: unable to restart recv logic\n", + __func__); + return ret; + } + + /* + * Change channels and update the h/w rate map + * if we're switching; e.g. 11a to 11b/g. + */ +// ath_chan_change(sc, chan); + + /* + * Re-enable interrupts. + */ + ath5k_hw_set_intr(ah, sc->imask); + } + + return 0; +} + +static int ath_tx_bf(struct ath_softc *sc, struct ath_buf *bf, + struct ieee80211_tx_control *ctl) +{ + struct ath_hw *ah = sc->ah; + struct ath_txq *txq = sc->txq; + struct ath_desc *ds = bf->desc; + struct sk_buff *skb = bf->skb; + unsigned int hdrpad, pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID; + int ret; + + flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK; + bf->ctl = *ctl; + /* XXX endianness */ + bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len, + PCI_DMA_TODEVICE); + + if (ctl->flags & IEEE80211_TXCTL_NO_ACK) + flags |= AR5K_TXDESC_NOACK; + + if ((ieee80211_get_hdrlen_from_skb(skb) & 3) && net_ratelimit()) + printk(KERN_DEBUG "tx len is not %%4: %u\n", ieee80211_get_hdrlen_from_skb(skb)); + + hdrpad = 0; + pktlen = skb->len - hdrpad + FCS_LEN; + + if (ctl->key_idx != HW_KEY_IDX_INVALID) { + keyidx = ctl->key_idx; + pktlen += ctl->icv_len; + } + + ret = ah->ah_setup_tx_desc(ah, ds, pktlen, + ieee80211_get_hdrlen_from_skb(skb), AR5K_PKT_TYPE_NORMAL, + 0xffff, ctl->tx_rate, ctl->retry_limit, keyidx, 0, flags, 0, 0); + if (ret) + goto err_unmap; + + ds->ds_link = 0; + ds->ds_data = bf->skbaddr; + + ret = ah->ah_fill_tx_desc(ah, ds, skb->len, true, true); + if (ret) + goto err_unmap; + + spin_lock_bh(&txq->lock); + list_add_tail(&bf->list, &txq->q); + sc->tx_stats.data[txq->qnum].len++; + if (txq->link == NULL) /* is this first packet? */ + ath5k_hw_put_tx_buf(ah, txq->qnum, bf->daddr); + else /* no, so only link it */ + *txq->link = bf->daddr; + + txq->link = &ds->ds_link; + ath5k_hw_tx_start(ah, txq->qnum); + spin_unlock_bh(&txq->lock); + +// printk(KERN_DEBUG "bf: %p, skb: %p, flags: %x, daddr: %x, dlink: %x, tlink: %x\n", bf, skb, flags, bf->daddr, ds->ds_link, *txq->link); + + return 0; +err_unmap: + pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, PCI_DMA_TODEVICE); + return ret; +} + +static int ath_tx(struct ieee80211_hw *hw, struct sk_buff *skb, + struct ieee80211_tx_control *ctl) +{ + struct ath_softc *sc = hw->priv; + struct ath_buf *bf; + unsigned long flags; + + ath_dump_skb(skb, "t"); + + if (sc->opmode == IEEE80211_IF_TYPE_MNTR) + DPRINTF(sc, ATH_DEBUG_XMIT, "tx in monitor (scan?)\n"); + + sc->led_txrate = ctl->tx_rate; + + spin_lock_irqsave(&sc->txbuflock, flags); + if (list_empty(&sc->txbuf)) { + if (net_ratelimit()) + printk(KERN_ERR "ath: no further txbuf available, " + "dropping packet\n"); + sc->stats.ast_tx_nobuf++; + spin_unlock_irqrestore(&sc->txbuflock, flags); + ieee80211_stop_queue(hw, ctl->queue); + return -1; + } + bf = list_first_entry(&sc->txbuf, struct ath_buf, list); + list_del(&bf->list); + sc->txbuf_len--; + if (list_empty(&sc->txbuf)) { + sc->stats.ast_tx_qstop++; + ieee80211_stop_queues(hw); + } + spin_unlock_irqrestore(&sc->txbuflock, flags); + + bf->skb = skb; + + if (ath_tx_bf(sc, bf, ctl)) { + bf->skb = NULL; + spin_lock_irqsave(&sc->txbuflock, flags); + list_add_tail(&bf->list, &sc->txbuf); + sc->txbuf_len++; + spin_unlock_irqrestore(&sc->txbuflock, flags); + dev_kfree_skb_any(skb); + return 0; + } + + return 0; +} + +static int ath_reset(struct ieee80211_hw *hw) +{ + struct ath_softc *sc = hw->priv; + struct ath_hw *ah = sc->ah; + int ret; + + DPRINTF(sc, ATH_DEBUG_RESET, 
"resetting\n"); + /* + * Convert to a HAL channel description with the flags + * constrained to reflect the current operating mode. + */ + sc->curchan = hw->conf.chan; + + ath5k_hw_set_intr(ah, 0); + ath_draintxq(sc); + ath_stoprecv(sc); + + ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, true); + if (unlikely(ret)) { + printk(KERN_ERR "ath: can't reset hardware (%d)\n", ret); + goto err; + } + ath_update_txpow(sc); + + ret = ath_startrecv(sc); + if (unlikely(ret)) { + printk(KERN_ERR "ath: can't start recv logic\n"); + goto err; + } + /* + * We may be doing a reset in response to an ioctl + * that changes the channel so update any state that + * might change as a result. + */ +// ath_chan_change(sc, c); + ath_beacon_config(sc); + /* intrs are started by ath_beacon_config */ + + ieee80211_wake_queues(hw); + + return 0; +err: + return ret; +} + +static int ath_open(struct ieee80211_hw *hw) +{ + return ath_init(hw->priv); +} + +static int ath_stop(struct ieee80211_hw *hw) +{ + return ath_stop_hw(hw->priv); +} + +static int ath_add_interface(struct ieee80211_hw *hw, + struct ieee80211_if_init_conf *conf) +{ + struct ath_softc *sc = hw->priv; + int ret; + + mutex_lock(&sc->lock); + if (sc->iface_id) { + ret = 0; + goto end; + } + + sc->iface_id = conf->if_id; + + switch (conf->type) { + case IEEE80211_IF_TYPE_STA: + case IEEE80211_IF_TYPE_IBSS: + case IEEE80211_IF_TYPE_MNTR: + sc->opmode = conf->type; + break; + default: + ret = -EOPNOTSUPP; + goto end; + } + ret = 0; +end: + mutex_unlock(&sc->lock); + return ret; +} + +static void ath_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_if_init_conf *conf) +{ + struct ath_softc *sc = hw->priv; + + mutex_lock(&sc->lock); + if (sc->iface_id != conf->if_id) { + goto end; + } + + sc->iface_id = 0; +end: + mutex_unlock(&sc->lock); +} + +static int ath_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf) +{ + struct ath_softc *sc = hw->priv; + + sc->bintval = conf->beacon_int * 1000 / 1024; + ath_setcurmode(sc, conf->phymode); + + return ath_chan_set(sc, conf->chan); +} + +static int ath_config_interface(struct ieee80211_hw *hw, int if_id, + struct ieee80211_if_conf *conf) +{ + struct ath_softc *sc = hw->priv; + int ret; + + mutex_lock(&sc->lock); + if (sc->iface_id != if_id) { + ret = -EIO; + goto unlock; + } + if (conf->bssid) + ath5k_hw_set_associd(sc->ah, conf->bssid, 0 /* FIXME: aid */); + mutex_unlock(&sc->lock); + + return ath_reset(hw); +unlock: + mutex_unlock(&sc->lock); + return ret; +} + +static void ath_set_multicast_list(struct ieee80211_hw *hw, + unsigned short flags, int mc_count) +{ + struct ath_softc *sc = hw->priv; + unsigned int prom = !!(flags & IFF_PROMISC); + u32 rfilt; + + if (sc->promisc != prom) { + sc->promisc = prom; + rfilt = ath_calcrxfilter(sc); + ath5k_hw_set_rx_filter(sc->ah, rfilt); + } +} + +static int ath_set_key(struct ieee80211_hw *hw, set_key_cmd cmd, + u8 *addr, struct ieee80211_key_conf *key, int aid) +{ + struct ath_softc *sc = hw->priv; + int ret = 0; + + mutex_lock(&sc->lock); + + switch (cmd) { + case SET_KEY: + if (key->alg != ALG_WEP && key->alg != ALG_NONE && + key->alg != ALG_NULL) { + ret = -EINVAL; + goto unlock; + } + + ret = ath5k_hw_set_key(sc->ah, key->keyidx, key, addr); + if (ret) { + printk(KERN_ERR "ath: can't set the key\n"); + goto unlock; + } + + set_bit(key->keyidx, sc->keymap); + key->hw_key_idx = key->keyidx; + key->flags &= ~IEEE80211_KEY_FORCE_SW_ENCRYPT; + break; + case DISABLE_KEY: + ath5k_hw_reset_key(sc->ah, key->keyidx); + clear_bit(key->keyidx, sc->keymap); + 
+		break;
+	case REMOVE_ALL_KEYS: {
+		unsigned int i;
+		for (i = 0; i < AR5K_KEYCACHE_SIZE; i++) {
+			ath5k_hw_reset_key(sc->ah, i);
+			clear_bit(i, sc->keymap);
+		}
+		break;
+	}
+	default:
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+unlock:
+	mutex_unlock(&sc->lock);
+	return ret;
+}
+
+static int ath_get_stats(struct ieee80211_hw *hw,
+		struct ieee80211_low_level_stats *stats)
+{
+	struct ath_softc *sc = hw->priv;
+
+	memcpy(stats, &sc->ll_stats, sizeof(sc->ll_stats));
+
+	return 0;
+}
+
+static int ath_get_tx_stats(struct ieee80211_hw *hw,
+		struct ieee80211_tx_queue_stats *stats)
+{
+	struct ath_softc *sc = hw->priv;
+
+	memcpy(stats, &sc->tx_stats, sizeof(sc->tx_stats));
+
+	return 0;
+}
+
+static u64 ath_get_tsf(struct ieee80211_hw *hw)
+{
+	struct ath_softc *sc = hw->priv;
+
+	return ath5k_hw_get_tsf64(sc->ah);
+}
+
+static void ath_reset_tsf(struct ieee80211_hw *hw)
+{
+	struct ath_softc *sc = hw->priv;
+
+	ath5k_hw_reset_tsf(sc->ah);
+}
+
+static int ath_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
+		struct ieee80211_tx_control *ctl)
+{
+	struct ath_softc *sc = hw->priv;
+	int ret;
+
+	ath_dump_skb(skb, "b");
+
+	mutex_lock(&sc->lock);
+
+	if (sc->opmode != IEEE80211_IF_TYPE_IBSS) {
+		ret = -EIO;
+		goto end;
+	}
+
+	ath_cleanup_txbuf(sc, sc->bbuf);
+	sc->bbuf->skb = skb;
+	ret = ath_beacon_setup(sc, sc->bbuf, ctl);
+	if (ret)
+		sc->bbuf->skb = NULL;
+
+end:
+	mutex_unlock(&sc->lock);
+	return ret;
+}
+
+static struct ieee80211_ops ath_hw_ops = {
+	.tx = ath_tx,
+	.reset = ath_reset,
+	.open = ath_open,
+	.stop = ath_stop,
+	.add_interface = ath_add_interface,
+	.remove_interface = ath_remove_interface,
+	.config = ath_config,
+	.config_interface = ath_config_interface,
+	.set_multicast_list = ath_set_multicast_list,
+	.set_key = ath_set_key,
+	.get_stats = ath_get_stats,
+	.conf_tx = NULL,
+	.get_tx_stats = ath_get_tx_stats,
+	.get_tsf = ath_get_tsf,
+	.reset_tsf = ath_reset_tsf,
+	.beacon_update = ath_beacon_update,
+};
+
+/*
+ * Periodically recalibrate the PHY to account
+ * for temperature/environment changes.
+ */
+static void ath_calibrate(unsigned long data)
+{
+	struct ath_softc *sc = (void *)data;
+	struct ath_hw *ah = sc->ah;
+
+	sc->stats.ast_per_cal++;
+
+	DPRINTF(sc, ATH_DEBUG_CALIBRATE, "ath: channel %u/%x\n",
+		sc->curchan->chan, sc->curchan->val);
+
+	if (ath5k_hw_get_rf_gain(ah) == AR5K_RFGAIN_NEED_CHANGE) {
+		/*
+		 * Rfgain is out of bounds, reset the chip
+		 * to load new gain values.
+		 */
+		sc->stats.ast_per_rfgain++;
+		DPRINTF(sc, ATH_DEBUG_RESET, "calibration, resetting\n");
+		ath_reset(sc->hw);
+	}
+	if (ath5k_hw_phy_calibrate(ah, sc->curchan)) {
+		DPRINTF(sc, ATH_DEBUG_ANY, "ath: calibration of channel %u "
+			"failed\n", sc->curchan->chan);
+		sc->stats.ast_per_calfail++;
+	}
+
+	mod_timer(&sc->calib_tim, round_jiffies(jiffies +
+		msecs_to_jiffies(ath_calinterval * 1000)));
+}
+
+static void ath_led_off(unsigned long data)
+{
+	struct ath_softc *sc = (void *)data;
+
+	if (sc->led_endblink)
+		sc->led_blinking = 0;
+	else {
+		sc->led_endblink = 1;
+		ath5k_hw_set_gpio(sc->ah, sc->led_pin, !sc->led_on);
+		mod_timer(&sc->led_tim, jiffies + sc->led_off);
+	}
+}
+
+/*
+ * Blink the LED according to the specified on/off times.
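+ * Both arguments are in jiffies; ath_setcurmode() derives them from
+ * the blinkrates table via msecs_to_jiffies(), so e.g. 54 Mbit/s
+ * traffic (dot11 rate 108) blinks roughly 40 ms on / 10 ms off.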
+ */
+static void ath_led_blink(struct ath_softc *sc, unsigned int on,
+		unsigned int off)
+{
+	DPRINTF(sc, ATH_DEBUG_LED, "%s: on %u off %u\n", __func__, on, off);
+	ath5k_hw_set_gpio(sc->ah, sc->led_pin, sc->led_on);
+	sc->led_blinking = 1;
+	sc->led_endblink = 0;
+	sc->led_off = off;
+	mod_timer(&sc->led_tim, jiffies + on);
+}
+
+static void ath_led_event(struct ath_softc *sc, int event)
+{
+	if (likely(!sc->led_soft))
+		return;
+	if (unlikely(sc->led_blinking))	/* don't interrupt active blink */
+		return;
+	switch (event) {
+	case ATH_LED_TX:
+		ath_led_blink(sc, sc->hwmap[sc->led_txrate].ledon,
+			sc->hwmap[sc->led_txrate].ledoff);
+		break;
+	case ATH_LED_RX:
+		ath_led_blink(sc, sc->hwmap[sc->led_rxrate].ledon,
+			sc->hwmap[sc->led_rxrate].ledoff);
+		break;
+	}
+}
+
+static irqreturn_t ath_intr(int irq, void *dev_id)
+{
+	struct ath_softc *sc = dev_id;
+	struct ath_hw *ah = sc->ah;
+	enum ath5k_int status;
+	unsigned int counter = 1000;
+
+	if (unlikely(sc->invalid || !ath5k_hw_is_intr_pending(ah)))
+		return IRQ_NONE;
+
+	do {
+		/*
+		 * Figure out the reason(s) for the interrupt. Note
+		 * that the hal returns a pseudo-ISR that may include
+		 * bits we haven't explicitly enabled so we mask the
+		 * value to ensure we only process bits we requested.
+		 */
+		ath5k_hw_get_isr(ah, &status);	/* NB: clears IRQ too */
+		DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x/0x%x\n", __func__,
+			status, sc->imask);
+		status &= sc->imask;	/* discard unasked-for bits */
+		if (unlikely(status & AR5K_INT_FATAL)) {
+			/*
+			 * Fatal errors are unrecoverable. Typically
+			 * these are caused by DMA errors. Unfortunately
+			 * the exact reason is not (presently) returned
+			 * by the hal.
+			 */
+			sc->stats.ast_hardware++;
+			tasklet_schedule(&sc->restq);
+		} else if (unlikely(status & AR5K_INT_RXORN)) {
+			sc->stats.ast_rxorn++;
+			tasklet_schedule(&sc->restq);
+		} else {
+			if (status & AR5K_INT_SWBA) {
+				/*
+				 * Software beacon alert--time to send a beacon.
+				 * Handle beacon transmission directly; deferring
+				 * this is too slow to meet timing constraints
+				 * under load.
+				 */
+				ath_beacon_send(sc);
+			}
+			if (status & AR5K_INT_RXEOL) {
+				/*
+				 * NB: the hardware should re-read the link when
+				 * RXE bit is written, but it doesn't work at
+				 * least on older hardware revs.
+				 */
+				sc->stats.ast_rxeol++;
+				sc->rxlink = NULL;
+			}
+			if (status & AR5K_INT_TXURN) {
+				sc->stats.ast_txurn++;
+				/* bump tx trigger level */
+				ath5k_hw_update_tx_triglevel(ah, true);
+			}
+			if (status & AR5K_INT_RX)
+				tasklet_schedule(&sc->rxtq);
+			if (status & AR5K_INT_TX)
+				tasklet_schedule(&sc->txtq);
+			if (status & AR5K_INT_BMISS) {
+				sc->stats.ast_bmiss++;
+/*				tasklet_schedule(&sc->bmisstq);*/
+			}
+			if (status & AR5K_INT_MIB) {
+				sc->stats.ast_mib++;
+				/* TODO */
+			}
+		}
+	} while (ath5k_hw_is_intr_pending(ah) && counter-- > 0);
+
+	if (unlikely(!counter && net_ratelimit()))
+		printk(KERN_WARNING "ath: too many interrupts, giving up for "
+			"now\n");
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Convert IEEE channel number to MHz frequency.
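+ * For illustration, assuming the stock ieee80211chan2mhz() mapping:
+ *
+ *	ath_ieee2mhz(6)  == 2437	(2GHz, standard mapping)
+ *	ath_ieee2mhz(36) == 5180	(5GHz, standard mapping)
+ *	ath_ieee2mhz(16) == 2532	(Atheros 2GHz extension,
+ *					 2212 + 16 * 20)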
+ */
+static inline short ath_ieee2mhz(short chan)
+{
+	if (chan <= 14 || chan >= 27)
+		return ieee80211chan2mhz(chan);
+	else
+		return 2212 + chan * 20;
+}
+
+static unsigned int ath_copy_rates(struct ieee80211_rate *rates,
+		const struct ath5k_rate_table *rt, unsigned int max)
+{
+	unsigned int i, count;
+
+	if (rt == NULL)
+		return 0;
+
+	for (i = 0, count = 0; i < rt->rate_count && max > 0; i++) {
+		if (!rt->rates[i].valid)
+			continue;
+		rates->rate = rt->rates[i].rate_kbps / 100;
+		rates->val = rt->rates[i].rate_code;
+		rates->flags = rt->rates[i].modulation;
+		rates++;
+		count++;
+		max--;
+	}
+
+	return count;
+}
+
+static unsigned int ath_copy_channels(struct ath_hw *ah,
+		struct ieee80211_channel *channels, unsigned int mode,
+		unsigned int max)
+{
+	static const struct { unsigned int mode, mask, chan; } map[] = {
+		[MODE_IEEE80211A] = { CHANNEL_OFDM,
+			CHANNEL_OFDM | CHANNEL_TURBO, CHANNEL_A },
+		[MODE_ATHEROS_TURBO] = { CHANNEL_OFDM | CHANNEL_TURBO,
+			CHANNEL_OFDM | CHANNEL_TURBO, CHANNEL_T },
+		[MODE_IEEE80211B] = { CHANNEL_CCK, CHANNEL_CCK, CHANNEL_B },
+		[MODE_IEEE80211G] = { CHANNEL_OFDM, CHANNEL_OFDM, CHANNEL_G },
+		[MODE_ATHEROS_TURBOG] = { CHANNEL_OFDM | CHANNEL_TURBO,
+			CHANNEL_OFDM | CHANNEL_TURBO, CHANNEL_TG },
+	};
+	static const struct ath5k_regchannel chans_2ghz[] =
+		IEEE80211_CHANNELS_2GHZ;
+	static const struct ath5k_regchannel chans_5ghz[] =
+		IEEE80211_CHANNELS_5GHZ;
+	const struct ath5k_regchannel *chans;
+	enum ath5k_regdom dmn;
+	unsigned int i, count, size, chfreq, all, f, ch;
+
+	if (!test_bit(mode, ah->ah_modes))
+		return 0;
+
+	all = ah->ah_regdomain == DMN_DEFAULT || CHAN_DEBUG == 1;
+
+	switch (mode) {
+	case MODE_IEEE80211A:
+	case MODE_ATHEROS_TURBO:
+		/* 1..220, but 2GHz frequencies are filtered by check_channel */
+		size = all ? 220 : ARRAY_SIZE(chans_5ghz);
+		chans = chans_5ghz;
+		dmn = ath5k_regdom2flag(ah->ah_regdomain,
+			IEEE80211_CHANNELS_5GHZ_MIN);
+		chfreq = CHANNEL_5GHZ;
+		break;
+	case MODE_IEEE80211B:
+	case MODE_IEEE80211G:
+	case MODE_ATHEROS_TURBOG:
+		size = all ? 26 : ARRAY_SIZE(chans_2ghz);
+		chans = chans_2ghz;
+		dmn = ath5k_regdom2flag(ah->ah_regdomain,
+			IEEE80211_CHANNELS_2GHZ_MIN);
+		chfreq = CHANNEL_2GHZ;
+		break;
+	default:
+		printk(KERN_WARNING "bad mode, not copying channels\n");
+		return 0;
+	}
+
+	for (i = 0, count = 0; i < size && max > 0; i++) {
+		ch = all ? i + 1 : chans[i].chan;
+		f = ath_ieee2mhz(ch);
+		/* Check if channel is supported by the chipset */
+		if (!ath5k_channel_ok(ah, f, chfreq))
+			continue;
+
+		/* Match regulation domain */
+		if (!all && !(IEEE80211_DMN(chans[i].domain) &
+				IEEE80211_DMN(dmn)))
+			continue;
+
+		if (!all && (chans[i].mode & map[mode].mask) != map[mode].mode)
+			continue;
+
+		/* Write channel and increment counter */
+		channels->chan = ch;
+		channels->freq = f;
+		channels->val = map[mode].chan;
+		channels++;
+		count++;
+		max--;
+	}
+
+	return count;
+}
+
+#if ATH_DEBUG_MODES
+static void ath_dump_modes(struct ieee80211_hw_mode *modes)
+{
+	unsigned int m, i;
+
+	for (m = 0; m < NUM_IEEE80211_MODES; m++) {
+		printk(KERN_DEBUG "Mode %u: channels %d, rates %d\n", m,
+			modes[m].num_channels, modes[m].num_rates);
+		printk(KERN_DEBUG " channels:\n");
+		for (i = 0; i < modes[m].num_channels; i++) {
+			printk(KERN_DEBUG " %3d %d %.4x %.4x\n",
+				modes[m].channels[i].chan,
+				modes[m].channels[i].freq,
+				modes[m].channels[i].val,
+				modes[m].channels[i].flag);
+		}
+		printk(KERN_DEBUG " rates:\n");
+		for (i = 0; i < modes[m].num_rates; i++) {
+			printk(KERN_DEBUG " %4d %.4x %.4x %.4x\n",
+				modes[m].rates[i].rate,
+				modes[m].rates[i].val,
+				modes[m].rates[i].flags,
+				modes[m].rates[i].val2);
+		}
+	}
+}
+#else
+static inline void ath_dump_modes(struct ieee80211_hw_mode *modes) {}
+#endif
+
+static int ath_getchannels(struct ieee80211_hw *hw)
+{
+	struct ath_softc *sc = hw->priv;
+	struct ath_hw *ah = sc->ah;
+	struct ieee80211_hw_mode *modes = sc->modes;
+	unsigned int i, max;
+	int ret;
+	enum {
+		A = MODE_IEEE80211A,
+		B = MODE_IEEE80211G,	/* this is not a typo, but workaround */
+		G = MODE_IEEE80211B,	/* to prefer g over b */
+		T = MODE_ATHEROS_TURBO,
+		TG = MODE_ATHEROS_TURBOG,
+	};
+
+	BUILD_BUG_ON(ARRAY_SIZE(sc->modes) < 5);
+
+	ah->ah_country_code = countrycode;
+
+	modes[A].mode = MODE_IEEE80211A;
+	modes[B].mode = MODE_IEEE80211B;
+	modes[G].mode = MODE_IEEE80211G;
+
+	max = ARRAY_SIZE(sc->rates);
+	modes[A].rates = sc->rates;
+	max -= modes[A].num_rates = ath_copy_rates(modes[A].rates,
+		ath5k_hw_get_rate_table(ah, MODE_IEEE80211A), max);
+	modes[B].rates = &modes[A].rates[modes[A].num_rates];
+	max -= modes[B].num_rates = ath_copy_rates(modes[B].rates,
+		ath5k_hw_get_rate_table(ah, MODE_IEEE80211B), max);
+	modes[G].rates = &modes[B].rates[modes[B].num_rates];
+	max -= modes[G].num_rates = ath_copy_rates(modes[G].rates,
+		ath5k_hw_get_rate_table(ah, MODE_IEEE80211G), max);
+
+	if (!max)
+		printk(KERN_WARNING "more rates found than there is "
+			"space to store them\n");
+
+	max = ARRAY_SIZE(sc->channels);
+	modes[A].channels = sc->channels;
+	max -= modes[A].num_channels = ath_copy_channels(ah, modes[A].channels,
+		MODE_IEEE80211A, max);
+	modes[B].channels = &modes[A].channels[modes[A].num_channels];
+	max -= modes[B].num_channels = ath_copy_channels(ah, modes[B].channels,
+		MODE_IEEE80211B, max);
+	modes[G].channels = &modes[B].channels[modes[B].num_channels];
+	max -= modes[G].num_channels = ath_copy_channels(ah, modes[G].channels,
+		MODE_IEEE80211G, max);
+
+	if (!max)
+		printk(KERN_WARNING "more channels found than there is "
+			"space to store them\n");
+
+	for (i = 0; i < ARRAY_SIZE(sc->modes); i++)
+		if (modes[i].num_channels) {
+			ret = ieee80211_register_hwmode(hw, &modes[i]);
+			if (ret) {
+				printk(KERN_ERR "can't register hwmode %u\n", i);
+				goto err;
+			}
+		}
+	ath_dump_modes(modes);
+
+	return 0;
+err:
+	return ret;
+}
+
+static int ath_desc_alloc(struct ath_softc *sc,
+		struct pci_dev *pdev)
+{
+	struct ath_desc *ds;
+	struct ath_buf *bf;
+	dma_addr_t da;
+	unsigned int i;
+	int ret;
+
+	/* allocate descriptors */
+	sc->desc_len = sizeof(struct ath_desc) *
+		(ATH_TXBUF * ATH_TXDESC + ATH_RXBUF + ATH_BCBUF + 1);
+	sc->desc = pci_alloc_consistent(pdev, sc->desc_len, &sc->desc_daddr);
+	if (sc->desc == NULL) {
+		dev_err(&pdev->dev, "can't allocate descriptors\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+	ds = sc->desc;
+	da = sc->desc_daddr;
+	DPRINTF(sc, ATH_DEBUG_ANY, "%s: DMA map: %p (%zu) -> %llx\n",
+		__func__, ds, sc->desc_len, (unsigned long long)sc->desc_daddr);
+
+	bf = kcalloc(1 + ATH_TXBUF + ATH_RXBUF + ATH_BCBUF,
+		sizeof(struct ath_buf), GFP_KERNEL);
+	if (bf == NULL) {
+		dev_err(&pdev->dev, "can't allocate bufptr\n");
+		ret = -ENOMEM;
+		goto err_free;
+	}
+	sc->bufptr = bf;
+
+	INIT_LIST_HEAD(&sc->rxbuf);
+	for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
+		bf->desc = ds;
+		bf->daddr = da;
+		list_add_tail(&bf->list, &sc->rxbuf);
+	}
+
+	INIT_LIST_HEAD(&sc->txbuf);
+	sc->txbuf_len = ATH_TXBUF;
+	for (i = 0; i < ATH_TXBUF; i++, bf++, ds += ATH_TXDESC,
+			da += ATH_TXDESC * sizeof(*ds)) {
+		bf->desc = ds;
+		bf->daddr = da;
+		list_add_tail(&bf->list, &sc->txbuf);
+	}
+
+	/* beacon buffer */
+	bf->desc = ds;
+	bf->daddr = da;
+	sc->bbuf = bf;
+
+	return 0;
+err_free:
+	pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr);
+err:
+	sc->desc = NULL;
+	return ret;
+}
+
+static void ath_desc_free(struct ath_softc *sc, struct pci_dev *pdev)
+{
+	struct ath_buf *bf;
+
+	ath_cleanup_txbuf(sc, sc->bbuf);
+	list_for_each_entry(bf, &sc->txbuf, list)
+		ath_cleanup_txbuf(sc, bf);
+	list_for_each_entry(bf, &sc->rxbuf, list)
+		ath_cleanup_txbuf(sc, bf);
+
+	/* Free memory associated with all descriptors */
+	pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr);
+
+	kfree(sc->bufptr);
+	sc->bufptr = NULL;
+}
+
+static int ath_beaconq_setup(struct ath_hw *ah)
+{
+	struct ath5k_txq_info qi = {
+		.tqi_aifs = AR5K_TXQ_USEDEFAULT,
+		.tqi_cw_min = AR5K_TXQ_USEDEFAULT,
+		.tqi_cw_max = AR5K_TXQ_USEDEFAULT,
+		/* NB: for dynamic turbo, don't enable any other interrupts */
+		.tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE
+	};
+
+	return ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_BEACON, &qi);
+}
+
+static struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype,
+		int subtype)
+{
+	struct ath_hw *ah = sc->ah;
+	struct ath_txq *txq;
+	struct ath5k_txq_info qi = {
+		.tqi_subtype = subtype,
+		.tqi_aifs = AR5K_TXQ_USEDEFAULT,
+		.tqi_cw_min = AR5K_TXQ_USEDEFAULT,
+		.tqi_cw_max = AR5K_TXQ_USEDEFAULT
+	};
+	int qnum;
+
+	/*
+	 * Enable interrupts only for EOL and DESC conditions.
+	 * We mark tx descriptors to receive a DESC interrupt
+	 * when a tx queue gets deep; otherwise we wait for the
+	 * EOL to reap descriptors. Note that this is done to
+	 * reduce interrupt load and this only defers reaping
+	 * descriptors, never transmitting frames. Aside from
+	 * reducing interrupts this also permits more concurrency.
+	 * The only potential downside is if the tx queue backs
+	 * up, in which case the top half of the kernel may back
+	 * up due to a lack of tx descriptors.
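+	 * (The backlog is bounded in practice: there are at most
+	 * ATH_TXBUF buffers on the free list, and ath_tx() stops the
+	 * mac80211 queues as soon as that list empties.)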
+	 */
+	qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE |
+		AR5K_TXQ_FLAG_TXDESCINT_ENABLE;
+	qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi);
+	if (qnum < 0) {
+		/*
+		 * NB: don't print a message, this happens
+		 * normally on parts with too few tx queues
+		 */
+		return ERR_PTR(qnum);
+	}
+	if (qnum >= ARRAY_SIZE(sc->txqs)) {
+		printk(KERN_ERR "hal qnum %u out of range, max %zu!\n",
+			qnum, ARRAY_SIZE(sc->txqs));
+		ath5k_hw_release_tx_queue(ah, qnum);
+		return ERR_PTR(-EINVAL);
+	}
+	txq = &sc->txqs[qnum];
+	if (!txq->setup) {
+		txq->qnum = qnum;
+		txq->link = NULL;
+		INIT_LIST_HEAD(&txq->q);
+		spin_lock_init(&txq->lock);
+		txq->setup = true;
+	}
+	return &sc->txqs[qnum];
+}
+
+static void ath_tx_cleanup(struct ath_softc *sc)
+{
+	struct ath_txq *txq = sc->txqs;
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(sc->txqs); i++, txq++)
+		if (txq->setup) {
+			ath5k_hw_release_tx_queue(sc->ah, txq->qnum);
+			txq->setup = false;
+		}
+}
+
+static int ath_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
+{
+	struct ath_softc *sc = hw->priv;
+	struct ath_hw *ah = sc->ah;
+	u8 mac[ETH_ALEN];
+	unsigned int i;
+	int ret;
+
+	DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, pdev->device);
+
+	/*
+	 * Check if the MAC has multi-rate retry support.
+	 * We do this by trying to setup a fake extended
+	 * descriptor. MACs that don't have support will
+	 * return false w/o doing anything. MACs that do
+	 * support it will return true w/o doing anything.
+	 */
+	sc->mrretry = ah->ah_setup_xtx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
+
+	/*
+	 * Reset the key cache since some parts do not
+	 * reset the contents on initial power up.
+	 */
+	for (i = 0; i < AR5K_KEYCACHE_SIZE; i++)
+		ath5k_hw_reset_key(ah, i);
+
+	/*
+	 * Collect the channel list using the default country
+	 * code and including outdoor channels. The 802.11 layer
+	 * is responsible for filtering this list based on settings
+	 * like the phy mode.
+	 */
+	ret = ath_getchannels(hw);
+	if (ret) {
+		dev_err(&pdev->dev, "can't get channels\n");
+		goto err;
+	}
+
+	/* NB: setup here so ath_rate_update is happy */
+	if (test_bit(MODE_IEEE80211A, ah->ah_modes))
+		ath_setcurmode(sc, MODE_IEEE80211A);
+	else
+		ath_setcurmode(sc, MODE_IEEE80211B);
+
+	/*
+	 * Allocate tx+rx descriptors and populate the lists.
+	 */
+	ret = ath_desc_alloc(sc, pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "can't allocate descriptors\n");
+		goto err;
+	}
+
+	/*
+	 * Allocate hardware transmit queues: one queue for
+	 * beacon frames and one data queue for each QoS
+	 * priority. Note that the hal handles resetting
+	 * these queues at the needed time.
+	 */
+	ret = ath_beaconq_setup(ah);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "can't setup a beacon xmit queue\n");
+		goto err_desc;
+	}
+	sc->bhalq = ret;
+
+	sc->txq = ath_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK);
+	if (IS_ERR(sc->txq)) {
+		dev_err(&pdev->dev, "can't setup xmit queue\n");
+		ret = PTR_ERR(sc->txq);
+		goto err_bhal;
+	}
+
+	tasklet_init(&sc->rxtq, ath_tasklet_rx, (unsigned long)sc);
+	tasklet_init(&sc->txtq, ath_tasklet_tx, (unsigned long)sc);
+	tasklet_init(&sc->restq, ath_tasklet_reset, (unsigned long)sc);
+	setup_timer(&sc->calib_tim, ath_calibrate, (unsigned long)sc);
+	setup_timer(&sc->led_tim, ath_led_off, (unsigned long)sc);
+
+	sc->led_blinking = 0;
+	sc->led_on = 0;	/* low true */
+	/*
+	 * Auto-enable soft led processing for IBM cards and for
+	 * 5211 minipci cards. Users can also manually enable/disable
+	 * support with a sysctl.
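+	 * (sc->led_on = 0 above means the LED is wired active-low:
+	 * ath_led_blink() drives the GPIO pin to sc->led_on to light
+	 * it, and ath_led_off() writes the inverse.)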
+	 */
+	if (pdev->device == PCI_DEVICE_ID_ATHEROS_AR5212_IBM ||
+			pdev->device == PCI_DEVICE_ID_ATHEROS_AR5211) {
+		sc->led_soft = 1;
+		sc->led_pin = 0;
+	}
+	/* Enable softled on PIN1 on HP Compaq nc6xx, nc4000 & nx5000 laptops */
+	if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ) {
+		sc->led_soft = 1;
+		sc->led_pin = 0;
+	}
+	if (sc->led_soft) {
+		ath5k_hw_set_gpio_output(ah, sc->led_pin);
+		ath5k_hw_set_gpio(ah, sc->led_pin, !sc->led_on);
+	}
+
+	ath5k_hw_get_lladdr(ah, mac);
+	SET_IEEE80211_PERM_ADDR(hw, mac);
+	if (ath5k_hw_hasbssidmask(ah)) {
+		memset(sc->bssidmask, 0xff, ETH_ALEN);
+		ath5k_hw_set_bssid_mask(ah, sc->bssidmask);
+	}
+
+	ret = ieee80211_register_hw(hw);
+	if (ret) {
+		dev_err(&pdev->dev, "can't register ieee80211 hw\n");
+		goto err_queues;
+	}
+
+	return 0;
+err_queues:
+	ath_tx_cleanup(sc);
+err_bhal:
+	ath5k_hw_release_tx_queue(ah, sc->bhalq);
+err_desc:
+	ath_desc_free(sc, pdev);
+err:
+	return ret;
+}
+
+static void ath_detach(struct pci_dev *pdev, struct ieee80211_hw *hw)
+{
+	struct ath_softc *sc = hw->priv;
+
+	/*
+	 * NB: the order of these is important:
+	 * o call the 802.11 layer before detaching the hal to
+	 *   ensure callbacks into the driver to delete global
+	 *   key cache entries can be handled
+	 * o reclaim the tx queue data structures after calling
+	 *   the 802.11 layer as we'll get called back to reclaim
+	 *   node state and potentially want to use them
+	 * o to cleanup the tx queues the hal is called, so detach
+	 *   it last
+	 * Other than that, it's straightforward...
+	 */
+	ieee80211_unregister_hw(hw);
+	ath_desc_free(sc, pdev);
+	ath_tx_cleanup(sc);
+	ath5k_hw_release_tx_queue(sc->ah, sc->bhalq);
+
+	/*
+	 * NB: can't reclaim these until after ieee80211_ifdetach
+	 * returns because we'll get called back to reclaim node
+	 * state and potentially want to use them.
+	 */
+//	ath_dynamic_sysctl_unregister(sc);
+}
+
+static const char *ath_chip_name(u8 mac_version)
+{
+	switch (mac_version) {
+	case AR5K_AR5210:
+		return "AR5210";
+	case AR5K_AR5211:
+		return "AR5211";
+	case AR5K_AR5212:
+		return "AR5212";
+	}
+	return "Unknown";
+}
+
+static int __devinit ath_pci_probe(struct pci_dev *pdev,
+		const struct pci_device_id *id)
+{
+	void __iomem *mem;
+	struct ath_softc *sc;
+	struct ieee80211_hw *hw;
+	int ret;
+	u8 csz;
+
+	ret = pci_enable_device(pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "can't enable device\n");
+		goto err;
+	}
+
+	/* XXX 32-bit addressing only */
+	ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+	if (ret) {
+		dev_err(&pdev->dev, "32-bit DMA not available\n");
+		goto err_dis;
+	}
+
+	/*
+	 * Cache line size is used to size and align various
+	 * structures used to communicate with the hardware.
+	 */
+	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
+	if (csz == 0) {
+		/*
+		 * Linux 2.4.18 (at least) writes the cache line size
+		 * register as a 16-bit wide register which is wrong.
+		 * We must have this setup properly for rx buffer
+		 * DMA to work so force a reasonable value here if it
+		 * comes up zero.
+		 */
+		csz = L1_CACHE_BYTES / sizeof(u32);
+		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
+	}
+	/*
+	 * The default setting of latency timer yields poor results,
+	 * set it to the value used by other systems. It may be worth
+	 * tweaking this setting more.
+	 */
+	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);
+
+	pci_set_master(pdev);
+
+	/*
+	 * Disable the RETRY_TIMEOUT register (0x41) to keep
+	 * PCI Tx retries from interfering with C3 CPU state.
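+	 * (NB: a suspend/resume cycle resets the PCI configuration
+	 * space, so ath_pci_resume() below repeats this write.)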
+	 */
+	pci_write_config_byte(pdev, 0x41, 0);
+
+	ret = pci_request_region(pdev, 0, "ath");
+	if (ret) {
+		dev_err(&pdev->dev, "cannot reserve PCI memory region\n");
+		goto err_dis;
+	}
+
+	mem = pci_iomap(pdev, 0, 0);
+	if (!mem) {
+		dev_err(&pdev->dev, "cannot remap PCI memory region\n");
+		ret = -EIO;
+		goto err_reg;
+	}
+
+	hw = ieee80211_alloc_hw(sizeof(*sc), &ath_hw_ops);
+	if (hw == NULL) {
+		dev_err(&pdev->dev, "cannot allocate ieee80211_hw\n");
+		ret = -ENOMEM;
+		goto err_map;
+	}
+
+	SET_IEEE80211_DEV(hw, &pdev->dev);
+	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | IEEE80211_HW_WEP_INCLUDE_IV |
+		IEEE80211_HW_DATA_NULLFUNC_ACK;
+	hw->extra_tx_headroom = 2;
+	hw->channel_change_time = 5000;
+	hw->max_rssi = 127;	/* FIXME: get a real value for this. */
+	sc = hw->priv;
+	sc->hw = hw;
+
+	/*
+	 * Mark the device as detached to avoid processing
+	 * interrupts until setup is complete.
+	 */
+#if AR_DEBUG
+	sc->debug = ath_debug;
+#endif
+	sc->invalid = 1;
+	sc->iobase = mem;
+	sc->cachelsz = csz * sizeof(u32);	/* convert to bytes */
+	sc->opmode = IEEE80211_IF_TYPE_STA;
+	mutex_init(&sc->lock);
+	spin_lock_init(&sc->rxbuflock);
+	spin_lock_init(&sc->txbuflock);
+
+	pci_set_drvdata(pdev, hw);
+
+	ret = request_irq(pdev->irq, ath_intr, IRQF_SHARED, "ath", sc);
+	if (ret) {
+		dev_err(&pdev->dev, "request_irq failed\n");
+		goto err_free;
+	}
+
+	sc->ah = ath5k_hw_attach(pdev->device, id->driver_data, sc, sc->iobase);
+	if (IS_ERR(sc->ah)) {
+		ret = PTR_ERR(sc->ah);
+		goto err_irq;
+	}
+
+	ret = ath_attach(pdev, hw);
+	if (ret)
+		goto err_ah;
+
+	dev_info(&pdev->dev, "%s chip found: mac %d.%d phy %d.%d\n",
+		ath_chip_name(id->driver_data), sc->ah->ah_mac_version,
+		sc->ah->ah_mac_revision, sc->ah->ah_phy_revision >> 4,
+		sc->ah->ah_phy_revision & 0xf);
+
+	/* ready to process interrupts */
+	sc->invalid = 0;
+
+	return 0;
+err_ah:
+	ath5k_hw_detach(sc->ah);
+err_irq:
+	free_irq(pdev->irq, sc);
+err_free:
+	ieee80211_free_hw(hw);
+err_map:
+	pci_iounmap(pdev, mem);
+err_reg:
+	pci_release_region(pdev, 0);
+err_dis:
+	pci_disable_device(pdev);
+err:
+	return ret;
+}
+
+static void __devexit ath_pci_remove(struct pci_dev *pdev)
+{
+	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+	struct ath_softc *sc = hw->priv;
+
+	ath_detach(pdev, hw);
+	ath5k_hw_detach(sc->ah);
+	free_irq(pdev->irq, sc);
+	pci_iounmap(pdev, sc->iobase);
+	pci_release_region(pdev, 0);
+	pci_disable_device(pdev);
+	ieee80211_free_hw(hw);
+}
+
+#ifdef CONFIG_PM
+static int ath_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+	struct ath_softc *sc = hw->priv;
+
+	if (sc->led_soft)
+		ath5k_hw_set_gpio(sc->ah, sc->led_pin, 1);
+
+	ath_stop_hw(sc);
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+
+	return 0;
+}
+
+static int ath_pci_resume(struct pci_dev *pdev)
+{
+	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+	struct ath_softc *sc = hw->priv;
+	int err;
+
+	err = pci_set_power_state(pdev, PCI_D0);
+	if (err)
+		return err;
+
+	err = pci_enable_device(pdev);
+	if (err)
+		return err;
+
+	pci_restore_state(pdev);
+	/*
+	 * Suspend/Resume resets the PCI configuration space, so we have to
+	 * re-disable the RETRY_TIMEOUT register (0x41) to keep
+	 * PCI Tx retries from interfering with C3 CPU state
+	 */
+	pci_write_config_byte(pdev, 0x41, 0);
+
+	ath_init(sc);
+	if (sc->led_soft) {
+		ath5k_hw_set_gpio_output(sc->ah, sc->led_pin);
+		ath5k_hw_set_gpio(sc->ah, sc->led_pin, 0);
+	}
+
+	return 0;
+}
+#else
+#define ath_pci_suspend NULL
+#define ath_pci_resume NULL
+#endif /* CONFIG_PM */
+
+static struct pci_driver ath_pci_drv_id = {
+	.name = "ath_pci",
+	.id_table = ath_pci_id_table,
+	.probe = ath_pci_probe,
+	.remove = __devexit_p(ath_pci_remove),
+	.suspend = ath_pci_suspend,
+	.resume = ath_pci_resume,
+};
+
+/*
+ * Static (i.e. global) sysctls. Note that the hal sysctls
+ * are located under ours by sharing the setting for DEV_ATH.
+ */
+enum {
+	DEV_ATH = 9,	/* XXX known by hal */
+};
+
+static int mincalibrate = 1;
+static int maxint = 0x7ffffff / 1000;
+#define CTL_AUTO -2	/* cannot be CTL_ANY or CTL_NONE */
+
+static ctl_table ath_static_sysctls[] = {
+#if AR_DEBUG
+	{ .ctl_name = CTL_AUTO,
+	  .procname = "debug",
+	  .mode = 0644,
+	  .data = &ath_debug,
+	  .maxlen = sizeof(ath_debug),
+	  .proc_handler = proc_dointvec
+	},
+#endif
+	{ .ctl_name = CTL_AUTO,
+	  .procname = "countrycode",
+	  .mode = 0444,
+	  .data = &countrycode,
+	  .maxlen = sizeof(countrycode),
+	  .proc_handler = proc_dointvec
+	},
+	{ .ctl_name = CTL_AUTO,
+	  .procname = "outdoor",
+	  .mode = 0444,
+	  .data = &outdoor,
+	  .maxlen = sizeof(outdoor),
+	  .proc_handler = proc_dointvec
+	},
+	{ .ctl_name = CTL_AUTO,
+	  .procname = "xchanmode",
+	  .mode = 0444,
+	  .data = &xchanmode,
+	  .maxlen = sizeof(xchanmode),
+	  .proc_handler = proc_dointvec
+	},
+	{ .ctl_name = CTL_AUTO,
+	  .procname = "calibrate",
+	  .mode = 0644,
+	  .data = &ath_calinterval,
+	  .maxlen = sizeof(ath_calinterval),
+	  .extra1 = &mincalibrate,
+	  .extra2 = &maxint,
+	  .proc_handler = proc_dointvec_minmax
+	},
+	{ 0 }
+};
+static ctl_table ath_ath_table[] = {
+	{ .ctl_name = DEV_ATH,
+	  .procname = "ath",
+	  .mode = 0555,
+	  .child = ath_static_sysctls
+	}, { 0 }
+};
+static ctl_table ath_root_table[] = {
+	{ .ctl_name = CTL_DEV,
+	  .procname = "dev",
+	  .mode = 0555,
+	  .child = ath_ath_table
+	}, { 0 }
+};
+static struct ctl_table_header *ath_sysctl_header;
+
+static int __init init_ath_pci(void)
+{
+	int ret;
+
+	ret = pci_register_driver(&ath_pci_drv_id);
+	if (ret) {
+		printk(KERN_ERR "ath_pci: can't register pci driver\n");
+		return ret;
+	}
+	ath_sysctl_header = register_sysctl_table(ath_root_table);
+
+	return 0;
+}
+
+static void __exit exit_ath_pci(void)
+{
+	if (ath_sysctl_header)
+		unregister_sysctl_table(ath_sysctl_header);
+	pci_unregister_driver(&ath_pci_drv_id);
+}
+
+module_init(init_ath_pci);
+module_exit(exit_ath_pci);
+
+MODULE_AUTHOR("Jiri Slaby");
+MODULE_DESCRIPTION("Support for Atheros 802.11 wireless LAN cards.");
+MODULE_SUPPORTED_DEVICE("Atheros WLAN cards");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(ATH_PCI_VERSION " (EXPERIMENTAL)");
diff --git a/ath5k_base.h b/ath5k_base.h
new file mode 100644
index 0000000..26a8126
--- /dev/null
+++ b/ath5k_base.h
@@ -0,0 +1,301 @@
+/*-
+ * Copyright (c) 2002-2007 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
+ *    redistribution must be conditioned upon including a substantially
+ *    similar Disclaimer requirement for further binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
+ * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $FreeBSD: src/sys/dev/ath/if_athvar.h,v 1.20 2005/01/24 20:31:24 sam Exp $
+ */
+
+/*
+ * Definitions for the Atheros Wireless LAN controller driver.
+ */
+#ifndef _DEV_ATH_ATHVAR_H
+#define _DEV_ATH_ATHVAR_H
+
+#include
+#include
+#include
+#include
+
+#include "ath5k_hw.h"
+
+/* Set this to 1 to disable regulatory domain restrictions for channel tests.
+ * WARNING: This is for debugging only and has side effects (e.g. scans take
+ * too long and result in timeouts). It's also illegal to tune to some of the
+ * supported frequencies in some countries, so use this at your own risk,
+ * you've been warned. */
+#define CHAN_DEBUG	0
+
+#define ATH_TIMEOUT	1000
+
+#define ATH_LONG_CALIB	30	/* seconds */
+#define ATH_SHORT_CALIB	1
+
+/*
+ * Maximum acceptable MTU
+ * MAXFRAMEBODY - WEP - QOS - RSN/WPA:
+ *	2312 - 8 - 2 - 12 = 2290
+ */
+#define ATH_MAX_MTU	2290
+#define ATH_MIN_MTU	32
+
+#define ATH_RXBUF	40		/* number of RX buffers */
+#define ATH_TXBUF	200		/* number of TX buffers */
+#define ATH_TXDESC	1		/* number of descriptors per buffer */
+#define ATH_BCBUF	1		/* number of beacon buffers */
+#define ATH_TXMAXTRY	11		/* max number of transmit attempts */
+#define ATH_TXINTR_PERIOD 5		/* max number of batched tx descriptors */
+
+#define ATH_BEACON_AIFS_DEFAULT		0	/* default aifs for ap beacon q */
+#define ATH_BEACON_CWMIN_DEFAULT	0	/* default cwmin for ap beacon q */
+#define ATH_BEACON_CWMAX_DEFAULT	0	/* default cwmax for ap beacon q */
+
+#define ATH_RSSI_LPF_LEN	10
+#define ATH_RSSI_DUMMY_MARKER	0x127
+#define ATH_EP_MUL(x, mul)	((x) * (mul))
+#define ATH_RSSI_IN(x)		(ATH_EP_MUL((x), AR5K_RSSI_EP_MULTIPLIER))
+#define ATH_LPF_RSSI(x, y, len) \
+	((x != ATH_RSSI_DUMMY_MARKER) ? (((x) * ((len) - 1) + (y)) / (len)) : (y))
+#define ATH_RSSI_LPF(x, y) do {						\
+	if ((y) >= -20)							\
+		x = ATH_LPF_RSSI((x), ATH_RSSI_IN((y)), ATH_RSSI_LPF_LEN); \
+} while (0)
+
+struct ath_buf {
+	struct list_head	list;
+	unsigned int		flags;	/* tx descriptor flags */
+	struct ath_desc		*desc;	/* virtual addr of desc */
+	dma_addr_t		daddr;	/* physical addr of desc */
+	struct sk_buff		*skb;	/* skbuff for buf */
+	dma_addr_t		skbaddr;/* physical addr of skb data */
+	struct ieee80211_tx_control ctl;
+};
+
+/*
+ * Data transmit queue state. One of these exists for each
+ * hardware transmit queue. Packets sent to us from above
+ * are assigned to queues based on their priority. Not all
+ * devices support a complete set of hardware transmit queues.
+ * For those devices the array sc_ac2q will map multiple
+ * priorities to fewer hardware queues (typically all to one
+ * hardware queue).
+ */
+struct ath_txq {
+	unsigned int	qnum;	/* hardware q number */
+	u32		*link;	/* link ptr in last TX desc */
+	struct list_head q;	/* transmit queue */
+	spinlock_t	lock;	/* lock on q and link */
+	bool		setup;
+};
+
+struct ath_stats {
+	__u32	ast_watchdog;	/* device reset by watchdog */
+	__u32	ast_hardware;	/* fatal hardware error interrupts */
+	__u32	ast_bmiss;	/* beacon miss interrupts */
+	__u32	ast_bstuck;	/* beacon stuck interrupts */
+	__u32	ast_rxorn;	/* rx overrun interrupts */
+	__u32	ast_rxeol;	/* rx eol interrupts */
+	__u32	ast_txurn;	/* tx underrun interrupts */
+	__u32	ast_mib;	/* mib interrupts */
+	__u32	ast_intrcoal;	/* interrupts coalesced */
+	__u32	ast_tx_packets;	/* packet sent on the interface */
+	__u32	ast_tx_mgmt;	/* management frames transmitted */
+	__u32	ast_tx_discard;	/* frames discarded prior to assoc */
+	__u32	ast_tx_invalid;	/* frames discarded 'cuz device gone */
+	__u32	ast_tx_qstop;	/* output stopped 'cuz no buffer */
+	__u32	ast_tx_encap;	/* tx encapsulation failed */
+	__u32	ast_tx_nonode;	/* tx failed 'cuz no node */
+	__u32	ast_tx_nobuf;	/* tx failed 'cuz no tx buffer (data) */
+	__u32	ast_tx_nobufmgt;/* tx failed 'cuz no tx buffer (mgmt)*/
+	__u32	ast_tx_linear;	/* tx linearized to cluster */
+	__u32	ast_tx_nodata;	/* tx discarded empty frame */
+	__u32	ast_tx_busdma;	/* tx failed for dma resrcs */
+	__u32	ast_tx_xretries;/* tx failed 'cuz too many retries */
+	__u32	ast_tx_fifoerr;	/* tx failed 'cuz FIFO underrun */
+	__u32	ast_tx_filtered;/* tx failed 'cuz xmit filtered */
+	__u32	ast_tx_shortretry;/* tx on-chip retries (short) */
+	__u32	ast_tx_longretry;/* tx on-chip retries (long) */
+	__u32	ast_tx_badrate;	/* tx failed 'cuz bogus xmit rate */
+	__u32	ast_tx_noack;	/* tx frames with no ack marked */
+	__u32	ast_tx_rts;	/* tx frames with rts enabled */
+	__u32	ast_tx_cts;	/* tx frames with cts enabled */
+	__u32	ast_tx_shortpre;/* tx frames with short preamble */
+	__u32	ast_tx_altrate;	/* tx frames with alternate rate */
+	__u32	ast_tx_protect;	/* tx frames with protection */
+	__u32	ast_tx_ctsburst;/* tx frames with cts and bursting */
+	__u32	ast_tx_ctsext;	/* tx frames with cts extension */
+	__u32	ast_rx_nobuf;	/* rx setup failed 'cuz no skb */
+	__u32	ast_rx_busdma;	/* rx setup failed for dma resrcs */
+	__u32	ast_rx_orn;	/* rx failed 'cuz of desc overrun */
+	__u32	ast_rx_crcerr;	/* rx failed 'cuz of bad CRC */
+	__u32	ast_rx_fifoerr;	/* rx failed 'cuz of FIFO overrun */
+	__u32	ast_rx_badcrypt;/* rx failed 'cuz decryption */
+	__u32	ast_rx_badmic;	/* rx failed 'cuz MIC failure */
+	__u32	ast_rx_phyerr;	/* rx failed 'cuz of PHY err */
+	__u32	ast_rx_phy[32];	/* rx PHY error per-code counts */
+	__u32	ast_rx_tooshort;/* rx discarded 'cuz frame too short */
+	__u32	ast_rx_toobig;	/* rx discarded 'cuz frame too large */
+	__u32	ast_rx_packets;	/* packet recv on the interface */
+	__u32	ast_rx_mgt;	/* management frames received */
+	__u32	ast_rx_ctl;	/* rx discarded 'cuz ctl frame */
+	__s8	ast_tx_rssi;	/* tx rssi of last ack */
+	__s8	ast_rx_rssi;	/* rx rssi from histogram */
+	__u32	ast_be_xmit;	/* beacons transmitted */
+	__u32	ast_be_nobuf;	/* beacon setup failed 'cuz no skb */
+	__u32	ast_per_cal;	/* periodic calibration calls */
+	__u32	ast_per_calfail;/* periodic calibration failed */
+	__u32	ast_per_rfgain;	/* periodic calibration rfgain reset */
+	__u32	ast_rate_calls;	/* rate control checks */
+	__u32	ast_rate_raise;	/* rate control raised xmit rate */
+	__u32	ast_rate_drop;	/* rate control dropped xmit rate */
+	__u32	ast_ant_defswitch;/* rx/default antenna switches */
+	__u32	ast_ant_txswitch;/* tx antenna switches */
+	__u32	ast_ant_rx[8];	/* rx frames with antenna */
+	__u32	ast_ant_tx[8];	/* tx frames with antenna */
+};
+
+#if CHAN_DEBUG
+#define ATH_CHAN_MAX	(26+26+26+200+200)
+#else
+#define ATH_CHAN_MAX	(14+14+14+252+20)	/* XXX what's the max? */
+#endif
+
+struct ath_softc {
+	struct pci_dev		*pdev;		/* for dma mapping */
+	void __iomem		*iobase;	/* address of the device */
+	struct mutex		lock;		/* dev-level lock */
+	struct ath_stats	stats;		/* private statistics */
+	struct ieee80211_tx_queue_stats tx_stats;
+	struct ieee80211_low_level_stats ll_stats;
+	struct ieee80211_hw	*hw;		/* IEEE 802.11 common */
+	struct ieee80211_hw_mode modes[NUM_IEEE80211_MODES];
+	struct ieee80211_channel channels[ATH_CHAN_MAX];
+	struct ieee80211_rate	rates[AR5K_MAX_RATES * NUM_IEEE80211_MODES];
+	enum ieee80211_if_types	opmode;
+	struct ath_hw		*ah;		/* Atheros HW */
+
+	int			debug;
+
+	struct ath_buf		*bufptr;	/* allocated buffer ptr */
+	struct ath_desc		*desc;		/* TX/RX descriptors */
+	dma_addr_t		desc_daddr;	/* DMA (physical) address */
+	size_t			desc_len;	/* size of TX/RX descriptors */
+	u16			cachelsz;	/* cache line size */
+#ifdef UNUSED
+	void (*sc_setdefantenna)(struct ath_softc *, u_int);
+#endif
+	unsigned int		invalid : 1,	/* disable hardware accesses */
+				mrretry : 1,	/* multi-rate retry support */
+				promisc : 1;
+#ifdef UNUSED
+				sc_diversity : 1,/* enable rx diversity */
+				sc_hasveol : 1,	/* tx VEOL support */
+				sc_mcastkey: 1,	/* mcast key cache search */
+				sc_hasclrkey:1;	/* CLR key supported */
+	/* rate tables */
+#endif
+	unsigned int		curmode;	/* current phy mode */
+	struct ieee80211_channel *curchan;	/* current h/w channel */
+
+	int			iface_id;	/* add/remove_interface id */
+
+	struct {
+		u8	rxflags;	/* radiotap rx flags */
+		u8	txflags;	/* radiotap tx flags */
+		u16	ledon;		/* softled on time */
+		u16	ledoff;		/* softled off time */
+	} hwmap[32];			/* h/w rate ix mappings */
+#ifdef UNUSED
+	u8			sc_protrix;	/* protection rate index */
+	u_int			sc_txantenna;	/* tx antenna (fixed or auto) */
+#endif
+	enum ath5k_int		imask;		/* interrupt mask copy */
+
+	DECLARE_BITMAP(keymap, AR5K_KEYCACHE_SIZE); /* key use bit map */
+
+	u8			bssidmask[ETH_ALEN];
+
+	unsigned int		led_pin,	/* GPIO pin for driving LED */
+				led_on,		/* pin setting for LED on */
+				led_off,	/* off time for current blink */
+				led_blinking: 1,/* LED blink operation active */
+				led_endblink: 1,/* finish LED blink operation */
+				led_soft: 1;	/* enable LED gpio status */
+	struct timer_list	led_tim;	/* led off timer */
+	u8			led_rxrate;	/* current rx rate for LED */
+	u8			led_txrate;	/* current tx rate for LED */
+
+	struct tasklet_struct	restq;		/* reset tasklet */
+
+	unsigned int		rxbufsize;	/* rx size based on mtu */
+	struct list_head	rxbuf;		/* receive buffer */
+	spinlock_t		rxbuflock;
+	u32			*rxlink;	/* link ptr in last RX desc */
+	struct tasklet_struct	rxtq;		/* rx intr tasklet */
+#ifdef UNUSED
+	u8			sc_defant;	/* current default antenna */
+	u8			sc_rxotherant;	/* rx's on non-default antenna*/
+#endif
+	struct list_head	txbuf;		/* transmit buffer */
+	spinlock_t		txbuflock;
+	unsigned int		txbuf_len;	/* buf count in txbuf list */
+	struct ath_txq		txqs[2];	/* beacon and tx */
+#ifdef UNUSED
+	struct ath_txq		*sc_ac2q[5];	/* WME AC -> h/w q map */
+#endif
+	struct ath_txq		*txq;		/* beacon and tx */
+	struct tasklet_struct	txtq;		/* tx intr tasklet */
+
+	struct ath_buf		*bbuf;		/* beacon buffer */
+	unsigned int		bhalq,		/* HAL q for outgoing beacons */
+				bmisscount,	/* missed beacon transmits */
+				bintval;	/* beacon interval */
+#ifdef BEACON
+	u32			sc_ant_tx[8];	/* recent tx frames/antenna */
+	struct ath_txq		*cabq;		/* tx q for cab frames */
+
+	struct tasklet_struct	bmisstq;	/* bmiss intr tasklet */
+#endif
+#ifdef UNUSED
+	struct ctl_table_header	*sc_sysctl_header;
+	struct ctl_table	*sc_sysctls;
+#endif
+	struct timer_list	calib_tim;	/* calibration timer */
+};
+
+#define ath5k_hw_hasbssidmask(_ah) \
+	(ath5k_hw_get_capability(_ah, AR5K_CAP_BSSIDMASK, 0, NULL) == 0)
+#define ath5k_hw_hasveol(_ah) \
+	(ath5k_hw_get_capability(_ah, AR5K_CAP_VEOL, 0, NULL) == 0)
+
+#endif