commit fd06b08583b552475389544b3cf2f7f8cdaa5e85 Author: Jiri Slaby Date: Wed Jun 20 23:47:17 2007 +0200 move some stuff to pci (it would be the base) diff --git a/Makefile b/Makefile index cbd7dfa..e71cfe3 100644 --- a/Makefile +++ b/Makefile @@ -3,8 +3,8 @@ KBUILD=$(MAKE) -C $(KDIR) M=$(PWD) EXTRA_CFLAGS += -I$(src)/openhal -ath5k-objs := ath/if_ath.o ath/if_ath_pci.o openhal/ath5k_hw.o \ - openhal/ieee80211_regdomain.o #ath/radar.o +ath5k-objs := ath/if_ath_pci.o openhal/ath5k_hw.o \ + openhal/ieee80211_regdomain.o #ath/radar.o #ath/if_ath.o obj-m += ath5k.o diff --git a/ath/if_ath_pci.c b/ath/if_ath_pci.c index c9c4a4f..fbdeb1e 100644 --- a/ath/if_ath_pci.c +++ b/ath/if_ath_pci.c @@ -37,6 +37,43 @@ #include "if_athvar.h" #include "if_ath_pci.h" +#define DPRINTF(sc, _m, _fmt...) do { \ + if (sc->sc_debug & (_m)) \ + printk(_fmt); \ +} while (0) +enum { + ATH_DEBUG_XMIT = 0x00000001, /* basic xmit operation */ + ATH_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */ + ATH_DEBUG_RECV = 0x00000004, /* basic recv operation */ + ATH_DEBUG_RECV_DESC = 0x00000008, /* recv descriptors */ + ATH_DEBUG_RATE = 0x00000010, /* rate control */ + ATH_DEBUG_RESET = 0x00000020, /* reset processing */ + ATH_DEBUG_MODE = 0x00000040, /* mode init/setup */ + ATH_DEBUG_BEACON = 0x00000080, /* beacon handling */ + ATH_DEBUG_WATCHDOG = 0x00000100, /* watchdog timeout */ + ATH_DEBUG_INTR = 0x00001000, /* ISR */ + ATH_DEBUG_TX_PROC = 0x00002000, /* tx ISR proc */ + ATH_DEBUG_RX_PROC = 0x00004000, /* rx ISR proc */ + ATH_DEBUG_BEACON_PROC = 0x00008000, /* beacon ISR proc */ + ATH_DEBUG_CALIBRATE = 0x00010000, /* periodic calibration */ + ATH_DEBUG_KEYCACHE = 0x00020000, /* key cache management */ + ATH_DEBUG_STATE = 0x00040000, /* 802.11 state transitions */ + ATH_DEBUG_NODE = 0x00080000, /* node management */ + ATH_DEBUG_LED = 0x00100000, /* led management */ + ATH_DEBUG_FATAL = 0x80000000, /* fatal errors */ + ATH_DEBUG_ANY = 0xffffffff +}; + +static int countrycode = CTRY_DEFAULT; +static int outdoor = true; +static int xchanmode = true; +module_param(countrycode, int, 0); +MODULE_PARM_DESC(countrycode, "Override default country code"); +module_param(outdoor, int, 0); +MODULE_PARM_DESC(outdoor, "Enable/disable outdoor use"); +module_param(xchanmode, int, 0); +MODULE_PARM_DESC(xchanmode, "Enable/disable extended channel mode"); + /* * User a static table of PCI id's for now. While this is the * "new way" to do things, we may want to switch back to having @@ -64,12 +101,6 @@ static struct pci_device_id ath_pci_id_table[] __devinitdata = { }; MODULE_DEVICE_TABLE(pci, ath_pci_id_table); -/* return bus cachesize in 4B word units */ -void bus_read_cachesize(struct ath_softc *sc, u8 *csz) -{ - pci_read_config_byte(sc->sc_bdev, PCI_CACHE_LINE_SIZE, csz); -} - static struct ieee80211_ops ath_hw_ops = { /* .tx = d_tx, .open = d_open, @@ -88,6 +119,587 @@ static struct ieee80211_ops ath_hw_ops = { .hw_scan = d_hw_scan*/ }; +/* + * Read from a device register + */ +static inline u32 ath_hw_reg_read(struct ath_hal *hw, u16 reg) +{ + return readl(hw->ah_sh + reg); +} + +/* + * Write to a device register + */ +static inline void ath_hw_reg_write(struct ath_hal *hw, u32 val, u16 reg) +{ + writel(val, hw->ah_sh + reg); +} + +#define ATH_HW_IRQ_PENDING 0x4008 +# define ATH_HW_IRQ_PENDING_FALSE 0 +# define ATH_HW_IRQ_PENDING_TRUE 1 + +/* + * Check if there is an interrupt waiting to be processed. + * Return 1 if there is an interrupt for us, or 0 if there is none or if + * the device has been removed. 
+ */ +static inline int ath_hw_irq_pending(struct ath_hal *hw) +{ + return ath_hw_reg_read(hw, ATH_HW_IRQ_PENDING) == ATH_HW_IRQ_PENDING_TRUE; +} + +/* + * Interrupt handler. Most of the actual processing is deferred. + */ +static irqreturn_t ath_intr(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct ath_softc *sc = dev->priv; + struct ath_hal *ah = sc->sc_ah; + enum ath5k_int status; + int needmark; + + if (sc->sc_invalid) { + /* + * The hardware is not ready/present, don't touch anything. + * Note this can happen early on if the IRQ is shared. + */ + DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__); + return IRQ_NONE; + } + if (!ath_hw_irq_pending(ah)) /* shared irq, not for us */ + return IRQ_NONE; + if ((dev->flags & (IFF_RUNNING|IFF_UP)) != (IFF_RUNNING|IFF_UP)) { + DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n", + __func__, dev->flags); + ath5k_hw_get_isr(ah, &status); /* clear ISR */ + ath5k_hw_set_intr(ah, 0); /* disable further intr's */ + return IRQ_HANDLED; + } + needmark = 0; + + do { + /* + * Figure out the reason(s) for the interrupt. Note + * that the hal returns a pseudo-ISR that may include + * bits we haven't explicitly enabled so we mask the + * value to insure we only process bits we requested. + */ + ath5k_hw_get_isr(ah, &status); /* NB: clears ISR too */ + DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status); + status &= sc->sc_imask; /* discard unasked for bits */ + if (status & AR5K_INT_FATAL) { + /* + * Fatal errors are unrecoverable. Typically + * these are caused by DMA errors. Unfortunately + * the exact reason is not (presently) returned + * by the hal. + */ + sc->sc_stats.ast_hardware++; + ath5k_hw_set_intr(ah, 0); /* disable intr's until reset */ + tasklet_schedule(&sc->sc_fataltq); + } else if (status & AR5K_INT_RXORN) { + sc->sc_stats.ast_rxorn++; + ath5k_hw_set_intr(ah, 0); /* disable intr's until reset */ + tasklet_schedule(&sc->sc_rxorntq); + } else { + if (status & AR5K_INT_SWBA) { + /* + * Software beacon alert--time to send a beacon. + * Handle beacon transmission directly; deferring + * this is too slow to meet timing constraints + * under load. + */ +// ath_beacon_send(dev); + } + if (status & AR5K_INT_RXEOL) { + /* + * NB: the hardware should re-read the link when + * RXE bit is written, but it doesn't work at + * least on older hardware revs. + */ + sc->sc_stats.ast_rxeol++; + sc->sc_rxlink = NULL; + } + if (status & AR5K_INT_TXURN) { + sc->sc_stats.ast_txurn++; + /* bump tx trigger level */ + ath5k_hw_update_tx_triglevel(ah, true); + } + if (status & AR5K_INT_RX) + tasklet_schedule(&sc->sc_rxtq); + if (status & AR5K_INT_TX) + tasklet_schedule(&sc->sc_txtq); + if (status & AR5K_INT_BMISS) { + sc->sc_stats.ast_bmiss++; + tasklet_schedule(&sc->sc_bmisstq); + } + if (status & AR5K_INT_MIB) { + sc->sc_stats.ast_mib++; + /* + * Disable interrupts until we service the MIB + * interrupt; otherwise it will continue to fire. + */ + ath5k_hw_set_intr(ah, 0); + /* + * Let the hal handle the event. We assume it will + * clear whatever condition caused the interrupt. 
+ */ +#ifdef BLE + ath5k_hw_proc_mib_event(ah, + &ATH_NODE(sc->sc_ic.ic_bss)->an_halstats); +#endif + ath5k_hw_set_intr(ah, sc->sc_imask); + } + } + } while (ath5k_hw_is_intr_pending(ah)); + + return IRQ_HANDLED; +} + +static int ath_attach(struct pci_dev *pdev, struct ieee80211_hw *hw) +{ + struct ath_softc *sc = hw->priv; + struct ath_hal *ah; + enum ath5k_status status; + int error = 0, i; + u8 csz; + + DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, pdev->device); + + /* + * Cache line size is used to size and align various + * structures used to communicate with the hardware. + */ + pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz); + /* XXX assert csz is non-zero */ + sc->sc_cachelsz = csz << 2; /* convert to bytes */ + + ATH_LOCK_INIT(sc); + ATH_TXBUF_LOCK_INIT(sc); + +/* tasklet_init(&sc->sc_rxtq, ath_rx_tasklet, (unsigned long)hw); + tasklet_init(&sc->sc_rxorntq, ath_rxorn_tasklet, (unsigned long)hw); + tasklet_init(&sc->sc_fataltq, ath_fatal_tasklet, (unsigned long)hw); + tasklet_init(&sc->sc_bmisstq, ath_bmiss_tasklet, (unsigned long)hw); + tasklet_init(&sc->sc_bstuckq, ath_bstuck_tasklet, (unsigned long)hw); + tasklet_init(&sc->sc_radartq, ath_radar_tasklet, (unsigned long)hw);*/ + + /* + * Attach the hal + */ + ah = ath5k_hw_init(pdev->device, sc, sc->sc_iobase, &status); + if (ah == NULL) { + error = ENXIO; + goto bad; + } + sc->sc_ah = ah; + + /* + * Check if the MAC has multi-rate retry support. + * We do this by trying to setup a fake extended + * descriptor. MAC's that don't have support will + * return false w/o doing anything. MAC's that do + * support it will return true w/o doing anything. + */ + sc->sc_mrretry = ah->ah_setup_xtx_desc(ah, NULL, 0, 0, 0, 0, 0, 0); + + /* + * Check if the device has hardware counters for PHY + * errors. If so we need to enable the MIB interrupt + * so we can act on stat triggers. + */ + if (ath_hal_hwphycounters(ah)) + sc->sc_needmib = 1; + + /* + * Get the hardware key cache size. + */ + sc->sc_keymax = ath5k_hw_get_keycache_size(ah); + if (sc->sc_keymax > ATH_KEYMAX) { + printk(KERN_WARNING "Warning, using only %u of %u key cache " + "slots\n", ATH_KEYMAX, sc->sc_keymax); + sc->sc_keymax = ATH_KEYMAX; + } + + /* + * Reset the key cache since some parts do not + * reset the contents on initial power up. + */ + for (i = 0; i < sc->sc_keymax; i++) + ath5k_hw_reset_key(ah, i); + /* + * Mark key cache slots associated with global keys + * as in use. If we knew TKIP was not to be used we + * could leave the +32, +64, and +32+64 slots free. + * XXX only for splitmic. + */ +#ifdef BLE + for (i = 0; i < IEEE80211_WEP_NKID; i++) { + setbit(sc->sc_keymap, i); + setbit(sc->sc_keymap, i+32); + setbit(sc->sc_keymap, i+64); + setbit(sc->sc_keymap, i+32+64); + } + + /* + * Collect the channel list using the default country + * code and including outdoor channels. The 802.11 layer + * is resposible for filtering this list based on settings + * like the phy mode. + */ + error = ath_getchannels(hw, countrycode, outdoor, xchanmode); + if (error != 0) { + printk("error! but continuing anyways...\n"); + //goto bad; + } + + /* + * Setup rate tables for all potential media types. + */ + ath_rate_setup(hw, MODE_IEEE80211A); + ath_rate_setup(hw, MODE_IEEE80211B); + ath_rate_setup(hw, MODE_IEEE80211G); + ath_rate_setup(hw, MODE_ATHEROS_TURBO); + ath_rate_setup(hw, MODE_ATHEROS_TURBOG); + /* NB: setup here so ath_rate_update is happy */ + ath_setcurmode(sc, MODE_IEEE80211A); + + /* + * Allocate tx+rx descriptors and populate the lists. 
+ */ + error = ath_desc_alloc(sc); + if (error != 0) { + printk(KERN_ERR "failed to allocate descriptors: %d\n", error); + goto bad; + } + + /* + * Allocate hardware transmit queues: one queue for + * beacon frames and one data queue for each QoS + * priority. Note that the hal handles reseting + * these queues at the needed time. + * + * XXX PS-Poll + */ + sc->sc_bhalq = ath_beaconq_setup(ah); + if (sc->sc_bhalq == (u_int) -1) { + printk(KERN_ERR "unable to setup a beacon xmit queue!\n"); + goto bad2; + } + sc->sc_cabq = ath_txq_setup(sc, AR5K_TX_QUEUE_CAB, 0); + if (sc->sc_cabq == NULL) { + printk(KERN_ERR "unable to setup CAB xmit queue!\n"); + error = EIO; + goto bad2; + } +#endif +#ifdef BLE + /* NB: insure BK queue is the lowest priority h/w queue */ + if (!ath_tx_setup(sc, WME_AC_BK, AR5K_WME_AC_BK)) { + printk(KERN_ERR "unable to setup xmit queue for %s traffic!\n", + ieee80211_wme_acnames[WME_AC_BK]); + error = EIO; + goto bad2; + } + if (!ath_tx_setup(sc, WME_AC_BE, AR5K_WME_AC_BE) || + !ath_tx_setup(sc, WME_AC_VI, AR5K_WME_AC_VI) || + !ath_tx_setup(sc, WME_AC_VO, AR5K_WME_AC_VO)) { + /* + * Not enough hardware tx queues to properly do WME; + * just punt and assign them all to the same h/w queue. + * We could do a better job of this if, for example, + * we allocate queues when we switch from station to + * AP mode. + */ + if (sc->sc_ac2q[WME_AC_VI] != NULL) + ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]); + if (sc->sc_ac2q[WME_AC_BE] != NULL) + ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]); + sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK]; + sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK]; + sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK]; + } + + /* + * Special case certain configurations. Note the + * CAB queue is handled by these specially so don't + * include them when checking the txq setup mask. + */ + switch (sc->sc_txqsetup &~ (1<sc_cabq->axq_qnum)) { + case 0x01: + ATH_INIT_TQUEUE(&sc->sc_txtq, ath_tx_tasklet_q0, dev); + break; + case 0x0f: + ATH_INIT_TQUEUE(&sc->sc_txtq, ath_tx_tasklet_q0123, dev); + break; + default: + ATH_INIT_TQUEUE(&sc->sc_txtq, ath_tx_tasklet, dev); + break; + } + + /* + * Setup rate control. Some rate control modules + * call back to change the anntena state so expose + * the necessary entry points. + * XXX maybe belongs in struct ath_ratectrl? + */ + sc->sc_setdefantenna = ath_setdefantenna; + sc->sc_rc = ath_rate_attach(sc); + if (sc->sc_rc == NULL) { + error = EIO; + goto bad2; + } + + init_timer(&sc->sc_scan_ch); + sc->sc_scan_ch.function = ath_next_scan; + sc->sc_scan_ch.data = (unsigned long)hw; + + init_timer(&sc->sc_cal_ch); + sc->sc_cal_ch.function = ath_calibrate; + sc->sc_cal_ch.data = (unsigned long)hw; + + sc->sc_blinking = 0; + sc->sc_ledstate = 1; + sc->sc_ledon = 0; /* low true */ + sc->sc_ledidle = (2700*HZ)/1000; /* 2.7sec */ + + init_timer(&sc->sc_ledtimer); + sc->sc_ledtimer.function = ath_led_off; + sc->sc_ledtimer.data = (unsigned long) sc; + /* + * Auto-enable soft led processing for IBM cards and for + * 5211 minipci cards. Users can also manually enable/disable + * support with a sysctl. 
+ */ + sc->sc_softled = (devid == PCI_DEVICE_ID_ATHEROS_AR5212_IBM || + devid == PCI_DEVICE_ID_ATHEROS_AR5211); + if (sc->sc_softled) { + ath5k_hw_set_gpio_output(ah, sc->sc_ledpin); + ath5k_hw_set_gpio(ah, sc->sc_ledpin, !sc->sc_ledon); + } + + ether_setup(dev); + dev->open = ath_init; + dev->stop = ath_stop; + dev->hard_start_xmit = ath_start; + dev->tx_timeout = ath_tx_timeout; + dev->watchdog_timeo = 5 * HZ; /* XXX */ + dev->set_multicast_list = ath_mode_init; + dev->do_ioctl = ath_ioctl; + dev->get_stats = ath_getstats; + dev->set_mac_address = ath_set_mac_address; + dev->change_mtu = &ath_change_mtu; + dev->tx_queue_len = ATH_TXBUF; /* TODO? 1 for mgmt frame */ +/*get_wireless_stats moved from net_device to iw_handler_def*/ +# if IW_HANDLER_VERSION < 7 + dev->get_wireless_stats = ath_iw_getstats; +# endif + ieee80211_ioctl_iwsetup(&ath_iw_handler_def); + dev->wireless_handlers = &ath_iw_handler_def; +#if IEEE80211_VLAN_TAG_USED + dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + dev->vlan_rx_register = ath_vlan_register; + dev->vlan_rx_kill_vid = ath_vlan_kill_vid; +#endif /* IEEE80211_VLAN_TAG_USED */ + ic->ic_dev = dev; + ic->ic_devstats = &sc->sc_devstats; + ic->ic_init = ath_init; + ic->ic_reset = ath_reset; + ic->ic_newassoc = ath_newassoc; + ic->ic_updateslot = ath_updateslot; + ic->ic_wme.wme_update = ath_wme_update; + /* XXX not right but it's not used anywhere important */ + ic->ic_phytype = MODULATION_OFDM; + ic->ic_opmode = IEEE80211_M_STA; + ic->ic_caps = + IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ + | IEEE80211_C_AHDEMO /* adhoc demo (pseudo_ibss) mode */ + | IEEE80211_C_HOSTAP /* hostap mode */ + | IEEE80211_C_MONITOR /* monitor mode */ + | IEEE80211_C_SHPREAMBLE /* short preamble supported */ + | IEEE80211_C_SHSLOT /* short slot time supported */ + | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ + ; + /* + * initialize management queue + */ + skb_queue_head_init(&ic->ic_mgtq); + + /* + * Query the hal to figure out h/w crypto support. + */ + if (ath_hal_ciphersupported(ah, AR5K_CIPHER_WEP)) + ic->ic_caps |= IEEE80211_C_WEP; + if (ath_hal_ciphersupported(ah, AR5K_CIPHER_AES_OCB)) + ic->ic_caps |= IEEE80211_C_AES; + if (ath_hal_ciphersupported(ah, AR5K_CIPHER_AES_CCM)) + ic->ic_caps |= IEEE80211_C_AES_CCM; + if (ath_hal_ciphersupported(ah, AR5K_CIPHER_CKIP)) + ic->ic_caps |= IEEE80211_C_CKIP; + if (ath_hal_ciphersupported(ah, AR5K_CIPHER_TKIP)) { + ic->ic_caps |= IEEE80211_C_TKIP; + /* + * Check if h/w does the MIC and/or whether the + * separate key cache entries are required to + * handle both tx+rx MIC keys. + */ + if (ath_hal_ciphersupported(ah, AR5K_CIPHER_MIC)) + ic->ic_caps |= IEEE80211_C_TKIPMIC; + if (ath_hal_tkipsplit(ah)) + sc->sc_splitmic = 1; + } + sc->sc_hasclrkey = ath_hal_ciphersupported(ah, AR5K_CIPHER_CLR); + sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah); + /* + * TPC support can be done either with a global cap or + * per-packet support. The latter is not available on + * all parts. We're a bit pedantic here as all parts + * support a global cap. + */ + if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah)) + ic->ic_caps |= IEEE80211_C_TXPMGT; + + /* + * Mark WME capability only if we have sufficient + * hardware queues to do proper priority scheduling. + */ + if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK]) + ic->ic_caps |= IEEE80211_C_WME; + /* + * Check for misc other capabilities. 
+ */ + if (ath_hal_hasbursting(ah)) + ic->ic_caps |= IEEE80211_C_BURST; + + /* + * Indicate we need the 802.11 header padded to a + * 32-bit boundary for 4-address and QoS frames. + */ + ic->ic_flags |= IEEE80211_F_DATAPAD; +#endif + + /* + * Query the hal about antenna support. + */ + sc->sc_defant = ath5k_hw_get_def_antenna(ah); + + /* + * Not all chips have the VEOL support we want to + * use with IBSS beacons; check here for it. + */ + sc->sc_hasveol = ath_hal_hasveol(ah); + + sc->sc_rxfilter = 0; + + /* get mac address from hardware */ +// ath_hal_getmac(ah, ic->ic_myaddr); BLEE +// IEEE80211_ADDR_COPY(dev->dev_addr, ic->ic_myaddr); +#ifdef BLE + /* call MI attach routine. */ + ieee80211_ifattach(ic); + /* override default methods */ + ic->ic_node_alloc = ath_node_alloc; + sc->sc_node_free = ic->ic_node_free; + ic->ic_node_free = ath_node_free; + ic->ic_node_getrssi = ath_node_getrssi; + sc->sc_recv_mgmt = ic->ic_recv_mgmt; + ic->ic_recv_mgmt = ath_recv_mgmt; + sc->sc_newstate = ic->ic_newstate; + ic->ic_newstate = ath_newstate; + ic->ic_crypto.cs_key_alloc = ath_key_alloc; + ic->ic_crypto.cs_key_delete = ath_key_delete; + ic->ic_crypto.cs_key_set = ath_key_set; + ic->ic_crypto.cs_key_update_begin = ath_key_update_begin; + ic->ic_crypto.cs_key_update_end = ath_key_update_end; + + radar_init(ic); +#endif +#if 0 + /* complete initialization */ + ieee80211_media_init(ic, ath_media_change, ieee80211_media_status); + + if (register_netdev(dev)) { + printk(KERN_ERR "%s: unable to register device\n", dev->name); + goto bad3; + } + +#endif + /* + * Attach dynamic MIB vars and announce support + * now that we have a device name with unit number. + */ +// ath_dynamic_sysctl_register(sc); +// ath_rate_dynamic_sysctl_register(sc); +// ath_announce(sc); + + return 0; +#ifdef BLE +//bad3: +// ieee80211_ifdetach(ic); +// ath_rate_detach(sc->sc_rc); +bad2: + if (sc->sc_txq[WME_AC_BK].axq_qnum != (u_int) -1) { + ATH_TXQ_LOCK_DESTROY(&sc->sc_txq[WME_AC_BK]); + } + if (sc->sc_txq[WME_AC_BE].axq_qnum != (u_int) -1) { + ATH_TXQ_LOCK_DESTROY(&sc->sc_txq[WME_AC_BE]); + } + if (sc->sc_txq[WME_AC_VI].axq_qnum != (u_int) -1) { + ATH_TXQ_LOCK_DESTROY(&sc->sc_txq[WME_AC_VI]); + } + if (sc->sc_txq[WME_AC_VO].axq_qnum != (u_int) -1) { + ATH_TXQ_LOCK_DESTROY(&sc->sc_txq[WME_AC_VO]); + } + ath_tx_cleanup(sc); + ath_desc_free(sc); +#endif +bad: + if (ah) { + ath5k_hw_detach(ah); + } + sc->sc_invalid = 1; + return error; +} + +static int ath_detach(struct ieee80211_hw *hw) +{ + struct ath_softc *sc = hw->priv; +// struct ieee80211com *ic = &sc->sc_ic; + +// ath_stop(hw); + sc->sc_invalid = 1; + /* + * NB: the order of these is important: + * o call the 802.11 layer before detaching the hal to + * insure callbacks into the driver to delete global + * key cache entries can be handled + * o reclaim the tx queue data structures after calling + * the 802.11 layer as we'll get called back to reclaim + * node state and potentially want to use them + * o to cleanup the tx queues the hal is called, so detach + * it last + * Other than that, it's straightforward... + */ +// ieee80211_ifdetach(ic); +// ath_rate_detach(sc->sc_rc); +// ath_desc_free(sc); +// ath_tx_cleanup(sc); + ath5k_hw_detach(sc->sc_ah); + + /* + * NB: can't reclaim these until after ieee80211_ifdetach + * returns because we'll get called back to reclaim node + * state and potentially want to use them. 
+ */ +// ath_dynamic_sysctl_unregister(sc); +// ath_rawdev_detach(sc); +// unregister_netdev(dev); + + return 0; +} + static int __devinit ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { @@ -184,7 +796,7 @@ static int __devinit ath_pci_probe(struct pci_dev *pdev, goto err_free; } - if (ath_attach(id->device, hw) != 0) + if (ath_attach(pdev, hw) != 0) goto err_irq; athname = ath_hal_probe(id->vendor, id->device); @@ -225,9 +837,13 @@ static void __devexit ath_pci_remove(struct pci_dev *pdev) #ifdef CONFIG_PM static int ath_pci_suspend(struct pci_dev *pdev, pm_message_t state) { - struct net_device *dev = pci_get_drvdata(pdev); + struct ieee80211_hw *hw = pci_get_drvdata(pdev); + struct ath_softc *sc = hw->priv; - ath_suspend(dev); + if (sc->sc_softled) + ath5k_hw_set_gpio(sc->sc_ah, sc->sc_ledpin, 1); + +// ath_stop(hw); pci_save_state(pdev); pci_disable_device(pdev); pci_set_power_state(pdev, PCI_D3hot); @@ -237,7 +853,8 @@ static int ath_pci_suspend(struct pci_dev *pdev, pm_message_t state) static int ath_pci_resume(struct pci_dev *pdev) { - struct net_device *dev = pci_get_drvdata(pdev); + struct ieee80211_hw *hw = pci_get_drvdata(pdev); + struct ath_softc *sc = hw->priv; u32 val; int err; @@ -260,7 +877,12 @@ static int ath_pci_resume(struct pci_dev *pdev) pci_read_config_dword(pdev, 0x40, &val); if ((val & 0x0000ff00) != 0) pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); - ath_resume(dev); + +// ath_init(hw); + if (sc->sc_softled) { + ath5k_hw_set_gpio_output(sc->sc_ah, sc->sc_ledpin); + ath5k_hw_set_gpio(sc->sc_ah, sc->sc_ledpin, 0); + } return 0; } @@ -279,27 +901,94 @@ static struct pci_driver ath_pci_drv_id = { }; /* - * Module glue. + * Static (i.e. global) sysctls. Note that the hal sysctls + * are located under ours by sharing the setting for DEV_ATH. 
*/ -static char version[] = ATH_PCI_VERSION " (EXPERIMENTAL)"; -static char dev_info[] = "ath_pci"; - -int ath_ioctl_ethtool(struct ath_softc *sc, int cmd, void __user *addr) -{ - struct ethtool_drvinfo info; +enum { + DEV_ATH = 9, /* XXX known by hal */ +}; - if (cmd != ETHTOOL_GDRVINFO) - return -EOPNOTSUPP; - memset(&info, 0, sizeof(info)); - info.cmd = cmd; - strncpy(info.driver, dev_info, sizeof(info.driver)-1); - strncpy(info.version, version, sizeof(info.version)-1); +#define CTL_AUTO -2 /* cannot be CTL_ANY or CTL_NONE */ - /* include the device name so later versions of kudzu DTRT */ - strncpy(info.bus_info, pci_name(sc->sc_bdev), sizeof(info.bus_info)-1); +static ctl_table ath_static_sysctls[] = { +#ifdef AR_DEBUG + { .ctl_name = CTL_AUTO, + .procname = "debug", + .mode = 0644, + .data = &ath_debug, + .maxlen = sizeof(ath_debug), + .proc_handler = proc_dointvec + }, +#endif + { .ctl_name = CTL_AUTO, + .procname = "countrycode", + .mode = 0444, + .data = &countrycode, + .maxlen = sizeof(countrycode), + .proc_handler = proc_dointvec + }, +/* { .ctl_name = CTL_AUTO, + .procname = "regdomain", + .mode = 0444, + .data = &ath_regdomain, + .maxlen = sizeof(ath_regdomain), + .proc_handler = proc_dointvec + },*/ + { .ctl_name = CTL_AUTO, + .procname = "outdoor", + .mode = 0444, + .data = &outdoor, + .maxlen = sizeof(outdoor), + .proc_handler = proc_dointvec + }, + { .ctl_name = CTL_AUTO, + .procname = "xchanmode", + .mode = 0444, + .data = &xchanmode, + .maxlen = sizeof(xchanmode), + .proc_handler = proc_dointvec + }, +/* { .ctl_name = CTL_AUTO, + .procname = "dwelltime", + .mode = 0644, + .data = &ath_dwelltime, + .maxlen = sizeof(ath_dwelltime), + .extra1 = &mindwelltime, + .extra2 = &maxint, + .proc_handler = proc_dointvec_minmax + }, + { .ctl_name = CTL_AUTO, + .procname = "calibrate", + .mode = 0644, + .data = &ath_calinterval, + .maxlen = sizeof(ath_calinterval), + .extra1 = &mincalibrate, + .extra2 = &maxint, + .proc_handler = proc_dointvec_minmax + },*/ + { 0 } +}; +static ctl_table ath_ath_table[] = { + { .ctl_name = DEV_ATH, + .procname = "ath", + .mode = 0555, + .child = ath_static_sysctls + }, { 0 } +}; +static ctl_table ath_root_table[] = { + { .ctl_name = CTL_DEV, + .procname = "dev", + .mode = 0555, + .child = ath_ath_table + }, { 0 } +}; +static struct ctl_table_header *ath_sysctl_header; - return copy_to_user(addr, &info, sizeof(info)) ? -EFAULT : 0; -} +/* + * Module glue. 
+ */ +static char version[] = ATH_PCI_VERSION " (EXPERIMENTAL)"; +static char dev_info[] = "ath_pci"; static int __init init_ath_pci(void) { @@ -312,14 +1001,15 @@ static int __init init_ath_pci(void) printk(KERN_ERR "ath_pci: can't register pci driver\n"); return ret; } - ath_sysctl_register(); + ath_sysctl_header = register_sysctl_table(ath_root_table); return 0; } static void __exit exit_ath_pci(void) { - ath_sysctl_unregister(); + if (ath_sysctl_header) + unregister_sysctl_table(ath_sysctl_header); pci_unregister_driver(&ath_pci_drv_id); printk(KERN_INFO "%s: driver unloaded\n", dev_info); diff --git a/ath/if_athvar.h b/ath/if_athvar.h index c9685eb..2a5aedc 100644 --- a/ath/if_athvar.h +++ b/ath/if_athvar.h @@ -324,16 +324,6 @@ struct ath_softc { #define ATH_TXBUF_LOCK_ASSERT(_sc) \ KASSERT(spin_is_locked(&(_sc)->sc_txbuflock), ("txbuf not locked!")) -int ath_attach(u16, struct ieee80211_hw *); -int ath_detach(struct ieee80211_hw *); -void ath_resume(struct ieee80211_hw *); -void ath_suspend(struct ieee80211_hw *); -/* - *Port r1752 - Starting linux kernel v2.6.19 and later - *interrupt handlers are not passed. - */ -irqreturn_t ath_intr(int, void *); -void bus_read_cachesize(struct ath_softc *, u8 *); int ath_ioctl_ethtool(struct ath_softc *, int, void __user *); void ath_sysctl_register(void); void ath_sysctl_unregister(void);
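
The DPRINTF() macro and ATH_DEBUG_* flags added to if_ath_pci.c gate debug output on a per-category bitmask kept in sc->sc_debug. A minimal stand-alone sketch of the same pattern, in plain user-space C with invented names (DBG_*, debug_mask and DPRINT are illustrative only, not part of the driver):

#include <stdio.h>

/* illustrative debug categories; the driver's ATH_DEBUG_* list is longer */
enum {
	DBG_XMIT  = 0x0001,	/* transmit path */
	DBG_RECV  = 0x0002,	/* receive path */
	DBG_INTR  = 0x0004,	/* interrupt handling */
	DBG_FATAL = 0x8000,	/* fatal errors */
};

/* stand-in for sc->sc_debug */
static unsigned int debug_mask = DBG_INTR | DBG_FATAL;

/* same shape as the driver's DPRINTF(): print only if the category is enabled */
#define DPRINT(_m, _fmt, ...) do {		\
	if (debug_mask & (_m))			\
		printf(_fmt, ##__VA_ARGS__);	\
} while (0)

int main(void)
{
	DPRINT(DBG_INTR, "intr: status 0x%x\n", 0x10);	/* printed */
	DPRINT(DBG_XMIT, "xmit: frame queued\n");	/* suppressed */
	return 0;
}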
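ath_hw_reg_read()/ath_hw_reg_write() above are thin readl()/writel() wrappers over the mapped register window (ah->ah_sh) plus a byte offset, and ath_hw_irq_pending() just compares one such register against 1. The sketch below imitates that access pattern with an ordinary buffer instead of real MMIO; only the 0x4008 offset is taken from the patch, the rest is invented for illustration:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define REG_IRQ_PENDING 0x4008	/* same offset as ATH_HW_IRQ_PENDING above */

static uint8_t regspace[0x8000];	/* stand-in for the ioremap()ed window */

static uint32_t reg_read(uint16_t reg)
{
	uint32_t val;

	memcpy(&val, regspace + reg, sizeof(val));	/* stands in for readl(base + reg) */
	return val;
}

static void reg_write(uint32_t val, uint16_t reg)
{
	memcpy(regspace + reg, &val, sizeof(val));	/* stands in for writel(val, base + reg) */
}

int main(void)
{
	reg_write(1, REG_IRQ_PENDING);
	printf("irq pending: %d\n", reg_read(REG_IRQ_PENDING) == 1);
	return 0;
}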
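ath_intr() reads a pseudo-ISR from the hal, masks it with sc->sc_imask so only interrupts that were actually requested get acted on, and then dispatches bit by bit: fatal/overrun conditions disable interrupts and defer a reset, everything else schedules a tasklet. Stripped of the hardware and tasklet machinery, and with made-up bit names, the control flow looks roughly like this:

#include <stdio.h>

enum {
	INT_RX    = 0x01,
	INT_TX    = 0x02,
	INT_MIB   = 0x40,
	INT_FATAL = 0x80,
};

static void dispatch(unsigned int status, unsigned int imask)
{
	status &= imask;	/* discard bits we never asked for */

	if (status & INT_FATAL) {
		/* unrecoverable: mask interrupts and defer a reset */
		printf("fatal: disable interrupts, schedule reset\n");
		return;
	}
	if (status & INT_RX)
		printf("schedule rx processing\n");
	if (status & INT_TX)
		printf("schedule tx processing\n");
}

int main(void)
{
	/* hardware reports RX and MIB, but MIB was never enabled here,
	 * so the mask drops it and only RX is handled */
	dispatch(INT_RX | INT_MIB, INT_RX | INT_TX | INT_FATAL);
	return 0;
}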
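ath_attach() reads PCI_CACHE_LINE_SIZE from config space; the PCI spec gives that register in units of 32-bit words, so the code converts it to bytes with "csz << 2" before using it to size and align DMA structures. The arithmetic on its own (16 words is just an example value, typical of a 64-byte cache line):

#include <stdio.h>

int main(void)
{
	unsigned char csz = 16;			/* PCI_CACHE_LINE_SIZE: units of 4-byte words */
	unsigned int cachelsz = csz << 2;	/* sc->sc_cachelsz equivalent: 16 * 4 = 64 bytes */

	printf("cache line: %u words = %u bytes\n", csz, cachelsz);
	return 0;
}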