Support for sending fragments one by one and for sending management frames. This patch is based on Andrea Merello's ieee80211 stack from rtl8180-sa2400 project (http://sourceforge.net/projects/rtl8180-sa2400). Signed-off-by: Jiri Benc Signed-off-by: Jirka Bohac Index: netdev/net/ieee80211/ieee80211_tx.c =================================================================== --- netdev.orig/net/ieee80211/ieee80211_tx.c 2005-09-17 14:58:56.000000000 +0200 +++ netdev/net/ieee80211/ieee80211_tx.c 2005-09-17 15:09:39.000000000 +0200 @@ -2,6 +2,15 @@ Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved. + Contact Information: + James P. Ketrenos + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + + Copyright (c) 2005 Andrea Merello + Copyright (c) 2005 Jiri Benc and Jirka Bohac + + Sponsored by SUSE. + This program is free software; you can redistribute it and/or modify it under the terms of version 2 of the GNU General Public License as published by the Free Software Foundation. @@ -18,10 +27,6 @@ The full GNU General Public License is included in this distribution in the file called LICENSE. - Contact Information: - James P. Ketrenos - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - ******************************************************************************/ #include #include @@ -92,6 +97,9 @@ payload of each frame is reduced to 492 */ +static void ieee80211_xmit_frags(struct ieee80211_device *ieee); +static void ieee80211_resume_tx_mgmt(struct ieee80211_device *ieee); + static inline int ieee80211_encrypt_fragment(struct ieee80211_device *ieee, struct sk_buff *frag, int hdr_len) { @@ -156,85 +164,96 @@ static inline int ieee80211_encrypt_fram return 0; } -void ieee80211_txb_free(struct ieee80211_txb *txb) +static void ieee80211_cancel_frags(struct ieee80211_device *ieee) { - int i; - if (unlikely(!txb)) - return; - for (i = 0; i < txb->nr_frags; i++) - if (txb->fragments[i]) - dev_kfree_skb_any(txb->fragments[i]); - kfree(txb); + struct sk_buff *skb; + unsigned long flags; + + spin_lock_irqsave(&ieee->pending_frags.lock, flags); + while ((skb = __skb_dequeue(&ieee->pending_frags))) + dev_kfree_skb_any(skb); + spin_unlock_irqrestore(&ieee->pending_frags.lock, flags); } -static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size, - int gfp_mask) +static inline void ieee80211_free_frags(struct sk_buff *skb) { - struct ieee80211_txb *txb; + struct sk_buff *next; + + while (skb) { + next = skb->next; + dev_kfree_skb_any(skb); + skb = next; + } +} + +static struct sk_buff *ieee80211_alloc_frags(int nr_frags, int size) +{ + struct sk_buff *skb, *first = NULL; int i; - txb = kmalloc(sizeof(struct ieee80211_txb) + (sizeof(u8 *) * nr_frags), - gfp_mask); - if (!txb) - return NULL; - - memset(txb, 0, sizeof(struct ieee80211_txb)); - txb->nr_frags = nr_frags; - txb->frag_size = txb_size; for (i = 0; i < nr_frags; i++) { - txb->fragments[i] = dev_alloc_skb(txb_size); - if (unlikely(!txb->fragments[i])) { - i--; - break; + skb = dev_alloc_skb(size); + if (unlikely(!skb)) { + ieee80211_free_frags(first); + return NULL; } + skb->prev = NULL; + skb->next = first; + if (first) + first->prev = 
skb; + first = skb; } - if (unlikely(i != nr_frags)) { - while (i >= 0) - dev_kfree_skb_any(txb->fragments[i--]); - kfree(txb); - return NULL; + return first; +} + +static void ieee80211_queue_frags(struct sk_buff *skb, + struct ieee80211_xmit_info *info, + struct ieee80211_device *ieee) +{ + struct sk_buff *next; + unsigned long flags; + + if (!(ieee->config & IEEE80211_CFG_SINGLE_FRAGMENTS)) + return; + BUG_ON(!skb_queue_empty(&ieee->pending_frags)); + + spin_lock_irqsave(&ieee->pending_frags.lock, flags); + while (skb) { + next = skb->next; + __skb_queue_tail(&ieee->pending_frags, skb); + skb = next; } - return txb; + ieee->pending_frags_info = *info; + spin_unlock_irqrestore(&ieee->pending_frags.lock, flags); } -/* SKBs are added to the ieee->tx_queue. */ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) { struct ieee80211_device *ieee = netdev_priv(dev); struct ieee80211_hdr *header = (struct ieee80211_hdr *)skb->data; - struct ieee80211_txb *txb = NULL; struct ieee80211_hdr *frag_hdr; - int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size; + struct ieee80211_xmit_info info; + int i, bytes_per_frag, bytes_last_frag; unsigned long flags; struct net_device_stats *stats = &ieee->stats; - int type, encrypt; + int type, nondata, encrypt; int bytes, fc, hdr_len; - struct sk_buff *skb_frag; - u8 *dest; + struct sk_buff *skb_first = NULL, *skb_frag; struct ieee80211_crypt_data *crypt; spin_lock_irqsave(&ieee->lock, flags); - /* If there is no driver handler to take the TXB, dont' bother - * creating it... 
*/ - if (!ieee->hard_start_xmit) { - if (printk_ratelimit()) - printk(KERN_WARNING "%s: No xmit handler.\n", - dev->name); - goto success; - } - type = ieee80211_get_proto(header); - dest = ieee80211_get_daddr(header); hdr_len = ieee80211_get_hdrlen(header); + nondata = ieee80211_get_type(header) != IEEE80211_FTYPE_DATA; crypt = ieee->crypt[ieee->tx_keyidx]; - encrypt = !(type == ETH_P_PAE && ieee->ieee802_1x) && + encrypt = !nondata && !(type == ETH_P_PAE && ieee->ieee802_1x) && ieee->host_encrypt && crypt && crypt->ops; - if (!encrypt && ieee->ieee802_1x && + if (!encrypt && ieee->ieee802_1x && !nondata && ieee->drop_unencrypted && type != ETH_P_PAE) { stats->tx_dropped++; goto success; @@ -244,7 +263,7 @@ int ieee80211_xmit(struct sk_buff *skb, goto success; /* Determine total amount of storage required for TXB packets */ - bytes = skb->len - hdr_len; + info.payload_size = skb->len - hdr_len; fc = le16_to_cpu(header->frame_ctl); if (encrypt) @@ -252,17 +271,18 @@ int ieee80211_xmit(struct sk_buff *skb, /* Determine fragmentation size based on destination (multicast * and broadcast are not fragmented) */ - if (!(ieee->config & IEEE80211_CFG_FRAGMENT) || - is_multicast_ether_addr(dest) || is_broadcast_ether_addr(dest)) - frag_size = MAX_FRAG_THRESHOLD; + if (!(ieee->config & IEEE80211_CFG_FRAGMENT) || nondata || + is_multicast_ether_addr(header->addr1) || + is_broadcast_ether_addr(header->addr1)) + info.frag_size = MAX_FRAG_THRESHOLD; else - frag_size = ieee->fts; + info.frag_size = ieee->fts; /* Determine amount of payload per fragment. Regardless of if * this stack is providing the full 802.11 header, one will * eventually be affixed to this fragment -- so we must account for * it when determining the amount of payload space. 
*/ - bytes_per_frag = frag_size - hdr_len - IEEE80211_FCS_LEN; + bytes_per_frag = info.frag_size - hdr_len - IEEE80211_FCS_LEN; /* Each fragment may need to have room for encryptiong pre/postfix */ if (encrypt) @@ -271,37 +291,39 @@ int ieee80211_xmit(struct sk_buff *skb, /* Number of fragments is the total bytes_per_frag / * payload_per_fragment */ - nr_frags = bytes / bytes_per_frag; - bytes_last_frag = bytes % bytes_per_frag; + info.nr_frags = info.payload_size / bytes_per_frag; + bytes_last_frag = info.payload_size % bytes_per_frag; if (bytes_last_frag) - nr_frags++; + info.nr_frags++; else bytes_last_frag = bytes_per_frag; - if (nr_frags > 16) { + if (info.nr_frags > 16) { /* Should never happen */ printk(KERN_WARNING "%s: Fragmentation threshold too low\n", dev->name); goto failed; } - /* When we allocate the TXB we allocate enough space for the reserve - * and full fragment bytes (bytes_per_frag doesn't include prefix, - * postfix, header, FCS, etc.) */ - txb = ieee80211_alloc_txb(nr_frags, frag_size, GFP_ATOMIC); - if (unlikely(!txb)) { - printk(KERN_WARNING "%s: Could not allocate TXB\n", + /* When we allocate fragments we allocate enough space for the + * reserve and full fragment bytes (bytes_per_frag doesn't include + * prefix, postfix, header, FCS, etc.) */ + skb_frag = skb_first = ieee80211_alloc_frags(info.nr_frags, info.frag_size); + if (unlikely(!skb_first)) { + printk(KERN_WARNING "%s: Could not allocate fragments\n", dev->name); goto failed; } - txb->encrypted = encrypt; - txb->payload_size = bytes; + info.encryption = encrypt; - skb_pull(skb, hdr_len); + /* For now, we don't set modulation and rate. 
*/ + info.xmit_flags = 0; + info.modulation = 0; + info.rate = 0; - for (i = 0; i < nr_frags; i++) { - skb_frag = txb->fragments[i]; + skb_pull(skb, hdr_len); + for (i = 0; i < info.nr_frags; i++) { if (encrypt) skb_reserve(skb_frag, crypt->ops->extra_prefix_len); @@ -312,7 +334,7 @@ int ieee80211_xmit(struct sk_buff *skb, /* If this is not the last fragment, then add the MOREFRAGS * bit to the frame control */ - if (i != nr_frags - 1) { + if (i != info.nr_frags - 1) { frag_hdr->frame_ctl = cpu_to_le16(fc | IEEE80211_FCTL_MOREFRAGS); bytes = bytes_per_frag; @@ -334,6 +356,8 @@ int ieee80211_xmit(struct sk_buff *skb, /* FCS */ skb_put(skb_frag, 4); + + skb_frag = skb_frag->next; } /* Sequence number is stored in bits 4 to 15 of the Sequence Control * field in 802.11 header. The seq_number field is organized the @@ -344,16 +368,21 @@ int ieee80211_xmit(struct sk_buff *skb, success: spin_unlock_irqrestore(&ieee->lock, flags); - dev_kfree_skb_any(skb); - - if (txb) { - if (ieee->hard_start_xmit(txb, ieee) == 0) { + if (likely(skb_first)) { + if ((info.nr_frags > 1) && + (ieee->config & IEEE80211_CFG_SINGLE_FRAGMENTS)) { + ieee80211_queue_frags(skb_first, &info, ieee); + ieee80211_xmit_frags(ieee); + } else { + if (ieee->hard_start_xmit(skb_first, &info, ieee) != 0) { + ieee80211_free_frags(skb_first); + return 1; + } stats->tx_packets++; - stats->tx_bytes += txb->payload_size; - return 0; + stats->tx_bytes += info.payload_size; } - ieee80211_txb_free(txb); } + dev_kfree_skb_any(skb); return 0; @@ -365,4 +394,122 @@ int ieee80211_xmit(struct sk_buff *skb, } -EXPORT_SYMBOL(ieee80211_txb_free); + +/* Following are for a simplier TX queue management. Instead of using + * netif_[stop/wake]_queue the driver will use ieee80211_[stop/wake]_queue + * functions, that internally use the kernel netif_* and takes care of + * the ieee802.11 fragmentation. 
+ * So the driver receives one fragment at a time and might call the stop + * function whenever it wants without taking care to have enough room to TX an + * entire packet. + * This is useful if each fragment needs its own descriptor, thus just + * keeping total free memory greater than the maximum fragmentation threshold is not + * enough. + */ + +static void ieee80211_xmit_frags(struct ieee80211_device *ieee) +{ + struct sk_buff *skb; + unsigned skb_len; + + while (!ieee80211_queue_stopped(ieee) && + (skb = skb_dequeue(&ieee->pending_frags))) { + skb_len = skb->len; + if (ieee->hard_start_xmit(skb, &ieee->pending_frags_info, ieee) != 0) { + skb_queue_head(&ieee->pending_frags, skb); + return; + } + ieee->stats.tx_packets++; + ieee->stats.tx_bytes += skb_len; + ieee->dev->trans_start = jiffies; + } +} + + +void ieee80211_reset_queue(struct ieee80211_device *ieee) +{ + ieee80211_cancel_frags(ieee); + /* TODO */ +} + + +void __ieee80211_tx_task(unsigned long data) +{ + struct ieee80211_device *ieee = (struct ieee80211_device *)data; + + if (ieee->config & IEEE80211_CFG_SINGLE_FRAGMENTS) { + ieee80211_xmit_frags(ieee); + if (ieee80211_queue_stopped(ieee) || + skb_queue_empty(&ieee->pending_frags)) + return; + } + + ieee80211_resume_tx_mgmt(ieee); + + if (!ieee80211_queue_stopped(ieee)) + netif_wake_queue(ieee80211_dev(ieee)); +} + + +/* Management frames transmission */ + + +static void __ieee80211_xmit_mgmt(struct sk_buff *skb, struct ieee80211_device *ieee) +{ + struct net_device *dev = ieee80211_dev(ieee); + unsigned nolock = (dev->features & NETIF_F_LLTX); + int res; + + /* We cannot call dev_queue_xmit as management frames have to be + * transmitted even when carrier flag is not set. But we have to + * deal with locking in the same way as dev_queue_xmit does. 
*/ + local_bh_disable(); + + if (!nolock) { + spin_lock(&dev->xmit_lock); + dev->xmit_lock_owner = smp_processor_id(); + } + if (!netif_queue_stopped(dev)) + res = ieee80211_xmit(skb, dev); + else + res = NETDEV_TX_BUSY; + if (!nolock) { + dev->xmit_lock_owner = -1; + spin_unlock(&dev->xmit_lock); + } + + local_bh_enable(); + + if (!res) + skb_queue_head(&ieee->mgmt_queue, skb); +} + +void ieee80211_xmit_mgmt(struct sk_buff *skb, struct ieee80211_device *ieee) +{ + unsigned long flags; + + spin_lock_irqsave(&ieee->mgmt_queue.lock, flags); + if (!skb_queue_empty(&ieee->mgmt_queue)) { + __skb_queue_tail(&ieee->mgmt_queue, skb); + skb = __skb_dequeue(&ieee->mgmt_queue); + } + spin_unlock_irqrestore(&ieee->mgmt_queue.lock, flags); + + __ieee80211_xmit_mgmt(skb, ieee); +} + + +static void ieee80211_resume_tx_mgmt(struct ieee80211_device *ieee) +{ + struct sk_buff *skb; + + while (!ieee80211_queue_stopped(ieee)) { + skb = skb_dequeue(&ieee->mgmt_queue); + if (!skb) + break; + __ieee80211_xmit_mgmt(skb, ieee); + } +} + + +EXPORT_SYMBOL(ieee80211_reset_queue); Index: netdev/include/net/ieee80211.h =================================================================== --- netdev.orig/include/net/ieee80211.h 2005-09-17 15:04:03.000000000 +0200 +++ netdev/include/net/ieee80211.h 2005-09-17 15:12:37.000000000 +0200 @@ -13,6 +13,11 @@ * * Copyright (c) 2004, Intel Corporation * + * Copyright (c) 2005 Andrea Merello + * Copyright (c) 2005 Jiri Benc and Jirka Bohac + * + * Sponsored by SUSE. + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
See README and COPYING for @@ -444,6 +449,20 @@ extern u8 ieee80211_rate_values[IEEE8021 IEEE80211_RATE_12MB_MASK | \ IEEE80211_RATE_24MB_MASK) +struct ieee80211_xmit_info { + int nr_frags; + int frag_size; + int payload_size; + int encryption; + int modulation; + int rate; + /* The driver is allowed to ignore modulation and rate suggested by + * the ieee80211 layer (however, in such case it is meaningless to + * set IEEE80211_CFG_COMPUTE_DURATION or IEEE80211_CFG_COMPUTE_FCS, + * etc.) */ + u16 xmit_flags; /* WLAN_CAPABILITY_SHORT_PREAMBLE, WLAN_CAPABILITY_SHORT_SLOT_TIME */ +}; + /* NOTE: This data is for statistical purposes; not all hardware provides this * information for frames received. Not setting these will not cause * any adverse affects. */ @@ -657,13 +676,9 @@ struct ieee80211_assoc_response { #define ieee80211_disassoc ieee80211_deauth #define ieee80211_beacon ieee80211_probe_response -struct ieee80211_txb { - u8 nr_frags; - u8 encrypted; - u16 reserved; - u16 frag_size; - u16 payload_size; - struct sk_buff *fragments[0]; +/* Do not use these flags directly; use ieee80211_queue_stopped() instead. 
*/ +enum ieee80211_queue_state { + IEEE80211_TX_STOPPED = 0, }; #define MAX_NETWORK_COUNT 128 @@ -721,7 +736,8 @@ enum ieee80211_state { #define IEEE80211_CFG_COMPUTE_FCS (1 << 0) /* perform software fragmentation */ #define IEEE80211_CFG_FRAGMENT (1 << 1) -/* send fragments to hard_start_xmit one by one */ +/* send fragments to hard_start_xmit one by one (not compatible with + * NETIF_F_LLTX) */ #define IEEE80211_CFG_SINGLE_FRAGMENTS (1 << 2) /* hw needs to be reset on WEP keys change */ #define IEEE80211_CFG_RESET_ON_KEY_CHANGE (1 << 3) @@ -804,14 +820,36 @@ struct ieee80211_device { unsigned modulations; u32 rates; + /* Pending fragments for hard_start_xmit callback */ + struct sk_buff_head pending_frags; + struct ieee80211_xmit_info pending_frags_info; + + /* Management queue */ + struct sk_buff_head mgmt_queue; + + struct tasklet_struct tx_tasklet; + long queue_state; + int perfect_rssi; int worst_rssi; /* Callback functions */ + void (*set_security)(struct ieee80211_device *dev, struct ieee80211_security *sec); - int (*hard_start_xmit)(struct ieee80211_txb *txb, + + /* Used to transmit frames. When IEEE80211_CFG_SINGLE_FRAGMENTS is + * set, fragments are sent one by one. If not set, all the fragments + * are linked together using next and prev fields in sk_buff and the + * first fragment is passed as skb parameter. The callback should + * return nonzero value in case of error; the frame will be + * resubmitted later. When returning zero, the driver is responsible + * for freeing all sk_buffs passed to it. Info parameter must not be + * freed. 
*/ + int (*hard_start_xmit)(struct sk_buff *skb, + struct ieee80211_xmit_info *info, struct ieee80211_device *dev); + int (*reset_port)(struct ieee80211_device *dev); void *priv; @@ -927,6 +965,40 @@ extern inline int ieee80211_get_proto(st ntohs(snap->type) : ETH_P_802_2); } +static inline int ieee80211_get_type(struct ieee80211_hdr *header) +{ + return WLAN_FC_GET_TYPE(le16_to_cpu((header)->frame_ctl)); +} + +static inline int ieee80211_get_stype(struct ieee80211_hdr *header) +{ + return WLAN_FC_GET_STYPE(le16_to_cpu((header)->frame_ctl)); +} + + +static inline void ieee80211_stop_queue(struct ieee80211_device *ieee) +{ + if (!test_and_set_bit(IEEE80211_TX_STOPPED, &ieee->queue_state)) + netif_stop_queue(ieee80211_dev(ieee)); +} + +static inline void ieee80211_start_queue(struct ieee80211_device *ieee) +{ + clear_bit(IEEE80211_TX_STOPPED, &ieee->queue_state);; + netif_start_queue(ieee80211_dev(ieee)); +} + +static inline void ieee80211_wake_queue(struct ieee80211_device *ieee) +{ + if (test_and_clear_bit(IEEE80211_TX_STOPPED, &ieee->queue_state)) + tasklet_schedule(&ieee->tx_tasklet); +} + +static inline int ieee80211_queue_stopped(struct ieee80211_device *ieee) +{ + return test_bit(IEEE80211_TX_STOPPED, &ieee->queue_state); +} + /* ieee80211.c */ extern void free_ieee80211(struct ieee80211_device *ieee); extern struct ieee80211_device *alloc_ieee80211(int sizeof_priv); @@ -936,7 +1008,7 @@ extern int ieee80211_set_encryption(stru /* ieee80211_tx.c */ extern int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev); -extern void ieee80211_txb_free(struct ieee80211_txb *); +extern void ieee80211_reset_queue(struct ieee80211_device *ieee); /* ieee80211_rx.c */ extern int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, Index: netdev/net/ieee80211/ieee80211_module.c =================================================================== --- netdev.orig/net/ieee80211/ieee80211_module.c 2005-09-17 14:52:53.000000000 +0200 +++ 
netdev/net/ieee80211/ieee80211_module.c 2005-09-17 15:09:39.000000000 +0200 @@ -51,6 +51,7 @@ #include #include +#include "ieee80211_layer.h" MODULE_DESCRIPTION("802.11 data/management/control stack"); MODULE_AUTHOR @@ -151,6 +152,12 @@ struct ieee80211_device *alloc_ieee80211 spin_lock_init(&ieee->lock); + skb_queue_head_init(&ieee->pending_frags); + skb_queue_head_init(&ieee->mgmt_queue); + + tasklet_init(&ieee->tx_tasklet, __ieee80211_tx_task, + (unsigned long)ieee); + ieee->wpa_enabled = 0; ieee->tkip_countermeasures = 0; ieee->drop_unencrypted = 0; @@ -184,6 +191,8 @@ void free_ieee80211(struct ieee80211_dev } } + ieee80211_reset_queue(ieee); + ieee80211_networks_free(ieee); free_netdev(ieee80211_dev(ieee)); } Index: netdev/net/ieee80211/ieee80211_layer.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ netdev/net/ieee80211/ieee80211_layer.h 2005-09-17 15:09:39.000000000 +0200 @@ -0,0 +1,19 @@ +/* + * Declarations internal to ieee80211 layer. + * + * Copyright (c) 2005 Jiri Benc and Jirka Bohac + * + * Sponsored by SUSE. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. See README and COPYING for + * more details. + */ +#ifndef IEEE80211_LAYER_H +#define IEEE80211_LAYER_H + +/* ieee80211_tx.c */ +extern void __ieee80211_tx_task(unsigned long data); + +#endif