From mithlesh@linsyssoft.com Fri Jan 16 18:20:59 2009
From: Mithlesh Thukral
Date: Mon, 5 Jan 2009 21:14:34 +0530 (IST)
Subject: Staging: sxg: Typedef removal - pending work
To: Greg KH
Cc: Sahara Project , Richard Blackborow , Michael Miles , Christopher Harrer
Message-ID:

From: Mithlesh Thukral

This patch removes all typedefs in the code. These are the typedefs which
were still present in the driver in the staging tree after the cleanup
patches.

Signed-off-by: LinSysSoft Sahara Team
Signed-off-by: Christopher Harrer
Signed-off-by: Greg Kroah-Hartman
---
 drivers/staging/sxg/sxg.c        |  263 +++++++++++++++------------------------
 drivers/staging/sxg/sxg.h        |   80 +++++------
 drivers/staging/sxg/sxg_os.h     |   22 +--
 drivers/staging/sxg/sxgdbg.h     |   14 +-
 drivers/staging/sxg/sxghif.h     |  159 +++++++++++------------
 drivers/staging/sxg/sxghw.h      |   70 +++++-----
 drivers/staging/sxg/sxgphycode.h |    2
 7 files changed, 280 insertions(+), 330 deletions(-)

--- a/drivers/staging/sxg/sxg.c
+++ b/drivers/staging/sxg/sxg.c
@@ -81,41 +81,41 @@
 #include "saharadbgdownload.h"

 static int sxg_allocate_buffer_memory(struct adapter_t *adapter, u32 Size,
-				      enum SXG_BUFFER_TYPE BufferType);
+				      enum sxg_buffer_type BufferType);
 static void sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
					   void *RcvBlock,
					   dma_addr_t PhysicalAddress,
					   u32 Length);
 static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter,
-					     struct SXG_SCATTER_GATHER *SxgSgl,
+					     struct sxg_scatter_gather *SxgSgl,
					     dma_addr_t PhysicalAddress,
					     u32 Length);

 static void sxg_mcast_init_crc32(void);

-static int sxg_entry_open(p_net_device dev);
-static int sxg_entry_halt(p_net_device dev);
-static int sxg_ioctl(p_net_device dev, struct ifreq *rq, int cmd);
-static int sxg_send_packets(struct sk_buff *skb, p_net_device dev);
+static int sxg_entry_open(struct net_device *dev);
+static int sxg_entry_halt(struct net_device *dev);
+static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev);
 static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb);
-static void sxg_dumb_sgl(struct SXG_X64_SGL *pSgl, struct SXG_SCATTER_GATHER *SxgSgl);
+static void sxg_dumb_sgl(struct sxg_x64_sgl *pSgl, struct sxg_scatter_gather *SxgSgl);

 static void sxg_handle_interrupt(struct adapter_t *adapter);
 static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId);
 static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId);
 static void sxg_complete_slow_send(struct adapter_t *adapter);
-static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter, struct SXG_EVENT *Event);
+static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter, struct sxg_event *Event);
 static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus);
 static bool sxg_mac_filter(struct adapter_t *adapter,
			   struct ether_header *EtherHdr, ushort length);

 #if SLIC_GET_STATS_ENABLED
-static struct net_device_stats *sxg_get_stats(p_net_device dev);
+static struct net_device_stats *sxg_get_stats(struct net_device *dev);
 #endif

 #define XXXTODO 0

-static int sxg_mac_set_address(p_net_device dev, void *ptr);
-static void sxg_mcast_set_list(p_net_device dev);
+static int sxg_mac_set_address(struct net_device *dev, void *ptr);
+static void sxg_mcast_set_list(struct net_device *dev);

 static void sxg_adapter_set_hwaddr(struct adapter_t *adapter);

@@ -141,9 +141,9 @@ static char *sxg_banner =
 static int sxg_debug = 1;
 static int debug = -1;
-static p_net_device head_netdevice =
NULL; +static struct net_device *head_netdevice = NULL; -static struct sxgbase_driver_t sxg_global = { +static struct sxgbase_driver sxg_global = { .dynamic_intagg = 1, }; static int intagg_delay = 100; @@ -223,12 +223,12 @@ static void sxg_dbg_macaddrs(struct adap } /* SXG Globals */ -static struct SXG_DRIVER SxgDriver; +static struct sxg_driver SxgDriver; #ifdef ATKDBG -static struct sxg_trace_buffer_t LSxgTraceBuffer; +static struct sxg_trace_buffer LSxgTraceBuffer; #endif /* ATKDBG */ -static struct sxg_trace_buffer_t *SxgTraceBuffer = NULL; +static struct sxg_trace_buffer *SxgTraceBuffer = NULL; /* * sxg_download_microcode @@ -244,7 +244,7 @@ static struct sxg_trace_buffer_t *SxgTra */ static bool sxg_download_microcode(struct adapter_t *adapter, enum SXG_UCODE_SEL UcodeSel) { - struct SXG_HW_REGS *HwRegs = adapter->HwRegs; + struct sxg_hw_regs *HwRegs = adapter->HwRegs; u32 Section; u32 ThisSectionSize; u32 *Instruction = NULL; @@ -419,8 +419,8 @@ static int sxg_allocate_resources(struct int status; u32 i; u32 RssIds, IsrCount; -/* struct SXG_XMT_RING *XmtRing; */ -/* struct SXG_RCV_RING *RcvRing; */ +/* struct sxg_xmt_ring *XmtRing; */ +/* struct sxg_rcv_ring *RcvRing; */ DBG_ERROR("%s ENTER\n", __func__); @@ -459,13 +459,13 @@ static int sxg_allocate_resources(struct for (;;) { DBG_ERROR("%s Allocate XmtRings size[%x]\n", __func__, - (unsigned int)(sizeof(struct SXG_XMT_RING) * 1)); + (unsigned int)(sizeof(struct sxg_xmt_ring) * 1)); /* Start with big items first - receive and transmit rings. At the moment */ /* I'm going to keep the ring size fixed and adjust the number of */ /* TCBs if we fail. Later we might consider reducing the ring size as well.. */ adapter->XmtRings = pci_alloc_consistent(adapter->pcidev, - sizeof(struct SXG_XMT_RING) * + sizeof(struct sxg_xmt_ring) * 1, &adapter->PXmtRings); DBG_ERROR("%s XmtRings[%p]\n", __func__, adapter->XmtRings); @@ -473,33 +473,33 @@ static int sxg_allocate_resources(struct if (!adapter->XmtRings) { goto per_tcb_allocation_failed; } - memset(adapter->XmtRings, 0, sizeof(struct SXG_XMT_RING) * 1); + memset(adapter->XmtRings, 0, sizeof(struct sxg_xmt_ring) * 1); DBG_ERROR("%s Allocate RcvRings size[%x]\n", __func__, - (unsigned int)(sizeof(struct SXG_RCV_RING) * 1)); + (unsigned int)(sizeof(struct sxg_rcv_ring) * 1)); adapter->RcvRings = pci_alloc_consistent(adapter->pcidev, - sizeof(struct SXG_RCV_RING) * 1, + sizeof(struct sxg_rcv_ring) * 1, &adapter->PRcvRings); DBG_ERROR("%s RcvRings[%p]\n", __func__, adapter->RcvRings); if (!adapter->RcvRings) { goto per_tcb_allocation_failed; } - memset(adapter->RcvRings, 0, sizeof(struct SXG_RCV_RING) * 1); + memset(adapter->RcvRings, 0, sizeof(struct sxg_rcv_ring) * 1); break; per_tcb_allocation_failed: /* an allocation failed. Free any successful allocations. 
*/ if (adapter->XmtRings) { pci_free_consistent(adapter->pcidev, - sizeof(struct SXG_XMT_RING) * 4096, + sizeof(struct sxg_xmt_ring) * 1, adapter->XmtRings, adapter->PXmtRings); adapter->XmtRings = NULL; } if (adapter->RcvRings) { pci_free_consistent(adapter->pcidev, - sizeof(struct SXG_RCV_RING) * 4096, + sizeof(struct sxg_rcv_ring) * 1, adapter->RcvRings, adapter->PRcvRings); adapter->RcvRings = NULL; @@ -515,7 +515,7 @@ static int sxg_allocate_resources(struct /* Sanity check receive data structure format */ ASSERT((adapter->ReceiveBufferSize == SXG_RCV_DATA_BUFFER_SIZE) || (adapter->ReceiveBufferSize == SXG_RCV_JUMBO_BUFFER_SIZE)); - ASSERT(sizeof(struct SXG_RCV_DESCRIPTOR_BLOCK) == + ASSERT(sizeof(struct sxg_rcv_descriptor_block) == SXG_RCV_DESCRIPTOR_BLOCK_SIZE); /* Allocate receive data buffers. We allocate a block of buffers and */ @@ -537,11 +537,11 @@ static int sxg_allocate_resources(struct } DBG_ERROR("%s Allocate EventRings size[%x]\n", __func__, - (unsigned int)(sizeof(struct SXG_EVENT_RING) * RssIds)); + (unsigned int)(sizeof(struct sxg_event_ring) * RssIds)); /* Allocate event queues. */ adapter->EventRings = pci_alloc_consistent(adapter->pcidev, - sizeof(struct SXG_EVENT_RING) * + sizeof(struct sxg_event_ring) * RssIds, &adapter->PEventRings); @@ -552,7 +552,7 @@ static int sxg_allocate_resources(struct status = STATUS_RESOURCES; goto per_tcb_allocation_failed; } - memset(adapter->EventRings, 0, sizeof(struct SXG_EVENT_RING) * RssIds); + memset(adapter->EventRings, 0, sizeof(struct sxg_event_ring) * RssIds); DBG_ERROR("%s Allocate ISR size[%x]\n", __func__, IsrCount); /* Allocate ISR */ @@ -629,12 +629,12 @@ static unsigned char temp_mac_address[6] static inline int sxg_read_config(struct adapter_t *adapter) { //struct sxg_config data; - struct SW_CFG_DATA *data; + struct sw_cfg_data *data; dma_addr_t p_addr; unsigned long status; unsigned long i; - data = pci_alloc_consistent(adapter->pcidev, sizeof(struct SW_CFG_DATA), &p_addr); + data = pci_alloc_consistent(adapter->pcidev, sizeof(struct sw_cfg_data), &p_addr); if(!data) { /* We cant get even this much memory. 
Raise a hell * Get out of here @@ -669,7 +669,7 @@ static inline int sxg_read_config(struct "Status = %ld\n", __FUNCTION__, status); break; } - pci_free_consistent(adapter->pcidev, sizeof(struct SW_CFG_DATA), data, p_addr); + pci_free_consistent(adapter->pcidev, sizeof(struct sw_cfg_data), data, p_addr); if (adapter->netdev) { memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6); memcpy(adapter->netdev->perm_addr, adapter->currmacaddr, 6); @@ -990,7 +990,7 @@ static void sxg_enable_interrupt(struct */ static irqreturn_t sxg_isr(int irq, void *dev_id) { - p_net_device dev = (p_net_device) dev_id; + struct net_device *dev = (struct net_device *) dev_id; struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev); /* u32 CpuMask = 0, i; */ @@ -1019,8 +1019,8 @@ static irqreturn_t sxg_isr(int irq, void for (i = 0; i < adapter->RssSystemInfo->ProcessorInfo.RssCpuCount; i++) { - struct XG_EVENT_RING *EventRing = &adapter->EventRings[i]; - struct SXG_EVENT *Event = + struct sxg_event_ring *EventRing = &adapter->EventRings[i]; + struct sxg_event *Event = &EventRing->Ring[adapter->NextEvent[i]]; unsigned char Cpu = adapter->RssSystemInfo->RssIdToCpu[i]; @@ -1213,8 +1213,8 @@ static int sxg_process_isr(struct adapte */ static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId) { - struct SXG_EVENT_RING *EventRing = &adapter->EventRings[RssId]; - struct SXG_EVENT *Event = &EventRing->Ring[adapter->NextEvent[RssId]]; + struct sxg_event_ring *EventRing = &adapter->EventRings[RssId]; + struct sxg_event *Event = &EventRing->Ring[adapter->NextEvent[RssId]]; u32 EventsProcessed = 0, Batches = 0; u32 num_skbs = 0; struct sk_buff *skb; @@ -1222,7 +1222,7 @@ static u32 sxg_process_event_queue(struc struct sk_buff *prev_skb = NULL; struct sk_buff *IndicationList[SXG_RCV_ARRAYSIZE]; u32 Index; - struct SXG_RCV_DATA_BUFFER_HDR *RcvDataBufferHdr; + struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr; #endif u32 ReturnStatus = 0; @@ -1244,7 +1244,7 @@ static u32 sxg_process_event_queue(struc adapter->NextEvent); switch (Event->Code) { case EVENT_CODE_BUFFERS: - ASSERT(!(Event->CommandIndex & 0xFF00)); /* SXG_RING_INFO Head & Tail == unsigned char */ + ASSERT(!(Event->CommandIndex & 0xFF00)); /* struct sxg_ring_info Head & Tail == unsigned char */ /* */ sxg_complete_descriptor_blocks(adapter, Event->CommandIndex); @@ -1351,10 +1351,10 @@ static u32 sxg_process_event_queue(struc */ static void sxg_complete_slow_send(struct adapter_t *adapter) { - struct SXG_XMT_RING *XmtRing = &adapter->XmtRings[0]; - struct SXG_RING_INFO *XmtRingInfo = &adapter->XmtRingZeroInfo; + struct sxg_xmt_ring *XmtRing = &adapter->XmtRings[0]; + struct sxg_ring_info *XmtRingInfo = &adapter->XmtRingZeroInfo; u32 *ContextType; - struct SXG_CMD *XmtCmd; + struct sxg_cmd *XmtCmd; /* NOTE - This lock is dropped and regrabbed in this loop. */ /* This means two different processors can both be running */ @@ -1377,7 +1377,7 @@ static void sxg_complete_slow_send(struc case SXG_SGL_DUMB: { struct sk_buff *skb; - struct SXG_SCATTER_GATHER *SxgSgl = (struct SXG_SCATTER_GATHER *)ContextType; + struct sxg_scatter_gather *SxgSgl = (struct sxg_scatter_gather *)ContextType; /* Dumb-nic send. 
Command context is the dumb-nic SGL */ skb = (struct sk_buff *)ContextType; @@ -1419,16 +1419,16 @@ static void sxg_complete_slow_send(struc * Return * skb */ -static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter, struct SXG_EVENT *Event) +static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter, struct sxg_event *Event) { - struct SXG_RCV_DATA_BUFFER_HDR *RcvDataBufferHdr; + struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr; struct sk_buff *Packet; unsigned char*data; int i; char dstr[128]; char *dptr = dstr; - RcvDataBufferHdr = (struct SXG_RCV_DATA_BUFFER_HDR*) Event->HostHandle; + RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr *) Event->HostHandle; ASSERT(RcvDataBufferHdr); ASSERT(RcvDataBufferHdr->State == SXG_BUFFER_ONCARD); SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "SlowRcv", Event, @@ -1633,7 +1633,7 @@ static bool sxg_mac_filter(struct adapte return (TRUE); } if (adapter->MacFilter & MAC_MCAST) { - struct SXG_MULTICAST_ADDRESS *MulticastAddrs = + struct sxg_multicast_address *MulticastAddrs = adapter->MulticastAddrs; while (MulticastAddrs) { ETHER_EQ_ADDR(MulticastAddrs->Address, @@ -1736,7 +1736,7 @@ static void sxg_deregister_interrupt(str */ static int sxg_if_init(struct adapter_t *adapter) { - p_net_device dev = adapter->netdev; + struct net_device *dev = adapter->netdev; int status = 0; DBG_ERROR("sxg: %s (%s) ENTER states[%d:%d] flags[%x]\n", @@ -1792,7 +1792,7 @@ static int sxg_if_init(struct adapter_t return (STATUS_SUCCESS); } -static int sxg_entry_open(p_net_device dev) +static int sxg_entry_open(struct net_device *dev) { struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev); int status; @@ -1847,7 +1847,7 @@ static int sxg_entry_open(p_net_device d static void __devexit sxg_entry_remove(struct pci_dev *pcidev) { - p_net_device dev = pci_get_drvdata(pcidev); + struct net_device *dev = pci_get_drvdata(pcidev); u32 mmio_start = 0; unsigned int mmio_len = 0; struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev); @@ -1876,7 +1876,7 @@ static void __devexit sxg_entry_remove(s DBG_ERROR("sxg: %s EXIT\n", __func__); } -static int sxg_entry_halt(p_net_device dev) +static int sxg_entry_halt(struct net_device *dev) { struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev); @@ -1896,7 +1896,7 @@ static int sxg_entry_halt(p_net_device d return (STATUS_SUCCESS); } -static int sxg_ioctl(p_net_device dev, struct ifreq *rq, int cmd) +static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { ASSERT(rq); /* DBG_ERROR("sxg: %s cmd[%x] rq[%p] dev[%p]\n", __func__, cmd, rq, dev); */ @@ -1939,7 +1939,7 @@ static int sxg_ioctl(p_net_device dev, s * Return: * 0 regardless of outcome XXXTODO refer to e1000 driver */ -static int sxg_send_packets(struct sk_buff *skb, p_net_device dev) +static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev) { struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev); u32 status = STATUS_SUCCESS; @@ -2011,8 +2011,8 @@ static int sxg_send_packets(struct sk_bu */ static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb) { - struct SXG_X64_SGL *pSgl; - struct SXG_SCATTER_GATHER *SxgSgl; + struct sxg_x64_sgl *pSgl; + struct sxg_scatter_gather *SxgSgl; void *SglBuffer; u32 SglBufferLength; @@ -2050,19 +2050,19 @@ static int sxg_transmit_packet(struct ad * * Arguments: * pSgl - - * SxgSgl - SXG_SCATTER_GATHER + * SxgSgl - struct sxg_scatter_gather * * Return Value: * None. 
*/ -static void sxg_dumb_sgl(struct SXG_X64_SGL *pSgl, struct SXG_SCATTER_GATHER *SxgSgl) +static void sxg_dumb_sgl(struct sxg_x64_sgl *pSgl, struct sxg_scatter_gather *SxgSgl) { struct adapter_t *adapter = SxgSgl->adapter; struct sk_buff *skb = SxgSgl->DumbPacket; /* For now, all dumb-nic sends go on RSS queue zero */ - struct SXG_XMT_RING *XmtRing = &adapter->XmtRings[0]; - struct SXG_RING_INFO *XmtRingInfo = &adapter->XmtRingZeroInfo; - struct SXG_CMD *XmtCmd = NULL; + struct sxg_xmt_ring *XmtRing = &adapter->XmtRings[0]; + struct sxg_ring_info *XmtRingInfo = &adapter->XmtRingZeroInfo; + struct sxg_cmd *XmtCmd = NULL; /* u32 Index = 0; */ u32 DataLength = skb->len; /* unsigned int BufLen; */ @@ -2084,7 +2084,7 @@ static void sxg_dumb_sgl(struct SXG_X64_ SxgSgl->pSgl = pSgl; /* Sanity check that our SGL format is as we expect. */ - ASSERT(sizeof(struct SXG_X64_SGE) == sizeof(struct SXG_X64_SGE)); + ASSERT(sizeof(struct sxg_x64_sge) == sizeof(struct sxg_x64_sge)); /* Shouldn't be a vlan tag on this frame */ ASSERT(SxgSgl->VlanTag.VlanTci == 0); ASSERT(SxgSgl->VlanTag.VlanTpid == 0); @@ -2191,7 +2191,7 @@ static void sxg_dumb_sgl(struct SXG_X64_ */ static int sxg_initialize_link(struct adapter_t *adapter) { - struct SXG_HW_REGS *HwRegs = adapter->HwRegs; + struct sxg_hw_regs *HwRegs = adapter->HwRegs; u32 Value; u32 ConfigData; u32 MaxFrame; @@ -2350,7 +2350,7 @@ static int sxg_initialize_link(struct ad static int sxg_phy_init(struct adapter_t *adapter) { u32 Value; - struct PHY_UCODE *p; + struct phy_ucode *p; int status; DBG_ERROR("ENTER %s\n", __func__); @@ -2397,7 +2397,7 @@ static int sxg_phy_init(struct adapter_t */ static void sxg_link_event(struct adapter_t *adapter) { - struct SXG_HW_REGS *HwRegs = adapter->HwRegs; + struct sxg_hw_regs *HwRegs = adapter->HwRegs; enum SXG_LINK_STATE LinkState; int status; u32 Value; @@ -2574,7 +2574,7 @@ static void sxg_link_state(struct adapte static int sxg_write_mdio_reg(struct adapter_t *adapter, u32 DevAddr, u32 RegAddr, u32 Value) { - struct SXG_HW_REGS *HwRegs = adapter->HwRegs; + struct sxg_hw_regs *HwRegs = adapter->HwRegs; u32 AddrOp; /* Address operation (written to MIIM field reg) */ u32 WriteOp; /* Write operation (written to MIIM field reg) */ u32 Cmd; /* Command (written to MIIM command reg) */ @@ -2664,7 +2664,7 @@ static int sxg_write_mdio_reg(struct ada static int sxg_read_mdio_reg(struct adapter_t *adapter, u32 DevAddr, u32 RegAddr, u32 *pValue) { - struct SXG_HW_REGS *HwRegs = adapter->HwRegs; + struct sxg_hw_regs *HwRegs = adapter->HwRegs; u32 AddrOp; /* Address operation (written to MIIM field reg) */ u32 ReadOp; /* Read operation (written to MIIM field reg) */ u32 Cmd; /* Command (written to MIIM command reg) */ @@ -2813,7 +2813,7 @@ static unsigned char sxg_mcast_get_mac_h static void sxg_mcast_set_mask(struct adapter_t *adapter) { - struct SXG_UCODE_REGS *sxg_regs = adapter->UcodeRegs; + struct sxg_ucode_regs *sxg_regs = adapter->UcodeRegs; DBG_ERROR("%s ENTER (%s) macopts[%x] mask[%llx]\n", __func__, adapter->netdev->name, (unsigned int)adapter->MacFilter, @@ -2853,7 +2853,7 @@ static void sxg_mcast_set_mask(struct ad */ static int sxg_mcast_add_list(struct adapter_t *adapter, char *address) { - struct mcast_address_t *mcaddr, *mlist; + struct mcast_address *mcaddr, *mlist; bool equaladdr; /* Check to see if it already exists */ @@ -2867,7 +2867,7 @@ static int sxg_mcast_add_list(struct ada } /* Doesn't already exist. 
Allocate a structure to hold it */ - mcaddr = kmalloc(sizeof(struct mcast_address_t), GFP_ATOMIC); + mcaddr = kmalloc(sizeof(struct mcast_address), GFP_ATOMIC); if (mcaddr == NULL) return 1; @@ -2895,14 +2895,10 @@ static void sxg_mcast_set_bit(struct ada adapter->MulticastMask |= (u64) 1 << crcpoly; } -static void sxg_mcast_set_list(p_net_device dev) +static void sxg_mcast_set_list(struct net_device *dev) { struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev); int status = STATUS_SUCCESS; - int i; - char *addresses; - struct dev_mc_list *mc_list = dev->mc_list; - int mc_count = dev->mc_count; ASSERT(adapter); if (dev->flags & IFF_PROMISC) { @@ -2910,53 +2906,6 @@ static void sxg_mcast_set_list(p_net_dev } //XXX handle other flags as well sxg_mcast_set_mask(adapter); - -#if 0 - - for (i = 1; i <= mc_count; i++) { - addresses = (char *)&mc_list->dmi_addr; - if (mc_list->dmi_addrlen == 6) { - status = sxg_mcast_add_list(adapter, addresses); - if (status != STATUS_SUCCESS) { - break; - } - } else { - status = -EINVAL; - break; - } - sxg_mcast_set_bit(adapter, addresses); - mc_list = mc_list->next; - } - - DBG_ERROR("%s a->devflags_prev[%x] dev->flags[%x] status[%x]\n", - __func__, adapter->devflags_prev, dev->flags, status); - if (adapter->devflags_prev != dev->flags) { - adapter->macopts = MAC_DIRECTED; - if (dev->flags) { - if (dev->flags & IFF_BROADCAST) { - adapter->macopts |= MAC_BCAST; - } - if (dev->flags & IFF_PROMISC) { - adapter->macopts |= MAC_PROMISC; - } - if (dev->flags & IFF_ALLMULTI) { - adapter->macopts |= MAC_ALLMCAST; - } - if (dev->flags & IFF_MULTICAST) { - adapter->macopts |= MAC_MCAST; - } - } - adapter->devflags_prev = dev->flags; - DBG_ERROR("%s call sxg_config_set adapter->macopts[%x]\n", - __func__, adapter->macopts); - sxg_config_set(adapter, TRUE); - } else { - if (status == STATUS_SUCCESS) { - sxg_mcast_set_mask(adapter); - } - } - return; -#endif } static void sxg_unmap_mmio_space(struct adapter_t *adapter) @@ -3007,7 +2956,7 @@ void SxgFreeResources(struct adapter_t * /* Free event queues. 
*/ if (adapter->EventRings) { pci_free_consistent(adapter->pcidev, - sizeof(struct SXG_EVENT_RING) * RssIds, + sizeof(struct sxg_event_ring) * RssIds, adapter->EventRings, adapter->PEventRings); } if (adapter->Isr) { @@ -3086,7 +3035,7 @@ void SxgFreeResources(struct adapter_t * static void sxg_allocate_complete(struct adapter_t *adapter, void *VirtualAddress, dma_addr_t PhysicalAddress, - u32 Length, enum SXG_BUFFER_TYPE Context) + u32 Length, enum sxg_buffer_type Context) { SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocCmp", adapter, VirtualAddress, Length, Context); @@ -3101,7 +3050,7 @@ static void sxg_allocate_complete(struct PhysicalAddress, Length); break; case SXG_BUFFER_TYPE_SGL: - sxg_allocate_sgl_buffer_complete(adapter, (struct SXG_SCATTER_GATHER*) + sxg_allocate_sgl_buffer_complete(adapter, (struct sxg_scatter_gather *) VirtualAddress, PhysicalAddress, Length); break; @@ -3123,7 +3072,7 @@ static void sxg_allocate_complete(struct * int */ static int sxg_allocate_buffer_memory(struct adapter_t *adapter, - u32 Size, enum SXG_BUFFER_TYPE BufferType) + u32 Size, enum sxg_buffer_type BufferType) { int status; void *Buffer; @@ -3182,11 +3131,11 @@ static void sxg_allocate_rcvblock_comple u32 i; u32 BufferSize = adapter->ReceiveBufferSize; u64 Paddr; - struct SXG_RCV_BLOCK_HDR *RcvBlockHdr; + struct sxg_rcv_block_hdr *RcvBlockHdr; unsigned char *RcvDataBuffer; - struct SXG_RCV_DATA_BUFFER_HDR *RcvDataBufferHdr; - struct SXG_RCV_DESCRIPTOR_BLOCK *RcvDescriptorBlock; - struct SXG_RCV_DESCRIPTOR_BLOCK_HDR *RcvDescriptorBlockHdr; + struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr; + struct sxg_rcv_descriptor_block *RcvDescriptorBlock; + struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr; SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlRcvBlk", adapter, RcvBlock, Length, 0); @@ -3212,7 +3161,7 @@ static void sxg_allocate_rcvblock_comple i++, Paddr += BufferSize, RcvDataBuffer += BufferSize) { /* */ RcvDataBufferHdr = - (struct SXG_RCV_DATA_BUFFER_HDR*) (RcvDataBuffer + + (struct sxg_rcv_data_buffer_hdr*) (RcvDataBuffer + SXG_RCV_DATA_BUFFER_HDR_OFFSET (BufferSize)); RcvDataBufferHdr->VirtualAddress = RcvDataBuffer; @@ -3234,7 +3183,7 @@ static void sxg_allocate_rcvblock_comple /* Place this entire block of memory on the AllRcvBlocks queue so it can be */ /* free later */ RcvBlockHdr = - (struct SXG_RCV_BLOCK_HDR*) ((unsigned char *)RcvBlock + + (struct sxg_rcv_block_hdr*) ((unsigned char *)RcvBlock + SXG_RCV_BLOCK_HDR_OFFSET(BufferSize)); RcvBlockHdr->VirtualAddress = RcvBlock; RcvBlockHdr->PhysicalAddress = PhysicalAddress; @@ -3248,7 +3197,7 @@ static void sxg_allocate_rcvblock_comple for (i = 0, Paddr = PhysicalAddress; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; i++, Paddr += BufferSize, RcvDataBuffer += BufferSize) { - RcvDataBufferHdr = (struct SXG_RCV_DATA_BUFFER_HDR*) (RcvDataBuffer + + RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr*) (RcvDataBuffer + SXG_RCV_DATA_BUFFER_HDR_OFFSET (BufferSize)); spin_lock(&adapter->RcvQLock); @@ -3258,11 +3207,11 @@ static void sxg_allocate_rcvblock_comple /* Locate the descriptor block and put it on a separate free queue */ RcvDescriptorBlock = - (struct SXG_RCV_DESCRIPTOR_BLOCK*) ((unsigned char *)RcvBlock + + (struct sxg_rcv_descriptor_block *) ((unsigned char *)RcvBlock + SXG_RCV_DESCRIPTOR_BLOCK_OFFSET (BufferSize)); RcvDescriptorBlockHdr = - (struct SXG_RCV_DESCRIPTOR_BLOCK_HDR*) ((unsigned char *)RcvBlock + + (struct sxg_rcv_descriptor_block_hdr *) ((unsigned char *)RcvBlock + SXG_RCV_DESCRIPTOR_BLOCK_HDR_OFFSET 
(BufferSize)); RcvDescriptorBlockHdr->VirtualAddress = RcvDescriptorBlock; @@ -3280,7 +3229,7 @@ static void sxg_allocate_rcvblock_comple for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; i++, RcvDataBuffer += BufferSize) { RcvDataBufferHdr = - (struct SXG_RCV_DATA_BUFFER_HDR*) (RcvDataBuffer + + (struct sxg_rcv_data_buffer_hdr *) (RcvDataBuffer + SXG_RCV_DATA_BUFFER_HDR_OFFSET (BufferSize)); SXG_FREE_RCV_PACKET(RcvDataBufferHdr); @@ -3300,7 +3249,7 @@ static void sxg_allocate_rcvblock_comple * * Arguments - * adapter - A pointer to our adapter structure - * SxgSgl - SXG_SCATTER_GATHER buffer + * SxgSgl - struct sxg_scatter_gather buffer * PhysicalAddress - Physical address * Length - Memory length * @@ -3308,7 +3257,7 @@ static void sxg_allocate_rcvblock_comple * */ static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter, - struct SXG_SCATTER_GATHER *SxgSgl, + struct sxg_scatter_gather *SxgSgl, dma_addr_t PhysicalAddress, u32 Length) { @@ -3316,7 +3265,7 @@ static void sxg_allocate_sgl_buffer_comp adapter, SxgSgl, Length, 0); spin_lock(&adapter->SglQLock); adapter->AllSglBufferCount++; - memset(SxgSgl, 0, sizeof(struct SXG_SCATTER_GATHER*)); + memset(SxgSgl, 0, sizeof(struct sxg_scatter_gather)); SxgSgl->PhysicalAddress = PhysicalAddress; /* *PhysicalAddress; */ SxgSgl->adapter = adapter; /* Initialize backpointer once */ InsertTailList(&adapter->AllSglBuffers, &SxgSgl->AllList); @@ -3335,7 +3284,7 @@ static void sxg_adapter_set_hwaddr(struc /* */ /* sxg_dbg_macaddrs(adapter); */ - memcpy(adapter->macaddr, temp_mac_address, sizeof(struct SXG_CONFIG_MAC)); + memcpy(adapter->macaddr, temp_mac_address, sizeof(struct sxg_config_mac)); /* DBG_ERROR ("%s AFTER copying from config.macinfo into currmacaddr\n", __func__); */ /* sxg_dbg_macaddrs(adapter); */ if (!(adapter->currmacaddr[0] || @@ -3355,7 +3304,7 @@ static void sxg_adapter_set_hwaddr(struc } #if XXXTODO -static int sxg_mac_set_address(p_net_device dev, void *ptr) +static int sxg_mac_set_address(struct net_device *dev, void *ptr) { struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev); struct sockaddr *addr = ptr; @@ -3413,7 +3362,7 @@ static int sxg_initialize_adapter(struct /* Sanity check SXG_UCODE_REGS structure definition to */ /* make sure the length is correct */ - ASSERT(sizeof(struct SXG_UCODE_REGS) == SXG_REGISTER_SIZE_PER_CPU); + ASSERT(sizeof(struct sxg_ucode_regs) == SXG_REGISTER_SIZE_PER_CPU); /* Disable interrupts */ SXG_DISABLE_ALL_INTERRUPTS(adapter); @@ -3500,15 +3449,15 @@ static int sxg_initialize_adapter(struct * status */ static int sxg_fill_descriptor_block(struct adapter_t *adapter, - struct SXG_RCV_DESCRIPTOR_BLOCK_HDR + struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr) { u32 i; - struct SXG_RING_INFO *RcvRingInfo = &adapter->RcvRingZeroInfo; - struct SXG_RCV_DATA_BUFFER_HDR *RcvDataBufferHdr; - struct SXG_RCV_DESCRIPTOR_BLOCK *RcvDescriptorBlock; - struct SXG_CMD *RingDescriptorCmd; - struct SXG_RCV_RING *RingZero = &adapter->RcvRings[0]; + struct sxg_ring_info *RcvRingInfo = &adapter->RcvRingZeroInfo; + struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr; + struct sxg_rcv_descriptor_block *RcvDescriptorBlock; + struct sxg_cmd *RingDescriptorCmd; + struct sxg_rcv_ring *RingZero = &adapter->RcvRings[0]; SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "FilBlk", adapter, adapter->RcvBuffersOnCard, @@ -3529,7 +3478,7 @@ static int sxg_fill_descriptor_block(str ASSERT(RingDescriptorCmd); RcvDescriptorBlockHdr->State = SXG_BUFFER_ONCARD; RcvDescriptorBlock = - (struct 
SXG_RCV_DESCRIPTOR_BLOCK*) RcvDescriptorBlockHdr->VirtualAddress; + (struct sxg_rcv_descriptor_block *) RcvDescriptorBlockHdr->VirtualAddress; /* Fill in the descriptor block */ for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; i++) { @@ -3579,7 +3528,7 @@ static int sxg_fill_descriptor_block(str */ static void sxg_stock_rcv_buffers(struct adapter_t *adapter) { - struct SXG_RCV_DESCRIPTOR_BLOCK_HDR *RcvDescriptorBlockHdr; + struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr; SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "StockBuf", adapter, adapter->RcvBuffersOnCard, @@ -3600,14 +3549,14 @@ static void sxg_stock_rcv_buffers(struct /* Now grab the RcvQLock lock and proceed */ spin_lock(&adapter->RcvQLock); while (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) { - struct LIST_ENTRY *_ple; + struct list_entry *_ple; /* Get a descriptor block */ RcvDescriptorBlockHdr = NULL; if (adapter->FreeRcvBlockCount) { _ple = RemoveHeadList(&adapter->FreeRcvBlocks); RcvDescriptorBlockHdr = - container_of(_ple, struct SXG_RCV_DESCRIPTOR_BLOCK_HDR, + container_of(_ple, struct sxg_rcv_descriptor_block_hdr, FreeList); adapter->FreeRcvBlockCount--; RcvDescriptorBlockHdr->State = SXG_BUFFER_BUSY; @@ -3647,10 +3596,10 @@ static void sxg_stock_rcv_buffers(struct static void sxg_complete_descriptor_blocks(struct adapter_t *adapter, unsigned char Index) { - struct SXG_RCV_RING *RingZero = &adapter->RcvRings[0]; - struct SXG_RING_INFO *RcvRingInfo = &adapter->RcvRingZeroInfo; - struct SXG_RCV_DESCRIPTOR_BLOCK_HDR *RcvDescriptorBlockHdr; - struct SXG_CMD *RingDescriptorCmd; + struct sxg_rcv_ring *RingZero = &adapter->RcvRings[0]; + struct sxg_ring_info *RcvRingInfo = &adapter->RcvRingZeroInfo; + struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr; + struct sxg_cmd *RingDescriptorCmd; SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlks", adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail); --- a/drivers/staging/sxg/sxgdbg.h +++ b/drivers/staging/sxg/sxgdbg.h @@ -79,7 +79,7 @@ extern ulong ATKTimerDiv; /* - * trace_entry_t - + * trace_entry - * * This structure defines an entry in the trace buffer. The * first few fields mean the same from entry to entry, while @@ -87,7 +87,7 @@ extern ulong ATKTimerDiv; * needs of the trace entry. Typically they are function call * parameters. 
*/ -struct trace_entry_t { +struct trace_entry { char name[8]; /* 8 character name - like 's'i'm'b'a'r'c'v' */ u32 time; /* Current clock tic */ unsigned char cpu; /* Current CPU */ @@ -101,7 +101,7 @@ struct trace_entry_t { }; /* - * Driver types for driver field in trace_entry_t + * Driver types for driver field in struct trace_entry */ #define TRACE_SXG 1 #define TRACE_VPCI 2 @@ -109,12 +109,12 @@ struct trace_entry_t { #define TRACE_ENTRIES 1024 -struct sxg_trace_buffer_t { +struct sxg_trace_buffer { unsigned int size; /* aid for windbg extension */ unsigned int in; /* Where to add */ unsigned int level; /* Current Trace level */ spinlock_t lock; /* For MP tracing */ - struct trace_entry_t entries[TRACE_ENTRIES];/* The circular buffer */ + struct trace_entry entries[TRACE_ENTRIES];/* The circular buffer */ }; /* @@ -137,7 +137,7 @@ struct sxg_trace_buffer_t { #if ATK_TRACE_ENABLED #define SXG_TRACE_INIT(buffer, tlevel) \ { \ - memset((buffer), 0, sizeof(struct sxg_trace_buffer_t)); \ + memset((buffer), 0, sizeof(struct sxg_trace_buffer)); \ (buffer)->level = (tlevel); \ (buffer)->size = TRACE_ENTRIES; \ spin_lock_init(&(buffer)->lock); \ @@ -154,7 +154,7 @@ struct sxg_trace_buffer_t { if ((buffer) && ((buffer)->level >= (tlevel))) { \ unsigned int trace_irql = 0; /* ?????? FIX THIS */ \ unsigned int trace_len; \ - struct trace_entry_t *trace_entry; \ + struct trace_entry *trace_entry; \ struct timeval timev; \ \ spin_lock(&(buffer)->lock); \ --- a/drivers/staging/sxg/sxg.h +++ b/drivers/staging/sxg/sxg.h @@ -43,9 +43,9 @@ #define __SXG_DRIVER_H__ #define p_net_device struct net_device * -// SXG_STATS - Probably move these to someplace where +// struct sxg_stats - Probably move these to someplace where // the slicstat (sxgstat?) program can get them. -struct SXG_STATS { +struct sxg_stats { // Xmt u32 XmtNBL; // Offload send NBL count u64 DumbXmtBytes; // Dumbnic send bytes @@ -183,7 +183,7 @@ struct SXG_STATS { {} /*_NdisReinitializePacket(_Packet)*/ /* this is not necessary with an skb */ // Definitions to initialize Dumb-nic Receive NBLs -#define SXG_RCV_PACKET_BUFFER_HDR(_Packet) (((PSXG_RCV_NBL_RESERVED)((_Packet)->MiniportReservedEx))->RcvDataBufferHdr) +#define SXG_RCV_PACKET_BUFFER_HDR(_Packet) (((struct sxg_rcv_nbl_reserved *)((_Packet)->MiniportReservedEx))->RcvDataBufferHdr) #define SXG_RCV_SET_CHECKSUM_INFO(_Packet, _Cpi) \ NDIS_PER_PACKET_INFO_FROM_PACKET((_Packet), TcpIpChecksumPacketInfo) = (PVOID)(_Cpi) @@ -215,12 +215,12 @@ struct SXG_STATS { /////////////////////////////////////////////////////////////////////////////// // NOTE - Lock must be held with RCV macros #define SXG_GET_RCV_DATA_BUFFER(_pAdapt, _Hdr) { \ - struct LIST_ENTRY *_ple; \ + struct list_entry *_ple; \ _Hdr = NULL; \ if((_pAdapt)->FreeRcvBufferCount) { \ ASSERT(!(IsListEmpty(&(_pAdapt)->FreeRcvBuffers))); \ _ple = RemoveHeadList(&(_pAdapt)->FreeRcvBuffers); \ - (_Hdr) = container_of(_ple, struct SXG_RCV_DATA_BUFFER_HDR, FreeList); \ + (_Hdr) = container_of(_ple, struct sxg_rcv_data_buffer_hdr, FreeList); \ (_pAdapt)->FreeRcvBufferCount--; \ ASSERT((_Hdr)->State == SXG_BUFFER_FREE); \ } \ @@ -263,12 +263,12 @@ struct SXG_STATS { // until after that. We're dealing with round numbers here, so we don't need to, // and not grabbing it avoids a possible double-trip. 
#define SXG_GET_SGL_BUFFER(_pAdapt, _Sgl) { \ - struct LIST_ENTRY *_ple; \ + struct list_entry *_ple; \ if ((_pAdapt->FreeSglBufferCount < SXG_MIN_SGL_BUFFERS) && \ (_pAdapt->AllSglBufferCount < SXG_MAX_SGL_BUFFERS) && \ (_pAdapt->AllocationsPending == 0)) { \ sxg_allocate_buffer_memory(_pAdapt, \ - (sizeof(struct SXG_SCATTER_GATHER) + SXG_SGL_BUF_SIZE),\ + (sizeof(struct sxg_scatter_gather) + SXG_SGL_BUF_SIZE),\ SXG_BUFFER_TYPE_SGL); \ } \ _Sgl = NULL; \ @@ -276,7 +276,7 @@ struct SXG_STATS { if((_pAdapt)->FreeSglBufferCount) { \ ASSERT(!(IsListEmpty(&(_pAdapt)->FreeSglBuffers))); \ _ple = RemoveHeadList(&(_pAdapt)->FreeSglBuffers); \ - (_Sgl) = container_of(_ple, struct SXG_SCATTER_GATHER, FreeList); \ + (_Sgl) = container_of(_ple, struct sxg_scatter_gather, FreeList); \ (_pAdapt)->FreeSglBufferCount--; \ ASSERT((_Sgl)->State == SXG_BUFFER_FREE); \ (_Sgl)->State = SXG_BUFFER_BUSY; \ @@ -286,12 +286,12 @@ struct SXG_STATS { } // -// SXG_MULTICAST_ADDRESS +// struct sxg_multicast_address // // Linked list of multicast addresses. -struct SXG_MULTICAST_ADDRESS { +struct sxg_multicast_address { unsigned char Address[6]; - struct SXG_MULTICAST_ADDRESS *Next; + struct sxg_multicast_address *Next; }; // Structure to maintain chimney send and receive buffer queues. @@ -299,7 +299,7 @@ struct SXG_MULTICAST_ADDRESS { // given to us via the Chimney MiniportTcpOffloadSend and // MiniportTcpOffloadReceive routines. This structure DOES NOT // manage our data buffer queue -struct SXG_BUFFER_QUEUE { +struct sxg_buffer_queue { u32 Type; // Slow or fast - See below u32 Direction; // Xmt or Rcv u32 Bytes; // Byte count @@ -380,11 +380,11 @@ enum SXG_UCODE_SEL { #define SXG_ERROR DPFLTR_ERROR_LEVEL // -// SXG_DRIVER structure - +// struct sxg_driver structure - // // contains information about the sxg driver. There is only // one of these, and it is defined as a global. -struct SXG_DRIVER { +struct sxg_driver { struct adapter_t *Adapters; // Linked list of adapters ushort AdapterID; // Maintain unique adapter ID }; @@ -416,9 +416,9 @@ struct SXG_DRIVER { #define MIN(a, b) ((u32)(a) < (u32)(b) ? (a) : (b)) #define MAX(a, b) ((u32)(a) > (u32)(b) ? 
(a) : (b)) -struct mcast_address_t { +struct mcast_address { unsigned char address[6]; - struct mcast_address_t *next; + struct mcast_address *next; }; #define CARD_DOWN 0x00000000 @@ -481,27 +481,27 @@ struct ether_header { #define NUM_CFG_SPACES 2 #define NUM_CFG_REGS 64 -struct physcard_t { +struct physcard { struct adapter_t *adapter[SLIC_MAX_PORTS]; - struct physcard_t *next; + struct physcard *next; unsigned int adapters_allocd; }; -struct sxgbase_driver_t { +struct sxgbase_driver { spinlock_t driver_lock; unsigned long flags; /* irqsave for spinlock */ u32 num_sxg_cards; u32 num_sxg_ports; u32 num_sxg_ports_active; u32 dynamic_intagg; - struct physcard_t *phys_card; + struct physcard *phys_card; }; struct adapter_t { void * ifp; unsigned int port; - struct physcard_t *physcard; + struct physcard *physcard; unsigned int physport; unsigned int slotnumber; unsigned int functionnumber; @@ -525,7 +525,7 @@ struct adapter_t { u32 macopts; ushort devflags_prev; u64 mcastmask; - struct mcast_address_t *mcastaddrs; + struct mcast_address *mcastaddrs; struct timer_list pingtimer; u32 pingtimerset; struct timer_list statstimer; @@ -567,44 +567,44 @@ struct adapter_t { u32 PowerState; // NDIS power state struct adapter_t *Next; // Linked list ushort AdapterID; // 1..n - p_net_device netdev; - p_net_device next_netdevice; + struct net_device * netdev; + struct net_device * next_netdevice; struct pci_dev * pcidev; - struct SXG_MULTICAST_ADDRESS *MulticastAddrs; // Multicast list + struct sxg_multicast_address *MulticastAddrs; // Multicast list u64 MulticastMask; // Multicast mask u32 * InterruptHandle; // Register Interrupt handle u32 InterruptLevel; // From Resource list u32 InterruptVector; // From Resource list spinlock_t AdapterLock; /* Serialize access adapter routines */ spinlock_t Bit64RegLock; /* For writing 64-bit addresses */ - struct SXG_HW_REGS *HwRegs; // Sahara HW Register Memory (BAR0/1) - struct SXG_UCODE_REGS *UcodeRegs; // Microcode Register Memory (BAR2/3) - struct SXG_TCB_REGS *TcbRegs; // Same as Ucode regs - See sxghw.h + struct sxg_hw_regs *HwRegs; // Sahara HW Register Memory (BAR0/1) + struct sxg_ucode_regs *UcodeRegs; // Microcode Register Memory (BAR2/3) + struct sxg_tcb_regs *TcbRegs; // Same as Ucode regs - See sxghw.h ushort FrameSize; // Maximum frame size u32 * DmaHandle; // NDIS DMA handle u32 * PacketPoolHandle; // Used with NDIS 5.2 only. Don't ifdef out u32 * BufferPoolHandle; // Used with NDIS 5.2 only. Don't ifdef out u32 MacFilter; // NDIS MAC Filter - struct SXG_EVENT_RING *EventRings; // Host event rings. 1/CPU to 16 max + struct sxg_event_ring *EventRings; // Host event rings. 
1/CPU to 16 max dma_addr_t PEventRings; // Physical address u32 NextEvent[SXG_MAX_RSS]; // Current location in ring dma_addr_t PTcbBuffers; // TCB Buffers - physical address dma_addr_t PTcbCompBuffers; // TCB Composite Buffers - phys addr - struct SXG_XMT_RING *XmtRings; // Transmit rings + struct sxg_xmt_ring *XmtRings; // Transmit rings dma_addr_t PXmtRings; // Transmit rings - physical address - struct SXG_RING_INFO XmtRingZeroInfo; // Transmit ring 0 info + struct sxg_ring_info XmtRingZeroInfo; // Transmit ring 0 info spinlock_t XmtZeroLock; /* Transmit ring 0 lock */ u32 * XmtRingZeroIndex; // Shared XMT ring 0 index dma_addr_t PXmtRingZeroIndex; // Shared XMT ring 0 index - physical - struct LIST_ENTRY FreeProtocolHeaders;// Free protocol headers + struct list_entry FreeProtocolHeaders;// Free protocol headers u32 FreeProtoHdrCount; // Count void * ProtocolHeaders; // Block of protocol header dma_addr_t PProtocolHeaders; // Block of protocol headers - phys - struct SXG_RCV_RING *RcvRings; // Receive rings + struct sxg_rcv_ring *RcvRings; // Receive rings dma_addr_t PRcvRings; // Receive rings - physical address - struct SXG_RING_INFO RcvRingZeroInfo; // Receive ring 0 info + struct sxg_ring_info RcvRingZeroInfo; // Receive ring 0 info u32 * Isr; // Interrupt status register dma_addr_t PIsr; // ISR - physical address @@ -618,9 +618,9 @@ struct adapter_t { u32 HashInformation; // Receive buffer queues spinlock_t RcvQLock; /* Receive Queue Lock */ - struct LIST_ENTRY FreeRcvBuffers; // Free SXG_DATA_BUFFER queue - struct LIST_ENTRY FreeRcvBlocks; // Free SXG_RCV_DESCRIPTOR_BLOCK Q - struct LIST_ENTRY AllRcvBlocks; // All SXG_RCV_BLOCKs + struct list_entry FreeRcvBuffers; // Free SXG_DATA_BUFFER queue + struct list_entry FreeRcvBlocks; // Free SXG_RCV_DESCRIPTOR_BLOCK Q + struct list_entry AllRcvBlocks; // All SXG_RCV_BLOCKs ushort FreeRcvBufferCount; // Number of free rcv data buffers ushort FreeRcvBlockCount; // # of free rcv descriptor blocks ushort AllRcvBlockCount; // Number of total receive blocks @@ -629,8 +629,8 @@ struct adapter_t { u32 RcvBuffersOnCard; // SXG_DATA_BUFFERS owned by card // SGL buffers spinlock_t SglQLock; /* SGL Queue Lock */ - struct LIST_ENTRY FreeSglBuffers; // Free SXG_SCATTER_GATHER - struct LIST_ENTRY AllSglBuffers; // All SXG_SCATTER_GATHER + struct list_entry FreeSglBuffers; // Free SXG_SCATTER_GATHER + struct list_entry AllSglBuffers; // All SXG_SCATTER_GATHER ushort FreeSglBufferCount; // Number of free SGL buffers ushort AllSglBufferCount; // Number of total SGL buffers u32 CurrentTime; // Tick count @@ -652,7 +652,7 @@ struct adapter_t { // Stats u32 PendingRcvCount; // Outstanding rcv indications u32 PendingXmtCount; // Outstanding send requests - struct SXG_STATS Stats; // Statistics + struct sxg_stats Stats; // Statistics u32 ReassBufs; // Number of reassembly buffers // Card Crash Info ushort CrashLocation; // Microcode crash location --- a/drivers/staging/sxg/sxghif.h +++ b/drivers/staging/sxg/sxghif.h @@ -12,7 +12,7 @@ /******************************************************************************* * UCODE Registers *******************************************************************************/ -struct SXG_UCODE_REGS { +struct sxg_ucode_regs { // Address 0 - 0x3F = Command codes 0-15 for TCB 0. Excode 0 u32 Icr; // Code = 0 (extended), ExCode = 0 - Int control u32 RsvdReg1; // Code = 1 - TOE -NA @@ -180,9 +180,9 @@ struct SXG_UCODE_REGS { * above, but defined differently. 
Bits 17:06 of the address define the TCB, * which means each TCB area occupies 0x40 (64) bytes, or 16 u32S. What really * is happening is that these registers occupy the "PadEx[15]" areas in the - * SXG_UCODE_REGS definition above + * struct sxg_ucode_regs definition above */ -struct SXG_TCB_REGS { +struct sxg_tcb_regs { u32 ExCode; /* Extended codes - see SXG_UCODE_REGS */ u32 Xmt; /* Code = 1 - # of Xmt descriptors added to ring */ u32 Rcv; /* Code = 2 - # of Rcv descriptors added to ring */ @@ -286,7 +286,7 @@ struct SXG_TCB_REGS { * */ #pragma pack(push, 1) -struct SXG_EVENT { +struct sxg_event { u32 Pad[1]; // not used u32 SndUna; // SndUna value u32 Resid; // receive MDL resid @@ -335,8 +335,8 @@ struct SXG_EVENT { #define EVENT_RING_BATCH 16 // Hand entries back 16 at a time. #define EVENT_BATCH_LIMIT 256 // Stop processing events after 4096 (256 * 16) -struct SXG_EVENT_RING { - struct SXG_EVENT Ring[EVENT_RING_SIZE]; +struct sxg_event_ring { + struct sxg_event Ring[EVENT_RING_SIZE]; }; /*************************************************************************** @@ -414,7 +414,7 @@ struct SXG_EVENT_RING { #define SXG_MAX_ENTRIES 4096 // Structure and macros to manage a ring -struct SXG_RING_INFO { +struct sxg_ring_info { unsigned char Head; // Where we add entries - Note unsigned char:RING_SIZE unsigned char Tail; // Where we pull off completed entries ushort Size; // Ring size - Must be multiple of 2 @@ -495,7 +495,7 @@ struct SXG_RING_INFO { * |_________|_________|_________|_________|28 0x1c */ #pragma pack(push, 1) -struct SXG_CMD { +struct sxg_cmd { dma_addr_t Sgl; // Physical address of SGL union { struct { @@ -536,7 +536,7 @@ struct SXG_CMD { #pragma pack(pop) #pragma pack(push, 1) -struct VLAN_HDR { +struct vlan_hdr { ushort VlanTci; ushort VlanTpid; }; @@ -578,19 +578,19 @@ struct VLAN_HDR { #define SXG_SLOWCMD_CSUM_TCP 0x02 // Checksum TCP #define SXG_SLOWCMD_LSO 0x04 // Large segment send -struct SXG_XMT_RING { - struct SXG_CMD Descriptors[SXG_XMT_RING_SIZE]; +struct sxg_xmt_ring { + struct sxg_cmd Descriptors[SXG_XMT_RING_SIZE]; }; -struct SXG_RCV_RING { - struct SXG_CMD Descriptors[SXG_RCV_RING_SIZE]; +struct sxg_rcv_ring { + struct sxg_cmd Descriptors[SXG_RCV_RING_SIZE]; }; /*************************************************************************** * Share memory buffer types - Used to identify asynchronous * shared memory allocation ***************************************************************************/ -enum SXG_BUFFER_TYPE { +enum sxg_buffer_type { SXG_BUFFER_TYPE_RCV, // Receive buffer SXG_BUFFER_TYPE_SGL // SGL buffer }; @@ -611,32 +611,32 @@ enum SXG_BUFFER_TYPE { * DMA data into, and a virtual address, which is given back * to the host in the "HostHandle" portion of an event. * The receive descriptor data structure is defined below - * as SXG_RCV_DATA_DESCRIPTOR, and the corresponding block - * is defined as SXG_RCV_DESCRIPTOR_BLOCK. + * as sxg_rcv_data_descriptor, and the corresponding block + * is defined as sxg_rcv_descriptor_block. * * This receive descriptor block is given to the card by filling - * in the Sgl field of a SXG_CMD entry from pAdapt->RcvRings[0] + * in the Sgl field of a sxg_cmd entry from pAdapt->RcvRings[0] * with the physical address of the receive descriptor block. * * Both the receive buffers and the receive descriptor blocks * require additional data structures to maintain them * on a free queue and contain other information associated with them. 
- * Those data structures are defined as the SXG_RCV_DATA_BUFFER_HDR - * and SXG_RCV_DESCRIPTOR_BLOCK_HDR respectively. + * Those data structures are defined as the sxg_rcv_data_buffer_hdr + * and sxg_rcv_descriptor_block_hdr respectively. * * Since both the receive buffers and the receive descriptor block * must be accessible by the card, both must be allocated out of * shared memory. To ensure that we always have a descriptor * block available for every 128 buffers, we allocate all of * these resources together in a single block. This entire - * block is managed by a SXG_RCV_BLOCK_HDR, who's sole purpose + * block is managed by a struct sxg_rcv_block_hdr, who's sole purpose * is to maintain address information so that the entire block * can be free later. * * Further complicating matters is the fact that the receive * buffers must be variable in length in order to accomodate * jumbo frame configurations. We configure the buffer - * length so that the buffer and it's corresponding SXG_RCV_DATA_BUFFER_HDR + * length so that the buffer and it's corresponding struct sxg_rcv_data_buffer_hdr * structure add up to an even boundary. Then we place the * remaining data structures after 128 of them as shown in * the following diagram: @@ -646,35 +646,35 @@ enum SXG_BUFFER_TYPE { * | Variable length receive buffer #1 | * |_________________________________________| * | | - * | SXG_RCV_DATA_BUFFER_HDR #1 | + * | sxg_rcv_data_buffer_hdr #1 | * |_________________________________________| <== Even 2k or 10k boundary * | | * | ... repeat 2-128 .. | * |_________________________________________| * | | - * | SXG_RCV_DESCRIPTOR_BLOCK | - * | Contains SXG_RCV_DATA_DESCRIPTOR * 128 | + * | struct sxg_rcv_descriptor_block | + * | Contains sxg_rcv_data_descriptor * 128 | * |_________________________________________| * | | - * | SXG_RCV_DESCRIPTOR_BLOCK_HDR | + * | struct sxg_rcv_descriptor_block_hdr | * |_________________________________________| * | | - * | SXG_RCV_BLOCK_HDR | + * | struct sxg_rcv_block_hdr | * |_________________________________________| * * Memory consumption: * Non-jumbo: - * Buffers and SXG_RCV_DATA_BUFFER_HDR = 2k * 128 = 256k - * + SXG_RCV_DESCRIPTOR_BLOCK = 2k - * + SXG_RCV_DESCRIPTOR_BLOCK_HDR = ~32 - * + SXG_RCV_BLOCK_HDR = ~32 + * Buffers and sxg_rcv_data_buffer_hdr = 2k * 128 = 256k + * + struct sxg_rcv_descriptor_block = 2k + * + struct sxg_rcv_descriptor_block_hdr = ~32 + * + struct sxg_rcv_block_hdr = ~32 * => Total = ~258k/block * * Jumbo: - * Buffers and SXG_RCV_DATA_BUFFER_HDR = 10k * 128 = 1280k - * + SXG_RCV_DESCRIPTOR_BLOCK = 2k - * + SXG_RCV_DESCRIPTOR_BLOCK_HDR = ~32 - * + SXG_RCV_BLOCK_HDR = ~32 + * Buffers and sxg_rcv_data_buffer_hdr = 10k * 128 = 1280k + * + struct sxg_rcv_descriptor_block = 2k + * + struct sxg_rcv_descriptor_block_hdr = ~32 + * + struct sxg_rcv_block_hdr = ~32 * => Total = ~1282k/block * ***************************************************************************/ @@ -684,29 +684,29 @@ enum SXG_BUFFER_TYPE { #define SXG_MAX_RCV_BLOCKS 256 // = 32k receive buffers // Receive buffer header -struct SXG_RCV_DATA_BUFFER_HDR { +struct sxg_rcv_data_buffer_hdr { dma_addr_t PhysicalAddress; // Buffer physical address // Note - DO NOT USE the VirtualAddress field to locate data. // Use the sxg.h:SXG_RECEIVE_DATA_LOCATION macro instead. 
void *VirtualAddress; // Start of buffer u32 Size; // Buffer size - struct SXG_RCV_DATA_BUFFER_HDR *Next; // Fastpath data buffer queue - struct LIST_ENTRY FreeList; // Free queue of buffers + struct sxg_rcv_data_buffer_hdr *Next; // Fastpath data buffer queue + struct list_entry FreeList; // Free queue of buffers unsigned char State; // See SXG_BUFFER state above unsigned char Status; // Event status (to log PUSH) struct sk_buff *skb; // Double mapped (nbl and pkt) }; // SxgSlowReceive uses the PACKET (skb) contained -// in the SXG_RCV_DATA_BUFFER_HDR when indicating dumb-nic data +// in the struct sxg_rcv_data_buffer_hdr when indicating dumb-nic data #define SxgDumbRcvPacket skb -#define SXG_RCV_DATA_HDR_SIZE 256 // Space for SXG_RCV_DATA_BUFFER_HDR +#define SXG_RCV_DATA_HDR_SIZE 256 // Space for struct sxg_rcv_data_buffer_hdr #define SXG_RCV_DATA_BUFFER_SIZE 2048 // Non jumbo = 2k including HDR #define SXG_RCV_JUMBO_BUFFER_SIZE 10240 // jumbo = 10k including HDR // Receive data descriptor -struct SXG_RCV_DATA_DESCRIPTOR { +struct sxg_rcv_data_descriptor { union { struct sk_buff *VirtualAddress; // Host handle u64 ForceTo8Bytes; // Force x86 to 8-byte boundary @@ -718,31 +718,31 @@ struct SXG_RCV_DATA_DESCRIPTOR { #define SXG_RCV_DESCRIPTORS_PER_BLOCK 128 #define SXG_RCV_DESCRIPTOR_BLOCK_SIZE 2048 // For sanity check -struct SXG_RCV_DESCRIPTOR_BLOCK { - struct SXG_RCV_DATA_DESCRIPTOR Descriptors[SXG_RCV_DESCRIPTORS_PER_BLOCK]; +struct sxg_rcv_descriptor_block { + struct sxg_rcv_data_descriptor Descriptors[SXG_RCV_DESCRIPTORS_PER_BLOCK]; }; // Receive descriptor block header -struct SXG_RCV_DESCRIPTOR_BLOCK_HDR { +struct sxg_rcv_descriptor_block_hdr { void *VirtualAddress; // Start of 2k buffer dma_addr_t PhysicalAddress; // ..and it's physical address - struct LIST_ENTRY FreeList; // Free queue of descriptor blocks + struct list_entry FreeList; // Free queue of descriptor blocks unsigned char State; // See SXG_BUFFER state above }; // Receive block header -struct SXG_RCV_BLOCK_HDR { +struct sxg_rcv_block_hdr { void *VirtualAddress; // Start of virtual memory dma_addr_t PhysicalAddress; // ..and it's physical address - struct LIST_ENTRY AllList; // Queue of all SXG_RCV_BLOCKS + struct list_entry AllList; // Queue of all SXG_RCV_BLOCKS }; // Macros to determine data structure offsets into receive block #define SXG_RCV_BLOCK_SIZE(_Buffersize) \ (((_Buffersize) * SXG_RCV_DESCRIPTORS_PER_BLOCK) + \ - (sizeof(struct SXG_RCV_DESCRIPTOR_BLOCK)) + \ - (sizeof(struct SXG_RCV_DESCRIPTOR_BLOCK_HDR)) + \ - (sizeof(struct SXG_RCV_BLOCK_HDR))) + (sizeof(struct sxg_rcv_descriptor_block)) + \ + (sizeof(struct sxg_rcv_descriptor_block_hdr)) + \ + (sizeof(struct sxg_rcv_block_hdr))) #define SXG_RCV_BUFFER_DATA_SIZE(_Buffersize) \ ((_Buffersize) - SXG_RCV_DATA_HDR_SIZE) #define SXG_RCV_DATA_BUFFER_HDR_OFFSET(_Buffersize) \ @@ -751,11 +751,11 @@ struct SXG_RCV_BLOCK_HDR { ((_Buffersize) * SXG_RCV_DESCRIPTORS_PER_BLOCK) #define SXG_RCV_DESCRIPTOR_BLOCK_HDR_OFFSET(_Buffersize) \ (((_Buffersize) * SXG_RCV_DESCRIPTORS_PER_BLOCK) + \ - (sizeof(struct SXG_RCV_DESCRIPTOR_BLOCK))) + (sizeof(struct sxg_rcv_descriptor_block))) #define SXG_RCV_BLOCK_HDR_OFFSET(_Buffersize) \ (((_Buffersize) * SXG_RCV_DESCRIPTORS_PER_BLOCK) + \ - (sizeof(struct SXG_RCV_DESCRIPTOR_BLOCK)) + \ - (sizeof(struct SXG_RCV_DESCRIPTOR_BLOCK_HDR))) + (sizeof(struct sxg_rcv_descriptor_block)) + \ + (sizeof(struct sxg_rcv_descriptor_block_hdr))) /*************************************************************************** * Scatter gather list 
buffer @@ -783,14 +783,14 @@ struct SXG_RCV_BLOCK_HDR { // entries, of the SGL for that pool. The SGEntries is determined by // dividing the NBSize by the expected page size (4k), and then padding // it by some appropriate amount as insurance (20% or so..??). -typedef struct _SXG_SGL_POOL_PROPERTIES { +struct sxg_sgl_pool_properties { u32 NBSize; // Largest NET_BUFFER size for this pool ushort SGEntries; // Number of entries in SGL ushort InitialBuffers; // Number to allocate at initializationtime ushort MinBuffers; // When to get more ushort MaxBuffers; // When to stop ushort PerCpuThreshold;// See sxgh.h:SXG_RESOURCES -} SXG_SGL_POOL_PROPERTIES, *PSXG_SGL_POOL_PROPERTIES; +}; // At the moment I'm going to statically initialize 4 pools: // 100k buffer pool: The vast majority of the expected buffers are expected to @@ -814,7 +814,7 @@ typedef struct _SXG_SGL_POOL_PROPERTIES // We will likely adjust the number of pools and/or pool properties over time.. #define SXG_NUM_SGL_POOLS 4 #define INITIALIZE_SGL_POOL_PROPERTIES \ -SXG_SGL_POOL_PROPERTIES SxgSglPoolProperties[SXG_NUM_SGL_POOLS] = \ +struct sxg_sgl_pool_properties SxgSglPoolProperties[SXG_NUM_SGL_POOLS] = \ { \ { 102400, 30, 8192, 2048, 16384, 256}, \ { 1048576, 300, 256, 128, 1024, 16}, \ @@ -822,7 +822,7 @@ SXG_SGL_POOL_PROPERTIES SxgSglPoolProper {10485760, 2700, 2, 4, 32, 0}, \ }; -extern SXG_SGL_POOL_PROPERTIES SxgSglPoolProperties[]; +extern struct sxg_sgl_pool_properties SxgSglPoolProperties[]; #define SXG_MAX_SGL_BUFFER_SIZE \ SxgSglPoolProperties[SXG_NUM_SGL_POOLS - 1].NBSize @@ -843,15 +843,15 @@ extern SXG_SGL_POOL_PROPERTIES SxgSglPoo // Allocate SGLs in blocks so we can skip over invalid entries. // We allocation 64k worth of SGL buffers, including the -// SXG_SGL_BLOCK_HDR, plus one for padding +// struct sxg_sgl_block_hdr, plus one for padding #define SXG_SGL_BLOCK_SIZE 65536 #define SXG_SGL_ALLOCATION_SIZE(_Pool) SXG_SGL_BLOCK_SIZE + SXG_SGL_SIZE(_Pool) -typedef struct _SXG_SGL_BLOCK_HDR { - ushort Pool; // Associated SGL pool - struct LIST_ENTRY List; // SXG_SCATTER_GATHER blocks - dma64_addr_t PhysicalAddress;// physical address -} SXG_SGL_BLOCK_HDR, *PSXG_SGL_BLOCK_HDR; +struct sxg_sgl_block_hdr { + ushort Pool; // Associated SGL pool + struct list_entry List; // SXG_SCATTER_GATHER blocks + dma64_addr_t PhysicalAddress;// physical address +}; // The following definition denotes the maximum block of memory that the @@ -873,7 +873,7 @@ enum SXG_SGL_TYPE { // to the card directly. For x86 systems we must reconstruct // the SGL. The following structure defines an x64 // formatted SGL entry -struct SXG_X64_SGE { +struct sxg_x64_sge { dma64_addr_t Address; // same as wdm.h u32 Length; // same as wdm.h u32 CompilerPad; // The compiler pads to 8-bytes @@ -883,19 +883,19 @@ struct SXG_X64_SGE { // Our SGL structure - Essentially the same as // wdm.h:SCATTER_GATHER_LIST. Note the variable number of // elements based on the pool specified above -struct SXG_X64_SGL { +struct sxg_x64_sgl { u32 NumberOfElements; u32 *Reserved; - struct SXG_X64_SGE Elements[1]; // Variable + struct sxg_x64_sge Elements[1]; // Variable }; -struct SXG_SCATTER_GATHER { +struct sxg_scatter_gather { enum SXG_SGL_TYPE Type; // FIRST! 
Dumb-nic or offload ushort Pool; // Associated SGL pool ushort Entries; // SGL total entries void *adapter; // Back pointer to adapter - struct LIST_ENTRY FreeList; // Free SXG_SCATTER_GATHER blocks - struct LIST_ENTRY AllList; // All SXG_SCATTER_GATHER blocks + struct list_entry FreeList; // Free SXG_SCATTER_GATHER blocks + struct list_entry AllList; // All SXG_SCATTER_GATHER blocks dma_addr_t PhysicalAddress; // physical address unsigned char State; // See SXG_BUFFER state above unsigned char CmdIndex; // Command ring index @@ -903,20 +903,21 @@ struct SXG_SCATTER_GATHER { u32 Direction; // For asynchronous completions u32 CurOffset; // Current SGL offset u32 SglRef; // SGL reference count - struct VLAN_HDR VlanTag; // VLAN tag to be inserted into SGL - struct SXG_X64_SGL *pSgl; // SGL Addr. Possibly &Sgl - struct SXG_X64_SGL Sgl; // SGL handed to card + struct vlan_hdr VlanTag; // VLAN tag to be inserted into SGL + struct sxg_x64_sgl *pSgl; // SGL Addr. Possibly &Sgl + struct sxg_x64_sgl Sgl; // SGL handed to card }; -// Note - the "- 1" is because SXG_SCATTER_GATHER=>SXG_X64_SGL includes 1 SGE.. -#define SXG_SGL_SIZE(_Pool) \ - (sizeof(struct SXG_SCATTER_GATHER) + \ - ((SxgSglPoolProperties[_Pool].SGEntries - 1) * sizeof(struct SXG_X64_SGE))) +// Note - the "- 1" is because SXG_SCATTER_GATHER=>struct sxg_x64_sgl includes 1 SGE.. +#define SXG_SGL_SIZE(_Pool) \ + (sizeof(struct sxg_scatter_gather) + \ + ((SxgSglPoolProperties[_Pool].SGEntries - 1) * \ + sizeof(struct sxg_x64_sge))) #if defined(CONFIG_X86_64) #define SXG_SGL_BUFFER(_SxgSgl) (&_SxgSgl->Sgl) -#define SXG_SGL_BUFFER_LENGTH(_SxgSgl) ((_SxgSgl)->Entries * sizeof(struct SXG_X64_SGE)) -#define SXG_SGL_BUF_SIZE sizeof(struct SXG_X64_SGL) +#define SXG_SGL_BUFFER_LENGTH(_SxgSgl) ((_SxgSgl)->Entries * sizeof(struct sxg_x64_sge)) +#define SXG_SGL_BUF_SIZE sizeof(struct sxg_x64_sgl) #elif defined(CONFIG_X86) // Force NDIS to give us it's own buffer so we can reformat to our own #define SXG_SGL_BUFFER(_SxgSgl) NULL @@ -929,7 +930,7 @@ struct SXG_SCATTER_GATHER { /*************************************************************************** * Microcode statistics ***************************************************************************/ -typedef struct _SXG_UCODE_STATS { +struct sxg_ucode_stats { u32 RPDQOflow; // PDQ overflow (unframed ie dq & drop 1st) u32 XDrops; // Xmt drops due to no xmt buffer u32 ERDrops; // Rcv drops due to ER full @@ -938,6 +939,6 @@ typedef struct _SXG_UCODE_STATS { u32 BFDrops; // Rcv drops due to bad frame: no link addr match, frlen > max u32 UPDrops; // Rcv drops due to UPFq full u32 XNoBufs; // Xmt drop due to no DRAM Xmit buffer or PxyBuf -} SXG_UCODE_STATS, *PSXG_UCODE_STATS; +}; --- a/drivers/staging/sxg/sxghw.h +++ b/drivers/staging/sxg/sxghw.h @@ -48,7 +48,7 @@ #define SXG_HWREG_MEMSIZE 0x4000 // 16k #pragma pack(push, 1) -struct SXG_HW_REGS { +struct sxg_hw_regs { u32 Reset; // Write 0xdead to invoke soft reset u32 Pad1; // No register defined at offset 4 u32 InterruptMask0; // Deassert legacy interrupt on function 0 @@ -240,7 +240,7 @@ struct SXG_HW_REGS { #define XMT_CONFIG_INITIAL_IPID 0x0000FFFF // Initial IPID /*************************************************************************** - * A-XGMAC Registers - Occupy 0x80 - 0xD4 of the SXG_HW_REGS + * A-XGMAC Registers - Occupy 0x80 - 0xD4 of the struct sxg_hw_regs * * Full register descriptions can be found in axgmac.pdf ***************************************************************************/ @@ -524,7 +524,7 @@ struct SXG_HW_REGS { #define 
 #define XS_LANE_ALIGN		0x1000		// XS transmit lanes aligned
 
 // PHY Microcode download data structure
-struct PHY_UCODE {
+struct phy_ucode {
 	ushort Addr;
 	ushort Data;
 };
@@ -557,7 +557,7 @@ struct PHY_UCODE {
 // all commands - see the Sahara spec for details. Note that this structure is
 // only valid when compiled on a little endian machine.
 #pragma pack(push, 1)
-struct XMT_DESC {
+struct xmt_desc {
 	ushort XmtLen;		// word 0, bits [15:0] - transmit length
 	unsigned char XmtCtl;	// word 0, bits [23:16] - transmit control byte
 	unsigned char Cmd;	// word 0, bits [31:24] - transmit command plus misc.
@@ -574,7 +574,7 @@ struct XMT_DESC {
 };
 #pragma pack(pop)
 
-// XMT_DESC Cmd byte definitions
+// struct xmt_desc Cmd byte definitions
 // command codes
 #define XMT_DESC_CMD_RAW_SEND		0	// raw send descriptor
 #define XMT_DESC_CMD_CSUM_INSERT	1	// checksum insert descriptor
@@ -587,7 +587,7 @@ struct XMT_DESC {
 #define XMT_FORMAT	(XMT_DESC_CMD_FORMAT << XMT_DESC_CMD_CODE_SHFT)
 #define XMT_PRIME	(XMT_DESC_CMD_PRIME << XMT_DESC_CMD_CODE_SHFT)
 
-// XMT_DESC Control Byte (XmtCtl) definitions
+// struct xmt_desc Control Byte (XmtCtl) definitions
 // NOTE: These bits do not work on Sahara (Rev A)!
 #define XMT_CTL_PAUSE_FRAME	0x80	// current frame is a pause control frame (for statistics)
 #define XMT_CTL_CONTROL_FRAME	0x40	// current frame is a control frame (for statistics)
@@ -602,7 +602,7 @@ struct XMT_DESC {
 #define XMT_CTL_DELAY_FCS_2	0x02	// delay FCS calculation by 2 (4-byte) words
 #define XMT_CTL_DELAY_FCS_3	0x03	// delay FCS calculation by 3 (4-byte) words
 
-// XMT_DESC XmtBufId definition
+// struct xmt_desc XmtBufId definition
 #define XMT_BUF_ID_SHFT	8	// The Xmt buffer ID is formed by dividing
 				// the buffer (DRAM) address by 256 (or << 8)
@@ -620,7 +620,7 @@ struct XMT_DESC {
 // Format of the 18 byte Receive Buffer returned by the
 // Receive Sequencer for received packets
 #pragma pack(push, 1)
-struct RCV_BUF_HDR {
+struct rcv_buf_hdr {
 	u32 Status;		// Status word from Rcv Seq Parser
 	ushort Length;		// Rcv packet byte count
 	union {
@@ -702,24 +702,24 @@ struct RCV_BUF_HDR {
 #pragma pack(push, 1)
 // Structure for an element of H/W configuration data.
 // Read by the Sahara hardware
-struct HW_CFG_DATA {
+struct hw_cfg_data {
 	ushort Addr;
 	ushort Data;
 };
 
-// Number of HW_CFG_DATA structures to put in the configuration data
-// data structure (SXG_CONFIG or SXG_CONFIG_A). The number is computed
+// Number of struct hw_cfg_data structures to put in the configuration data
+// data structure (struct sxg_config or struct sxg_config_a). The number is computed
 // to fill the entire H/W config section of the structure.
-#define NUM_HW_CFG_ENTRIES	(HW_CFG_SECTION_SIZE / sizeof(struct HW_CFG_DATA))
-#define NUM_HW_CFG_ENTRIES_A	(HW_CFG_SECTION_SIZE_A / sizeof(struct HW_CFG_DATA))
+#define NUM_HW_CFG_ENTRIES	(HW_CFG_SECTION_SIZE / sizeof(struct hw_cfg_data))
+#define NUM_HW_CFG_ENTRIES_A	(HW_CFG_SECTION_SIZE_A / sizeof(struct hw_cfg_data))
 
 /* MAC address structure */
-struct SXG_CONFIG_MAC {
+struct sxg_config_mac {
 	unsigned char MacAddr[6];	/* MAC Address */
 };
 
 /* FRU data structure */
-struct ATK_FRU {
+struct atk_fru {
 	unsigned char PartNum[6];
 	unsigned char Revision[2];
 	unsigned char Serial[14];
@@ -737,53 +737,53 @@ struct ATK_FRU {
 #define ATK_OEM_ASSY_SIZE	10	// assy num is 9 chars plus \0
 
 // OEM FRU structure for Alacritech
-struct ATK_OEM {
+struct atk_oem {
 	unsigned char Assy[ATK_OEM_ASSY_SIZE];
 };
 
 #define OEM_EEPROM_FRUSIZE	74	// size of OEM fru info - size
 					// chosen to fill out the S/W section
 
-union OEM_FRU {				// OEM FRU information
+union oem_fru {				// OEM FRU information
 	unsigned char OemFru[OEM_EEPROM_FRUSIZE];
-	struct ATK_OEM AtkOem;
+	struct atk_oem AtkOem;
 };
 
 // Structure to hold the S/W configuration data.
-struct SW_CFG_DATA {
+struct sw_cfg_data {
 	ushort MagicWord;		// Magic word for section 2
 	ushort Version;			// Format version
-	struct SXG_CONFIG_MAC MacAddr[4];	// space for 4 MAC addresses
-	struct ATK_FRU AtkFru;		// FRU information
+	struct sxg_config_mac MacAddr[4];	// space for 4 MAC addresses
+	struct atk_fru AtkFru;		// FRU information
 	ushort OemFruFormat;		// OEM FRU format type
-	union OEM_FRU OemFru;		// OEM FRU information
+	union oem_fru OemFru;		// OEM FRU information
 	ushort Checksum;		// Checksum of section 2
 };
 
 /* EEPROM/Flash Format */
-struct SXG_CONFIG {
+struct sxg_config {
 	/*
 	 * H/W Section - Read by Sahara hardware (512 bytes)
 	 */
-	struct HW_CFG_DATA HwCfg[NUM_HW_CFG_ENTRIES];
+	struct hw_cfg_data HwCfg[NUM_HW_CFG_ENTRIES];
 	/*
 	 * S/W Section - Other configuration data (128 bytes)
 	 */
-	struct SW_CFG_DATA SwCfg;
+	struct sw_cfg_data SwCfg;
 };
 
 // EEPROM/Flash Format (Sahara rev A)
-struct SXG_CONFIG_A {
+struct sxg_config_a {
 	/*
 	 * H/W Section - Read by Sahara hardware (256 bytes)
 	 */
-	struct HW_CFG_DATA HwCfg[NUM_HW_CFG_ENTRIES_A];
+	struct hw_cfg_data HwCfg[NUM_HW_CFG_ENTRIES_A];
 	/*
 	 * S/W Section - Other configuration data (128 bytes)
 	 */
-	struct SW_CFG_DATA SwCfg;
+	struct sw_cfg_data SwCfg;
 };
 
 #ifdef WINDOWS_COMPILER
@@ -801,17 +801,17 @@ struct SXG_CONFIG_A {
 // structure was built incorrectly. Unfortunately, the error message produced
 // is meaningless. But this is apparently the only way to catch this problem
 // at compile time.
-compile_time_assert (offsetof(SXG_CONFIG, SwCfg) == SW_CFG_SECTION_START);
-compile_time_assert (sizeof(SXG_CONFIG) == HW_CFG_SECTION_SIZE + SW_CFG_SECTION_SIZE);
+compile_time_assert (offsetof(struct sxg_config, SwCfg) == SW_CFG_SECTION_START);
+compile_time_assert (sizeof(struct sxg_config) == HW_CFG_SECTION_SIZE + SW_CFG_SECTION_SIZE);
 
-compile_time_assert (offsetof(SXG_CONFIG_A, SwCfg) == SW_CFG_SECTION_START_A);
-compile_time_assert (sizeof(SXG_CONFIG_A) == HW_CFG_SECTION_SIZE_A + SW_CFG_SECTION_SIZE);
+compile_time_assert (offsetof(struct sxg_config_a, SwCfg) == SW_CFG_SECTION_START_A);
+compile_time_assert (sizeof(struct sxg_config_a) == HW_CFG_SECTION_SIZE_A + SW_CFG_SECTION_SIZE);
 #endif
 /*
  * Structure used to pass information between driver and user-mode
  * control application
 */
-struct ADAPT_USERINFO {
+struct adapt_userinfo {
 	bool LinkUp;
 	// u32 LinkState;		// use LinkUp - any need for other states?
 	u32 LinkSpeed;			// not currently needed
@@ -821,9 +821,9 @@ struct ADAPT_USERINFO {
 	ushort PciLanes;
 	unsigned char MacAddr[6];
 	unsigned char CurrMacAddr[6];
-	struct ATK_FRU AtkFru;
+	struct atk_fru AtkFru;
 	ushort OemFruFormat;
-	union OEM_FRU OemFru;
+	union oem_fru OemFru;
 };
 
 #pragma pack(pop)
--- a/drivers/staging/sxg/sxg_os.h
+++ b/drivers/staging/sxg/sxg_os.h
@@ -44,9 +44,9 @@
 #define FALSE	(0)
 #define TRUE	(1)
 
-struct LIST_ENTRY {
-	struct LIST_ENTRY *nle_flink;
-	struct LIST_ENTRY *nle_blink;
+struct list_entry {
+	struct list_entry *nle_flink;
+	struct list_entry *nle_blink;
 };
 
 #define InitializeListHead(l) \
@@ -68,10 +68,10 @@ struct LIST_ENTRY {
 
 /* These two have to be inlined since they return things. */
 
-static __inline struct LIST_ENTRY *RemoveHeadList(struct LIST_ENTRY *l)
+static __inline struct list_entry *RemoveHeadList(struct list_entry *l)
 {
-	struct LIST_ENTRY *f;
-	struct LIST_ENTRY *e;
+	struct list_entry *f;
+	struct list_entry *e;
 
 	e = l->nle_flink;
 	f = e->nle_flink;
@@ -81,10 +81,10 @@ static __inline struct LIST_ENTRY *Remov
 	return (e);
 }
 
-static __inline struct LIST_ENTRY *RemoveTailList(struct LIST_ENTRY *l)
+static __inline struct list_entry *RemoveTailList(struct list_entry *l)
 {
-	struct LIST_ENTRY *b;
-	struct LIST_ENTRY *e;
+	struct list_entry *b;
+	struct list_entry *e;
 
 	e = l->nle_blink;
 	b = e->nle_blink;
@@ -96,7 +96,7 @@ static __inline struct LIST_ENTRY *Remov
 
 #define InsertTailList(l, e) \
 	do { \
-		struct LIST_ENTRY *b; \
+		struct list_entry *b; \
 \
 		b = (l)->nle_blink; \
 		(e)->nle_flink = (l); \
@@ -107,7 +107,7 @@ static __inline struct LIST_ENTRY *Remov
 
 #define InsertHeadList(l, e) \
 	do { \
-		struct LIST_ENTRY *f; \
+		struct list_entry *f; \
 \
 		f = (l)->nle_flink; \
 		(e)->nle_flink = f; \
--- a/drivers/staging/sxg/sxgphycode.h
+++ b/drivers/staging/sxg/sxgphycode.h
@@ -18,7 +18,7 @@
 /*
  * Download for AEL2005C PHY with SR/LR transceiver (10GBASE-SR or 10GBASE-LR)
  */
-static struct PHY_UCODE PhyUcode[] = {
+static struct phy_ucode PhyUcode[] = {
 	/*
 	 * NOTE: An address of 0 is a special case. When the download routine
	 * sees an address of 0, it does not write to the PHY. Instead, it