From alan@linux.intel.com Thu Aug 20 14:39:04 2009
From: Alan Cox
Date: Wed, 19 Aug 2009 18:21:50 +0100
Subject: Staging: et131x: spinlocks
To: greg@kroah.com, linux-kernel@vger.kernel.org
Message-ID: <20090819172150.21152.48059.stgit@localhost.localdomain>

Switch to the more normal "flags" naming. Also fix up the nested use of
spin_lock_irqsave

Signed-off-by: Alan Cox
Signed-off-by: Greg Kroah-Hartman
---

 drivers/staging/et131x/et1310_phy.c     |   10 ++--
 drivers/staging/et131x/et1310_pm.c      |    6 +-
 drivers/staging/et131x/et1310_rx.c      |   28 ++++++------
 drivers/staging/et131x/et1310_tx.c      |   74 ++++++++++++++++----------
 drivers/staging/et131x/et131x_initpci.c |    6 +-
 drivers/staging/et131x/et131x_netdev.c  |   14 +++---
 6 files changed, 69 insertions(+), 69 deletions(-)

--- a/drivers/staging/et131x/et1310_phy.c
+++ b/drivers/staging/et131x/et1310_phy.c
@@ -484,7 +484,7 @@ void et131x_Mii_check(struct et131x_adap
 	uint32_t uiMdiMdix;
 	uint32_t uiMasterSlave;
 	uint32_t uiPolarity;
-	unsigned long lockflags;
+	unsigned long flags;
 
 	DBG_ENTER(et131x_dbginfo);
 
@@ -495,12 +495,12 @@ void et131x_Mii_check(struct et131x_adap
 		/* Update our state variables and indicate the
 		 * connected state
 		 */
-		spin_lock_irqsave(&etdev->Lock, lockflags);
+		spin_lock_irqsave(&etdev->Lock, flags);
 
 		etdev->MediaState = NETIF_STATUS_MEDIA_CONNECT;
 		MP_CLEAR_FLAG(etdev, fMP_ADAPTER_LINK_DETECTION);
 
-		spin_unlock_irqrestore(&etdev->Lock, lockflags);
+		spin_unlock_irqrestore(&etdev->Lock, flags);
 
 		/* Don't indicate state if we're in loopback mode */
 		if (etdev->RegistryPhyLoopbk == false)
@@ -533,11 +533,11 @@ void et131x_Mii_check(struct et131x_adap
 		     (etdev, fMP_ADAPTER_LINK_DETECTION)) ||
 		    (etdev->MediaState == NETIF_STATUS_MEDIA_DISCONNECT)) {
-			spin_lock_irqsave(&etdev->Lock, lockflags);
+			spin_lock_irqsave(&etdev->Lock, flags);
 
 			etdev->MediaState = NETIF_STATUS_MEDIA_DISCONNECT;
 
 			spin_unlock_irqrestore(&etdev->Lock,
-					       lockflags);
+					       flags);
 
 			/* Only indicate state if we're in loopback
 			 * mode
--- a/drivers/staging/et131x/et1310_pm.c
+++ b/drivers/staging/et131x/et1310_pm.c
@@ -119,7 +119,7 @@ extern dbg_info_t *et131x_dbginfo;
  */
 void EnablePhyComa(struct et131x_adapter *etdev)
 {
-	unsigned long lockflags;
+	unsigned long flags;
 	PM_CSR_t GlobalPmCSR;
 	int32_t LoopCounter = 10;
 
@@ -134,9 +134,9 @@ void EnablePhyComa(struct et131x_adapter
 	etdev->PoMgmt.PowerDownDuplex = etdev->AiForceDpx;
 
 	/* Stop sending packets. */
-	spin_lock_irqsave(&etdev->SendHWLock, lockflags);
+	spin_lock_irqsave(&etdev->SendHWLock, flags);
 	MP_SET_FLAG(etdev, fMP_ADAPTER_LOWER_POWER);
-	spin_unlock_irqrestore(&etdev->SendHWLock, lockflags);
+	spin_unlock_irqrestore(&etdev->SendHWLock, flags);
 
 	/* Wait for outstanding Receive packets */
 	while ((MP_GET_RCV_REF(etdev) != 0) && (LoopCounter-- > 0))
--- a/drivers/staging/et131x/et1310_rx.c
+++ b/drivers/staging/et131x/et1310_rx.c
@@ -685,7 +685,7 @@ void ConfigRxDmaRegs(struct et131x_adapt
 	PFBR_DESC_t pFbrEntry;
 	uint32_t iEntry;
 	RXDMA_PSR_NUM_DES_t psr_num_des;
-	unsigned long lockflags;
+	unsigned long flags;
 
 	DBG_ENTER(et131x_dbginfo);
 
@@ -718,7 +718,7 @@ void ConfigRxDmaRegs(struct et131x_adapt
 	writel((psr_num_des.bits.psr_ndes * LO_MARK_PERCENT_FOR_PSR) / 100,
 	       &pRxDma->psr_min_des.value);
 
-	spin_lock_irqsave(&etdev->RcvLock, lockflags);
+	spin_lock_irqsave(&etdev->RcvLock, flags);
 
 	/* These local variables track the PSR in the adapter structure */
 	pRxLocal->local_psr_full.bits.psr_full = 0;
@@ -801,7 +801,7 @@ void ConfigRxDmaRegs(struct et131x_adapt
 	 */
 	writel(etdev->RegistryRxTimeInterval, &pRxDma->max_pkt_time.value);
 
-	spin_unlock_irqrestore(&etdev->RcvLock, lockflags);
+	spin_unlock_irqrestore(&etdev->RcvLock, flags);
 
 	DBG_LEAVE(et131x_dbginfo);
 }
@@ -914,7 +914,7 @@ PMP_RFD nic_rx_pkts(struct et131x_adapte
 	PMP_RFD pMpRfd;
 	uint32_t nIndex;
 	uint8_t *pBufVa;
-	unsigned long lockflags;
+	unsigned long flags;
 	struct list_head *element;
 	uint8_t ringIndex;
 	uint16_t bufferIndex;
@@ -1013,7 +1013,7 @@ PMP_RFD nic_rx_pkts(struct et131x_adapte
 	}
 
 	/* Get and fill the RFD. */
-	spin_lock_irqsave(&etdev->RcvLock, lockflags);
+	spin_lock_irqsave(&etdev->RcvLock, flags);
 
 	pMpRfd = NULL;
 	element = pRxLocal->RecvList.next;
@@ -1023,14 +1023,14 @@ PMP_RFD nic_rx_pkts(struct et131x_adapte
 		DBG_RX(et131x_dbginfo,
 		       "NULL RFD returned from RecvList via list_entry()\n");
 		DBG_RX_LEAVE(et131x_dbginfo);
-		spin_unlock_irqrestore(&etdev->RcvLock, lockflags);
+		spin_unlock_irqrestore(&etdev->RcvLock, flags);
 		return NULL;
 	}
 
 	list_del(&pMpRfd->list_node);
 	pRxLocal->nReadyRecv--;
 
-	spin_unlock_irqrestore(&etdev->RcvLock, lockflags);
+	spin_unlock_irqrestore(&etdev->RcvLock, flags);
 
 	pMpRfd->iBufferIndex = bufferIndex;
 	pMpRfd->iRingIndex = ringIndex;
@@ -1260,9 +1260,9 @@ void et131x_handle_recv_interrupt(struct
 		 * Besides, we don't really need (at this point) the
 		 * pending list anyway.
 		 */
-		/* spin_lock_irqsave( &etdev->RcvPendLock, lockflags );
+		/* spin_lock_irqsave( &etdev->RcvPendLock, flags );
 		 * list_add_tail( &pMpRfd->list_node, &etdev->RxRing.RecvPendingList );
-		 * spin_unlock_irqrestore( &etdev->RcvPendLock, lockflags );
+		 * spin_unlock_irqrestore( &etdev->RcvPendLock, flags );
 		 */
 
 		/* Update the number of outstanding Recvs */
@@ -1302,7 +1302,7 @@ void nic_return_rfd(struct et131x_adapte
 	struct _RXDMA_t __iomem *pRxDma = &etdev->CSRAddress->rxdma;
 	uint16_t bi = pMpRfd->iBufferIndex;
 	uint8_t ri = pMpRfd->iRingIndex;
-	unsigned long lockflags;
+	unsigned long flags;
 
 	DBG_RX_ENTER(et131x_dbginfo);
 
@@ -1314,7 +1314,7 @@ void nic_return_rfd(struct et131x_adapte
 	    (ri == 0 && bi < pRxLocal->Fbr0NumEntries) ||
 #endif
 	    (ri == 1 && bi < pRxLocal->Fbr1NumEntries)) {
-		spin_lock_irqsave(&etdev->FbrLock, lockflags);
+		spin_lock_irqsave(&etdev->FbrLock, flags);
 
 		if (ri == 1) {
 			PFBR_DESC_t pNextDesc =
@@ -1362,7 +1362,7 @@ void nic_return_rfd(struct et131x_adapte
 			       &pRxDma->fbr0_full_offset.value);
 		}
 #endif
-		spin_unlock_irqrestore(&etdev->FbrLock, lockflags);
+		spin_unlock_irqrestore(&etdev->FbrLock, flags);
 	} else {
 		DBG_ERROR(et131x_dbginfo,
 			  "NICReturnRFD illegal Buffer Index returned\n");
@@ -1371,10 +1371,10 @@ void nic_return_rfd(struct et131x_adapte
 	/* The processing on this RFD is done, so put it back on the tail of
 	 * our list
 	 */
-	spin_lock_irqsave(&etdev->RcvLock, lockflags);
+	spin_lock_irqsave(&etdev->RcvLock, flags);
 	list_add_tail(&pMpRfd->list_node, &pRxLocal->RecvList);
 	pRxLocal->nReadyRecv++;
-	spin_unlock_irqrestore(&etdev->RcvLock, lockflags);
+	spin_unlock_irqrestore(&etdev->RcvLock, flags);
 
 	DBG_ASSERT(pRxLocal->nReadyRecv <= pRxLocal->NumRfd);
 	DBG_RX_LEAVE(et131x_dbginfo);
--- a/drivers/staging/et131x/et1310_tx.c
+++ b/drivers/staging/et131x/et1310_tx.c
@@ -461,7 +461,7 @@ static int et131x_send_packet(struct sk_
 	int status = 0;
 	PMP_TCB pMpTcb = NULL;
 	uint16_t *pShBufVa;
-	unsigned long lockflags;
+	unsigned long flags;
 
 	DBG_TX_ENTER(et131x_dbginfo);
 
@@ -482,12 +482,12 @@ static int et131x_send_packet(struct sk_
 	}
 
 	/* Get a TCB for this packet */
-	spin_lock_irqsave(&etdev->TCBReadyQLock, lockflags);
+	spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
 
 	pMpTcb = etdev->TxRing.TCBReadyQueueHead;
 
 	if (pMpTcb == NULL) {
-		spin_unlock_irqrestore(&etdev->TCBReadyQLock, lockflags);
+		spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
 
 		DBG_WARNING(et131x_dbginfo, "Can't obtain a TCB\n");
 		DBG_TX_LEAVE(et131x_dbginfo);
@@ -499,7 +499,7 @@ static int et131x_send_packet(struct sk_
 	if (etdev->TxRing.TCBReadyQueueHead == NULL)
 		etdev->TxRing.TCBReadyQueueTail = NULL;
 
-	spin_unlock_irqrestore(&etdev->TCBReadyQLock, lockflags);
+	spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
 
 	pMpTcb->PacketLength = skb->len;
 	pMpTcb->Packet = skb;
@@ -522,7 +522,7 @@ static int et131x_send_packet(struct sk_
 	status = nic_send_packet(etdev, pMpTcb);
 
 	if (status != 0) {
-		spin_lock_irqsave(&etdev->TCBReadyQLock, lockflags);
+		spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
 
 		if (etdev->TxRing.TCBReadyQueueTail) {
 			etdev->TxRing.TCBReadyQueueTail->Next = pMpTcb;
@@ -533,7 +533,7 @@ static int et131x_send_packet(struct sk_
 
 		etdev->TxRing.TCBReadyQueueTail = pMpTcb;
 
-		spin_unlock_irqrestore(&etdev->TCBReadyQLock, lockflags);
+		spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
 
 		DBG_TX_LEAVE(et131x_dbginfo);
 		return status;
@@ -561,7 +561,7 @@ static int nic_send_packet(struct et131x
 	struct sk_buff *pPacket = pMpTcb->Packet;
 	uint32_t FragListCount = skb_shinfo(pPacket)->nr_frags + 1;
 	struct skb_frag_struct *pFragList = &skb_shinfo(pPacket)->frags[0];
-	unsigned long lockflags1, lockflags2;
+	unsigned long flags;
 
 	DBG_TX_ENTER(et131x_dbginfo);
 
@@ -726,7 +726,7 @@ static int nic_send_packet(struct et131x
 	pMpTcb->WrIndexStart = etdev->TxRing.txDmaReadyToSend;
 	pMpTcb->PacketStaleCount = 0;
 
-	spin_lock_irqsave(&etdev->SendHWLock, lockflags1);
+	spin_lock_irqsave(&etdev->SendHWLock, flags);
 
 	iThisCopy = NUM_DESC_PER_RING_TX -
 				etdev->TxRing.txDmaReadyToSend.bits.val;
@@ -771,7 +771,7 @@ static int nic_send_packet(struct et131x
 	pMpTcb->WrIndex.value =
 	    etdev->TxRing.txDmaReadyToSend.value - 1;
 
-	spin_lock_irqsave(&etdev->TCBSendQLock, lockflags2);
+	spin_lock(&etdev->TCBSendQLock);
 
 	if (etdev->TxRing.CurrSendTail)
 		etdev->TxRing.CurrSendTail->Next = pMpTcb;
@@ -784,7 +784,7 @@ static int nic_send_packet(struct et131x
 
 	etdev->TxRing.nBusySend++;
 
-	spin_unlock_irqrestore(&etdev->TCBSendQLock, lockflags2);
+	spin_unlock(&etdev->TCBSendQLock);
 
 	/* Write the new write pointer back to the device. */
 	writel(etdev->TxRing.txDmaReadyToSend.value,
@@ -798,7 +798,7 @@ static int nic_send_packet(struct et131x
 		       &etdev->CSRAddress->global.watchdog_timer);
 	}
 
-	spin_unlock_irqrestore(&etdev->SendHWLock, lockflags1);
+	spin_unlock_irqrestore(&etdev->SendHWLock, flags);
 
 	DBG_TX_LEAVE(et131x_dbginfo);
 	return 0;
@@ -829,7 +829,7 @@ static int nic_send_packet(struct et131x
 	TX_DESC_ENTRY_t *CurDescPostCopy = NULL;
 	uint32_t SlotsAvailable;
 	DMA10W_t ServiceComplete;
-	unsigned int lockflags1, lockflags2;
+	unsigned int flags;
 	struct sk_buff *pPacket = pMpTcb->Packet;
 	uint32_t FragListCount = skb_shinfo(pPacket)->nr_frags + 1;
 	struct skb_frag_struct *pFragList = &skb_shinfo(pPacket)->frags[0];
@@ -875,7 +875,7 @@ static int nic_send_packet(struct et131x
 		SegmentSize = (pPacket->len - pPacket->data_len) / 2;
 	}
 
-	spin_lock_irqsave(&etdev->SendHWLock, lockflags1);
+	spin_lock_irqsave(&etdev->SendHWLock, flags);
 
 	if (etdev->TxRing.txDmaReadyToSend.bits.serv_req_wrap ==
 	    ServiceComplete.bits.serv_cpl_wrap) {
@@ -896,7 +896,7 @@ static int nic_send_packet(struct et131x
 	if ((FragListCount + iSplitFirstElement) > SlotsAvailable) {
 		DBG_WARNING(et131x_dbginfo,
 			    "Not Enough Space in Tx Desc Ring\n");
-		spin_unlock_irqrestore(&etdev->SendHWLock, lockflags1);
+		spin_unlock_irqrestore(&etdev->SendHWLock, flags);
 		return -ENOMEM;
 	}
 
@@ -1185,7 +1185,7 @@ static int nic_send_packet(struct et131x
 			       NIC_MIN_PACKET_SIZE - pMpTcb->PacketLength);
 	}
 
-	spin_lock_irqsave(&etdev->TCBSendQLock, lockflags2);
+	spin_lock(&etdev->TCBSendQLock);
 
 	if (etdev->TxRing.CurrSendTail)
 		etdev->TxRing.CurrSendTail->Next = pMpTcb;
@@ -1198,7 +1198,7 @@ static int nic_send_packet(struct et131x
 
 	etdev->TxRing.nBusySend++;
 
-	spin_unlock_irqrestore(&etdev->TCBSendQLock, lockflags2);
+	spin_unlock(&etdev->TCBSendQLock);
 
 	/* Write the new write pointer back to the device. */
 	writel(etdev->TxRing.txDmaReadyToSend.value,
@@ -1216,7 +1216,7 @@ static int nic_send_packet(struct et131x
 			       &etdev->CSRAddress->global.watchdog_timer);
 	}
 
-	spin_unlock_irqrestore(&etdev->SendHWLock, lockflags1);
+	spin_unlock_irqrestore(&etdev->SendHWLock, flags);
 
 	DBG_TX_LEAVE(et131x_dbginfo);
 	return 0;
@@ -1234,7 +1234,7 @@ static int nic_send_packet(struct et131x
 inline void et131x_free_send_packet(struct et131x_adapter *etdev,
 				    PMP_TCB pMpTcb)
 {
-	unsigned long lockflags;
+	unsigned long flags;
 	TX_DESC_ENTRY_t *desc = NULL;
 	struct net_device_stats *stats = &etdev->net_stats;
 
@@ -1311,7 +1311,7 @@ inline void et131x_free_send_packet(stru
 	memset(pMpTcb, 0, sizeof(MP_TCB));
 
 	/* Add the TCB to the Ready Q */
-	spin_lock_irqsave(&etdev->TCBReadyQLock, lockflags);
+	spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
 
 	etdev->Stats.opackets++;
 
@@ -1324,7 +1324,7 @@ inline void et131x_free_send_packet(stru
 
 	etdev->TxRing.TCBReadyQueueTail = pMpTcb;
 
-	spin_unlock_irqrestore(&etdev->TCBReadyQLock, lockflags);
+	spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
 
 	DBG_ASSERT(etdev->TxRing.nBusySend >= 0);
 }
@@ -1339,16 +1339,16 @@ void et131x_free_busy_send_packets(struc
 {
 	PMP_TCB pMpTcb;
 	struct list_head *pEntry;
-	unsigned long lockflags;
+	unsigned long flags;
 	uint32_t FreeCounter = 0;
 
 	DBG_ENTER(et131x_dbginfo);
 
 	while (!list_empty(&etdev->TxRing.SendWaitQueue)) {
-		spin_lock_irqsave(&etdev->SendWaitLock, lockflags);
+		spin_lock_irqsave(&etdev->SendWaitLock, flags);
 
 		etdev->TxRing.nWaitSend--;
-		spin_unlock_irqrestore(&etdev->SendWaitLock, lockflags);
+		spin_unlock_irqrestore(&etdev->SendWaitLock, flags);
 
 		pEntry = etdev->TxRing.SendWaitQueue.next;
 	}
@@ -1356,7 +1356,7 @@ void et131x_free_busy_send_packets(struc
 	etdev->TxRing.nWaitSend = 0;
 
 	/* Any packets being sent? Check the first TCB on the send list */
-	spin_lock_irqsave(&etdev->TCBSendQLock, lockflags);
+	spin_lock_irqsave(&etdev->TCBSendQLock, flags);
 
 	pMpTcb = etdev->TxRing.CurrSendHead;
 
@@ -1370,14 +1370,14 @@ void et131x_free_busy_send_packets(struc
 
 		etdev->TxRing.nBusySend--;
 
-		spin_unlock_irqrestore(&etdev->TCBSendQLock, lockflags);
+		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
 
 		DBG_VERBOSE(et131x_dbginfo, "pMpTcb = 0x%p\n", pMpTcb);
 
 		FreeCounter++;
 		MP_FREE_SEND_PACKET_FUN(etdev, pMpTcb);
 
-		spin_lock_irqsave(&etdev->TCBSendQLock, lockflags);
+		spin_lock_irqsave(&etdev->TCBSendQLock, flags);
 
 		pMpTcb = etdev->TxRing.CurrSendHead;
 	}
@@ -1388,7 +1388,7 @@ void et131x_free_busy_send_packets(struc
 		BUG();
 	}
 
-	spin_unlock_irqrestore(&etdev->TCBSendQLock, lockflags);
+	spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
 
 	etdev->TxRing.nBusySend = 0;
 
@@ -1429,7 +1429,7 @@ void et131x_handle_send_interrupt(struct
  */
 static void et131x_update_tcb_list(struct et131x_adapter *etdev)
 {
-	unsigned long lockflags;
+	unsigned long flags;
 	DMA10W_t ServiceComplete;
 	PMP_TCB pMpTcb;
 
@@ -1439,7 +1439,7 @@ static void et131x_update_tcb_list(struc
 	/* Has the ring wrapped? Process any descriptors that do not have
 	 * the same "wrap" indicator as the current completion indicator
	 */
-	spin_lock_irqsave(&etdev->TCBSendQLock, lockflags);
+	spin_lock_irqsave(&etdev->TCBSendQLock, flags);
 
 	pMpTcb = etdev->TxRing.CurrSendHead;
 	while (pMpTcb &&
@@ -1450,9 +1450,9 @@ static void et131x_update_tcb_list(struc
 		if (pMpTcb->Next == NULL)
 			etdev->TxRing.CurrSendTail = NULL;
 
-		spin_unlock_irqrestore(&etdev->TCBSendQLock, lockflags);
+		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
 		MP_FREE_SEND_PACKET_FUN(etdev, pMpTcb);
-		spin_lock_irqsave(&etdev->TCBSendQLock, lockflags);
+		spin_lock_irqsave(&etdev->TCBSendQLock, flags);
 
 		/* Goto the next packet */
 		pMpTcb = etdev->TxRing.CurrSendHead;
@@ -1465,9 +1465,9 @@
 		if (pMpTcb->Next == NULL)
 			etdev->TxRing.CurrSendTail = NULL;
 
-		spin_unlock_irqrestore(&etdev->TCBSendQLock, lockflags);
+		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
 		MP_FREE_SEND_PACKET_FUN(etdev, pMpTcb);
-		spin_lock_irqsave(&etdev->TCBSendQLock, lockflags);
+		spin_lock_irqsave(&etdev->TCBSendQLock, flags);
 
 		/* Goto the next packet */
 		pMpTcb = etdev->TxRing.CurrSendHead;
@@ -1477,7 +1477,7 @@ static void et131x_update_tcb_list(struc
 	if (etdev->TxRing.nBusySend <= (NUM_TCB / 3))
 		netif_wake_queue(etdev->netdev);
 
-	spin_unlock_irqrestore(&etdev->TCBSendQLock, lockflags);
+	spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
 }
 
 /**
@@ -1489,9 +1489,9 @@ static void et131x_update_tcb_list(struc
  */
 static void et131x_check_send_wait_list(struct et131x_adapter *etdev)
 {
-	unsigned long lockflags;
+	unsigned long flags;
 
-	spin_lock_irqsave(&etdev->SendWaitLock, lockflags);
+	spin_lock_irqsave(&etdev->SendWaitLock, flags);
 
 	while (!list_empty(&etdev->TxRing.SendWaitQueue) &&
 	       MP_TCB_RESOURCES_AVAILABLE(etdev)) {
@@ -1508,5 +1508,5 @@ static void et131x_check_send_wait_list(
 			    etdev->TxRing.nWaitSend);
 	}
 
-	spin_unlock_irqrestore(&etdev->SendWaitLock, lockflags);
+	spin_unlock_irqrestore(&etdev->SendWaitLock, flags);
 }
--- a/drivers/staging/et131x/et131x_initpci.c
+++ b/drivers/staging/et131x/et131x_initpci.c
@@ -492,18 +492,18 @@ void et131x_error_timer_handler(unsigned
 void et131x_link_detection_handler(unsigned long data)
 {
 	struct et131x_adapter *etdev = (struct et131x_adapter *) data;
-	unsigned long lockflags;
+	unsigned long flags;
 
 	/* Let everyone know that we have run */
 	etdev->bLinkTimerActive = false;
 
 	if (etdev->MediaState == 0) {
-		spin_lock_irqsave(&etdev->Lock, lockflags);
+		spin_lock_irqsave(&etdev->Lock, flags);
 
 		etdev->MediaState = NETIF_STATUS_MEDIA_DISCONNECT;
 		MP_CLEAR_FLAG(etdev, fMP_ADAPTER_LINK_DETECTION);
 
-		spin_unlock_irqrestore(&etdev->Lock, lockflags);
+		spin_unlock_irqrestore(&etdev->Lock, flags);
 
 		netif_carrier_off(etdev->netdev);
 
--- a/drivers/staging/et131x/et131x_netdev.c
+++ b/drivers/staging/et131x/et131x_netdev.c
@@ -467,12 +467,12 @@ void et131x_multicast(struct net_device
 	struct et131x_adapter *adapter = netdev_priv(netdev);
 	uint32_t PacketFilter = 0;
 	uint32_t count;
-	unsigned long lockflags;
+	unsigned long flags;
 	struct dev_mc_list *mclist = netdev->mc_list;
 
 	DBG_ENTER(et131x_dbginfo);
 
-	spin_lock_irqsave(&adapter->Lock, lockflags);
+	spin_lock_irqsave(&adapter->Lock, flags);
 
 	/* Before we modify the platform-independent filter flags, store them
 	 * locally. This allows us to determine if anything's changed and if
@@ -552,7 +552,7 @@ void et131x_multicast(struct net_device
 			    "NO UPDATE REQUIRED, FLAGS didn't change\n");
 	}
 
-	spin_unlock_irqrestore(&adapter->Lock, lockflags);
+	spin_unlock_irqrestore(&adapter->Lock, flags);
 
 	DBG_LEAVE(et131x_dbginfo);
 }
@@ -610,7 +610,7 @@ void et131x_tx_timeout(struct net_device
 {
 	struct et131x_adapter *etdev = netdev_priv(netdev);
 	PMP_TCB pMpTcb;
-	unsigned long lockflags;
+	unsigned long flags;
 
 	DBG_WARNING(et131x_dbginfo, "TX TIMEOUT\n");
 
@@ -635,7 +635,7 @@ void et131x_tx_timeout(struct net_device
 	}
 
 	/* Is send stuck? */
-	spin_lock_irqsave(&etdev->TCBSendQLock, lockflags);
+	spin_lock_irqsave(&etdev->TCBSendQLock, flags);
 
 	pMpTcb = etdev->TxRing.CurrSendHead;
 
@@ -660,7 +660,7 @@ void et131x_tx_timeout(struct net_device
 		}
 
 		spin_unlock_irqrestore(&etdev->TCBSendQLock,
-				       lockflags);
+				       flags);
 
 		DBG_WARNING(et131x_dbginfo,
 			    "Send stuck - reset. pMpTcb->WrIndex %x, Flags 0x%08x\n",
@@ -689,7 +689,7 @@ void et131x_tx_timeout(struct net_device
 		}
 	}
 
-	spin_unlock_irqrestore(&etdev->TCBSendQLock, lockflags);
+	spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
 }
 
 /**
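
For reference, the locking pattern the patch converts to looks like this in
isolation (a minimal sketch, not code from this driver; the lock and function
names below are made up for illustration). Only the outermost acquisition
needs to save and restore the interrupt state; a lock taken while interrupts
are already disabled can use plain spin_lock()/spin_unlock(), so a single
"flags" word is enough:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(outer_lock);	/* plays the role of e.g. SendHWLock */
static DEFINE_SPINLOCK(inner_lock);	/* plays the role of e.g. TCBSendQLock */

static void queue_and_kick_hardware(void)
{
	unsigned long flags;		/* one flags word for the whole section */

	spin_lock_irqsave(&outer_lock, flags);	/* disables local IRQs */

	/* ... set up the descriptors ... */

	spin_lock(&inner_lock);		/* IRQs already off: no second irqsave */
	/* ... put the entry on the in-flight list ... */
	spin_unlock(&inner_lock);

	spin_unlock_irqrestore(&outer_lock, flags);
}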