GIT a358d9f3786b8920b16b5d4c1bf6f52a1236d07a git://electric-eye.fr.zoreil.com/home/romieu/linux-2.6.git#qla3xxx

commit
Author: Francois Romieu
Date:   Wed Dec 27 21:27:24 2006 +0100

    qla3xxx: propagate the return status of ql_sem_spinlock

    Signed-off-by: Francois Romieu

commit e68accc157345b867f4a885514732b6dc1246236
Author: Francois Romieu
Date:   Fri Dec 22 15:17:49 2006 +0100

    qla3xxx: trim #include list

    Signed-off-by: Francois Romieu

commit 06cd0d4869fd7340b72f324e0035a539e0317ad9
Author: Francois Romieu
Date:   Fri Dec 22 15:13:59 2006 +0100

    qla3xxx: minor ql3xxx_probe changes

    - ql_display_dev_info
      - display driver and device name on each line.
    - ql3xxx_probe
      - remove unneeded initializations
      - ql3_adapter.index is almost useless. Use ql_version_printed instead
      - error path review. No problem.

    Signed-off-by: Francois Romieu

commit 9e4389c97508c808235f0bf76f701f9b80190e35
Author: Francois Romieu
Date:   Fri Dec 22 14:56:03 2006 +0100

    qla3xxx: factor out ql_write_page[012]_reg

    Signed-off-by: Francois Romieu

commit 2548f0be9491aace84e8f52082038aca662f9c2c
Author: Francois Romieu
Date:   Fri Dec 22 14:52:49 2006 +0100

    qla3xxx: prettify ql_populate_free_queue

    It is shorter too.

    Signed-off-by: Francois Romieu

commit 91f39ee0c47409730a76d59bb0b07081db2972a4
Author: Francois Romieu
Date:   Fri Dec 22 14:44:47 2006 +0100

    qla3xxx: parenthesis diet

    Signed-off-by: Francois Romieu

commit d2dee30034c25acca94144032c5b25c8ac0fbf6d
Author: Francois Romieu
Date:   Fri Dec 22 13:56:51 2006 +0100

    qla3xxx: add local variables to access the registers

    It makes the code shorter and helps with indentation.

    Signed-off-by: Francois Romieu

commit 6cd4980d423d66a217f2684c12dcda178efa2e25
Author: Francois Romieu
Date:   Thu Dec 21 18:57:16 2006 +0100

    qla3xxx: add/remove spaces (codingstyle)

    Signed-off-by: Francois Romieu

commit e0bfaa59a8807b991baa9ac3da4f973bef1f223e
Author: Francois Romieu
Date:   Thu Dec 21 15:06:18 2006 +0100

    qla3xxx: remove return statements in void returning functions

commit 9f4685ecacc4f9f5583b44bafd8d654aadc723a4
Author: Francois Romieu
Date:   Fri Dec 22 13:11:39 2006 +0100

    qla3xxx: rename 'mem_map_registers' to 'ioaddr'

    It is just the usual netdev ioremapped area.

    Signed-off-by: Francois Romieu

commit e992b944d600e5030cbc0d3b777a16ff7de73a78
Author: Francois Romieu
Date:   Thu Sep 28 23:01:12 2006 +0200

    qla3xxx: remove unused field in struct ql3_adapter

    The symbol does not appear anywhere else in the kernel sources, nor
    does it prevent the driver from compiling.

    Signed-off-by: Francois Romieu

commit 2018a1c0f2b6a70f5943f37a5b22c43fc8871900
Author: Francois Romieu
Date:   Fri Dec 22 13:06:51 2006 +0100

    qla3xxx: rename 'hw_flags' to 'flags' for use with spinlock (cosmetic)

    Idiom.

    Signed-off-by: Francois Romieu

commit 94b2a3706c183ec27308009add22a132bb8c5003
Author: Francois Romieu
Date:   Fri Dec 22 13:05:26 2006 +0100

    qla3xxx: prettify ql_reset_work()

    Signed-off-by: Francois Romieu

commit f48fb30a6bcd529e6861a7d7f70dd504eaf0316b
Author: Francois Romieu
Date:   Wed Dec 20 22:42:46 2006 +0100

    qla3xxx: de-obfuscate ql_wait_for_drvr_lock()

    Signed-off-by: Francois Romieu

commit 74a25df9a62103bf3c1708a07d81e66a027cf183
Author: Francois Romieu
Date:   Tue Sep 26 00:21:22 2006 +0200

    qla3xxx: tab/space and duplicate variable

    Signed-off-by: Francois Romieu

commit 0cf14ffc0c674888f74848c78101d3531ef926c9
Author: Francois Romieu
Date:   Wed Dec 20 22:34:14 2006 +0100

    qla3xxx: remove cast to void *

    Signed-off-by: Francois Romieu

commit 1fd99a34de319b37e4b600ea6effbe5c7045d5bf
Author: Francois Romieu
Date:   Tue Sep 26 00:21:22 2006 +0200

    qla3xxx: comment broken ssleep

    They are issued with a spinlock held.

    Signed-off-by: Francois Romieu

commit 1c248e0ef2eccb91779523520eec7479d919fab2
Author: Francois Romieu
Date:   Wed Dec 20 22:27:50 2006 +0100

    qla3xxx: netdev_priv() use and duplicate initialization

    Signed-off-by: Francois Romieu

commit 66692f86d03a0664a7c6741252f59132fe943441
Author: Francois Romieu
Date:   Wed Dec 20 22:26:38 2006 +0100

    qla3xxx: needless casts

    Signed-off-by: Francois Romieu

commit c649ab5317414dab412c2a5b2284f858c8d7d679
Author: Ron Mercer
Date:   Tue Dec 19 10:22:37 2006 -0800

    qla3xxx: Add delay to NVRAM register access

    Some platforms showed issues when no delay was used. This code only
    runs during the probe.

    Signed-off-by: Ron Mercer

commit ce4f757aa717340a18f5e159b0108b5b20e92c49
Author: Ron Mercer
Date:   Tue Dec 19 10:22:36 2006 -0800

    qla3xxx: Change version to v2.03.00-k2

    Signed-off-by: Ron Mercer

commit 377907ca2c8fcac25e1b65392ab706b84ff51758
Author: Ron Mercer
Date:   Tue Dec 19 22:48:35 2006 +0100

    qla3xxx: Add support for Qlogic 4032 chip

    Signed-off-by: Ron Mercer

commit 052b32a34908edf965e427b060e1186f274ac664
Author: Ron Mercer
Date:   Tue Dec 19 10:22:38 2006 -0800

    qla3xxx: Remove NETIF_F_LLTX from driver features

    The TX locking was removed some time ago but this flag was overlooked.
Signed-off-by: Ron Mercer drivers/net/qla3xxx.c | 1645 ++++++++++++++++++++++++++----------------------- drivers/net/qla3xxx.h | 96 ++- 2 files changed, 957 insertions(+), 784 deletions(-) diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c index d79d141..aa41d4c 100644 --- a/drivers/net/qla3xxx.c +++ b/drivers/net/qla3xxx.c @@ -9,36 +9,28 @@ #include #include #include #include -#include #include #include -#include -#include -#include -#include #include #include #include #include #include #include -#include -#include +#include #include -#include #include #include #include #include -#include #include #include #include "qla3xxx.h" -#define DRV_NAME "qla3xxx" -#define DRV_STRING "QLogic ISP3XXX Network Driver" -#define DRV_VERSION "v2.02.00-k36" +#define DRV_NAME "qla3xxx" +#define DRV_STRING "QLogic ISP3XXX Network Driver" +#define DRV_VERSION "v2.03.00-k2" #define PFX DRV_NAME " " static const char ql3xxx_driver_name[] = DRV_NAME; @@ -62,9 +54,10 @@ module_param(msi, int, 0); MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts."); static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = { - {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)}, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID) }, /* required last entry */ - {0,} + { 0, } }; MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl); @@ -72,39 +65,36 @@ MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl) /* * Caller must take hw_lock. */ -static int ql_sem_spinlock(struct ql3_adapter *qdev, - u32 sem_mask, u32 sem_bits) +static int ql_sem_spinlock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits) { - struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; - u32 value; + u32 __iomem *reg = &qdev->ioaddr->CommonRegs.semaphoreReg; unsigned int seconds = 3; + u32 value; do { - writel((sem_mask | sem_bits), - &port_regs->CommonRegs.semaphoreReg); - value = readl(&port_regs->CommonRegs.semaphoreReg); + writel(sem_mask | sem_bits, reg); + value = readl(reg); if ((value & (sem_mask >> 16)) == sem_bits) return 0; ssleep(1); - } while(--seconds); - return -1; + } while (--seconds); + return -EBUSY; } static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask) { - struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; - writel(sem_mask, &port_regs->CommonRegs.semaphoreReg); - readl(&port_regs->CommonRegs.semaphoreReg); + u32 __iomem *reg = &qdev->ioaddr->CommonRegs.semaphoreReg; + + writel(sem_mask, reg); + readl(reg); } static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits) { - struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; - u32 value; + u32 __iomem *reg = &qdev->ioaddr->CommonRegs.semaphoreReg; - writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg); - value = readl(&port_regs->CommonRegs.semaphoreReg); - return ((value & (sem_mask >> 16)) == sem_bits); + writel(sem_mask | sem_bits, reg); + return (readl(reg) & (sem_mask >> 16)) == sem_bits; } /* @@ -112,92 +102,79 @@ static int ql_sem_lock(struct ql3_adapte */ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev) { - int i = 0; + u32 bits = (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index * 2)) << 1; + unsigned int i; - while (1) { - if (!ql_sem_lock(qdev, - QL_DRVR_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) - * 2) << 1)) { - if (i < 10) { - ssleep(1); - i++; - } else { - printk(KERN_ERR PFX "%s: Timed out waiting for " - "driver lock...\n", - qdev->ndev->name); - return 0; - } - } else { - 
printk(KERN_DEBUG PFX - "%s: driver lock acquired.\n", + for (i = 0; i < 10; i++) { + if (ql_sem_lock(qdev, QL_DRVR_SEM_MASK, bits)) { + printk(KERN_DEBUG PFX "%s: driver lock acquired.\n", qdev->ndev->name); return 1; } + ssleep(1); } + printk(KERN_ERR PFX "%s: Timed out waiting for driver lock...\n", + qdev->ndev->name); + return 0; } static void ql_set_register_page(struct ql3_adapter *qdev, u32 page) { - struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; + u32 __iomem *reg = &qdev->ioaddr->CommonRegs.ispControlStatus; - writel(((ISP_CONTROL_NP_MASK << 16) | page), - &port_regs->CommonRegs.ispControlStatus); - readl(&port_regs->CommonRegs.ispControlStatus); + writel((ISP_CONTROL_NP_MASK << 16) | page, reg); + readl(reg); qdev->current_page = page; } -static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, - u32 __iomem * reg) +static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem * reg) { + unsigned long flags; u32 value; - unsigned long hw_flags; - spin_lock_irqsave(&qdev->hw_lock, hw_flags); + spin_lock_irqsave(&qdev->hw_lock, flags); value = readl(reg); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + spin_unlock_irqrestore(&qdev->hw_lock, flags); return value; } -static u32 ql_read_common_reg(struct ql3_adapter *qdev, - u32 __iomem * reg) +static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem * reg) { return readl(reg); } static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg) { + unsigned long flags; u32 value; - unsigned long hw_flags; - spin_lock_irqsave(&qdev->hw_lock, hw_flags); + spin_lock_irqsave(&qdev->hw_lock, flags); if (qdev->current_page != 0) - ql_set_register_page(qdev,0); + ql_set_register_page(qdev, 0); value = readl(reg); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + spin_unlock_irqrestore(&qdev->hw_lock, flags); return value; } static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg) { if (qdev->current_page != 0) - ql_set_register_page(qdev,0); + ql_set_register_page(qdev, 0); return readl(reg); } static void ql_write_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg, u32 value) { - unsigned long hw_flags; + unsigned long flags; - spin_lock_irqsave(&qdev->hw_lock, hw_flags); + spin_lock_irqsave(&qdev->hw_lock, flags); writel(value, reg); readl(reg); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - return; + spin_unlock_irqrestore(&qdev->hw_lock, flags); } static void ql_write_common_reg(struct ql3_adapter *qdev, @@ -205,17 +182,29 @@ static void ql_write_common_reg(struct q { writel(value, reg); readl(reg); - return; } -static void ql_write_page0_reg(struct ql3_adapter *qdev, - u32 __iomem *reg, u32 value) +static void ql_write_nvram_reg(struct ql3_adapter *qdev, + u32 __iomem *reg, u32 value) { - if (qdev->current_page != 0) - ql_set_register_page(qdev,0); writel(value, reg); readl(reg); - return; + udelay(1); +} + +static void ql_write_page_reg(struct ql3_adapter *qdev, + u32 __iomem *reg, u32 value, unsigned int page) +{ + if (qdev->current_page != page) + ql_set_register_page(qdev, page); + writel(value, reg); + readl(reg); +} + +static void ql_write_page0_reg(struct ql3_adapter *qdev, + u32 __iomem *reg, u32 value) +{ + ql_write_page_reg(qdev, reg, value, 0); } /* @@ -224,11 +213,7 @@ static void ql_write_page0_reg(struct ql static void ql_write_page1_reg(struct ql3_adapter *qdev, u32 __iomem *reg, u32 value) { - if (qdev->current_page != 1) - ql_set_register_page(qdev,1); - writel(value, reg); - readl(reg); - return; + ql_write_page_reg(qdev, 
reg, value, 1); } /* @@ -237,29 +222,21 @@ static void ql_write_page1_reg(struct ql static void ql_write_page2_reg(struct ql3_adapter *qdev, u32 __iomem *reg, u32 value) { - if (qdev->current_page != 2) - ql_set_register_page(qdev,2); - writel(value, reg); - readl(reg); - return; + ql_write_page_reg(qdev, reg, value, 2); } static void ql_disable_interrupts(struct ql3_adapter *qdev) { - struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; - - ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, - (ISP_IMR_ENABLE_INT << 16)); + u32 __iomem *reg = &qdev->ioaddr->CommonRegs.ispInterruptMaskReg; + ql_write_common_reg_l(qdev, reg, ISP_IMR_ENABLE_INT << 16); } static void ql_enable_interrupts(struct ql3_adapter *qdev) { - struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; - - ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, - ((0xff << 16) | ISP_IMR_ENABLE_INT)); + u32 __iomem *reg = &qdev->ioaddr->CommonRegs.ispInterruptMaskReg; + ql_write_common_reg_l(qdev, reg, (0xff << 16) | ISP_IMR_ENABLE_INT); } static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev, @@ -309,10 +286,11 @@ static void ql_release_to_lrg_buf_free_l static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter *qdev) { - struct ql_rcv_buf_cb *lrg_buf_cb; + struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head; - if ((lrg_buf_cb = qdev->lrg_buf_free_head) != NULL) { - if ((qdev->lrg_buf_free_head = lrg_buf_cb->next) == NULL) + if (lrg_buf_cb) { + qdev->lrg_buf_free_head = lrg_buf_cb->next; + if (!qdev->lrg_buf_free_head) qdev->lrg_buf_free_tail = NULL; qdev->lrg_buf_free_count--; } @@ -332,14 +310,12 @@ static void eeprom_readword(struct ql3_a */ static void fm93c56a_select(struct ql3_adapter *qdev) { - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + u32 __iomem *reg = &qdev->ioaddr->CommonRegs.serialPortInterfaceReg; qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1; - ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, - ISP_NVRAM_MASK | qdev->eeprom_cmd_data); - ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, - ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data)); + ql_write_nvram_reg(qdev, reg, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); + ql_write_nvram_reg(qdev, reg, + (ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data); } /* @@ -347,25 +323,19 @@ static void fm93c56a_select(struct ql3_a */ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr) { + u32 __iomem *reg = &qdev->ioaddr->CommonRegs.serialPortInterfaceReg; int i; u32 mask; u32 dataBit; u32 previousBit; - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; /* Clock in a zero, then do the start bit */ - ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, - ISP_NVRAM_MASK | qdev->eeprom_cmd_data | - AUBURN_EEPROM_DO_1); - ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, - ISP_NVRAM_MASK | qdev-> - eeprom_cmd_data | AUBURN_EEPROM_DO_1 | - AUBURN_EEPROM_CLK_RISE); - ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, - ISP_NVRAM_MASK | qdev-> - eeprom_cmd_data | AUBURN_EEPROM_DO_1 | - AUBURN_EEPROM_CLK_FALL); + ql_write_nvram_reg(qdev, reg, ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + AUBURN_EEPROM_DO_1); + ql_write_nvram_reg(qdev, reg, ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE); + ql_write_nvram_reg(qdev, reg, ISP_NVRAM_MASK | 
qdev->eeprom_cmd_data | + AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL); mask = 1 << (FM93C56A_CMD_BITS - 1); /* Force the previous data bit to be different */ @@ -378,25 +348,16 @@ static void fm93c56a_cmd(struct ql3_adap * If the bit changed, then change the DO state to * match */ - ql_write_common_reg(qdev, - &port_regs->CommonRegs. - serialPortInterfaceReg, - ISP_NVRAM_MASK | qdev-> - eeprom_cmd_data | dataBit); + ql_write_nvram_reg(qdev, reg, ISP_NVRAM_MASK | + qdev->eeprom_cmd_data | dataBit); previousBit = dataBit; } - ql_write_common_reg(qdev, - &port_regs->CommonRegs. - serialPortInterfaceReg, - ISP_NVRAM_MASK | qdev-> - eeprom_cmd_data | dataBit | - AUBURN_EEPROM_CLK_RISE); - ql_write_common_reg(qdev, - &port_regs->CommonRegs. - serialPortInterfaceReg, - ISP_NVRAM_MASK | qdev-> - eeprom_cmd_data | dataBit | - AUBURN_EEPROM_CLK_FALL); + ql_write_nvram_reg(qdev, reg, ISP_NVRAM_MASK | + qdev->eeprom_cmd_data | dataBit | + AUBURN_EEPROM_CLK_RISE); + ql_write_nvram_reg(qdev, reg, ISP_NVRAM_MASK | + qdev->eeprom_cmd_data | dataBit | + AUBURN_EEPROM_CLK_FALL); cmd = cmd << 1; } @@ -404,33 +365,23 @@ static void fm93c56a_cmd(struct ql3_adap /* Force the previous data bit to be different */ previousBit = 0xffff; for (i = 0; i < addrBits; i++) { - dataBit = - (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 : - AUBURN_EEPROM_DO_0; + dataBit = (eepromAddr & mask) ? + AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0; if (previousBit != dataBit) { /* * If the bit changed, then change the DO state to * match */ - ql_write_common_reg(qdev, - &port_regs->CommonRegs. - serialPortInterfaceReg, - ISP_NVRAM_MASK | qdev-> - eeprom_cmd_data | dataBit); + ql_write_nvram_reg(qdev, reg, ISP_NVRAM_MASK | + qdev->eeprom_cmd_data | dataBit); previousBit = dataBit; } - ql_write_common_reg(qdev, - &port_regs->CommonRegs. - serialPortInterfaceReg, - ISP_NVRAM_MASK | qdev-> - eeprom_cmd_data | dataBit | - AUBURN_EEPROM_CLK_RISE); - ql_write_common_reg(qdev, - &port_regs->CommonRegs. - serialPortInterfaceReg, - ISP_NVRAM_MASK | qdev-> - eeprom_cmd_data | dataBit | - AUBURN_EEPROM_CLK_FALL); + ql_write_nvram_reg(qdev, reg, ISP_NVRAM_MASK | + qdev-> eeprom_cmd_data | dataBit | + AUBURN_EEPROM_CLK_RISE); + ql_write_nvram_reg(qdev, reg, ISP_NVRAM_MASK | + qdev->eeprom_cmd_data | dataBit | + AUBURN_EEPROM_CLK_FALL); eepromAddr = eepromAddr << 1; } } @@ -440,11 +391,10 @@ static void fm93c56a_cmd(struct ql3_adap */ static void fm93c56a_deselect(struct ql3_adapter *qdev) { - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + u32 __iomem *reg = &qdev->ioaddr->CommonRegs.serialPortInterfaceReg; + qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0; - ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, - ISP_NVRAM_MASK | qdev->eeprom_cmd_data); + ql_write_nvram_reg(qdev, reg, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); } /* @@ -452,31 +402,22 @@ static void fm93c56a_deselect(struct ql3 */ static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value) { - int i; + u32 __iomem *reg = &qdev->ioaddr->CommonRegs.serialPortInterfaceReg; + unsigned int i; u32 data = 0; u32 dataBit; - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; /* Read the data bits */ /* The first bit is a dummy. Clock right over it. */ for (i = 0; i < dataBits; i++) { - ql_write_common_reg(qdev, - &port_regs->CommonRegs. - serialPortInterfaceReg, - ISP_NVRAM_MASK | qdev->eeprom_cmd_data | - AUBURN_EEPROM_CLK_RISE); - ql_write_common_reg(qdev, - &port_regs->CommonRegs. 
- serialPortInterfaceReg, - ISP_NVRAM_MASK | qdev->eeprom_cmd_data | - AUBURN_EEPROM_CLK_FALL); - dataBit = - (ql_read_common_reg - (qdev, - &port_regs->CommonRegs. - serialPortInterfaceReg) & AUBURN_EEPROM_DI_1) ? 1 : 0; - data = (data << 1) | dataBit; + ql_write_nvram_reg(qdev, reg, ISP_NVRAM_MASK | + qdev->eeprom_cmd_data | + AUBURN_EEPROM_CLK_RISE); + ql_write_nvram_reg(qdev, reg, ISP_NVRAM_MASK | + qdev->eeprom_cmd_data | + AUBURN_EEPROM_CLK_FALL); + dataBit = ql_read_common_reg(qdev, reg) & AUBURN_EEPROM_DI_1; + data = ((data ? 1 : 0) << 1) | dataBit; } *value = (u16) data; } @@ -511,22 +452,22 @@ #endif static int ql_get_nvram_params(struct ql3_adapter *qdev) { + u32 bits = (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index * 2)) << 10; + unsigned long flags; u16 *pEEPROMData; u16 checksum = 0; u32 index; - unsigned long hw_flags; + int rc; - spin_lock_irqsave(&qdev->hw_lock, hw_flags); + spin_lock_irqsave(&qdev->hw_lock, flags); pEEPROMData = (u16 *) & qdev->nvram_data; qdev->eeprom_cmd_data = 0; - if(ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * - 2) << 10)) { + rc = ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK, bits); + if (rc < 0) { printk(KERN_ERR PFX"%s: Failed ql_sem_spinlock().\n", __func__); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - return -1; + goto out_unlock; } for (index = 0; index < EEPROM_SIZE; index++) { @@ -539,8 +480,8 @@ static int ql_get_nvram_params(struct ql if (checksum != 0) { printk(KERN_ERR PFX "%s: checksum should be zero, is %x!!\n", qdev->ndev->name, checksum); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - return -1; + rc = -EIO; + goto out_unlock; } /* @@ -555,8 +496,10 @@ static int ql_get_nvram_params(struct ql pEEPROMData = (u16 *) & qdev->nvram_data.version; *pEEPROMData = le16_to_cpu(*pEEPROMData); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - return checksum; +out_unlock: + spin_unlock_irqrestore(&qdev->hw_lock, flags); + + return rc; } static const u32 PHYAddr[2] = { @@ -565,30 +508,28 @@ static const u32 PHYAddr[2] = { static int ql_wait_for_mii_ready(struct ql3_adapter *qdev) { - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u32 temp; - int count = 1000; + u32 __iomem *reg = &qdev->ioaddr->macMIIStatusReg; + unsigned int i; - while (count) { - temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg); - if (!(temp & MAC_MII_STATUS_BSY)) + for (i = 0; i < 1000; i++) { + if (!(ql_read_page0_reg(qdev, reg) & MAC_MII_STATUS_BSY)) return 0; udelay(10); - count--; } return -1; } +#define MAC_MII_CONTROL_SC_AS (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC) + static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev) { - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + u32 __iomem *ctrl = &qdev->ioaddr->macMIIMgmtControlReg; + u32 __iomem *addr = &qdev->ioaddr->macMIIMgmtAddrReg; u32 scanControl; if (qdev->numPorts > 1) { /* Auto scan will cycle through multiple ports */ - scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC; + scanControl = MAC_MII_CONTROL_SC_AS; } else { scanControl = MAC_MII_CONTROL_SC; } @@ -599,40 +540,27 @@ static void ql_mii_enable_scan_mode(stru * The autoscan starts from the first register, completes * the last one before rolling over to the first */ - ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, - PHYAddr[0] | MII_SCAN_REGISTER); - - ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, - (scanControl) | - ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16)); + 
ql_write_page0_reg(qdev, addr, PHYAddr[0] | MII_SCAN_REGISTER); + ql_write_page0_reg(qdev, ctrl, + scanControl | (MAC_MII_CONTROL_SC_AS << 16)); } static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev) { + u32 __iomem *ctrl = &qdev->ioaddr->macMIIMgmtControlReg; + u32 __iomem *addr = &qdev->ioaddr->macMIIMgmtAddrReg; u8 ret; - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - - /* See if scan mode is enabled before we turn it off */ - if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) & - (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) { - /* Scan is enabled */ - ret = 1; - } else { - /* Scan is disabled */ - ret = 0; - } + + /* See if scan mode is enabled (1) or not (0) before we turn it off */ + ret = (ql_read_page0_reg(qdev, ctrl) & MAC_MII_CONTROL_SC_AS) ? 1 : 0; /* * When disabling scan mode you must first change the MII register * address */ - ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, - PHYAddr[0] | MII_SCAN_REGISTER); - - ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, - ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS | - MAC_MII_CONTROL_RC) << 16)); + ql_write_page0_reg(qdev, addr, PHYAddr[0] | MII_SCAN_REGISTER); + ql_write_page0_reg(qdev, ctrl, + (MAC_MII_CONTROL_SC_AS | MAC_MII_CONTROL_RC) << 16); return ret; } @@ -640,8 +568,8 @@ static u8 ql_mii_disable_scan_mode(struc static int ql_mii_write_reg_ex(struct ql3_adapter *qdev, u16 regAddr, u16 value, u32 mac_index) { - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + u32 __iomem *addr = &qdev->ioaddr->macMIIMgmtAddrReg; + u32 __iomem *data = &qdev->ioaddr->macMIIMgmtDataReg; u8 scanWasEnabled; scanWasEnabled = ql_mii_disable_scan_mode(qdev); @@ -655,10 +583,8 @@ static int ql_mii_write_reg_ex(struct ql return -1; } - ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, - PHYAddr[mac_index] | regAddr); - - ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value); + ql_write_page0_reg(qdev, addr, PHYAddr[mac_index] | regAddr); + ql_write_page0_reg(qdev, data, value); /* Wait for write to complete 9/10/04 SJP */ if (ql_wait_for_mii_ready(qdev)) { @@ -679,8 +605,7 @@ static int ql_mii_write_reg_ex(struct ql static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr, u16 * value, u32 mac_index) { - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + struct ql3xxx_port_registers __iomem *port_regs = qdev->ioaddr; u8 scanWasEnabled; u32 temp; @@ -699,7 +624,7 @@ static int ql_mii_read_reg_ex(struct ql3 PHYAddr[mac_index] | regAddr); ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, - (MAC_MII_CONTROL_RC << 16)); + MAC_MII_CONTROL_RC << 16); ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC); @@ -725,8 +650,8 @@ static int ql_mii_read_reg_ex(struct ql3 static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value) { - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + u32 __iomem *addr = &qdev->ioaddr->macMIIMgmtAddrReg; + u32 __iomem *data = &qdev->ioaddr->macMIIMgmtDataReg; ql_mii_disable_scan_mode(qdev); @@ -739,10 +664,8 @@ static int ql_mii_write_reg(struct ql3_a return -1; } - ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, - qdev->PHYAddr | regAddr); - - ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value); + ql_write_page0_reg(qdev, addr, qdev->PHYAddr | regAddr); + ql_write_page0_reg(qdev, data, value); /* Wait for write to complete. 
*/ if (ql_wait_for_mii_ready(qdev)) { @@ -761,9 +684,8 @@ static int ql_mii_write_reg(struct ql3_a static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value) { + struct ql3xxx_port_registers __iomem *port_regs = qdev->ioaddr; u32 temp; - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; ql_mii_disable_scan_mode(qdev); @@ -780,7 +702,7 @@ static int ql_mii_read_reg(struct ql3_ad qdev->PHYAddr | regAddr); ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, - (MAC_MII_CONTROL_RC << 16)); + MAC_MII_CONTROL_RC << 16); ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC); @@ -879,7 +801,7 @@ static int ql_phy_get_speed(struct ql3_a if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, ®) < 0) return 0; - reg = (((reg & 0x18) >> 3) & 3); + reg = ((reg & 0x18) >> 3) & 3; if (reg == 2) return SPEED_1000; @@ -916,14 +838,11 @@ static int ql_is_phy_neg_pause(struct ql */ static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable) { - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u32 value; + struct ql3xxx_port_registers __iomem *port_regs = qdev->ioaddr; + u32 value = MAC_CONFIG_REG_PE << 16; if (enable) - value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16)); - else - value = (MAC_CONFIG_REG_PE << 16); + value |= MAC_CONFIG_REG_PE; if (qdev->mac_index) ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); @@ -936,14 +855,11 @@ static void ql_mac_enable(struct ql3_ada */ static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable) { - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u32 value; + struct ql3xxx_port_registers __iomem *port_regs = qdev->ioaddr; + u32 value = MAC_CONFIG_REG_SR << 16; if (enable) - value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16)); - else - value = (MAC_CONFIG_REG_SR << 16); + value |= MAC_CONFIG_REG_SR; if (qdev->mac_index) ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); @@ -956,14 +872,11 @@ static void ql_mac_cfg_soft_reset(struct */ static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable) { - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u32 value; + struct ql3xxx_port_registers __iomem *port_regs = qdev->ioaddr; + u32 value = MAC_CONFIG_REG_GM << 16; if (enable) - value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16)); - else - value = (MAC_CONFIG_REG_GM << 16); + value |= MAC_CONFIG_REG_GM; if (qdev->mac_index) ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); @@ -976,14 +889,11 @@ static void ql_mac_cfg_gig(struct ql3_ad */ static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable) { - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u32 value; + struct ql3xxx_port_registers __iomem *port_regs = qdev->ioaddr; + u32 value = MAC_CONFIG_REG_FD << 16; if (enable) - value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16)); - else - value = (MAC_CONFIG_REG_FD << 16); + value |= MAC_CONFIG_REG_FD; if (qdev->mac_index) ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); @@ -996,8 +906,7 @@ static void ql_mac_cfg_full_dup(struct q */ static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable) { - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + struct ql3xxx_port_registers __iomem *port_regs = qdev->ioaddr; u32 value; if (enable) @@ -1018,8 +927,7 @@ static void ql_mac_cfg_pause(struct ql3_ */ static int ql_is_fiber(struct ql3_adapter 
*qdev) { - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + struct ql3xxx_port_registers __iomem *port_regs = qdev->ioaddr; u32 bitToCheck = 0; u32 temp; @@ -1048,8 +956,7 @@ static int ql_is_auto_cfg(struct ql3_ada */ static int ql_is_auto_neg_complete(struct ql3_adapter *qdev) { - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + struct ql3xxx_port_registers __iomem *port_regs = qdev->ioaddr; u32 bitToCheck = 0; u32 temp; @@ -1091,8 +998,7 @@ static int ql_is_neg_pause(struct ql3_ad static int ql_auto_neg_error(struct ql3_adapter *qdev) { - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + struct ql3xxx_port_registers __iomem *port_regs = qdev->ioaddr; u32 bitToCheck = 0; u32 temp; @@ -1129,10 +1035,8 @@ static int ql_is_link_full_dup(struct ql */ static int ql_link_down_detect(struct ql3_adapter *qdev) { - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + u32 __iomem *reg = &qdev->ioaddr->CommonRegs.ispControlStatus; u32 bitToCheck = 0; - u32 temp; switch (qdev->mac_index) { case 0: @@ -1143,9 +1047,7 @@ static int ql_link_down_detect(struct ql break; } - temp = - ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus); - return (temp & bitToCheck) != 0; + return (ql_read_common_reg(qdev, reg) & bitToCheck) != 0; } /* @@ -1153,21 +1055,16 @@ static int ql_link_down_detect(struct ql */ static int ql_link_down_detect_clear(struct ql3_adapter *qdev) { - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + u32 __iomem *reg = &qdev->ioaddr->CommonRegs.ispControlStatus; switch (qdev->mac_index) { case 0: - ql_write_common_reg(qdev, - &port_regs->CommonRegs.ispControlStatus, - (ISP_CONTROL_LINK_DN_0) | + ql_write_common_reg(qdev, reg, ISP_CONTROL_LINK_DN_0 | (ISP_CONTROL_LINK_DN_0 << 16)); break; case 1: - ql_write_common_reg(qdev, - &port_regs->CommonRegs.ispControlStatus, - (ISP_CONTROL_LINK_DN_1) | + ql_write_common_reg(qdev, reg, ISP_CONTROL_LINK_DN_1 | (ISP_CONTROL_LINK_DN_1 << 16)); break; @@ -1184,8 +1081,6 @@ static int ql_link_down_detect_clear(str static int ql_this_adapter_controls_port(struct ql3_adapter *qdev, u32 mac_index) { - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; u32 bitToCheck = 0; u32 temp; @@ -1200,7 +1095,7 @@ static int ql_this_adapter_controls_port break; } - temp = ql_read_page0_reg(qdev, &port_regs->portStatus); + temp = ql_read_page0_reg(qdev, &qdev->ioaddr->portStatus); if (temp & bitToCheck) { if (netif_msg_link(qdev)) printk(KERN_DEBUG PFX @@ -1242,8 +1137,6 @@ static void ql_phy_init_ex(struct ql3_ad */ static u32 ql_get_link_state(struct ql3_adapter *qdev) { - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; u32 bitToCheck = 0; u32 temp, linkState; @@ -1255,7 +1148,7 @@ static u32 ql_get_link_state(struct ql3_ bitToCheck = PORT_STATUS_UP1; break; } - temp = ql_read_page0_reg(qdev, &port_regs->portStatus); + temp = ql_read_page0_reg(qdev, &qdev->ioaddr->portStatus); if (temp & bitToCheck) { linkState = LS_UP; } else { @@ -1269,10 +1162,12 @@ static u32 ql_get_link_state(struct ql3_ static int ql_port_start(struct ql3_adapter *qdev) { - if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * - 2) << 7)) - return -1; + u32 bits = (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index * 2)) << 7; + int rc; + + rc = ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, bits); + if (rc < 0) + goto out; if (ql_is_fiber(qdev)) { 
ql_petbi_init(qdev); @@ -1282,16 +1177,18 @@ static int ql_port_start(struct ql3_adap } ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); - return 0; +out: + return rc; } static int ql_finish_auto_neg(struct ql3_adapter *qdev) { + u32 bits = (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index * 2)) << 7; + int rc; - if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * - 2) << 7)) - return -1; + rc = ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, bits); + if (rc < 0) + goto out; if (!ql_auto_neg_error(qdev)) { if (test_bit(QL_LINK_MASTER,&qdev->flags)) { @@ -1299,27 +1196,19 @@ static int ql_finish_auto_neg(struct ql3 if (netif_msg_link(qdev)) printk(KERN_DEBUG PFX "%s: Configuring link.\n", - qdev->ndev-> - name); + qdev->ndev->name); ql_mac_cfg_soft_reset(qdev, 1); ql_mac_cfg_gig(qdev, - (ql_get_link_speed - (qdev) == - SPEED_1000)); - ql_mac_cfg_full_dup(qdev, - ql_is_link_full_dup - (qdev)); - ql_mac_cfg_pause(qdev, - ql_is_neg_pause - (qdev)); + (ql_get_link_speed(qdev) == SPEED_1000)); + ql_mac_cfg_full_dup(qdev, ql_is_link_full_dup(qdev)); + ql_mac_cfg_pause(qdev, ql_is_neg_pause(qdev)); ql_mac_cfg_soft_reset(qdev, 0); /* enable the MAC */ if (netif_msg_link(qdev)) printk(KERN_DEBUG PFX "%s: Enabling mac.\n", - qdev->ndev-> - name); + qdev->ndev->name); ql_mac_enable(qdev, 1); } @@ -1340,48 +1229,49 @@ static int ql_finish_auto_neg(struct ql3 } else { /* Remote error detected */ - if (test_bit(QL_LINK_MASTER,&qdev->flags)) { + if (test_bit(QL_LINK_MASTER, &qdev->flags)) { if (netif_msg_link(qdev)) printk(KERN_DEBUG PFX "%s: Remote error detected. " "Calling ql_port_start().\n", - qdev->ndev-> - name); + qdev->ndev->name); /* * ql_port_start() is shared code and needs * to lock the PHY on it's own. */ ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); - if(ql_port_start(qdev)) {/* Restart port */ + if (ql_port_start(qdev)) {/* Restart port */ return -1; } else return 0; } } ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); - return 0; +out: + return rc; } static void ql_link_state_machine(struct ql3_adapter *qdev) { u32 curr_link_state; - unsigned long hw_flags; + unsigned long flags; - spin_lock_irqsave(&qdev->hw_lock, hw_flags); + spin_lock_irqsave(&qdev->hw_lock, flags); curr_link_state = ql_get_link_state(qdev); - if (test_bit(QL_RESET_ACTIVE,&qdev->flags)) { + if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) { if (netif_msg_link(qdev)) printk(KERN_INFO PFX "%s: Reset in progress, skip processing link " "state.\n", qdev->ndev->name); + /* FIXME: return with spinlock held. 
*/ return; } switch (qdev->port_link_state) { default: - if (test_bit(QL_LINK_MASTER,&qdev->flags)) { + if (test_bit(QL_LINK_MASTER, &qdev->flags)) { ql_port_start(qdev); } qdev->port_link_state = LS_DOWN; @@ -1419,7 +1309,7 @@ static void ql_link_state_machine(struct } break; } - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + spin_unlock_irqrestore(&qdev->hw_lock, flags); } /* @@ -1428,9 +1318,9 @@ static void ql_link_state_machine(struct static void ql_get_phy_owner(struct ql3_adapter *qdev) { if (ql_this_adapter_controls_port(qdev, qdev->mac_index)) - set_bit(QL_LINK_MASTER,&qdev->flags); + set_bit(QL_LINK_MASTER, &qdev->flags); else - clear_bit(QL_LINK_MASTER,&qdev->flags); + clear_bit(QL_LINK_MASTER, &qdev->flags); } /* @@ -1440,7 +1330,7 @@ static void ql_init_scan_mode(struct ql3 { ql_mii_enable_scan_mode(qdev); - if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) { + if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) { if (ql_this_adapter_controls_port(qdev, qdev->mac_index)) ql_petbi_init_ex(qdev, qdev->mac_index); } else { @@ -1457,30 +1347,32 @@ static void ql_init_scan_mode(struct ql3 */ static int ql_mii_setup(struct ql3_adapter *qdev) { - u32 reg; - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + u32 bits = (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index * 2)) << 7; + u32 __iomem *ctrl = &qdev->ioaddr->macMIIMgmtControlReg; + int rc; - if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * - 2) << 7)) - return -1; + rc = ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, bits); + if (rc < 0) + goto out; + + if (qdev->device_id == QL3032_DEVICE_ID) + ql_write_page0_reg(qdev, ctrl, 0x0f00000); /* Divide 125MHz clock by 28 to meet PHY timing requirements */ - reg = MAC_MII_CONTROL_CLK_SEL_DIV28; - ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, - reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16)); + ql_write_page0_reg(qdev, ctrl, MAC_MII_CONTROL_CLK_SEL_DIV28 | + (MAC_MII_CONTROL_CLK_SEL_MASK << 16)); ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); - return 0; +out: + return rc; } static u32 ql_supported_modes(struct ql3_adapter *qdev) { u32 supported; - if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) { + if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) { supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE | SUPPORTED_Autoneg; } else { @@ -1496,48 +1388,67 @@ static u32 ql_supported_modes(struct ql3 return supported; } +// FIXME: the caller does not check the returned value. static int ql_get_auto_cfg_status(struct ql3_adapter *qdev) { + u32 bits = (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index * 2)) << 7; + unsigned long flags; int status; - unsigned long hw_flags; - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * - 2) << 7)) - return 0; + int rc; + + spin_lock_irqsave(&qdev->hw_lock, flags); + + rc = ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, bits); + if (rc < 0) { + status = 0; + goto out_unlock; + } status = ql_is_auto_cfg(qdev); ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); +out_unlock: + spin_unlock_irqrestore(&qdev->hw_lock, flags); return status; } +// FIXME: the caller does not check the returned value. 
static u32 ql_get_speed(struct ql3_adapter *qdev) { + u32 bits = (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index * 2)) << 7; + unsigned long flags; u32 status; - unsigned long hw_flags; - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * - 2) << 7)) - return 0; + int rc; + + spin_lock_irqsave(&qdev->hw_lock, flags); + rc = ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, bits); + if (rc < 0) { + status = 0; + goto out_unlock; + } status = ql_get_link_speed(qdev); ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); +out_unlock: + spin_unlock_irqrestore(&qdev->hw_lock, flags); return status; } +// FIXME: the caller does not check the returned value. static int ql_get_full_dup(struct ql3_adapter *qdev) { + u32 bits = (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index * 2)) << 7; + unsigned long flags; int status; - unsigned long hw_flags; - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * - 2) << 7)) - return 0; + int rc; + + spin_lock_irqsave(&qdev->hw_lock, flags); + rc = ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, bits); + if (rc < 0) { + status = 0; + goto out_unlock; + } status = ql_is_link_full_dup(qdev); ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); +out_unlock: + spin_unlock_irqrestore(&qdev->hw_lock, flags); return status; } @@ -1549,7 +1460,7 @@ static int ql_get_settings(struct net_de ecmd->transceiver = XCVR_INTERNAL; ecmd->supported = ql_supported_modes(qdev); - if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) { + if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) { ecmd->port = PORT_FIBRE; } else { ecmd->port = PORT_TP; @@ -1566,6 +1477,7 @@ static void ql_get_drvinfo(struct net_de struct ethtool_drvinfo *drvinfo) { struct ql3_adapter *qdev = netdev_priv(ndev); + strncpy(drvinfo->driver, ql3xxx_driver_name, 32); strncpy(drvinfo->version, ql3xxx_driver_version, 32); strncpy(drvinfo->fw_version, "N/A", 32); @@ -1579,12 +1491,14 @@ static void ql_get_drvinfo(struct net_de static u32 ql_get_msglevel(struct net_device *ndev) { struct ql3_adapter *qdev = netdev_priv(ndev); + return qdev->msg_enable; } static void ql_set_msglevel(struct net_device *ndev, u32 value) { struct ql3_adapter *qdev = netdev_priv(ndev); + qdev->msg_enable = value; } @@ -1599,42 +1513,39 @@ static const struct ethtool_ops ql3xxx_e static int ql_populate_free_queue(struct ql3_adapter *qdev) { - struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head; - u64 map; + struct ql_rcv_buf_cb *rx_cb; - while (lrg_buf_cb) { - if (!lrg_buf_cb->skb) { - lrg_buf_cb->skb = dev_alloc_skb(qdev->lrg_buffer_len); - if (unlikely(!lrg_buf_cb->skb)) { - printk(KERN_DEBUG PFX - "%s: Failed dev_alloc_skb().\n", - qdev->ndev->name); - break; - } else { - /* - * We save some space to copy the ethhdr from - * first buffer - */ - skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE); - map = pci_map_single(qdev->pdev, - lrg_buf_cb->skb->data, - qdev->lrg_buffer_len - - QL_HEADER_SPACE, - PCI_DMA_FROMDEVICE); - lrg_buf_cb->buf_phy_addr_low = - cpu_to_le32(LS_64BITS(map)); - lrg_buf_cb->buf_phy_addr_high = - cpu_to_le32(MS_64BITS(map)); - pci_unmap_addr_set(lrg_buf_cb, mapaddr, map); - pci_unmap_len_set(lrg_buf_cb, maplen, - qdev->lrg_buffer_len - - QL_HEADER_SPACE); - --qdev->lrg_buf_skb_check; - if (!qdev->lrg_buf_skb_check) - return 1; - } + for (rx_cb = qdev->lrg_buf_free_head; rx_cb; 
rx_cb = rx_cb->next) { + u64 map; + + if (rx_cb->skb) + continue; + + rx_cb->skb = dev_alloc_skb(qdev->lrg_buffer_len); + if (unlikely(!rx_cb->skb)) { + printk(KERN_DEBUG PFX "%s: Failed dev_alloc_skb().\n", + qdev->ndev->name); + break; } - lrg_buf_cb = lrg_buf_cb->next; + /* + * We save some space to copy the ethhdr from + * first buffer + */ + skb_reserve(rx_cb->skb, QL_HEADER_SPACE); + map = pci_map_single(qdev->pdev, rx_cb->skb->data, + qdev->lrg_buffer_len - QL_HEADER_SPACE, + PCI_DMA_FROMDEVICE); + + rx_cb->buf_phy_addr_low = cpu_to_le32(LS_64BITS(map)); + rx_cb->buf_phy_addr_high = cpu_to_le32(MS_64BITS(map)); + + pci_unmap_addr_set(rx_cb, mapaddr, map); + pci_unmap_len_set(rx_cb, maplen, + qdev->lrg_buffer_len - QL_HEADER_SPACE); + + --qdev->lrg_buf_skb_check; + if (!qdev->lrg_buf_skb_check) + return 1; } return 0; } @@ -1644,10 +1555,10 @@ static int ql_populate_free_queue(struct */ static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev) { + u32 __iomem *idx = &qdev->ioaddr->CommonRegs.rxLargeQProducerIndex; struct bufq_addr_element *lrg_buf_q_ele; int i; struct ql_rcv_buf_cb *lrg_buf_cb; - struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; if ((qdev->lrg_buf_free_count >= 8) && (qdev->lrg_buf_release_cnt >= 16)) { @@ -1686,10 +1597,7 @@ static void ql_update_lrg_bufq_prod_inde qdev->lrg_buf_next_free = lrg_buf_q_ele; - ql_write_common_reg(qdev, - &port_regs->CommonRegs. - rxLargeQProducerIndex, - qdev->lrg_buf_q_producer_index); + ql_write_common_reg(qdev, idx, qdev->lrg_buf_q_producer_index); } } @@ -1697,18 +1605,42 @@ static void ql_process_mac_tx_intr(struc struct ob_mac_iocb_rsp *mac_rsp) { struct ql_tx_buf_cb *tx_cb; + int i; tx_cb = &qdev->tx_buf[mac_rsp->transaction_id]; + pci_unmap_single(qdev->pdev, - pci_unmap_addr(tx_cb, mapaddr), - pci_unmap_len(tx_cb, maplen), PCI_DMA_TODEVICE); - dev_kfree_skb_irq(tx_cb->skb); + pci_unmap_addr(tx_cb->map, mapaddr), + pci_unmap_len(tx_cb->map, maplen), + PCI_DMA_TODEVICE); + tx_cb->seg_count--; + if (tx_cb->seg_count) { + for (i = 1; i < tx_cb->seg_count; i++) { + pci_unmap_page(qdev->pdev, + pci_unmap_addr(&tx_cb->map[i], mapaddr), + pci_unmap_len(&tx_cb->map[i], maplen), + PCI_DMA_TODEVICE); + } + } qdev->stats.tx_packets++; qdev->stats.tx_bytes += tx_cb->skb->len; + dev_kfree_skb_irq(tx_cb->skb); tx_cb->skb = NULL; atomic_inc(&qdev->tx_count); } +/* + * The difference between 3022 and 3032 for inbound completions: + * 3022 uses two buffers per completion. The first buffer contains + * (some) header info, the second the remainder of the headers plus + * the data. For this chip we reserve some space at the top of the + * receive buffer so that the header info in buffer one can be + * prepended to the buffer two. Buffer two is the sent up while + * buffer one is returned to the hardware to be reused. + * 3032 receives all of it's data and headers in one buffer for a + * simpler process. 3032 also supports checksum verification as + * can be seen in ql_process_macip_rx_intr(). 
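
A toy model of the 3022 headroom trick described above, plain C with made-up sizes and names (not driver code): buffer two is handed to the chip with spare room at the front, the payload lands past that headroom, and the header bytes delivered in buffer one are later copied in just ahead of the payload, so only buffer two has to travel up the stack.

#include <stdio.h>
#include <string.h>

#define HEADER_SPACE 64			/* plays the role of QL_HEADER_SPACE */

int main(void)
{
	char buf2[HEADER_SPACE + 128];		/* the "large buffer" given to the chip */
	char *data = buf2 + HEADER_SPACE;	/* payload lands here (cf. skb_reserve) */
	const char hdr_from_buf1[] = "hdr-from-buffer-one:";
	const char payload[] = "payload-from-buffer-two";

	memcpy(data, payload, sizeof(payload));	/* pretend the chip filled buffer two */

	/* prepend: step back into the headroom (cf. skb_push) and copy the header */
	data -= sizeof(hdr_from_buf1) - 1;
	memcpy(data, hdr_from_buf1, sizeof(hdr_from_buf1) - 1);

	printf("%s\n", data);	/* hdr-from-buffer-one:payload-from-buffer-two */
	return 0;
}
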
+ */ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev, struct ib_mac_iocb_rsp *ib_mac_rsp_ptr) { @@ -1731,14 +1663,16 @@ static void ql_process_mac_rx_intr(struc qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset; qdev->small_buf_release_cnt++; - /* start of first buffer */ - lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr); - lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index]; - qdev->lrg_buf_release_cnt++; - if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS) - qdev->lrg_buf_index = 0; - curr_ial_ptr++; /* 64-bit pointers require two incs. */ - curr_ial_ptr++; + if (qdev->device_id == QL3022_DEVICE_ID) { + /* start of first buffer (3022 only) */ + lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr); + lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index]; + qdev->lrg_buf_release_cnt++; + if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS) + qdev->lrg_buf_index = 0; + curr_ial_ptr++; /* 64-bit pointers require two incs. */ + curr_ial_ptr++; + } /* start of second buffer */ lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr); @@ -1769,7 +1703,8 @@ static void ql_process_mac_rx_intr(struc qdev->ndev->last_rx = jiffies; lrg_buf_cb2->skb = NULL; - ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); + if (qdev->device_id == QL3022_DEVICE_ID) + ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); } @@ -1781,7 +1716,7 @@ static void ql_process_macip_rx_intr(str struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL; struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL; u32 *curr_ial_ptr; - struct sk_buff *skb1, *skb2; + struct sk_buff *skb1 = NULL, *skb2; struct net_device *ndev = qdev->ndev; u16 length = le16_to_cpu(ib_ip_rsp_ptr->length); u16 size = 0; @@ -1797,16 +1732,20 @@ static void ql_process_macip_rx_intr(str qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset; qdev->small_buf_release_cnt++; - /* start of first buffer */ - lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr); - lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index]; - - qdev->lrg_buf_release_cnt++; - if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS) - qdev->lrg_buf_index = 0; - skb1 = lrg_buf_cb1->skb; - curr_ial_ptr++; /* 64-bit pointers require two incs. */ - curr_ial_ptr++; + if (qdev->device_id == QL3022_DEVICE_ID) { + /* start of first buffer on 3022 */ + lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr); + lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index]; + qdev->lrg_buf_release_cnt++; + if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS) + qdev->lrg_buf_index = 0; + skb1 = lrg_buf_cb1->skb; + curr_ial_ptr++; /* 64-bit pointers require two incs. */ + curr_ial_ptr++; + size = ETH_HLEN; + if (*((u16 *) skb1->data) != 0xFFFF) + size += VLAN_ETH_HLEN - ETH_HLEN; + } /* start of second buffer */ lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr); @@ -1816,18 +1755,6 @@ static void ql_process_macip_rx_intr(str if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS) qdev->lrg_buf_index = 0; - qdev->stats.rx_packets++; - qdev->stats.rx_bytes += length; - - /* - * Copy the ethhdr from first buffer to second. This - * is necessary for IP completions. - */ - if (*((u16 *) skb1->data) != 0xFFFF) - size = VLAN_ETH_HLEN; - else - size = ETH_HLEN; - skb_put(skb2, length); /* Just the second buffer length here. 
*/ pci_unmap_single(qdev->pdev, pci_unmap_addr(lrg_buf_cb2, mapaddr), @@ -1835,26 +1762,55 @@ static void ql_process_macip_rx_intr(str PCI_DMA_FROMDEVICE); prefetch(skb2->data); - memcpy(skb_push(skb2, size), skb1->data + VLAN_ID_LEN, size); + if (qdev->device_id == QL3022_DEVICE_ID) { + /* + * Copy the ethhdr from first buffer to second. This + * is necessary for 3022 IP completions. + */ + memcpy(skb_push(skb2, size), skb1->data + VLAN_ID_LEN, size); + skb2->ip_summed = CHECKSUM_NONE; + } else { + skb2->ip_summed = CHECKSUM_UNNECESSARY; + if (ib_ip_rsp_ptr->checksum & IB_IP_IOCB_RSP_3032_ICE) { + skb2->ip_summed = CHECKSUM_NONE; + printk(KERN_ERR + "%s: Bad checksum for this IP packet.\n", + __func__); + } else if ((ib_ip_rsp_ptr->checksum & + (IB_IP_IOCB_RSP_3032_TCP | + IB_IP_IOCB_RSP_3032_UDP)) && + (ib_ip_rsp_ptr->checksum & + (IB_IP_IOCB_RSP_3032_NUC | + IB_IP_IOCB_RSP_3032_CE))) { + skb2->ip_summed = CHECKSUM_NONE; + printk(KERN_ERR + "%s: Bad checksum for this %s packet.\n", + __func__, + ((ib_ip_rsp_ptr->checksum & + IB_IP_IOCB_RSP_3032_TCP) ? "TCP" : "UDP")); + } + } skb2->dev = qdev->ndev; - skb2->ip_summed = CHECKSUM_NONE; skb2->protocol = eth_type_trans(skb2, qdev->ndev); - netif_receive_skb(skb2); + qdev->stats.rx_packets++; + qdev->stats.rx_bytes += length; ndev->last_rx = jiffies; lrg_buf_cb2->skb = NULL; - ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); + if (qdev->device_id == QL3022_DEVICE_ID) + ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); } static int ql_tx_rx_clean(struct ql3_adapter *qdev, int *tx_cleaned, int *rx_cleaned, int work_to_do) { - struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; + struct ql3xxx_common_registers __iomem *regs = + &qdev->ioaddr->CommonRegs; struct net_rsp_iocb *net_rsp; struct net_device *ndev = qdev->ndev; - unsigned long hw_flags; + unsigned long flags; /* While there are entries in the completion queue. */ while ((cpu_to_le32(*(qdev->prsp_producer_index)) != @@ -1871,22 +1827,24 @@ static int ql_tx_rx_clean(struct ql3_ada break; case OPCODE_IB_MAC_IOCB: + case OPCODE_IB_3032_MAC_IOCB: ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *) net_rsp); (*rx_cleaned)++; break; case OPCODE_IB_IP_IOCB: + case OPCODE_IB_3032_IP_IOCB: ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *) net_rsp); (*rx_cleaned)++; break; + default: { u32 *tmp = (u32 *) net_rsp; printk(KERN_ERR PFX - "%s: Hit default case, not " - "handled!\n" + "%s: Hit default case, not handled!\n" " dropping the packet, opcode = " "%x.\n", ndev->name, net_rsp->opcode); @@ -1909,7 +1867,7 @@ static int ql_tx_rx_clean(struct ql3_ada } } - spin_lock_irqsave(&qdev->hw_lock, hw_flags); + spin_lock_irqsave(&qdev->hw_lock, flags); ql_update_lrg_bufq_prod_index(qdev); @@ -1923,16 +1881,13 @@ static int ql_tx_rx_clean(struct ql3_ada qdev->small_buf_release_cnt -= 8; } - ql_write_common_reg(qdev, - &port_regs->CommonRegs. 
- rxSmallQProducerIndex, + ql_write_common_reg(qdev, ®s->rxSmallQProducerIndex, qdev->small_buf_q_producer_index); } - ql_write_common_reg(qdev, - &port_regs->CommonRegs.rspQConsumerIndex, + ql_write_common_reg(qdev, ®s->rspQConsumerIndex, qdev->rsp_consumer_index); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + spin_unlock_irqrestore(&qdev->hw_lock, flags); if (unlikely(netif_queue_stopped(qdev->ndev))) { if (netif_queue_stopped(qdev->ndev) && @@ -1970,13 +1925,11 @@ static irqreturn_t ql3xxx_isr(int irq, v struct net_device *ndev = dev_id; struct ql3_adapter *qdev = netdev_priv(ndev); - struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; + struct ql3xxx_port_registers __iomem *port_regs = qdev->ioaddr; u32 value; int handled = 1; u32 var; - port_regs = qdev->mem_map_registers; - value = ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus); @@ -1986,7 +1939,7 @@ static irqreturn_t ql3xxx_isr(int irq, v netif_carrier_off(qdev->ndev); ql_disable_interrupts(qdev); qdev->port_link_state = LS_DOWN; - set_bit(QL_RESET_ACTIVE,&qdev->flags) ; + set_bit(QL_RESET_ACTIVE, &qdev->flags) ; if (value & ISP_CONTROL_FE) { /* @@ -1998,12 +1951,12 @@ static irqreturn_t ql3xxx_isr(int irq, v printk(KERN_WARNING PFX "%s: Resetting chip. PortFatalErrStatus " "register = 0x%x\n", ndev->name, var); - set_bit(QL_RESET_START,&qdev->flags) ; + set_bit(QL_RESET_START, &qdev->flags) ; } else { /* * Soft Reset Requested. */ - set_bit(QL_RESET_PER_SCSI,&qdev->flags) ; + set_bit(QL_RESET_PER_SCSI, &qdev->flags) ; printk(KERN_ERR PFX "%s: Another function issued a reset to the " "chip. ISR value = %x.\n", ndev->name, value); @@ -2023,50 +1976,190 @@ static irqreturn_t ql3xxx_isr(int irq, v return IRQ_RETVAL(handled); } +/* + * Get the total number of segments needed for the + * given number of fragments. This is necessary because + * outbound address lists (OAL) will be used when more than + * two frags are given. Each address list has 5 addr/len + * pairs. The 5th pair in each AOL is used to point to + * the next AOL if more frags are coming. + * That is why the frags:segment count ratio is not linear. 
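
The frag-to-segment mapping that ql_get_seg_count() hard-codes below follows directly from this layout: 3 addr/len pairs in the IOCB, 5 per OAL, and the last pair of a full IOCB/OAL spent on a continuation pointer instead of data. A small standalone program, illustration only and not driver code, that reproduces the same numbers:

#include <stdio.h>

/* Illustration only: recompute the segment counts from the layout above. */
static int seg_count(unsigned int frags)
{
	unsigned int data = frags + 1;	/* skb->data plus each fragment */
	unsigned int total;

	if (data <= 3)			/* everything fits in the IOCB */
		return data;

	total = 3;			/* 2 data pairs + 1 continuation */
	data -= 2;
	while (data > 5) {		/* full OALs: 4 data + 1 continuation */
		total += 5;
		data -= 4;
	}
	return total + data;		/* the last OAL holds the remainder */
}

int main(void)
{
	unsigned int frags;

	for (frags = 0; frags <= 18; frags++)
		printf("%2u frags -> %2d segments\n", frags, seg_count(frags));
	return 0;
}

For 0 through 18 fragments this prints exactly the values in the switch statement that follows.
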
+ */ +static int ql_get_seg_count(unsigned short frags) +{ + switch(frags) { + case 0: return 1; /* just the skb->data seg */ + case 1: return 2; /* skb->data + 1 frag */ + case 2: return 3; /* skb->data + 2 frags */ + case 3: return 5; /* skb->data + 1 frag + 1 AOL containting 2 frags */ + case 4: return 6; + case 5: return 7; + case 6: return 8; + case 7: return 10; + case 8: return 11; + case 9: return 12; + case 10: return 13; + case 11: return 15; + case 12: return 16; + case 13: return 17; + case 14: return 18; + case 15: return 20; + case 16: return 21; + case 17: return 22; + case 18: return 23; + } + return -1; +} + +static void ql_hw_csum_setup(struct sk_buff *skb, + struct ob_mac_iocb_req *mac_iocb_ptr) +{ + struct ethhdr *eth; + struct iphdr *ip = NULL; + u8 offset = ETH_HLEN; + + eth = (struct ethhdr *)(skb->data); + + if (eth->h_proto == __constant_htons(ETH_P_IP)) { + ip = (struct iphdr *)&skb->data[ETH_HLEN]; + } else if (eth->h_proto == htons(ETH_P_8021Q) && + ((struct vlan_ethhdr *)skb->data)-> + h_vlan_encapsulated_proto == __constant_htons(ETH_P_IP)) { + ip = (struct iphdr *)&skb->data[VLAN_ETH_HLEN]; + offset = VLAN_ETH_HLEN; + } + + if (ip) { + if (ip->protocol == IPPROTO_TCP) { + mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC; + mac_iocb_ptr->ip_hdr_off = offset; + mac_iocb_ptr->ip_hdr_len = ip->ihl; + } else if (ip->protocol == IPPROTO_UDP) { + mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC; + mac_iocb_ptr->ip_hdr_off = offset; + mac_iocb_ptr->ip_hdr_len = ip->ihl; + } + } +} + +/* + * The difference between 3022 and 3032 sends: + * 3022 only supports a simple single segment transmission. + * 3032 supports checksumming and scatter/gather lists (fragments). + * The 3032 supports sglists by using the 3 addr/len pairs (ALP) + * in the IOCB plus a chain of outbound address lists (OAL) that + * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th) + * will used to point to an OAL when more ALP entries are required. + * The IOCB is always the top of the chain followed by one or more + * OALs (when necessary). 
+ */ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev) { - struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); - struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; + struct ql3_adapter *qdev = netdev_priv(ndev); + u32 __iomem *idx = &qdev->ioaddr->CommonRegs.reqQProducerIndex; struct ql_tx_buf_cb *tx_cb; + u32 tot_len = skb->len; + struct oal *oal; + struct oal_entry *oal_entry; + int len; struct ob_mac_iocb_req *mac_iocb_ptr; u64 map; + int seg_cnt, seg = 0; + int frag_cnt = (int)skb_shinfo(skb)->nr_frags; if (unlikely(atomic_read(&qdev->tx_count) < 2)) { if (!netif_queue_stopped(ndev)) netif_stop_queue(ndev); return NETDEV_TX_BUSY; } - tx_cb = &qdev->tx_buf[qdev->req_producer_index] ; + tx_cb = &qdev->tx_buf[qdev->req_producer_index]; + seg_cnt = tx_cb->seg_count = ql_get_seg_count(skb_shinfo(skb)->nr_frags); + if (seg_cnt == -1) { + printk(KERN_ERR PFX "%s: invalid segment count!\n", __func__); + return NETDEV_TX_OK; + } mac_iocb_ptr = tx_cb->queue_entry; - memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req)); + memset(mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req)); mac_iocb_ptr->opcode = qdev->mac_ob_opcode; mac_iocb_ptr->flags |= qdev->mb_bit_mask; mac_iocb_ptr->transaction_id = qdev->req_producer_index; - mac_iocb_ptr->data_len = cpu_to_le16((u16) skb->len); + mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len); tx_cb->skb = skb; - map = pci_map_single(qdev->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); - mac_iocb_ptr->buf_addr0_low = cpu_to_le32(LS_64BITS(map)); - mac_iocb_ptr->buf_addr0_high = cpu_to_le32(MS_64BITS(map)); - mac_iocb_ptr->buf_0_len = cpu_to_le32(skb->len | OB_MAC_IOCB_REQ_E); - pci_unmap_addr_set(tx_cb, mapaddr, map); - pci_unmap_len_set(tx_cb, maplen, skb->len); - atomic_dec(&qdev->tx_count); + if (skb->ip_summed == CHECKSUM_PARTIAL) + ql_hw_csum_setup(skb, mac_iocb_ptr); + len = skb_headlen(skb); + map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); + oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; + oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); + oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); + oal_entry->len = cpu_to_le32(len); + pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); + pci_unmap_len_set(&tx_cb->map[seg], maplen, len); + seg++; + + if (!skb_shinfo(skb)->nr_frags) { + /* Terminate the last segment. */ + oal_entry->len = + cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY); + } else { + int i; + oal = tx_cb->oal; + for (i = 0; i < frag_cnt; i++, seg++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + oal_entry++; + if ((seg == 2 && seg_cnt > 3) || /* Check for continuation */ + (seg == 7 && seg_cnt > 8) || /* requirements. It's strange */ + (seg == 12 && seg_cnt > 13) || /* but necessary. */ + (seg == 17 && seg_cnt > 18)) { + /* Continuation entry points to outbound address list. 
*/ + map = pci_map_single(qdev->pdev, oal, + sizeof(struct oal), + PCI_DMA_TODEVICE); + oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); + oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); + oal_entry->len = + cpu_to_le32(sizeof(struct oal) | + OAL_CONT_ENTRY); + pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, + map); + pci_unmap_len_set(&tx_cb->map[seg], maplen, + len); + oal_entry = (struct oal_entry *)oal; + oal++; + seg++; + } + map = + pci_map_page(qdev->pdev, frag->page, + frag->page_offset, frag->size, + PCI_DMA_TODEVICE); + oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); + oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); + oal_entry->len = cpu_to_le32(frag->size); + pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); + pci_unmap_len_set(&tx_cb->map[seg], maplen, + frag->size); + } + /* Terminate the last segment. */ + oal_entry->len = + cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY); + } + wmb(); qdev->req_producer_index++; if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES) qdev->req_producer_index = 0; wmb(); - ql_write_common_reg_l(qdev, - &port_regs->CommonRegs.reqQProducerIndex, - qdev->req_producer_index); + ql_write_common_reg_l(qdev, idx, qdev->req_producer_index); ndev->trans_start = jiffies; if (netif_msg_tx_queued(qdev)) printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n", ndev->name, qdev->req_producer_index, skb->len); + atomic_dec(&qdev->tx_count); return NETDEV_TX_OK; } + static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev) { qdev->req_q_size = @@ -2102,14 +2195,14 @@ static int ql_alloc_net_req_rsp_queues(s return -ENOMEM; } - set_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags); + set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); return 0; } static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev) { - if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags)) { + if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) { printk(KERN_INFO PFX "%s: Already done.\n", qdev->ndev->name); return; @@ -2127,7 +2220,7 @@ static void ql_free_net_req_rsp_queues(s qdev->rsp_q_virt_addr = NULL; - clear_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags); + clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); } static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) @@ -2178,13 +2271,13 @@ static int ql_alloc_buffer_queues(struct qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr; qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr; - set_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags); + set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); return 0; } static void ql_free_buffer_queues(struct ql3_adapter *qdev) { - if (!test_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags)) { + if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) { printk(KERN_INFO PFX "%s: Already done.\n", qdev->ndev->name); return; @@ -2204,7 +2297,7 @@ static void ql_free_buffer_queues(struct qdev->small_buf_q_virt_addr = NULL; - clear_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags); + clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); } static int ql_alloc_small_buffers(struct ql3_adapter *qdev) @@ -2246,13 +2339,13 @@ static int ql_alloc_small_buffers(struct small_buf_q_entry++; } qdev->small_buf_index = 0; - set_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags); + set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags); return 0; } static void ql_free_small_buffers(struct ql3_adapter *qdev) { - if (!test_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags)) { + if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) { printk(KERN_INFO PFX "%s: Already done.\n", qdev->ndev->name); return; @@ -2350,20 +2443,39 @@ static int ql_alloc_large_buffers(struct 
return 0; } -static void ql_create_send_free_list(struct ql3_adapter *qdev) +static void ql_free_send_free_list(struct ql3_adapter *qdev) +{ + struct ql_tx_buf_cb *tx_cb; + int i; + + tx_cb = &qdev->tx_buf[0]; + for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { + if (tx_cb->oal) { + kfree(tx_cb->oal); + tx_cb->oal = NULL; + } + tx_cb++; + } +} + +static int ql_create_send_free_list(struct ql3_adapter *qdev) { + struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr; struct ql_tx_buf_cb *tx_cb; int i; - struct ob_mac_iocb_req *req_q_curr = - qdev->req_q_virt_addr; /* Create free list of transmit buffers */ for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { + tx_cb = &qdev->tx_buf[i]; tx_cb->skb = NULL; tx_cb->queue_entry = req_q_curr; req_q_curr++; + tx_cb->oal = kmalloc(512, GFP_KERNEL); + if (tx_cb->oal == NULL) + return -1; } + return 0; } static int ql_alloc_mem_resources(struct ql3_adapter *qdev) @@ -2438,12 +2550,14 @@ static int ql_alloc_mem_resources(struct /* Initialize the large buffer queue. */ ql_init_large_buffers(qdev); - ql_create_send_free_list(qdev); + if (ql_create_send_free_list(qdev)) + goto err_free_list; qdev->rsp_current = qdev->rsp_q_virt_addr; return 0; - +err_free_list: + ql_free_send_free_list(qdev); err_small_buffers: ql_free_buffer_queues(qdev); err_buffer_queues: @@ -2459,6 +2573,7 @@ err_req_rsp: static void ql_free_mem_resources(struct ql3_adapter *qdev) { + ql_free_send_free_list(qdev); ql_free_large_buffers(qdev); ql_free_small_buffers(qdev); ql_free_buffer_queues(qdev); @@ -2474,13 +2589,14 @@ static void ql_free_mem_resources(struct static int ql_init_misc_registers(struct ql3_adapter *qdev) { + u32 bits = (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index * 2)) << 4; struct ql3xxx_local_ram_registers __iomem *local_ram = - (void __iomem *)qdev->mem_map_registers; + (void __iomem *)qdev->ioaddr; + int rc; - if(ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * - 2) << 4)) - return -1; + rc = ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK, bits); + if (rc < 0) + goto out; ql_write_page2_reg(qdev, &local_ram->bufletSize, qdev->nvram_data.bufletSize); @@ -2527,37 +2643,41 @@ static int ql_init_misc_registers(struct &local_ram->maxDrbCount, qdev->nvram_data.drbTableSize); ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK); - return 0; +out: + return rc; } +// FIXME: ugly static int ql_adapter_initialize(struct ql3_adapter *qdev) { u32 value; - struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; + struct ql3xxx_port_registers __iomem *port_regs = qdev->ioaddr; struct ql3xxx_host_memory_registers __iomem *hmem_regs = - (void __iomem *)port_regs; + (void __iomem *)port_regs; + struct ql3xxx_common_registers __iomem *regs = &port_regs->CommonRegs; + unsigned int mac_index = qdev->mac_index; u32 delay = 10; int status = 0; - if(ql_mii_setup(qdev)) + if (ql_mii_setup(qdev)) return -1; /* Bring out PHY out of reset */ - ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, - (ISP_SERIAL_PORT_IF_WE | - (ISP_SERIAL_PORT_IF_WE << 16))); + ql_write_common_reg(qdev, &regs->serialPortInterfaceReg, + (ISP_SERIAL_PORT_IF_WE << 16) | + ISP_SERIAL_PORT_IF_WE); qdev->port_link_state = LS_DOWN; netif_carrier_off(qdev->ndev); /* V2 chip fix for ARS-39168.
*/ - ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, - (ISP_SERIAL_PORT_IF_SDE | - (ISP_SERIAL_PORT_IF_SDE << 16))); + ql_write_common_reg(qdev, &regs->serialPortInterfaceReg, + (ISP_SERIAL_PORT_IF_SDE << 16) | + ISP_SERIAL_PORT_IF_SDE); /* Request Queue Registers */ *((u32 *) (qdev->preq_consumer_index)) = 0; - atomic_set(&qdev->tx_count,NUM_REQ_Q_ENTRIES); + atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES); qdev->req_producer_index = 0; ql_write_page1_reg(qdev, @@ -2639,13 +2759,9 @@ static int ql_adapter_initialize(struct qdev->lrg_buf_free_head = NULL; qdev->lrg_buf_free_tail = NULL; - ql_write_common_reg(qdev, - &port_regs->CommonRegs. - rxSmallQProducerIndex, + ql_write_common_reg(qdev, &regs->rxSmallQProducerIndex, qdev->small_buf_q_producer_index); - ql_write_common_reg(qdev, - &port_regs->CommonRegs. - rxLargeQProducerIndex, + ql_write_common_reg(qdev, &regs->rxLargeQProducerIndex, qdev->lrg_buf_q_producer_index); /* @@ -2657,7 +2773,7 @@ static int ql_adapter_initialize(struct if ((value & PORT_STATUS_IC) == 0) { /* Chip has not been configured yet, so let it rip. */ - if(ql_init_misc_registers(qdev)) { + if (ql_init_misc_registers(qdev)) { status = -1; goto out; } @@ -2676,27 +2792,27 @@ static int ql_adapter_initialize(struct value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig; - if(ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) - * 2) << 13)) { - status = -1; + status = ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | + (qdev->mac_index * 2)) << 13); + if (status < 0) goto out; - } + +#define INTERNAL_CHIP_SD_WE (INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) + ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value); ql_write_page0_reg(qdev, &port_regs->InternalChipConfig, - (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) << - 16) | (INTERNAL_CHIP_SD | - INTERNAL_CHIP_WE))); + (INTERNAL_CHIP_SD_WE << 16) | + INTERNAL_CHIP_SD_WE); ql_sem_unlock(qdev, QL_FLASH_SEM_MASK); } - if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * - 2) << 7)) { - status = -1; + status = ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | + (qdev->mac_index * 2)) << 7); + if (status < 0) goto out; - } ql_init_scan_mode(qdev); ql_get_phy_owner(qdev); @@ -2705,42 +2821,40 @@ static int ql_adapter_initialize(struct /* Program lower 32 bits of the MAC address */ ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, - (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); + MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16); ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, - ((qdev->ndev->dev_addr[2] << 24) - | (qdev->ndev->dev_addr[3] << 16) - | (qdev->ndev->dev_addr[4] << 8) - | qdev->ndev->dev_addr[5])); + (qdev->ndev->dev_addr[2] << 24) | + (qdev->ndev->dev_addr[3] << 16) | + (qdev->ndev->dev_addr[4] << 8) | + qdev->ndev->dev_addr[5]); /* Program top 16 bits of the MAC address */ ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, - ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); + (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1); ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, - ((qdev->ndev->dev_addr[0] << 8) - | qdev->ndev->dev_addr[1])); + (qdev->ndev->dev_addr[0] << 8) | + qdev->ndev->dev_addr[1]); /* Enable Primary MAC */ ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, - ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) | - MAC_ADDR_INDIRECT_PTR_REG_PE)); + (MAC_ADDR_INDIRECT_PTR_REG_PE << 16) | + MAC_ADDR_INDIRECT_PTR_REG_PE); /* Clear Primary and
Secondary IP addresses */ ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, - ((IP_ADDR_INDEX_REG_MASK << 16) | - (qdev->mac_index << 2))); + (IP_ADDR_INDEX_REG_MASK << 16) | (mac_index << 2)); ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, - ((IP_ADDR_INDEX_REG_MASK << 16) | - ((qdev->mac_index << 2) + 1))); + (IP_ADDR_INDEX_REG_MASK << 16) | + ((mac_index << 2) + 1)); ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); /* Indicate Configuration Complete */ - ql_write_page0_reg(qdev, - &port_regs->portControl, - ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC)); + ql_write_page0_reg(qdev, &port_regs->portControl, + (PORT_CONTROL_CC << 16) | PORT_CONTROL_CC); do { value = ql_read_page0_reg(qdev, &port_regs->portStatus); @@ -2757,11 +2871,18 @@ static int ql_adapter_initialize(struct } /* Enable Ethernet Function */ - value = - (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI | - PORT_CONTROL_HH); - ql_write_page0_reg(qdev, &port_regs->portControl, - ((value << 16) | value)); + if (qdev->device_id == QL3032_DEVICE_ID) { + value = QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE | + QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4; + ql_write_page0_reg(qdev, &port_regs->functionControl, + (value << 16) | value); + } else { + value = PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI | + PORT_CONTROL_HH; + ql_write_page0_reg(qdev, &port_regs->portControl, + (value << 16) | value); + } + out: return status; @@ -2772,7 +2893,7 @@ out: */ static int ql_adapter_reset(struct ql3_adapter *qdev) { - struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; + u32 __iomem *ctl = &qdev->ioaddr->CommonRegs.ispControlStatus; int status = 0; u16 value; int max_wait_time; @@ -2786,9 +2907,7 @@ static int ql_adapter_reset(struct ql3_a printk(KERN_DEBUG PFX "%s: Issue soft reset to chip.\n", qdev->ndev->name); - ql_write_common_reg(qdev, - &port_regs->CommonRegs.ispControlStatus, - ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR)); + ql_write_common_reg(qdev, ctl, (ISP_CONTROL_SR << 16) | ISP_CONTROL_SR); /* Wait 3 seconds for reset to complete. */ printk(KERN_DEBUG PFX @@ -2798,9 +2917,7 @@ static int ql_adapter_reset(struct ql3_a /* Wait until the firmware tells us the Soft Reset is done */ max_wait_time = 5; do { - value = - ql_read_common_reg(qdev, - &port_regs->CommonRegs.ispControlStatus); + value = ql_read_common_reg(qdev, ctl); if ((value & ISP_CONTROL_SR) == 0) break; @@ -2811,34 +2928,25 @@ static int ql_adapter_reset(struct ql3_a * Also, make sure that the Network Reset Interrupt bit has been * cleared after the soft reset has taken place. */ - value = - ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus); + value = ql_read_common_reg(qdev, ctl); if (value & ISP_CONTROL_RI) { printk(KERN_DEBUG PFX "ql_adapter_reset: clearing RI after reset.\n"); - ql_write_common_reg(qdev, - &port_regs->CommonRegs. - ispControlStatus, - ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); + ql_write_common_reg(qdev, ctl, + (ISP_CONTROL_RI << 16) | ISP_CONTROL_RI); } if (max_wait_time == 0) { /* Issue Force Soft Reset */ - ql_write_common_reg(qdev, - &port_regs->CommonRegs. - ispControlStatus, - ((ISP_CONTROL_FSR << 16) | - ISP_CONTROL_FSR)); + ql_write_common_reg(qdev, ctl, + (ISP_CONTROL_FSR << 16) | ISP_CONTROL_FSR); /* * Wait until the firmware tells us the Force Soft Reset is * done */ max_wait_time = 5; do { - value = - ql_read_common_reg(qdev, - &port_regs->CommonRegs. 
- ispControlStatus); + value = ql_read_common_reg(qdev, ctl); if ((value & ISP_CONTROL_FSR) == 0) { break; } @@ -2855,13 +2963,13 @@ static int ql_adapter_reset(struct ql3_a static void ql_set_mac_info(struct ql3_adapter *qdev) { - struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; + struct ql3xxx_port_registers __iomem *port_regs = qdev->ioaddr; + u32 __iomem *ctl = &port_regs->CommonRegs.ispControlStatus; u32 value, port_status; u8 func_number; /* Get the function number */ - value = - ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus); + value = ql_read_common_reg_l(qdev, ctl); func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK); port_status = ql_read_page0_reg(qdev, &port_regs->portStatus); switch (value & ISP_CONTROL_FN_MASK) { @@ -2873,9 +2981,9 @@ static void ql_set_mac_info(struct ql3_a qdev->mb_bit_mask = FN0_MA_BITS_MASK; qdev->PHYAddr = PORT0_PHY_ADDRESS; if (port_status & PORT_STATUS_SM0) - set_bit(QL_LINK_OPTICAL,&qdev->flags); + set_bit(QL_LINK_OPTICAL, &qdev->flags); else - clear_bit(QL_LINK_OPTICAL,&qdev->flags); + clear_bit(QL_LINK_OPTICAL, &qdev->flags); break; case ISP_CONTROL_FN1_NET: @@ -2886,9 +2994,9 @@ static void ql_set_mac_info(struct ql3_a qdev->mb_bit_mask = FN1_MA_BITS_MASK; qdev->PHYAddr = PORT1_PHY_ADDRESS; if (port_status & PORT_STATUS_SM1) - set_bit(QL_LINK_OPTICAL,&qdev->flags); + set_bit(QL_LINK_OPTICAL, &qdev->flags); else - clear_bit(QL_LINK_OPTICAL,&qdev->flags); + clear_bit(QL_LINK_OPTICAL, &qdev->flags); break; case ISP_CONTROL_FN0_SCSI: @@ -2896,7 +3004,7 @@ static void ql_set_mac_info(struct ql3_a default: printk(KERN_DEBUG PFX "%s: Invalid function number, ispControlStatus = 0x%x\n", - qdev->ndev->name,value); + qdev->ndev->name, value); break; } qdev->numPorts = qdev->nvram_data.numPorts; @@ -2904,27 +3012,26 @@ static void ql_set_mac_info(struct ql3_a static void ql_display_dev_info(struct net_device *ndev) { - struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); + struct ql3_adapter *qdev = netdev_priv(ndev); struct pci_dev *pdev = qdev->pdev; printk(KERN_INFO PFX - "\n%s Adapter %d RevisionID %d found on PCI slot %d.\n", - DRV_NAME, qdev->index, qdev->chip_rev_id, qdev->pci_slot); - printk(KERN_INFO PFX - "%s Interface.\n", - test_bit(QL_LINK_OPTICAL,&qdev->flags) ? "OPTICAL" : "COPPER"); + "\n%s: revision id %d. %s on PCI slot %d. %s interface.\n", + ndev->name, qdev->chip_rev_id, + (qdev->device_id == QL3032_DEVICE_ID) ? "QLA3032" : "QLA3022", + qdev->pci_slot, + test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER"); /* * Print PCI bus width/type. */ printk(KERN_INFO PFX "Bus interface is %s %s.\n", - ((qdev->pci_width == 64) ? "64-bit" : "32-bit"), - ((qdev->pci_x) ? "PCI-X" : "PCI")); + (qdev->pci_width == 64) ? "64-bit" : "32-bit", + (qdev->pci_x) ? 
"PCI-X" : "PCI"); printk(KERN_INFO PFX - "mem IO base address adjusted = 0x%p\n", - qdev->mem_map_registers); + "mem IO base address adjusted = 0x%p\n", qdev->ioaddr); printk(KERN_INFO PFX "Interrupt number = %d\n", pdev->irq); if (netif_msg_probe(qdev)) @@ -2943,17 +3050,17 @@ static int ql_adapter_down(struct ql3_ad netif_stop_queue(ndev); netif_carrier_off(ndev); - clear_bit(QL_ADAPTER_UP,&qdev->flags); - clear_bit(QL_LINK_MASTER,&qdev->flags); + clear_bit(QL_ADAPTER_UP, &qdev->flags); + clear_bit(QL_LINK_MASTER, &qdev->flags); ql_disable_interrupts(qdev); free_irq(qdev->pdev->irq, ndev); - if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) { + if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { printk(KERN_INFO PFX "%s: calling pci_disable_msi().\n", qdev->ndev->name); - clear_bit(QL_MSI_ENABLED,&qdev->flags); + clear_bit(QL_MSI_ENABLED, &qdev->flags); pci_disable_msi(qdev->pdev); } @@ -2962,25 +3069,25 @@ static int ql_adapter_down(struct ql3_ad netif_poll_disable(ndev); if (do_reset) { - int soft_reset; - unsigned long hw_flags; + unsigned long flags; - spin_lock_irqsave(&qdev->hw_lock, hw_flags); + /* FIXME: ssleep with spinlock held in ql_wait_for_drvr_lock */ + spin_lock_irqsave(&qdev->hw_lock, flags); if (ql_wait_for_drvr_lock(qdev)) { - if ((soft_reset = ql_adapter_reset(qdev))) { + if (ql_adapter_reset(qdev)) { printk(KERN_ERR PFX - "%s: ql_adapter_reset(%d) FAILED!\n", - ndev->name, qdev->index); + "%s: ql_adapter_reset() FAILED!\n", + ndev->name); } printk(KERN_ERR PFX - "%s: Releaseing driver lock via chip reset.\n",ndev->name); + "%s: Releaseing driver lock via chip reset.\n", ndev->name); } else { printk(KERN_ERR PFX "%s: Could not acquire driver lock to do " "reset!\n", ndev->name); retval = -1; } - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + spin_unlock_irqrestore(&qdev->hw_lock, flags); } ql_free_mem_resources(qdev); return retval; @@ -2991,7 +3098,7 @@ static int ql_adapter_up(struct ql3_adap struct net_device *ndev = qdev->ndev; int err; unsigned long irq_flags = SA_SAMPLE_RANDOM | SA_SHIRQ; - unsigned long hw_flags; + unsigned long flags; if (ql_alloc_mem_resources(qdev)) { printk(KERN_ERR PFX @@ -3008,31 +3115,34 @@ static int ql_adapter_up(struct ql3_adap qdev->msi = 0; } else { printk(KERN_INFO PFX "%s: MSI Enabled...\n", qdev->ndev->name); - set_bit(QL_MSI_ENABLED,&qdev->flags); + set_bit(QL_MSI_ENABLED, &qdev->flags); irq_flags &= ~SA_SHIRQ; } } - if ((err = request_irq(qdev->pdev->irq, - ql3xxx_isr, - irq_flags, ndev->name, ndev))) { + err = request_irq(qdev->pdev->irq, ql3xxx_isr, irq_flags, ndev->name, + ndev); + if (err < 0) { printk(KERN_ERR PFX "%s: Failed to reserve interrupt %d already in use.\n", ndev->name, qdev->pdev->irq); goto err_irq; } - spin_lock_irqsave(&qdev->hw_lock, hw_flags); + /* FIXME: ssleep with spinlock held in ql_wait_for_drvr_lock */ + spin_lock_irqsave(&qdev->hw_lock, flags); - if ((err = ql_wait_for_drvr_lock(qdev))) { - if ((err = ql_adapter_initialize(qdev))) { + err = ql_wait_for_drvr_lock(qdev); + if (err) { + err = ql_adapter_initialize(qdev); + if (err < 0) { printk(KERN_ERR PFX "%s: Unable to initialize adapter.\n", ndev->name); goto err_init; } printk(KERN_ERR PFX - "%s: Releaseing driver lock.\n",ndev->name); + "%s: Releaseing driver lock.\n", ndev->name); ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); } else { printk(KERN_ERR PFX @@ -3041,9 +3151,9 @@ static int ql_adapter_up(struct ql3_adap goto err_lock; } - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + spin_unlock_irqrestore(&qdev->hw_lock, flags); - 
set_bit(QL_ADAPTER_UP,&qdev->flags); + set_bit(QL_ADAPTER_UP, &qdev->flags); mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); @@ -3056,11 +3166,11 @@ err_init: err_lock: free_irq(qdev->pdev->irq, ndev); err_irq: - if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) { + if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { printk(KERN_INFO PFX "%s: calling pci_disable_msi().\n", qdev->ndev->name); - clear_bit(QL_MSI_ENABLED,&qdev->flags); + clear_bit(QL_MSI_ENABLED, &qdev->flags); pci_disable_msi(qdev->pdev); } return err; @@ -3068,10 +3178,10 @@ err_irq: static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset) { - if( ql_adapter_down(qdev,reset) || ql_adapter_up(qdev)) { + if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) { printk(KERN_ERR PFX "%s: Driver up/down cycle failed, " - "closing device\n",qdev->ndev->name); + "closing device\n", qdev->ndev->name); dev_close(qdev->ndev); return -1; } @@ -3086,28 +3196,31 @@ static int ql3xxx_close(struct net_devic * Wait for device to recover from a reset. * (Rarely happens, but possible.) */ - while (!test_bit(QL_ADAPTER_UP,&qdev->flags)) + while (!test_bit(QL_ADAPTER_UP, &qdev->flags)) msleep(50); - ql_adapter_down(qdev,QL_DO_RESET); + ql_adapter_down(qdev, QL_DO_RESET); return 0; } static int ql3xxx_open(struct net_device *ndev) { struct ql3_adapter *qdev = netdev_priv(ndev); - return (ql_adapter_up(qdev)); + + return ql_adapter_up(qdev); } static struct net_device_stats *ql3xxx_get_stats(struct net_device *dev) { - struct ql3_adapter *qdev = (struct ql3_adapter *)dev->priv; + struct ql3_adapter *qdev = netdev_priv(dev); + return &qdev->stats; } static int ql3xxx_change_mtu(struct net_device *ndev, int new_mtu) { struct ql3_adapter *qdev = netdev_priv(ndev); + printk(KERN_ERR PFX "%s: new mtu size = %d.\n", ndev->name, new_mtu); if (new_mtu != NORMAL_MTU_SIZE && new_mtu != JUMBO_MTU_SIZE) { printk(KERN_ERR PFX @@ -3123,7 +3236,7 @@ static int ql3xxx_change_mtu(struct net_ } ndev->mtu = new_mtu; - return ql_cycle_adapter(qdev,QL_DO_RESET); + return ql_cycle_adapter(qdev, QL_DO_RESET); } static void ql3xxx_set_multicast_list(struct net_device *ndev) @@ -3136,11 +3249,11 @@ static void ql3xxx_set_multicast_list(st static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) { - struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + struct ql3_adapter *qdev = netdev_priv(ndev); + u32 __iomem *indirect = &qdev->ioaddr->macAddrIndirectPtrReg; + u32 __iomem *mac_addr = &qdev->ioaddr->macAddrDataReg; struct sockaddr *addr = p; - unsigned long hw_flags; + unsigned long flags; if (netif_running(ndev)) return -EBUSY; @@ -3150,28 +3263,32 @@ static int ql3xxx_set_mac_address(struct memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); - spin_lock_irqsave(&qdev->hw_lock, hw_flags); + spin_lock_irqsave(&qdev->hw_lock, flags); + /* Program lower 32 bits of the MAC address */ - ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, - (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); - ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, - ((ndev->dev_addr[2] << 24) | (ndev-> - dev_addr[3] << 16) | - (ndev->dev_addr[4] << 8) | ndev->dev_addr[5])); + ql_write_page0_reg(qdev, indirect, + MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16); + ql_write_page0_reg(qdev, mac_addr, + (ndev->dev_addr[2] << 24) | + (ndev->dev_addr[3] << 16) | + (ndev->dev_addr[4] << 8) | + ndev->dev_addr[5]); /* Program top 16 bits of the MAC address */ - 
ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, - ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); - ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, - ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1])); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + ql_write_page0_reg(qdev, indirect, + (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1); + ql_write_page0_reg(qdev, mac_addr, + (ndev->dev_addr[0] << 8) | + ndev->dev_addr[1]); + + spin_unlock_irqrestore(&qdev->hw_lock, flags); return 0; } static void ql3xxx_tx_timeout(struct net_device *ndev) { - struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); + struct ql3_adapter *qdev = netdev_priv(ndev); printk(KERN_ERR PFX "%s: Resetting...\n", ndev->name); /* @@ -3189,94 +3306,87 @@ static void ql_reset_work(struct work_st { struct ql3_adapter *qdev = container_of(work, struct ql3_adapter, reset_work.work); - struct net_device *ndev = qdev->ndev; + u32 __iomem *reg = &qdev->ioaddr->CommonRegs.ispControlStatus; + u32 cmd = (ISP_CONTROL_RI << 16) | ISP_CONTROL_RI; + char *name = qdev->ndev->name; + unsigned long flags; + unsigned int i; u32 value; - struct ql_tx_buf_cb *tx_cb; - int max_wait_time, i; - struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; - unsigned long hw_flags; - if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START),&qdev->flags)) { - clear_bit(QL_LINK_MASTER,&qdev->flags); + if (!test_bit(QL_RESET_PER_SCSI | QL_RESET_START, &qdev->flags)) + return; - /* - * Loop through the active list and return the skb. - */ - for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { - tx_cb = &qdev->tx_buf[i]; - if (tx_cb->skb) { + clear_bit(QL_LINK_MASTER, &qdev->flags); - printk(KERN_DEBUG PFX - "%s: Freeing lost SKB.\n", - qdev->ndev->name); - pci_unmap_single(qdev->pdev, - pci_unmap_addr(tx_cb, mapaddr), - pci_unmap_len(tx_cb, maplen), PCI_DMA_TODEVICE); - dev_kfree_skb(tx_cb->skb); - tx_cb->skb = NULL; - } - } + /* + * Loop through the active list and return the skb. + */ + for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { + struct ql_tx_buf_cb *tx_cb = &qdev->tx_buf[i]; + unsigned int j; - printk(KERN_ERR PFX - "%s: Clearing NRI after reset.\n", qdev->ndev->name); - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - ql_write_common_reg(qdev, - &port_regs->CommonRegs. - ispControlStatus, - ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); - /* - * Wait the for Soft Reset to Complete. - */ - max_wait_time = 10; - do { - value = ql_read_common_reg(qdev, - &port_regs->CommonRegs. + if (!tx_cb->skb) + continue; - ispControlStatus); - if ((value & ISP_CONTROL_SR) == 0) { - printk(KERN_DEBUG PFX - "%s: reset completed.\n", - qdev->ndev->name); - break; - } + printk(KERN_DEBUG PFX "%s: Freeing lost SKB.\n", name); - if (value & ISP_CONTROL_RI) { - printk(KERN_DEBUG PFX - "%s: clearing NRI after reset.\n", - qdev->ndev->name); - ql_write_common_reg(qdev, - &port_regs-> - CommonRegs. - ispControlStatus, - ((ISP_CONTROL_RI << - 16) | ISP_CONTROL_RI)); - } + for (j = 0; !j || (j < tx_cb->seg_count); j++) { + pci_unmap_page(qdev->pdev, + pci_unmap_addr(&tx_cb->map[j], mapaddr), + pci_unmap_len(&tx_cb->map[j], maplen), + PCI_DMA_TODEVICE); + } - ssleep(1); - } while (--max_wait_time); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + dev_kfree_skb(tx_cb->skb); + tx_cb->skb = NULL; + } - if (value & ISP_CONTROL_SR) { + printk(KERN_ERR PFX "%s: Clearing NRI after reset.\n", name); - /* - * Set the reset flags and clear the board again. - * Nothing else to do... 
- */ - printk(KERN_ERR PFX - "%s: Timed out waiting for reset to " - "complete.\n", ndev->name); - printk(KERN_ERR PFX - "%s: Do a reset.\n", ndev->name); - clear_bit(QL_RESET_PER_SCSI,&qdev->flags); - clear_bit(QL_RESET_START,&qdev->flags); - ql_cycle_adapter(qdev,QL_DO_RESET); - return; + spin_lock_irqsave(&qdev->hw_lock, flags); + + ql_write_common_reg(qdev, reg, cmd); + /* + * Wait the for Soft Reset to Complete. + */ + for (i = 0; i < 10; i++) { + value = ql_read_common_reg(qdev, reg); + if (!(value & ISP_CONTROL_SR)) { + printk(KERN_DEBUG PFX "%s: reset completed.\n", name); + break; + } + + if (value & ISP_CONTROL_RI) { + printk(KERN_DEBUG PFX + "%s: clearing NRI after reset.\n", name); + ql_write_common_reg(qdev, reg, cmd); } - clear_bit(QL_RESET_ACTIVE,&qdev->flags); - clear_bit(QL_RESET_PER_SCSI,&qdev->flags); - clear_bit(QL_RESET_START,&qdev->flags); - ql_cycle_adapter(qdev,QL_NO_RESET); + // FIXME: sleeping with spinlock held, yeah ! + ssleep(1); + } + + spin_unlock_irqrestore(&qdev->hw_lock, flags); + + if (value & ISP_CONTROL_SR) { + /* + * Set the reset flags and clear the board again. + * Nothing else to do... + */ + printk(KERN_ERR PFX + "%s: Timed out waiting for reset to complete.\n", name); + printk(KERN_ERR PFX "%s: Do a reset.\n", name); + + clear_bit(QL_RESET_PER_SCSI, &qdev->flags); + clear_bit(QL_RESET_START, &qdev->flags); + + ql_cycle_adapter(qdev, QL_DO_RESET); + } else { + clear_bit(QL_RESET_ACTIVE, &qdev->flags); + clear_bit(QL_RESET_PER_SCSI, &qdev->flags); + clear_bit(QL_RESET_START, &qdev->flags); + + ql_cycle_adapter(qdev, QL_NO_RESET); } } @@ -3290,12 +3400,11 @@ static void ql_tx_timeout_work(struct wo static void ql_get_board_info(struct ql3_adapter *qdev) { - struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 value; - value = ql_read_page0_reg_l(qdev, &port_regs->portStatus); + value = ql_read_page0_reg_l(qdev, &qdev->ioaddr->portStatus); - qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12); + qdev->chip_rev_id = (value & PORT_STATUS_REV_ID_MASK) >> 12; if (value & PORT_STATUS_64) qdev->pci_width = 64; else @@ -3311,7 +3420,7 @@ static void ql3xxx_timer(unsigned long p { struct ql3_adapter *qdev = (struct ql3_adapter *)ptr; - if (test_bit(QL_RESET_ACTIVE,&qdev->flags)) { + if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) { printk(KERN_DEBUG PFX "%s: Reset in progress.\n", qdev->ndev->name); @@ -3328,23 +3437,23 @@ end: static int __devinit ql3xxx_probe(struct pci_dev *pdev, const struct pci_device_id *pci_entry) { - struct net_device *ndev = NULL; - struct ql3_adapter *qdev = NULL; - static int cards_found = 0; + static int ql_version_printed = 0; + struct ql3_adapter *qdev; + struct net_device *ndev; int pci_using_dac, err; err = pci_enable_device(pdev); if (err) { printk(KERN_ERR PFX "%s cannot enable PCI device\n", pci_name(pdev)); - goto err_out; + goto out; } err = pci_request_regions(pdev, DRV_NAME); - if (err) { + if (err < 0) { printk(KERN_ERR PFX "%s cannot obtain PCI resources\n", pci_name(pdev)); - goto err_out_disable_pdev; + goto err_out_disable_pdev_0; } pci_set_master(pdev); @@ -3352,47 +3461,49 @@ static int __devinit ql3xxx_probe(struct if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { pci_using_dac = 1; err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); - } else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) { + } else { pci_using_dac = 0; - err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); + err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + if (!err) + err = 
pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); } - if (err) { + if (err < 0) { printk(KERN_ERR PFX "%s no usable DMA configuration\n", pci_name(pdev)); - goto err_out_free_regions; + goto err_out_free_regions_1; } ndev = alloc_etherdev(sizeof(struct ql3_adapter)); if (!ndev) - goto err_out_free_regions; + goto err_out_free_regions_1; SET_MODULE_OWNER(ndev); SET_NETDEV_DEV(ndev, &pdev->dev); - ndev->features = NETIF_F_LLTX; - if (pci_using_dac) - ndev->features |= NETIF_F_HIGHDMA; - pci_set_drvdata(pdev, ndev); qdev = netdev_priv(ndev); - qdev->index = cards_found; qdev->ndev = ndev; qdev->pdev = pdev; + qdev->device_id = pci_entry->device; qdev->port_link_state = LS_DOWN; if (msi) qdev->msi = 1; qdev->msg_enable = netif_msg_init(debug, default_msg); - qdev->mem_map_registers = - ioremap_nocache(pci_resource_start(pdev, 1), - pci_resource_len(qdev->pdev, 1)); - if (!qdev->mem_map_registers) { + if (pci_using_dac) + ndev->features |= NETIF_F_HIGHDMA; + if (qdev->device_id == QL3032_DEVICE_ID) + ndev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; + + qdev->ioaddr = ioremap_nocache(pci_resource_start(pdev, 1), + pci_resource_len(pdev, 1)); + if (!qdev->ioaddr) { printk(KERN_ERR PFX "%s: cannot map device registers\n", pci_name(pdev)); - goto err_out_free_ndev; + goto err_out_free_ndev_2; } spin_lock_init(&qdev->adapter_lock); @@ -3400,15 +3511,15 @@ static int __devinit ql3xxx_probe(struct /* Set driver entry points */ ndev->open = ql3xxx_open; - ndev->hard_start_xmit = ql3xxx_send; ndev->stop = ql3xxx_close; ndev->get_stats = ql3xxx_get_stats; ndev->change_mtu = ql3xxx_change_mtu; - ndev->set_multicast_list = ql3xxx_set_multicast_list; - SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops); - ndev->set_mac_address = ql3xxx_set_mac_address; ndev->tx_timeout = ql3xxx_tx_timeout; ndev->watchdog_timeo = 5 * HZ; + ndev->hard_start_xmit = ql3xxx_send; + ndev->set_mac_address = ql3xxx_set_mac_address; + ndev->set_multicast_list = ql3xxx_set_multicast_list; + SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops); ndev->poll = &ql_poll; ndev->weight = 64; @@ -3416,23 +3527,20 @@ static int __devinit ql3xxx_probe(struct ndev->irq = pdev->irq; /* make sure the EEPROM is good */ - if (ql_get_nvram_params(qdev)) { + err = ql_get_nvram_params(qdev); + if (err < 0) { printk(KERN_ALERT PFX - "ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n", - qdev->index); - goto err_out_iounmap; + "ql3xxx_probe: %s, invalid NVRAM parameters.\n", + pci_name(pdev)); + goto err_out_iounmap_3; } ql_set_mac_info(qdev); /* Validate and set parameters */ - if (qdev->mac_index) { - memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn2.macAddress, - ETH_ALEN); - } else { - memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn0.macAddress, - ETH_ALEN); - } + memcpy(ndev->dev_addr, qdev->mac_index ? + &qdev->nvram_data.funcCfg_fn2.macAddress : + &qdev->nvram_data.funcCfg_fn0.macAddress, ETH_ALEN); memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); ndev->tx_queue_len = NUM_REQ_Q_ENTRIES; @@ -3447,19 +3555,17 @@ static int __devinit ql3xxx_probe(struct * Set the Maximum Memory Read Byte Count value. We do this to handle * jumbo frames. 
*/ - if (qdev->pci_x) { + if (qdev->pci_x) pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036); - } err = register_netdev(ndev); - if (err) { + if (err < 0) { printk(KERN_ERR PFX "%s: cannot register net device\n", pci_name(pdev)); - goto err_out_iounmap; + goto err_out_iounmap_3; } /* we're going to reset, so assume we have no link for now */ - netif_carrier_off(ndev); netif_stop_queue(ndev); @@ -3472,27 +3578,25 @@ static int __devinit ql3xxx_probe(struct qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */ qdev->adapter_timer.data = (unsigned long)qdev; - if(!cards_found) { + if (!ql_version_printed) { printk(KERN_ALERT PFX "%s\n", DRV_STRING); printk(KERN_ALERT PFX "Driver name: %s, Version: %s.\n", - DRV_NAME, DRV_VERSION); + DRV_NAME, DRV_VERSION); } ql_display_dev_info(ndev); +out: + return err; - cards_found++; - return 0; - -err_out_iounmap: - iounmap(qdev->mem_map_registers); -err_out_free_ndev: +err_out_iounmap_3: + iounmap(qdev->ioaddr); +err_out_free_ndev_2: + pci_set_drvdata(pdev, NULL); free_netdev(ndev); -err_out_free_regions: +err_out_free_regions_1: pci_release_regions(pdev); -err_out_disable_pdev: +err_out_disable_pdev_0: pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); -err_out: - return err; + goto out; } static void __devexit ql3xxx_remove(struct pci_dev *pdev) @@ -3501,7 +3605,6 @@ static void __devexit ql3xxx_remove(stru struct ql3_adapter *qdev = netdev_priv(ndev); unregister_netdev(ndev); - qdev = netdev_priv(ndev); ql_disable_interrupts(qdev); @@ -3512,7 +3615,7 @@ static void __devexit ql3xxx_remove(stru qdev->workqueue = NULL; } - iounmap(qdev->mem_map_registers); + iounmap(qdev->ioaddr); pci_release_regions(pdev); pci_set_drvdata(pdev, NULL); free_netdev(ndev); @@ -3520,10 +3623,10 @@ static void __devexit ql3xxx_remove(stru static struct pci_driver ql3xxx_driver = { - .name = DRV_NAME, - .id_table = ql3xxx_pci_tbl, - .probe = ql3xxx_probe, - .remove = __devexit_p(ql3xxx_remove), + .name = DRV_NAME, + .id_table = ql3xxx_pci_tbl, + .probe = ql3xxx_probe, + .remove = __devexit_p(ql3xxx_remove), }; static int __init ql3xxx_init_module(void) diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h index ea94de7..ce11455 100644 --- a/drivers/net/qla3xxx.h +++ b/drivers/net/qla3xxx.h @@ -21,7 +21,9 @@ #define OPCODE_UPDATE_NCB_IOCB_FN2 #define OPCODE_UPDATE_NCB_IOCB 0xF0 #define OPCODE_IB_MAC_IOCB 0xF9 +#define OPCODE_IB_3032_MAC_IOCB 0x09 #define OPCODE_IB_IP_IOCB 0xFA +#define OPCODE_IB_3032_IP_IOCB 0x0A #define OPCODE_IB_TCP_IOCB 0xFB #define OPCODE_DUMP_PROTO_IOCB 0xFE #define OPCODE_BUFFER_ALERT_IOCB 0xFB @@ -37,18 +39,23 @@ #define FN1_MA_BITS_MASK 0x80 struct ob_mac_iocb_req { u8 opcode; u8 flags; -#define OB_MAC_IOCB_REQ_MA 0xC0 -#define OB_MAC_IOCB_REQ_F 0x20 -#define OB_MAC_IOCB_REQ_X 0x10 +#define OB_MAC_IOCB_REQ_MA 0xe0 +#define OB_MAC_IOCB_REQ_F 0x10 +#define OB_MAC_IOCB_REQ_X 0x08 #define OB_MAC_IOCB_REQ_D 0x02 #define OB_MAC_IOCB_REQ_I 0x01 - __le16 reserved0; + u8 flags1; +#define OB_3032MAC_IOCB_REQ_IC 0x04 +#define OB_3032MAC_IOCB_REQ_TC 0x02 +#define OB_3032MAC_IOCB_REQ_UC 0x01 + u8 reserved0; __le32 transaction_id; __le16 data_len; - __le16 reserved1; + u8 ip_hdr_off; + u8 ip_hdr_len; + __le32 reserved1; __le32 reserved2; - __le32 reserved3; __le32 buf_addr0_low; __le32 buf_addr0_high; __le32 buf_0_len; @@ -58,8 +65,8 @@ #define OB_MAC_IOCB_REQ_I 0x01 __le32 buf_addr2_low; __le32 buf_addr2_high; __le32 buf_2_len; + __le32 reserved3; __le32 reserved4; - __le32 reserved5; }; /* * The following constants define control 
bits for buffer @@ -74,6 +81,7 @@ struct ob_mac_iocb_rsp { u8 opcode; u8 flags; #define OB_MAC_IOCB_RSP_P 0x08 +#define OB_MAC_IOCB_RSP_L 0x04 #define OB_MAC_IOCB_RSP_S 0x02 #define OB_MAC_IOCB_RSP_I 0x01 @@ -85,6 +93,7 @@ #define OB_MAC_IOCB_RSP_I 0x01 struct ib_mac_iocb_rsp { u8 opcode; +#define IB_MAC_IOCB_RSP_V 0x80 u8 flags; #define IB_MAC_IOCB_RSP_S 0x80 #define IB_MAC_IOCB_RSP_H1 0x40 @@ -138,6 +147,7 @@ #define OB_IP_IOCB_REQ_R 0x10000000 struct ob_ip_iocb_rsp { u8 opcode; u8 flags; +#define OB_MAC_IOCB_RSP_H 0x10 #define OB_MAC_IOCB_RSP_E 0x08 #define OB_MAC_IOCB_RSP_L 0x04 #define OB_MAC_IOCB_RSP_S 0x02 @@ -220,6 +230,10 @@ #define OB_TCP_IOCB_RSP_SHIFT 4 struct ib_ip_iocb_rsp { u8 opcode; +#define IB_IP_IOCB_RSP_3032_V 0x80 +#define IB_IP_IOCB_RSP_3032_O 0x40 +#define IB_IP_IOCB_RSP_3032_I 0x20 +#define IB_IP_IOCB_RSP_3032_R 0x10 u8 flags; #define IB_IP_IOCB_RSP_S 0x80 #define IB_IP_IOCB_RSP_H1 0x40 @@ -230,6 +244,12 @@ #define IB_IP_IOCB_RSP_MA 0x07 __le16 length; __le16 checksum; +#define IB_IP_IOCB_RSP_3032_ICE 0x01 +#define IB_IP_IOCB_RSP_3032_CE 0x02 +#define IB_IP_IOCB_RSP_3032_NUC 0x04 +#define IB_IP_IOCB_RSP_3032_UDP 0x08 +#define IB_IP_IOCB_RSP_3032_TCP 0x10 +#define IB_IP_IOCB_RSP_3032_IPE 0x20 __le16 reserved; #define IB_IP_IOCB_RSP_R 0x01 __le32 ial_low; @@ -524,6 +544,23 @@ enum { IP_ADDR_INDEX_REG_FUNC_2_SEC = 0x0005, IP_ADDR_INDEX_REG_FUNC_3_PRI = 0x0006, IP_ADDR_INDEX_REG_FUNC_3_SEC = 0x0007, + /* 3032 addition start */ + IP_ADDR_INDEX_REG_6 = 0x0008, + IP_ADDR_INDEX_REG_OFFSET_MASK = 0x0030, + IP_ADDR_INDEX_REG_E = 0x0040, + /* 3032 addition end */ +}; +enum { + QL3032_PORT_CONTROL_DS = 0x0001, + QL3032_PORT_CONTROL_HH = 0x0002, + QL3032_PORT_CONTROL_EIv6 = 0x0004, + QL3032_PORT_CONTROL_EIv4 = 0x0008, + QL3032_PORT_CONTROL_ET = 0x0010, + QL3032_PORT_CONTROL_EF = 0x0020, + QL3032_PORT_CONTROL_DRM = 0x0040, + QL3032_PORT_CONTROL_RLB = 0x0080, + QL3032_PORT_CONTROL_RCB = 0x0100, + QL3032_PORT_CONTROL_KIE = 0x0200, }; enum { @@ -657,7 +694,10 @@ struct ql3xxx_port_registers { u32 internalRamWDataReg; u32 reclaimedBufferAddrRegLow; u32 reclaimedBufferAddrRegHigh; - u32 reserved[2]; + /* 3032 addition start */ + u32 tcpConfiguration; + u32 functionControl; + /* 3032 addition end */ u32 fpgaRevID; u32 localRamAddr; u32 localRamDataAutoIncr; @@ -963,6 +1003,7 @@ #define IPSEC_CONFIG_PRESENT 0x0001 #define QL3XXX_VENDOR_ID 0x1077 #define QL3022_DEVICE_ID 0x3022 +#define QL3032_DEVICE_ID 0x3032 /* MTU & Frame Size stuff */ #define NORMAL_MTU_SIZE ETH_DATA_LEN @@ -1038,11 +1079,41 @@ struct ql_rcv_buf_cb { int index; }; +/* + * Original IOCB has 3 sg entries: + * first points to skb-data area + * second points to first frag + * third points to next oal. + * OAL has 5 entries: + * 1 thru 4 point to frags + * fifth points to next oal. + */ +#define MAX_OAL_CNT ((MAX_SKB_FRAGS-1)/4 + 1) + +struct oal_entry { + u32 dma_lo; + u32 dma_hi; + u32 len; +#define OAL_LAST_ENTRY 0x80000000 /* Last valid buffer in list. */ +#define OAL_CONT_ENTRY 0x40000000 /* points to an OAL. 
(continuation) */ + u32 reserved; +}; + +struct oal { + struct oal_entry oal_entry[5]; +}; + +struct map_list { + DECLARE_PCI_UNMAP_ADDR(mapaddr); + DECLARE_PCI_UNMAP_LEN(maplen); +}; + struct ql_tx_buf_cb { struct sk_buff *skb; struct ob_mac_iocb_req *queue_entry ; - DECLARE_PCI_UNMAP_ADDR(mapaddr); - DECLARE_PCI_UNMAP_LEN(maplen); + int seg_count; + struct oal *oal; + struct map_list map[MAX_SKB_FRAGS+1]; }; /* definitions for type field */ @@ -1086,15 +1157,13 @@ struct ql3_adapter { u8 pci_width; u8 pci_x; u32 msi; - int index; struct timer_list adapter_timer; /* timer used for various functions */ spinlock_t adapter_lock; spinlock_t hw_lock; /* PCI Bus Relative Register Addresses */ - u8 __iomem *mmap_virt_base; /* stores return value from ioremap() */ - struct ql3xxx_port_registers __iomem *mem_map_registers; + struct ql3xxx_port_registers __iomem *ioaddr; u32 current_page; /* tracks current register page */ u32 msg_enable; @@ -1189,6 +1258,7 @@ struct ql3_adapter { struct delayed_work reset_work; struct delayed_work tx_timeout_work; u32 max_frame_size; + u32 device_id; }; #endif /* _QLA3XXX_H_ */
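
A note on the segment accounting in ql_get_seg_count(): the returned values follow directly from the OAL layout described in the comment before ql3xxx_send() - one ALP for skb->data, one ALP per fragment, plus one continuation ALP for every OAL the chain has to spill into. A minimal closed-form sketch of that table (illustrative only; the helper name below is made up and the patch itself keeps the explicit switch):

static int ql_seg_count_sketch(unsigned short frags)
{
	if (frags > 18)		/* the table above stops at 18 fragments */
		return -1;
	if (frags <= 2)		/* skb->data plus frags fit in the IOCB's 3 ALPs */
		return frags + 1;
	/* data ALP + fragment ALPs + one continuation ALP per OAL used;
	 * a full OAL carries 4 fragments plus a continuation, and the
	 * final OAL may use all 5 slots for fragments.
	 */
	return (frags + 1) + (frags + 1) / 4;
}

The explicit switch in the patch is arguably easier to check against the hardware layout during review, which is presumably why it is spelled out.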