diff --git a/Makefile b/Makefile index 3d94974..6ff6f2d 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 6 SUBLEVEL = 37 -EXTRAVERSION = -rc5 +EXTRAVERSION = -rc5-stor3 NAME = Flesh-Eating Bats with Fangs # *DOCUMENTATION* diff --git a/block/blk-map.c b/block/blk-map.c index 5d5dbe4..e663ac2 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, for (i = 0; i < iov_count; i++) { unsigned long uaddr = (unsigned long)iov[i].iov_base; + if (!iov[i].iov_len) + return -EINVAL; + if (uaddr & queue_dma_alignment(q)) { unaligned = 1; break; } - if (!iov[i].iov_len) - return -EINVAL; } if (unaligned || (q->dma_pad_mask & len) || map_data) diff --git a/block/blk-merge.c b/block/blk-merge.c index 77b7c26..74bc4a7 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -21,7 +21,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q, return 0; fbio = bio; - cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); + cluster = blk_queue_cluster(q); seg_size = 0; nr_phys_segs = 0; for_each_bio(bio) { @@ -87,7 +87,7 @@ EXPORT_SYMBOL(blk_recount_segments); static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, struct bio *nxt) { - if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags)) + if (!blk_queue_cluster(q)) return 0; if (bio->bi_seg_back_size + nxt->bi_seg_front_size > @@ -123,7 +123,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq, int nsegs, cluster; nsegs = 0; - cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); + cluster = blk_queue_cluster(q); /* * for each bio in rq diff --git a/block/blk-settings.c b/block/blk-settings.c index 701859f..e55f5fc 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -126,7 +126,7 @@ void blk_set_default_limits(struct queue_limits *lim) lim->alignment_offset = 0; lim->io_opt = 0; lim->misaligned = 0; - lim->no_cluster = 0; + lim->cluster = 1; } EXPORT_SYMBOL(blk_set_default_limits); @@ -464,15 +464,6 @@ EXPORT_SYMBOL(blk_queue_io_opt); void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) { blk_stack_limits(&t->limits, &b->limits, 0); - - if (!t->queue_lock) - WARN_ON_ONCE(1); - else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) { - unsigned long flags; - spin_lock_irqsave(t->queue_lock, flags); - queue_flag_clear(QUEUE_FLAG_CLUSTER, t); - spin_unlock_irqrestore(t->queue_lock, flags); - } } EXPORT_SYMBOL(blk_queue_stack_limits); @@ -545,7 +536,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, t->io_min = max(t->io_min, b->io_min); t->io_opt = lcm(t->io_opt, b->io_opt); - t->no_cluster |= b->no_cluster; + t->cluster &= b->cluster; t->discard_zeroes_data &= b->discard_zeroes_data; /* Physical block size a multiple of the logical block size? 
*/ @@ -641,7 +632,6 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, sector_t offset) { struct request_queue *t = disk->queue; - struct request_queue *b = bdev_get_queue(bdev); if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) { char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE]; @@ -652,17 +642,6 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n", top, bottom); } - - if (!t->queue_lock) - WARN_ON_ONCE(1); - else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) { - unsigned long flags; - - spin_lock_irqsave(t->queue_lock, flags); - if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) - queue_flag_clear(QUEUE_FLAG_CLUSTER, t); - spin_unlock_irqrestore(t->queue_lock, flags); - } } EXPORT_SYMBOL(disk_stack_limits); diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 013457f..41fb691 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -119,7 +119,7 @@ static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char * static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page) { - if (test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags)) + if (blk_queue_cluster(q)) return queue_var_show(queue_max_segment_size(q), (page)); return queue_var_show(PAGE_CACHE_SIZE, (page)); diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 004be80..381b09b 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -355,6 +355,12 @@ throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw) tg->slice_end[rw], jiffies); } +static inline void throtl_set_slice_end(struct throtl_data *td, + struct throtl_grp *tg, bool rw, unsigned long jiffy_end) +{ + tg->slice_end[rw] = roundup(jiffy_end, throtl_slice); +} + static inline void throtl_extend_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw, unsigned long jiffy_end) { @@ -391,6 +397,16 @@ throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw) if (throtl_slice_used(td, tg, rw)) return; + /* + * A bio has been dispatched. Also adjust slice_end. It might happen + * that initially cgroup limit was very low resulting in high + * slice_end, but later limit was bumped up and bio was dispached + * sooner, then we need to reduce slice_end. A high bogus slice_end + * is bad because it does not allow new slice to start. + */ + + throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice); + time_elapsed = jiffies - tg->slice_start[rw]; nr_slices = time_elapsed / throtl_slice; @@ -709,26 +725,21 @@ static void throtl_process_limit_change(struct throtl_data *td) struct throtl_grp *tg; struct hlist_node *pos, *n; - /* - * Make sure atomic_inc() effects from - * throtl_update_blkio_group_read_bps(), group of functions are - * visible. - * Is this required or smp_mb__after_atomic_inc() was suffcient - * after the atomic_inc(). - */ - smp_rmb(); if (!atomic_read(&td->limits_changed)) return; throtl_log(td, "limit changed =%d", atomic_read(&td->limits_changed)); - hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) { - /* - * Do I need an smp_rmb() here to make sure tg->limits_changed - * update is visible. I am relying on smp_rmb() at the - * beginning of function and not putting a new one here. - */ + /* + * Make sure updates from throtl_update_blkio_group_read_bps() group + * of functions to tg->limits_changed are visible. We do not + * want update td->limits_changed to be visible but update to + * tg->limits_changed not being visible yet on this cpu. 
Hence + * the read barrier. + */ + smp_rmb(); + hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) { if (throtl_tg_on_rr(tg) && tg->limits_changed) { throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu" " riops=%u wiops=%u", tg->bps[READ], diff --git a/block/bsg.c b/block/bsg.c index f20d6a7..0c8b64a 100644 --- a/block/bsg.c +++ b/block/bsg.c @@ -250,6 +250,14 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm, int ret, rw; unsigned int dxfer_len; void *dxferp = NULL; + struct bsg_class_device *bcd = &q->bsg_dev; + + /* if the LLD has been removed then the bsg_unregister_queue will + * eventually be called and the class_dev was freed, so we can no + * longer use this request_queue. Return no such address. + */ + if (!bcd->class_dev) + return ERR_PTR(-ENXIO); dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp, hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp, diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 11ec911..a1c6fa8 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -90,6 +90,14 @@ config SATA_INIC162X help This option enables support for Initio 162x Serial ATA. +config SATA_ACARD_AHCI + tristate "ACard AHCI variant (ATP 8620)" + depends on PCI + help + This option enables support for Acard. + + If unsure, say N. + config SATA_SIL24 tristate "Silicon Image 3124/3132 SATA support" depends on PCI diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile index c501af5..3ef3bdb 100644 --- a/drivers/ata/Makefile +++ b/drivers/ata/Makefile @@ -3,6 +3,7 @@ obj-$(CONFIG_ATA) += libata.o # non-SFF interface obj-$(CONFIG_SATA_AHCI) += ahci.o libahci.o +obj-$(CONFIG_SATA_ACARD_AHCI) += acard-ahci.o libahci.o obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o obj-$(CONFIG_SATA_FSL) += sata_fsl.o obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c new file mode 100644 index 0000000..339c210 --- /dev/null +++ b/drivers/ata/acard-ahci.c @@ -0,0 +1,528 @@ + +/* + * acard-ahci.c - ACard AHCI SATA support + * + * Maintained by: Jeff Garzik + * Please ALWAYS copy linux-ide@vger.kernel.org + * on emails. + * + * Copyright 2010 Red Hat, Inc. + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + * + * + * libata documentation is available via 'make {ps|pdf}docs', + * as Documentation/DocBook/libata.* + * + * AHCI hardware documentation: + * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf + * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ahci.h" + +#define DRV_NAME "acard-ahci" +#define DRV_VERSION "1.0" + +/* + Received FIS structure limited to 80h. 
+*/ + +#define ACARD_AHCI_RX_FIS_SZ 128 + +enum { + AHCI_PCI_BAR = 5, +}; + +enum board_ids { + board_acard_ahci, +}; + +struct acard_sg { + __le32 addr; + __le32 addr_hi; + __le32 reserved; + __le32 size; /* bit 31 (EOT) max==0x10000 (64k) */ +}; + +static void acard_ahci_qc_prep(struct ata_queued_cmd *qc); +static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc); +static int acard_ahci_port_start(struct ata_port *ap); +static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); + +#ifdef CONFIG_PM +static int acard_ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); +static int acard_ahci_pci_device_resume(struct pci_dev *pdev); +#endif + +static struct scsi_host_template acard_ahci_sht = { + AHCI_SHT("acard-ahci"), +}; + +static struct ata_port_operations acard_ops = { + .inherits = &ahci_ops, + .qc_prep = acard_ahci_qc_prep, + .qc_fill_rtf = acard_ahci_qc_fill_rtf, + .port_start = acard_ahci_port_start, +}; + +#define AHCI_HFLAGS(flags) .private_data = (void *)(flags) + +static const struct ata_port_info acard_ahci_port_info[] = { + [board_acard_ahci] = + { + AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ), + .flags = AHCI_FLAG_COMMON, + .pio_mask = ATA_PIO4, + .udma_mask = ATA_UDMA6, + .port_ops = &acard_ops, + }, +}; + +static const struct pci_device_id acard_ahci_pci_tbl[] = { + /* ACard */ + { PCI_VDEVICE(ARTOP, 0x000d), board_acard_ahci }, /* ATP8620 */ + + { } /* terminate list */ +}; + +static struct pci_driver acard_ahci_pci_driver = { + .name = DRV_NAME, + .id_table = acard_ahci_pci_tbl, + .probe = acard_ahci_init_one, + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM + .suspend = acard_ahci_pci_device_suspend, + .resume = acard_ahci_pci_device_resume, +#endif +}; + +#ifdef CONFIG_PM +static int acard_ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) +{ + struct ata_host *host = dev_get_drvdata(&pdev->dev); + struct ahci_host_priv *hpriv = host->private_data; + void __iomem *mmio = hpriv->mmio; + u32 ctl; + + if (mesg.event & PM_EVENT_SUSPEND && + hpriv->flags & AHCI_HFLAG_NO_SUSPEND) { + dev_printk(KERN_ERR, &pdev->dev, + "BIOS update required for suspend/resume\n"); + return -EIO; + } + + if (mesg.event & PM_EVENT_SLEEP) { + /* AHCI spec rev1.1 section 8.3.3: + * Software must disable interrupts prior to requesting a + * transition of the HBA to D3 state. 
+ */ + ctl = readl(mmio + HOST_CTL); + ctl &= ~HOST_IRQ_EN; + writel(ctl, mmio + HOST_CTL); + readl(mmio + HOST_CTL); /* flush */ + } + + return ata_pci_device_suspend(pdev, mesg); +} + +static int acard_ahci_pci_device_resume(struct pci_dev *pdev) +{ + struct ata_host *host = dev_get_drvdata(&pdev->dev); + int rc; + + rc = ata_pci_device_do_resume(pdev); + if (rc) + return rc; + + if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { + rc = ahci_reset_controller(host); + if (rc) + return rc; + + ahci_init_controller(host); + } + + ata_host_resume(host); + + return 0; +} +#endif + +static int acard_ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac) +{ + int rc; + + if (using_dac && + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (rc) { + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) { + dev_printk(KERN_ERR, &pdev->dev, + "64-bit DMA enable failed\n"); + return rc; + } + } + } else { + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) { + dev_printk(KERN_ERR, &pdev->dev, + "32-bit DMA enable failed\n"); + return rc; + } + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) { + dev_printk(KERN_ERR, &pdev->dev, + "32-bit consistent DMA enable failed\n"); + return rc; + } + } + return 0; +} + +static void acard_ahci_pci_print_info(struct ata_host *host) +{ + struct pci_dev *pdev = to_pci_dev(host->dev); + u16 cc; + const char *scc_s; + + pci_read_config_word(pdev, 0x0a, &cc); + if (cc == PCI_CLASS_STORAGE_IDE) + scc_s = "IDE"; + else if (cc == PCI_CLASS_STORAGE_SATA) + scc_s = "SATA"; + else if (cc == PCI_CLASS_STORAGE_RAID) + scc_s = "RAID"; + else + scc_s = "unknown"; + + ahci_print_info(host, scc_s); +} + +static unsigned int acard_ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl) +{ + struct scatterlist *sg; + struct acard_sg *acard_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ; + unsigned int si, last_si = 0; + + VPRINTK("ENTER\n"); + + /* + * Next, the S/G list. + */ + for_each_sg(qc->sg, sg, qc->n_elem, si) { + dma_addr_t addr = sg_dma_address(sg); + u32 sg_len = sg_dma_len(sg); + + /* + * ACard note: + * We must set an end-of-table (EOT) bit, + * and the segment cannot exceed 64k (0x10000) + */ + acard_sg[si].addr = cpu_to_le32(addr & 0xffffffff); + acard_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16); + acard_sg[si].size = cpu_to_le32(sg_len); + last_si = si; + } + + acard_sg[last_si].size |= cpu_to_le32(1 << 31); /* set EOT */ + + return si; +} + +static void acard_ahci_qc_prep(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct ahci_port_priv *pp = ap->private_data; + int is_atapi = ata_is_atapi(qc->tf.protocol); + void *cmd_tbl; + u32 opts; + const u32 cmd_fis_len = 5; /* five dwords */ + unsigned int n_elem; + + /* + * Fill in command table information. First, the header, + * a SATA Register - Host to Device command FIS. + */ + cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ; + + ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl); + if (is_atapi) { + memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32); + memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len); + } + + n_elem = 0; + if (qc->flags & ATA_QCFLAG_DMAMAP) + n_elem = acard_ahci_fill_sg(qc, cmd_tbl); + + /* + * Fill in command slot information. 
+ * + * ACard note: prd table length not filled in + */ + opts = cmd_fis_len | (qc->dev->link->pmp << 12); + if (qc->tf.flags & ATA_TFLAG_WRITE) + opts |= AHCI_CMD_WRITE; + if (is_atapi) + opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH; + + ahci_fill_cmd_slot(pp, qc->tag, opts); +} + +static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc) +{ + struct ahci_port_priv *pp = qc->ap->private_data; + u8 *rx_fis = pp->rx_fis; + + if (pp->fbs_enabled) + rx_fis += qc->dev->link->pmp * ACARD_AHCI_RX_FIS_SZ; + + /* + * After a successful execution of an ATA PIO data-in command, + * the device doesn't send D2H Reg FIS to update the TF and + * the host should take TF and E_Status from the preceding PIO + * Setup FIS. + */ + if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE && + !(qc->flags & ATA_QCFLAG_FAILED)) { + ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf); + qc->result_tf.command = (rx_fis + RX_FIS_PIO_SETUP)[15]; + } else + ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf); + + return true; +} + +static int acard_ahci_port_start(struct ata_port *ap) +{ + struct ahci_host_priv *hpriv = ap->host->private_data; + struct device *dev = ap->host->dev; + struct ahci_port_priv *pp; + void *mem; + dma_addr_t mem_dma; + size_t dma_sz, rx_fis_sz; + + pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); + if (!pp) + return -ENOMEM; + + /* check FBS capability */ + if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) { + void __iomem *port_mmio = ahci_port_base(ap); + u32 cmd = readl(port_mmio + PORT_CMD); + if (cmd & PORT_CMD_FBSCP) + pp->fbs_supported = true; + else if (hpriv->flags & AHCI_HFLAG_YES_FBS) { + dev_printk(KERN_INFO, dev, + "port %d can do FBS, forcing FBSCP\n", + ap->port_no); + pp->fbs_supported = true; + } else + dev_printk(KERN_WARNING, dev, + "port %d is not capable of FBS\n", + ap->port_no); + } + + if (pp->fbs_supported) { + dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ; + rx_fis_sz = ACARD_AHCI_RX_FIS_SZ * 16; + } else { + dma_sz = AHCI_PORT_PRIV_DMA_SZ; + rx_fis_sz = ACARD_AHCI_RX_FIS_SZ; + } + + mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL); + if (!mem) + return -ENOMEM; + memset(mem, 0, dma_sz); + + /* + * First item in chunk of DMA memory: 32-slot command table, + * 32 bytes each in size + */ + pp->cmd_slot = mem; + pp->cmd_slot_dma = mem_dma; + + mem += AHCI_CMD_SLOT_SZ; + mem_dma += AHCI_CMD_SLOT_SZ; + + /* + * Second item: Received-FIS area + */ + pp->rx_fis = mem; + pp->rx_fis_dma = mem_dma; + + mem += rx_fis_sz; + mem_dma += rx_fis_sz; + + /* + * Third item: data area for storing a single command + * and its scatter-gather table + */ + pp->cmd_tbl = mem; + pp->cmd_tbl_dma = mem_dma; + + /* + * Save off initial list of interrupts to be enabled. 
+ * This could be changed later + */ + pp->intr_mask = DEF_PORT_IRQ; + + ap->private_data = pp; + + /* engage engines, captain */ + return ahci_port_resume(ap); +} + +static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + static int printed_version; + unsigned int board_id = ent->driver_data; + struct ata_port_info pi = acard_ahci_port_info[board_id]; + const struct ata_port_info *ppi[] = { &pi, NULL }; + struct device *dev = &pdev->dev; + struct ahci_host_priv *hpriv; + struct ata_host *host; + int n_ports, i, rc; + + VPRINTK("ENTER\n"); + + WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS); + + if (!printed_version++) + dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); + + /* acquire resources */ + rc = pcim_enable_device(pdev); + if (rc) + return rc; + + /* AHCI controllers often implement SFF compatible interface. + * Grab all PCI BARs just in case. + */ + rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME); + if (rc == -EBUSY) + pcim_pin_device(pdev); + if (rc) + return rc; + + hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); + if (!hpriv) + return -ENOMEM; + hpriv->flags |= (unsigned long)pi.private_data; + + if (!(hpriv->flags & AHCI_HFLAG_NO_MSI)) + pci_enable_msi(pdev); + + hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR]; + + /* save initial config */ + ahci_save_initial_config(&pdev->dev, hpriv, 0, 0); + + /* prepare host */ + if (hpriv->cap & HOST_CAP_NCQ) + pi.flags |= ATA_FLAG_NCQ; + + if (hpriv->cap & HOST_CAP_PMP) + pi.flags |= ATA_FLAG_PMP; + + ahci_set_em_messages(hpriv, &pi); + + /* CAP.NP sometimes indicate the index of the last enabled + * port, at other times, that of the last possible port, so + * determining the maximum port number requires looking at + * both CAP.NP and port_map. 
+ */ + n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map)); + + host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); + if (!host) + return -ENOMEM; + host->private_data = hpriv; + + if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss) + host->flags |= ATA_HOST_PARALLEL_SCAN; + else + printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n"); + + for (i = 0; i < host->n_ports; i++) { + struct ata_port *ap = host->ports[i]; + + ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar"); + ata_port_pbar_desc(ap, AHCI_PCI_BAR, + 0x100 + ap->port_no * 0x80, "port"); + + /* set initial link pm policy */ + /* + ap->pm_policy = NOT_AVAILABLE; + */ + /* disabled/not-implemented port */ + if (!(hpriv->port_map & (1 << i))) + ap->ops = &ata_dummy_port_ops; + } + + /* initialize adapter */ + rc = acard_ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64); + if (rc) + return rc; + + rc = ahci_reset_controller(host); + if (rc) + return rc; + + ahci_init_controller(host); + acard_ahci_pci_print_info(host); + + pci_set_master(pdev); + return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED, + &acard_ahci_sht); +} + +static int __init acard_ahci_init(void) +{ + return pci_register_driver(&acard_ahci_pci_driver); +} + +static void __exit acard_ahci_exit(void) +{ + pci_unregister_driver(&acard_ahci_pci_driver); +} + +MODULE_AUTHOR("Jeff Garzik"); +MODULE_DESCRIPTION("ACard AHCI SATA low-level driver"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, acard_ahci_pci_tbl); +MODULE_VERSION(DRV_VERSION); + +module_init(acard_ahci_init); +module_exit(acard_ahci_exit); diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index 329cbbb..3e606c3 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -311,6 +311,8 @@ extern struct device_attribute *ahci_sdev_attrs[]; extern struct ata_port_operations ahci_ops; +void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, + u32 opts); void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv, unsigned int force_port_map, @@ -326,6 +328,7 @@ int ahci_stop_engine(struct ata_port *ap); void ahci_start_engine(struct ata_port *ap); int ahci_check_ready(struct ata_link *link); int ahci_kick_engine(struct ata_port *ap); +int ahci_port_resume(struct ata_port *ap); void ahci_set_em_messages(struct ahci_host_priv *hpriv, struct ata_port_info *pi); int ahci_reset_em(struct ata_host *host); diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index ebc08d6..26d4523 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -87,10 +87,7 @@ static int ahci_hardreset(struct ata_link *link, unsigned int *class, static void ahci_postreset(struct ata_link *link, unsigned int *class); static void ahci_error_handler(struct ata_port *ap); static void ahci_post_internal_cmd(struct ata_queued_cmd *qc); -static int ahci_port_resume(struct ata_port *ap); static void ahci_dev_config(struct ata_device *dev); -static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, - u32 opts); #ifdef CONFIG_PM static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg); #endif @@ -1133,8 +1130,8 @@ static unsigned int ahci_dev_classify(struct ata_port *ap) return ata_dev_classify(&tf); } -static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, - u32 opts) +void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, + u32 opts) { dma_addr_t cmd_tbl_dma; @@ -1145,6 +1142,7 @@ static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, pp->cmd_slot[tag].tbl_addr 
= cpu_to_le32(cmd_tbl_dma & 0xffffffff); pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16); } +EXPORT_SYMBOL_GPL(ahci_fill_cmd_slot); int ahci_kick_engine(struct ata_port *ap) { @@ -1918,7 +1916,7 @@ static void ahci_pmp_detach(struct ata_port *ap) writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); } -static int ahci_port_resume(struct ata_port *ap) +int ahci_port_resume(struct ata_port *ap) { ahci_power_up(ap); ahci_start_port(ap); @@ -1930,6 +1928,7 @@ static int ahci_port_resume(struct ata_port *ap) return 0; } +EXPORT_SYMBOL_GPL(ahci_port_resume); #ifdef CONFIG_PM static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg) diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 66aa4be..5defc74 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -346,12 +346,11 @@ struct device_attribute *ata_common_sdev_attrs[] = { }; EXPORT_SYMBOL_GPL(ata_common_sdev_attrs); -static void ata_scsi_invalid_field(struct scsi_cmnd *cmd, - void (*done)(struct scsi_cmnd *)) +static void ata_scsi_invalid_field(struct scsi_cmnd *cmd) { ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0); /* "Invalid field in cbd" */ - done(cmd); + cmd->scsi_done(cmd); } /** @@ -719,7 +718,6 @@ EXPORT_SYMBOL_GPL(ata_scsi_ioctl); * ata_scsi_qc_new - acquire new ata_queued_cmd reference * @dev: ATA device to which the new command is attached * @cmd: SCSI command that originated this ATA command - * @done: SCSI command completion function * * Obtain a reference to an unused ata_queued_cmd structure, * which is the basic libata structure representing a single @@ -736,21 +734,20 @@ EXPORT_SYMBOL_GPL(ata_scsi_ioctl); * Command allocated, or %NULL if none available. */ static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev, - struct scsi_cmnd *cmd, - void (*done)(struct scsi_cmnd *)) + struct scsi_cmnd *cmd) { struct ata_queued_cmd *qc; qc = ata_qc_new_init(dev); if (qc) { qc->scsicmd = cmd; - qc->scsidone = done; + qc->scsidone = cmd->scsi_done; qc->sg = scsi_sglist(cmd); qc->n_elem = scsi_sg_count(cmd); } else { cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1); - done(cmd); + cmd->scsi_done(cmd); } return qc; @@ -1735,7 +1732,6 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc) * ata_scsi_translate - Translate then issue SCSI command to ATA device * @dev: ATA device to which the command is addressed * @cmd: SCSI command to execute - * @done: SCSI command completion function * @xlat_func: Actor which translates @cmd to an ATA taskfile * * Our ->queuecommand() function has decided that the SCSI @@ -1759,7 +1755,6 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc) * needs to be deferred. 
*/ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd, - void (*done)(struct scsi_cmnd *), ata_xlat_func_t xlat_func) { struct ata_port *ap = dev->link->ap; @@ -1768,7 +1763,7 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd, VPRINTK("ENTER\n"); - qc = ata_scsi_qc_new(dev, cmd, done); + qc = ata_scsi_qc_new(dev, cmd); if (!qc) goto err_mem; @@ -1804,14 +1799,14 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd, early_finish: ata_qc_free(qc); - qc->scsidone(cmd); + cmd->scsi_done(cmd); DPRINTK("EXIT - early finish (good or error)\n"); return 0; err_did: ata_qc_free(qc); cmd->result = (DID_ERROR << 16); - qc->scsidone(cmd); + cmd->scsi_done(cmd); err_mem: DPRINTK("EXIT - internal\n"); return 0; @@ -3116,7 +3111,6 @@ static inline void ata_scsi_dump_cdb(struct ata_port *ap, } static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, - void (*done)(struct scsi_cmnd *), struct ata_device *dev) { u8 scsi_op = scmd->cmnd[0]; @@ -3150,9 +3144,9 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, } if (xlat_func) - rc = ata_scsi_translate(dev, scmd, done, xlat_func); + rc = ata_scsi_translate(dev, scmd, xlat_func); else - ata_scsi_simulate(dev, scmd, done); + ata_scsi_simulate(dev, scmd); return rc; @@ -3160,7 +3154,7 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, DPRINTK("bad CDB len=%u, scsi_op=0x%02x, max=%u\n", scmd->cmd_len, scsi_op, dev->cdb_len); scmd->result = DID_ERROR << 16; - done(scmd); + scmd->scsi_done(scmd); return 0; } @@ -3199,7 +3193,7 @@ int ata_scsi_queuecmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd) dev = ata_scsi_find_dev(ap, scsidev); if (likely(dev)) - rc = __ata_scsi_queuecmd(cmd, cmd->scsi_done, dev); + rc = __ata_scsi_queuecmd(cmd, dev); else { cmd->result = (DID_BAD_TARGET << 16); cmd->scsi_done(cmd); @@ -3214,7 +3208,6 @@ int ata_scsi_queuecmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd) * ata_scsi_simulate - simulate SCSI command on ATA device * @dev: the target device * @cmd: SCSI command being sent to device. - * @done: SCSI command completion function. * * Interprets and directly executes a select list of SCSI commands * that can be handled internally. @@ -3223,8 +3216,7 @@ int ata_scsi_queuecmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd) * spin_lock_irqsave(host lock) */ -void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, - void (*done)(struct scsi_cmnd *)) +void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd) { struct ata_scsi_args args; const u8 *scsicmd = cmd->cmnd; @@ -3233,17 +3225,17 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, args.dev = dev; args.id = dev->id; args.cmd = cmd; - args.done = done; + args.done = cmd->scsi_done; switch(scsicmd[0]) { /* TODO: worth improving? */ case FORMAT_UNIT: - ata_scsi_invalid_field(cmd, done); + ata_scsi_invalid_field(cmd); break; case INQUIRY: if (scsicmd[1] & 2) /* is CmdDt set? */ - ata_scsi_invalid_field(cmd, done); + ata_scsi_invalid_field(cmd); else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? 
*/ ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std); else switch (scsicmd[2]) { @@ -3269,7 +3261,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b2); break; default: - ata_scsi_invalid_field(cmd, done); + ata_scsi_invalid_field(cmd); break; } break; @@ -3281,7 +3273,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, case MODE_SELECT: /* unconditionally return */ case MODE_SELECT_10: /* bad-field-in-cdb */ - ata_scsi_invalid_field(cmd, done); + ata_scsi_invalid_field(cmd); break; case READ_CAPACITY: @@ -3292,7 +3284,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16) ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); else - ata_scsi_invalid_field(cmd, done); + ata_scsi_invalid_field(cmd); break; case REPORT_LUNS: @@ -3302,7 +3294,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, case REQUEST_SENSE: ata_scsi_set_sense(cmd, 0, 0, 0); cmd->result = (DRIVER_SENSE << 24); - done(cmd); + cmd->scsi_done(cmd); break; /* if we reach this, then writeback caching is disabled, @@ -3324,14 +3316,14 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, if ((tmp8 == 0x4) && (!scsicmd[3]) && (!scsicmd[4])) ata_scsi_rbuf_fill(&args, ata_scsiop_noop); else - ata_scsi_invalid_field(cmd, done); + ata_scsi_invalid_field(cmd); break; /* all other commands */ default: ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0); /* "Invalid command operation code" */ - done(cmd); + cmd->scsi_done(cmd); break; } } @@ -3858,7 +3850,6 @@ EXPORT_SYMBOL_GPL(ata_sas_slave_configure); /** * ata_sas_queuecmd - Issue SCSI cdb to libata-managed device * @cmd: SCSI command to be sent - * @done: Completion function, called when command is complete * @ap: ATA port to which the command is being sent * * RETURNS: @@ -3866,18 +3857,17 @@ EXPORT_SYMBOL_GPL(ata_sas_slave_configure); * 0 otherwise. 
*/ -int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), - struct ata_port *ap) +int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap) { int rc = 0; ata_scsi_dump_cdb(ap, cmd); if (likely(ata_dev_enabled(ap->link.device))) - rc = __ata_scsi_queuecmd(cmd, done, ap->link.device); + rc = __ata_scsi_queuecmd(cmd, ap->link.device); else { cmd->result = (DID_BAD_TARGET << 16); - done(cmd); + cmd->scsi_done(cmd); } return rc; } diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 89d8a7c..24487d4 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -3627,17 +3627,19 @@ static void drbdd(struct drbd_conf *mdev) } shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header); - rv = drbd_recv(mdev, &header->h80.payload, shs); - if (unlikely(rv != shs)) { - dev_err(DEV, "short read while reading sub header: rv=%d\n", rv); - goto err_out; - } - if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) { dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size); goto err_out; } + if (shs) { + rv = drbd_recv(mdev, &header->h80.payload, shs); + if (unlikely(rv != shs)) { + dev_err(DEV, "short read while reading sub header: rv=%d\n", rv); + goto err_out; + } + } + rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs); if (unlikely(!rv)) { diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h index 181ea03..ab2bd09 100644 --- a/drivers/block/drbd/drbd_req.h +++ b/drivers/block/drbd/drbd_req.h @@ -339,7 +339,8 @@ static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what) } /* completion of master bio is outside of spinlock. - * If you need it irqsave, do it your self! */ + * If you need it irqsave, do it your self! + * Which means: don't use from bio endio callback. */ static inline int req_mod(struct drbd_request *req, enum drbd_req_event what) { diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 47d223c..34f224b 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -193,8 +193,10 @@ void drbd_endio_sec(struct bio *bio, int error) */ void drbd_endio_pri(struct bio *bio, int error) { + unsigned long flags; struct drbd_request *req = bio->bi_private; struct drbd_conf *mdev = req->mdev; + struct bio_and_error m; enum drbd_req_event what; int uptodate = bio_flagged(bio, BIO_UPTODATE); @@ -220,7 +222,13 @@ void drbd_endio_pri(struct bio *bio, int error) bio_put(req->private_bio); req->private_bio = ERR_PTR(error); - req_mod(req, what); + /* not req_mod(), we need irqsave here! 
*/ + spin_lock_irqsave(&mdev->req_lock, flags); + __req_mod(req, what, &m); + spin_unlock_irqrestore(&mdev->req_lock, flags); + + if (m.bio) + complete_master_bio(mdev, &m); } int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel) diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 90267f8..e2da191 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1131,11 +1131,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, */ q->limits = *limits; - if (limits->no_cluster) - queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q); - else - queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q); - if (!dm_table_supports_discards(t)) queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); else diff --git a/drivers/md/md.c b/drivers/md/md.c index 84c46a1..52694d2 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -4296,9 +4296,6 @@ static int md_alloc(dev_t dev, char *name) goto abort; mddev->queue->queuedata = mddev; - /* Can be unlocked because the queue is new: no concurrency */ - queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue); - blk_queue_make_request(mddev->queue, md_make_request); disk = alloc_disk(1 << shift); diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index d37c733..0bcd580 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -156,6 +156,8 @@ static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter, if (!(a_status & ZFCP_STATUS_COMMON_RUNNING) || a_status & ZFCP_STATUS_COMMON_ERP_FAILED) return 0; + if (p_status & ZFCP_STATUS_COMMON_NOESC) + return need; if (!(a_status & ZFCP_STATUS_COMMON_UNBLOCKED)) need = ZFCP_ERP_ACTION_REOPEN_ADAPTER; /* fall through */ @@ -188,6 +190,9 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status, atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &zfcp_sdev->status); erp_action = &zfcp_sdev->erp_action; + memset(erp_action, 0, sizeof(struct zfcp_erp_action)); + erp_action->port = port; + erp_action->sdev = sdev; if (!(atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_RUNNING)) act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY; @@ -200,6 +205,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status, zfcp_erp_action_dismiss_port(port); atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status); erp_action = &port->erp_action; + memset(erp_action, 0, sizeof(struct zfcp_erp_action)); + erp_action->port = port; if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING)) act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY; break; @@ -209,6 +216,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status, zfcp_erp_action_dismiss_adapter(adapter); atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status); erp_action = &adapter->erp_action; + memset(erp_action, 0, sizeof(struct zfcp_erp_action)); if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_RUNNING)) act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY; @@ -218,10 +226,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status, return NULL; } - memset(erp_action, 0, sizeof(struct zfcp_erp_action)); erp_action->adapter = adapter; - erp_action->port = port; - erp_action->sdev = sdev; erp_action->action = need; erp_action->status = act_status; diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index be03174..2eb7dd5 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -851,7 +851,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd) 
zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); - req->data = zfcp_sdev; + req->data = sdev; req->handler = zfcp_fsf_abort_fcp_command_handler; req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; req->qtcb->header.port_handle = zfcp_sdev->port->handle; @@ -2069,8 +2069,6 @@ static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req) struct fcp_resp_with_ext *fcp_rsp; unsigned long flags; - zfcp_fsf_fcp_handler_common(req); - read_lock_irqsave(&req->adapter->abort_lock, flags); scpnt = req->data; @@ -2079,6 +2077,8 @@ static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req) return; } + zfcp_fsf_fcp_handler_common(req); + if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) { set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED); goto skip_fsfstatus; @@ -2170,12 +2170,13 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd) struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; struct zfcp_qdio *qdio = adapter->qdio; struct fsf_qtcb_bottom_io *io; + unsigned long flags; if (unlikely(!(atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_UNBLOCKED))) return -EBUSY; - spin_lock(&qdio->req_q_lock); + spin_lock_irqsave(&qdio->req_q_lock, flags); if (atomic_read(&qdio->req_q_free) <= 0) { atomic_inc(&qdio->req_q_full); goto out; @@ -2239,7 +2240,7 @@ failed_scsi_cmnd: zfcp_fsf_req_free(req); scsi_cmnd->host_scribble = NULL; out: - spin_unlock(&qdio->req_q_lock); + spin_unlock_irqrestore(&qdio->req_q_lock, flags); return retval; } diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 6bd2dbc..63529ed 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -76,8 +76,8 @@ static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result) scpnt->scsi_done(scpnt); } -static int zfcp_scsi_queuecommand_lck(struct scsi_cmnd *scpnt, - void (*done) (struct scsi_cmnd *)) +static +int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt) { struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device); struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; @@ -87,7 +87,6 @@ static int zfcp_scsi_queuecommand_lck(struct scsi_cmnd *scpnt, /* reset the status for this request */ scpnt->result = 0; scpnt->host_scribble = NULL; - scpnt->scsi_done = done; scsi_result = fc_remote_port_chkready(rport); if (unlikely(scsi_result)) { @@ -127,8 +126,6 @@ static int zfcp_scsi_queuecommand_lck(struct scsi_cmnd *scpnt, return ret; } -static DEF_SCSI_QCMD(zfcp_scsi_queuecommand) - static int zfcp_scsi_slave_alloc(struct scsi_device *sdev) { struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index b2fb2b2..a6dea08 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -90,11 +90,7 @@ static const struct pci_device_id hpsa_pci_device_id[] = { {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3252}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3253}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3254}, -#define PCI_DEVICE_ID_HP_CISSF 0x333f - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x333F}, - {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, - PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, - {PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, + {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, {0,} }; @@ -113,8 +109,6 @@ static struct board_type products[] = { {0x3249103C, "Smart Array P812", &SA5_access}, {0x324a103C, "Smart Array P712m", &SA5_access}, {0x324b103C, "Smart Array 
P711m", &SA5_access}, - {0x3233103C, "StorageWorks P1210m", &SA5_access}, - {0x333F103C, "StorageWorks P1210m", &SA5_access}, {0x3250103C, "Smart Array", &SA5_access}, {0x3250113C, "Smart Array", &SA5_access}, {0x3250123C, "Smart Array", &SA5_access}, diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index de2e09e..d3c5905 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -5748,7 +5748,7 @@ static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd, } if (ipr_is_gata(res) && res->sata_port) - return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap); + return ata_sas_queuecmd(scsi_cmd, res->sata_port->ap); ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); ioarcb = &ipr_cmd->ioarcb; diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index 29251fa..5815cbe 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -211,8 +211,7 @@ static int sas_queuecommand_lck(struct scsi_cmnd *cmd, unsigned long flags; spin_lock_irqsave(dev->sata_dev.ap->lock, flags); - res = ata_sas_queuecmd(cmd, scsi_done, - dev->sata_dev.ap); + res = ata_sas_queuecmd(cmd, dev->sata_dev.ap); spin_unlock_irqrestore(dev->sata_dev.ap->lock, flags); goto out; } diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index 0433ea6..b37c8a3 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c @@ -951,8 +951,8 @@ static int _osd_req_finalize_cdb_cont(struct osd_request *or, const u8 *cap_key) /* create a bio for continuation segment */ bio = bio_map_kern(req_q, or->cdb_cont.buff, or->cdb_cont.total_bytes, GFP_KERNEL); - if (unlikely(!bio)) - return -ENOMEM; + if (IS_ERR(bio)) + return PTR_ERR(bio); bio->bi_rw |= REQ_WRITE; diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index 5e76a62..300d59f 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -62,6 +62,7 @@ static unsigned int pmcraid_debug_log; static unsigned int pmcraid_disable_aen; static unsigned int pmcraid_log_level = IOASC_LOG_LEVEL_MUST; +static unsigned int pmcraid_enable_msix; /* * Data structures to support multiple adapters by the LLD. 
@@ -4691,7 +4692,8 @@ pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance) int rc; struct pci_dev *pdev = pinstance->pdev; - if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) { + if ((pmcraid_enable_msix) && + (pci_find_capability(pdev, PCI_CAP_ID_MSIX))) { int num_hrrq = PMCRAID_NUM_MSIX_VECTORS; struct msix_entry entries[PMCRAID_NUM_MSIX_VECTORS]; int i; diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h index 1134279..4db210d 100644 --- a/drivers/scsi/pmcraid.h +++ b/drivers/scsi/pmcraid.h @@ -42,7 +42,7 @@ */ #define PMCRAID_DRIVER_NAME "PMC MaxRAID" #define PMCRAID_DEVFILE "pmcsas" -#define PMCRAID_DRIVER_VERSION "2.0.3" +#define PMCRAID_DRIVER_VERSION "1.0.3" #define PMCRAID_DRIVER_DATE __DATE__ #define PMCRAID_FW_VERSION_1 0x002 @@ -333,11 +333,9 @@ struct pmcraid_config_table_entry { __u8 lun[PMCRAID_LUN_LEN]; } __attribute__((packed, aligned(4))); -/* extended configuration table sizes are of 64 bytes in size */ -#define PMCRAID_CFGTE_EXT_SIZE 32 +/* extended configuration table sizes are also of 32 bytes in size */ struct pmcraid_config_table_entry_ext { struct pmcraid_config_table_entry cfgte; - __u8 cfgte_ext[PMCRAID_CFGTE_EXT_SIZE]; }; /* resource types (config_table_entry.resource_type values) */ diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 3a22eff..9ce539d 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2409,7 +2409,6 @@ struct qla_hw_data { uint32_t enable_target_reset :1; uint32_t enable_lip_full_login :1; uint32_t enable_led_scheme :1; - uint32_t inta_enabled :1; uint32_t msi_enabled :1; uint32_t msix_enabled :1; uint32_t disable_serdes :1; diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 5f94430..4c1ba62 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -1061,6 +1061,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, fcp_cmnd->additional_cdb_len |= 2; int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun); + host_to_fcp_swap((uint8_t *)&fcp_cmnd->lun, sizeof(fcp_cmnd->lun)); memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len); cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32( diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 1f06ddd..7f77898 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -2491,14 +2491,15 @@ skip_msix: skip_msi: ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, - IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp); + ha->flags.msi_enabled ? 
0 : IRQF_SHARED, + QLA2XXX_DRIVER_NAME, rsp); if (ret) { qla_printk(KERN_WARNING, ha, "Failed to reserve interrupt %d already in use.\n", ha->pdev->irq); goto fail; } - ha->flags.inta_enabled = 1; + clear_risc_ints: /* diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 8d9edfb..ae2acac 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -2749,6 +2749,7 @@ sufficient_dsds: goto queuing_error_fcp_cmnd; int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); + host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); /* build FCP_CMND IU */ memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd)); diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 1644eab..2c0876c 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -829,7 +829,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) { scsi_qla_host_t *vha = shost_priv(cmd->device->host); srb_t *sp; - int ret; + int ret = SUCCESS; unsigned int id, lun; unsigned long flags; int wait = 0; @@ -2064,6 +2064,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ha->init_cb_size = sizeof(struct mid_init_cb_81xx); ha->gid_list_info_size = 8; ha->optrom_size = OPTROM_SIZE_82XX; + ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; ha->isp_ops = &qla82xx_isp_ops; ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; ha->flash_data_off = FARX_ACCESS_FLASH_DATA; diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 8edbccb..cf0075a 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,9 +7,9 @@ /* * Driver version */ -#define QLA2XXX_VERSION "8.03.04-k0" +#define QLA2XXX_VERSION "8.03.05-k0" #define QLA_DRIVER_MAJOR_VER 8 #define QLA_DRIVER_MINOR_VER 3 -#define QLA_DRIVER_PATCH_VER 4 +#define QLA_DRIVER_PATCH_VER 5 #define QLA_DRIVER_BETA_VER 0 diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 4677129..cf78738 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -615,7 +615,7 @@ static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd) return rtn; } -static int __scsi_try_to_abort_cmd(struct scsi_cmnd *scmd) +static int scsi_try_to_abort_cmd(struct scsi_cmnd *scmd) { if (!scmd->device->host->hostt->eh_abort_handler) return FAILED; @@ -623,31 +623,9 @@ static int __scsi_try_to_abort_cmd(struct scsi_cmnd *scmd) return scmd->device->host->hostt->eh_abort_handler(scmd); } -/** - * scsi_try_to_abort_cmd - Ask host to abort a running command. - * @scmd: SCSI cmd to abort from Lower Level. - * - * Notes: - * This function will not return until the user's completion function - * has been called. there is no timeout on this operation. if the - * author of the low-level driver wishes this operation to be timed, - * they can provide this facility themselves. helper functions in - * scsi_error.c can be supplied to make this easier to do. - */ -static int scsi_try_to_abort_cmd(struct scsi_cmnd *scmd) -{ - /* - * scsi_done was called just after the command timed out and before - * we had a chance to process it. 
(db) - */ - if (scmd->serial_number == 0) - return SUCCESS; - return __scsi_try_to_abort_cmd(scmd); -} - static void scsi_abort_eh_cmnd(struct scsi_cmnd *scmd) { - if (__scsi_try_to_abort_cmd(scmd) != SUCCESS) + if (scsi_try_to_abort_cmd(scmd) != SUCCESS) if (scsi_try_bus_device_reset(scmd) != SUCCESS) if (scsi_try_target_reset(scmd) != SUCCESS) if (scsi_try_bus_reset(scmd) != SUCCESS) diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index eafeeda..4a38422 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1403,11 +1403,6 @@ static void scsi_softirq_done(struct request *rq) INIT_LIST_HEAD(&cmd->eh_entry); - /* - * Set the serial numbers back to zero - */ - cmd->serial_number = 0; - atomic_inc(&cmd->device->iodone_cnt); if (cmd->result) atomic_inc(&cmd->device->ioerr_cnt); @@ -1642,9 +1637,8 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost, blk_queue_max_segment_size(q, dma_get_max_seg_size(dev)); - /* New queue, no concurrency on queue_flags */ if (!shost->use_clustering) - queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q); + q->limits.cluster = 0; /* * set a reasonable default alignment on word boundaries: the diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index aae86fd..95aeeeb 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -250,7 +250,7 @@ struct queue_limits { unsigned char misaligned; unsigned char discard_misaligned; - unsigned char no_cluster; + unsigned char cluster; signed char discard_zeroes_data; }; @@ -380,7 +380,6 @@ struct request_queue #endif }; -#define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */ #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ #define QUEUE_FLAG_STOPPED 2 /* queue is stopped */ #define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */ @@ -403,7 +402,6 @@ struct request_queue #define QUEUE_FLAG_SECDISCARD 19 /* supports SECDISCARD */ #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ - (1 << QUEUE_FLAG_CLUSTER) | \ (1 << QUEUE_FLAG_STACKABLE) | \ (1 << QUEUE_FLAG_SAME_COMP) | \ (1 << QUEUE_FLAG_ADD_RANDOM)) @@ -510,6 +508,11 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) #define rq_data_dir(rq) ((rq)->cmd_flags & 1) +static inline unsigned int blk_queue_cluster(struct request_queue *q) +{ + return q->limits.cluster; +} + /* * We regard a request as sync, if either a read or a sync write */ diff --git a/include/linux/libata.h b/include/linux/libata.h index d947b12..c9c5d7a 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -996,8 +996,7 @@ extern int ata_sas_port_init(struct ata_port *); extern int ata_sas_port_start(struct ata_port *ap); extern void ata_sas_port_stop(struct ata_port *ap); extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *); -extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), - struct ata_port *ap); +extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap); extern int sata_scr_valid(struct ata_link *link); extern int sata_scr_read(struct ata_link *link, int reg, u32 *val); extern int sata_scr_write(struct ata_link *link, int reg, u32 val); @@ -1040,8 +1039,7 @@ extern unsigned int ata_do_dev_read_id(struct ata_device *dev, struct ata_taskfile *tf, u16 *id); extern void ata_qc_complete(struct ata_queued_cmd *qc); extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active); -extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, - void (*done)(struct scsi_cmnd 
*)); +extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd); extern int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[]);
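
The queue_limits hunks above replace the per-queue QUEUE_FLAG_CLUSTER bit with a cluster field in struct queue_limits, so the property now stacks through blk_stack_limits() like any other limit instead of needing the queue lock. A minimal sketch of the resulting semantics follows; example_queue_cluster() and example_stack_cluster() are hypothetical names used only for illustration, though the field and the AND operation mirror the patch:

#include <linux/blkdev.h>

/* Accessor equivalent to the blk_queue_cluster() added in blkdev.h:
 * clustering is now a queue limit, not a queue flag. */
static inline unsigned int example_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}

/*
 * Hypothetical stacking helper: a DM/MD top-level queue keeps segment
 * clustering only if every underlying queue supports it, which is why
 * blk_stack_limits() can simply AND the field instead of clearing
 * QUEUE_FLAG_CLUSTER under t->queue_lock.
 */
static void example_stack_cluster(struct queue_limits *top,
				  const struct queue_limits *bottom)
{
	top->cluster &= bottom->cluster;  /* was: top->no_cluster |= bottom->no_cluster */
}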
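
Several of the SCSI hunks (libata-scsi.c, zfcp_scsi.c, ipr.c, libsas) drop the done() callback argument and complete commands through cmd->scsi_done() instead, matching the lock-free ->queuecommand(struct Scsi_Host *, struct scsi_cmnd *) prototype. A minimal sketch of an LLD entry point written against that convention; my_lld_queuecommand(), my_lld_device_ready() and my_lld_start_io() are hypothetical names, not part of the patch:

#include <linux/types.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

static bool my_lld_device_ready(struct Scsi_Host *shost);	/* hypothetical */
static int my_lld_start_io(struct scsi_cmnd *cmd);		/* hypothetical */

static int my_lld_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	/* No done() argument any more; reset per-command state here. */
	cmd->result = 0;
	cmd->host_scribble = NULL;

	if (!my_lld_device_ready(shost)) {
		/* Complete immediately through the callback embedded in the command. */
		cmd->result = DID_NO_CONNECT << 16;
		cmd->scsi_done(cmd);
		return 0;
	}

	/* Ask the midlayer to retry later if the hardware queue is full. */
	return my_lld_start_io(cmd) ? SCSI_MLQUEUE_HOST_BUSY : 0;
}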
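
The acard-ahci S/G and command-slot fill routines write 64-bit DMA addresses as two 32-bit halves, using (addr >> 16) >> 16 rather than addr >> 32 so the shift stays well-defined (and warning-free) when dma_addr_t is only 32 bits wide. A minimal sketch of that split; split_dma_addr() is a hypothetical helper, not part of the patch:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical helper mirroring acard_ahci_fill_sg()/ahci_fill_cmd_slot(). */
static void split_dma_addr(dma_addr_t addr, __le32 *lo, __le32 *hi)
{
	*lo = cpu_to_le32(addr & 0xffffffff);
	/* Two 16-bit shifts instead of one 32-bit shift: safe on 32-bit dma_addr_t. */
	*hi = cpu_to_le32((addr >> 16) >> 16);
}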