From: HighPoint Linux Team

A wholesale replacement for the first submission.

Merge Andrew Morton's patches:
- Provide locking for global list
- Fix debug printks
- uninline function with multiple callsites
- coding style fixups
- remove unneeded casts of void*
- kfree(NULL) is legal
- Don't "succeed" if register_chrdev() failed - otherwise we'll later
  unregister a not-registered chrdev.
- Don't return from hptiop_do_ioctl() with the spinlock held.
- uninline __hpt_do_ioctl()

Update for Arjan van de Ven's comments:
- put all asm/ includes after the linux/ ones
- replace mdelay with msleep
- add pci posting flush
- do not set pci command register in map_pci_bar
- do not try merging sg elements in hptiop_buildsgl()
- remove unused outstandingcommands member from hba structure
- remove unimplemented hptiop_abort() handler
- remove typedef u32 hpt_id_t

Other updates:
- fix endianness

Signed-off-by: HighPoint Linux Team
Signed-off-by: Andrew Morton
---

 Documentation/scsi/hptiop.txt |    2 
 drivers/scsi/hptiop.c         |  217 ++++++++++++++------------------
 drivers/scsi/hptiop.h         |  156 +++++++++++------------
 3 files changed, 178 insertions(+), 197 deletions(-)

diff -puN Documentation/scsi/hptiop.txt~hptiop-highpoint-rocketraid-3xxx-controller-driver-redone Documentation/scsi/hptiop.txt
--- devel/Documentation/scsi/hptiop.txt~hptiop-highpoint-rocketraid-3xxx-controller-driver-redone	2006-05-17 22:48:16.000000000 -0700
+++ devel-akpm/Documentation/scsi/hptiop.txt	2006-05-17 22:48:16.000000000 -0700
@@ -74,7 +74,7 @@ The driver exposes following sysfs attri
     firmware-version     R     firmware version string
 
 The driver registers char device "hptiop" to communicate with HighPoint RAID
-management software. Its ioctl routine acts as a general binary interface 
+management software. Its ioctl routine acts as a general binary interface
 between the IOP firmware and HighPoint RAID management software. New
 management functions can be implemented in application/firmware without
 modification in driver code.
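(An aside for readers of the changelog above, not part of the patch itself: the
"pci posting flush" entries refer to a standard MMIO idiom.  Below is a minimal
sketch of the pattern; the register block and function names are invented for
illustration.  The real driver does the equivalent in hptiop_pci_posting_flush(),
which simply reads outbound_intstatus, as the hptiop.c hunks below show.)

#include <linux/types.h>
#include <linux/io.h>

/* Illustrative register block only; not the real hptiop layout. */
struct demo_regs {
	u32 inbound_doorbell;
	u32 outbound_intstatus;
};

/*
 * Memory-mapped PCI writes may be posted, i.e. buffered on their way to
 * the device.  A read from any register of the same device cannot be
 * posted, so completing it guarantees the earlier write has reached the
 * hardware before the driver continues.
 */
static void demo_kick_and_flush(struct demo_regs __iomem *regs, u32 cmd)
{
	writel(cmd, &regs->inbound_doorbell);	/* may sit in a write buffer */
	readl(&regs->outbound_intstatus);	/* read back forces it out */
}

That is why the patch adds a flush right after writes that start a command,
send a message, or change the interrupt mask.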
diff -puN drivers/scsi/hptiop.c~hptiop-highpoint-rocketraid-3xxx-controller-driver-redone drivers/scsi/hptiop.c
--- devel/drivers/scsi/hptiop.c~hptiop-highpoint-rocketraid-3xxx-controller-driver-redone	2006-05-17 22:48:16.000000000 -0700
+++ devel-akpm/drivers/scsi/hptiop.c	2006-05-17 22:48:16.000000000 -0700
@@ -26,10 +26,10 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -53,6 +53,11 @@ static void hptiop_host_request_callback
 static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag);
 static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);
 
+static inline void hptiop_pci_posting_flush(struct hpt_iopmu __iomem *iop)
+{
+	readl(&iop->outbound_intstatus);
+}
+
 static int iop_wait_ready(struct hpt_iopmu __iomem *iop, u32 millisec)
 {
 	u32 req = 0;
@@ -60,13 +65,14 @@ static int iop_wait_ready(struct hpt_iop
 
 	for (i = 0; i < millisec; i++) {
 		req = readl(&iop->inbound_queue);
-		if (req != IOPMU_QUEUE_EMPTY) 
+		if (req != IOPMU_QUEUE_EMPTY)
 			break;
-		mdelay(1);
+		msleep(1);
 	}
 
 	if (req != IOPMU_QUEUE_EMPTY) {
 		writel(req, &iop->outbound_queue);
+		hptiop_pci_posting_flush(iop);
 		return 0;
 	}
 
@@ -146,11 +152,13 @@ static int iop_send_sync_request(struct
 
 	writel((unsigned long)req - (unsigned long)hba->iop,
 			&hba->iop->inbound_queue);
+	hptiop_pci_posting_flush(hba->iop);
+
 	for (i = 0; i < millisec; i++) {
 		__iop_intr(hba);
 		if (readl(&req->context))
 			return 0;
-		mdelay(1);
+		msleep(1);
 	}
 
 	return -1;
@@ -164,11 +172,15 @@ static int iop_send_sync_msg(struct hpti
 
 	writel(msg, &hba->iop->inbound_msgaddr0);
+	hptiop_pci_posting_flush(hba->iop);
+
 	for (i = 0; i < millisec; i++) {
+		spin_lock_irq(hba->host->host_lock);
 		__iop_intr(hba);
+		spin_unlock_irq(hba->host->host_lock);
 		if (hba->msg_done)
 			break;
-		mdelay(1);
+		msleep(1);
 	}
 
 	return hba->msg_done?
 			0 : -1;
@@ -239,7 +251,7 @@ static int hptiop_initialize_iop(struct
 	struct hpt_iopmu __iomem *iop = hba->iop;
 
 	/* enable interrupts */
-	writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE|IOPMU_OUTBOUND_INT_MSG0),
+	writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
 		&iop->outbound_intmask);
 
 	hba->initialized = 1;
@@ -256,16 +268,10 @@ static int hptiop_initialize_iop(struct
 
 static int hptiop_map_pci_bar(struct hptiop_hba *hba)
 {
-	u8 cmd;
 	u32 mem_base_phy, length;
 	void __iomem *mem_base_virt;
 	struct pci_dev *pcidev = hba->pcidev;
 
-	pci_read_config_byte(pcidev, PCI_COMMAND, &cmd);
-	pci_write_config_byte(pcidev, PCI_COMMAND,
-				cmd | PCI_COMMAND_MASTER |
-				PCI_COMMAND_MEMORY | PCI_COMMAND_INVALIDATE);
-
 	if (!(pci_resource_flags(pcidev, 0) & IORESOURCE_MEM)) {
 		printk(KERN_ERR "scsi%d: pci resource invalid\n",
 				hba->host->host_no);
@@ -334,7 +340,7 @@ static void hptiop_host_request_callback
 			req->header.context, tag);
 
 	BUG_ON(!req->header.result);
-	BUG_ON(req->header.type != IOP_REQUEST_TYPE_SCSI_COMMAND);
+	BUG_ON(req->header.type != cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND));
 
 	scp = hba->reqs[tag].scp;
 
@@ -353,7 +359,7 @@ static void hptiop_host_request_callback
 		);
 	}
 
-	switch(req->header.result) {
+	switch (le32_to_cpu(req->header.result)) {
 	case IOP_RESULT_SUCCESS:
 		scp->result = (DID_OK<<16);
 		break;
@@ -377,7 +383,7 @@ static void hptiop_host_request_callback
 
 		memset(&scp->sense_buffer, 0, sizeof(scp->sense_buffer));
 		memcpy(&scp->sense_buffer,
-			&req->sg_list, req->dataxfer_length);
+			&req->sg_list, le32_to_cpu(req->dataxfer_length));
 		break;
 
 	default:
@@ -389,7 +395,6 @@ static void hptiop_host_request_callback
 	dprintk("scsi_done(%p)\n", scp);
 	scp->scsi_done(scp);
 	free_req(hba, &hba->reqs[tag]);
-	atomic_dec(&hba->outstandingcommands);
 }
 
 void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag)
@@ -434,7 +439,7 @@ void hptiop_iop_request_callback(struct
 static irqreturn_t hptiop_intr(int irq, void *dev_id, struct pt_regs *regs)
 {
 	struct hptiop_hba *hba = dev_id;
-	int handled = 0;
+	int handled;
 	unsigned long flags;
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
@@ -446,15 +451,15 @@ static irqreturn_t hptiop_intr(int irq,
 
 static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
 {
-	int sg_count = 0;
-	struct hpt_iopsg *psg_start = psg;
 	struct Scsi_Host *host = scp->device->host;
 	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
 	struct scatterlist *sglist = (struct scatterlist *)scp->request_buffer;
 
+	/*
+	 * though we'll not get non-use_sg fields anymore,
+	 * keep use_sg checking anyway
+	 */
 	if (scp->use_sg) {
-		u64 addr, last = 0;
-		unsigned int length;
 		int idx;
 
 		HPT_SCP(scp)->sgcnt = pci_map_sg(hba->pcidev,
@@ -463,28 +468,15 @@ static int hptiop_buildsgl(struct scsi_c
 		HPT_SCP(scp)->mapped = 1;
 		BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);
 
-		psg--;
 		for (idx = 0; idx < HPT_SCP(scp)->sgcnt; idx++) {
-			addr = sg_dma_address(&sglist[idx]);
-			length = sg_dma_len(&sglist[idx]);
-			/* merge the sg elements if possible */
-			if (idx && last==addr && psg->size &&
-				psg->size + length <= 0x10000 &&
-				(addr & 0xffffffff) != 0) {
-				psg->size += length;
-				last += length;
-			}
-			else {
-				psg++;
-				psg->pci_address = addr;
-				psg->size = length;
-				last = addr + length;
-			}
-			psg->eot = (idx == HPT_SCP(scp)->sgcnt - 1)? 1 : 0;
+			psg[idx].pci_address =
+				cpu_to_le64(sg_dma_address(&sglist[idx]));
+			psg[idx].size = cpu_to_le32(sg_dma_len(&sglist[idx]));
+			psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
+				cpu_to_le32(1) : 0;
 		}
 
-		sg_count = psg - psg_start + 1;
-		BUG_ON(sg_count > hba->max_sg_descriptors);
+		return HPT_SCP(scp)->sgcnt;
 	} else {
 		HPT_SCP(scp)->dma_handle = pci_map_single(
 				hba->pcidev,
@@ -493,12 +485,11 @@ static int hptiop_buildsgl(struct scsi_c
 				scp->sc_data_direction
 			);
 		HPT_SCP(scp)->mapped = 1;
-		psg->pci_address = HPT_SCP(scp)->dma_handle;
-		psg->size = (u32)scp->request_bufflen;
-		psg->eot = 1;
-		sg_count = 1;
+		psg->pci_address = cpu_to_le64(HPT_SCP(scp)->dma_handle);
+		psg->size = cpu_to_le32(scp->request_bufflen);
+		psg->eot = cpu_to_le32(1);
+		return 1;
 	}
-	return sg_count;
 }
 
 static int hptiop_queuecommand(struct scsi_cmnd *scp,
@@ -551,26 +542,26 @@ static int hptiop_queuecommand(struct sc
 
 	req = (struct hpt_iop_request_scsi_command *)_req->req_virt;
 
-	atomic_inc(&hba->outstandingcommands);
-
 	/* build S/G table */
 	if (scp->request_bufflen)
 		sg_count = hptiop_buildsgl(scp, req->sg_list);
 	else
 		HPT_SCP(scp)->mapped = 0;
 
-	req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
-	req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
-	req->header.result = IOP_RESULT_PENDING;
-	req->header.context = (u64)(IOPMU_QUEUE_ADDR_HOST_BIT |
+	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
+	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND);
+	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
+	req->header.context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
 					(u32)_req->index);
-	req->dataxfer_length = scp->bufflen;
+	req->header.context_hi32 = 0;
+	req->dataxfer_length = cpu_to_le32(scp->bufflen);
 	req->channel = scp->device->channel;
 	req->target = scp->device->id;
 	req->lun = scp->device->lun;
-	req->header.size = sizeof(struct hpt_iop_request_scsi_command)
+	req->header.size = cpu_to_le32(
+				sizeof(struct hpt_iop_request_scsi_command)
 				- sizeof(struct hpt_iopsg)
-				+ sg_count * sizeof(struct hpt_iopsg);
+				+ sg_count * sizeof(struct hpt_iopsg));
 
 	memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));
 
@@ -590,20 +581,13 @@ static const char *hptiop_info(struct Sc
 	return driver_name_long;
 }
 
-static int hptiop_abort(struct scsi_cmnd *scp)
-{
-	dprintk("hptiop_abort(%d/%d/%d) scp=%p\n",
-			scp->device->host->host_no, scp->device->channel,
-			scp->device->id, scp);
-	return FAILED;
-}
-
 static int hptiop_reset_hba(struct hptiop_hba *hba)
 {
 	if (atomic_xchg(&hba->resetting, 1) == 0) {
 		atomic_inc(&hba->reset_count);
 		writel(IOPMU_INBOUND_MSG0_RESET,
 			&hba->iop->outbound_msgaddr0);
+		hptiop_pci_posting_flush(hba->iop);
 	}
 
 	wait_event_timeout(hba->reset_wq,
@@ -615,19 +599,12 @@ static int hptiop_reset_hba(struct hptio
 		return -1;
 	}
 
-	/* all scp should be finished */
-	BUG_ON(atomic_read(&hba->outstandingcommands) != 0);
-
-	spin_lock_irq(hba->host->host_lock);
-
 	if (iop_send_sync_msg(hba,
 		IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
 		dprintk("scsi%d: fail to start background task\n",
 				hba->host->host_no);
 	}
 
-	spin_unlock_irq(hba->host->host_lock);
-
 	return 0;
 }
 
@@ -751,7 +728,8 @@ retry:
 
 	/*
 	 * use the buffer on the IOP local memory first, then copy it
-	 * back to host
+	 * back to host.
+	 * the caller's request buffer should be little-endian.
 	 */
 	if (arg->inbuf_size)
 		memcpy_toio(req->buf, arg->inbuf, arg->inbuf_size);
@@ -776,6 +754,7 @@ retry:
 	arg->done = hptiop_ioctl_done;
 
 	writel(val, &hba->iop->inbound_queue);
+	hptiop_pci_posting_flush(hba->iop);
 
 	spin_unlock_irq(hba->host->host_lock);
 
@@ -806,7 +785,10 @@ static int __hpt_do_ioctl(struct hptiop_
 	return arg.result;
 }
 
-#define hpt_id_valid(id) ((id) && ((u32)(id) != 0xffffffff))
+static inline int hpt_id_valid(__le32 id)
+{
+	return id != 0 && id != cpu_to_le32(0xffffffff);
+}
 
 static int hptiop_get_controller_info(struct hptiop_hba *hba,
 				struct hpt_controller_info *pinfo)
@@ -831,42 +813,44 @@ static int hptiop_get_channel_info(struc
 }
 
 static int hptiop_get_logical_devices(struct hptiop_hba *hba,
-				hpt_id_t *pids, int maxcount)
+				__le32 *pids, int maxcount)
 {
 	int i;
 	u32 count = maxcount - 1;
 
 	if (__hpt_do_ioctl(hba, HPT_IOCTL_GET_LOGICAL_DEVICES,
 			&count, sizeof(u32),
-			pids, sizeof(hpt_id_t) * maxcount))
+			pids, sizeof(u32) * maxcount))
 		return -1;
 
-	maxcount = (int)pids[0];
+	maxcount = le32_to_cpu(pids[0]);
 	for (i = 0; i < maxcount; i++)
 		pids[i] = pids[i+1];
 
 	return maxcount;
 }
 
-static int hptiop_get_device_info_v3(struct hptiop_hba *hba, hpt_id_t id,
+static int hptiop_get_device_info_v3(struct hptiop_hba *hba, __le32 id,
 				struct hpt_logical_device_info_v3 *pinfo)
 {
 	return __hpt_do_ioctl(hba, HPT_IOCTL_GET_DEVICE_INFO_V3,
-				&id, sizeof(hpt_id_t),
+				&id, sizeof(u32),
 				pinfo, sizeof(*pinfo));
 }
 
 static const char *get_array_status(struct hpt_logical_device_info_v3 *devinfo)
 {
 	static char s[64];
-	u32 flags = devinfo->u.array.flags;
+	u32 flags = le32_to_cpu(devinfo->u.array.flags);
+	u32 trans_prog = le32_to_cpu(devinfo->u.array.transforming_progress);
+	u32 reb_prog = le32_to_cpu(devinfo->u.array.rebuilding_progress);
 
 	if (flags & ARRAY_FLAG_DISABLED)
 		return "Disabled";
 	else if (flags & ARRAY_FLAG_TRANSFORMING)
 		sprintf(s, "Expanding/Migrating %d.%d%%%s%s",
-			devinfo->u.array.transforming_progress / 100,
-			devinfo->u.array.transforming_progress % 100,
+			trans_prog / 100,
+			trans_prog % 100,
 			(flags & (ARRAY_FLAG_NEEDBUILDING|ARRAY_FLAG_BROKEN))?
 					", Critical" : "",
 			((flags & ARRAY_FLAG_NEEDINITIALIZING) &&
@@ -882,18 +866,18 @@ static const char *get_array_status(stru
 				"%sBackground initializing %d.%d%%" :
 				"%sRebuilding %d.%d%%",
 			(flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
-			devinfo->u.array.rebuilding_progress / 100,
-			devinfo->u.array.rebuilding_progress % 100);
+			reb_prog / 100,
+			reb_prog % 100);
 	else if (flags & ARRAY_FLAG_VERIFYING)
 		sprintf(s, "%sVerifying %d.%d%%",
 			(flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
-			devinfo->u.array.rebuilding_progress / 100,
-			devinfo->u.array.rebuilding_progress % 100);
+			reb_prog / 100,
+			reb_prog % 100);
 	else if (flags & ARRAY_FLAG_INITIALIZING)
 		sprintf(s, "%sForground initializing %d.%d%%",
 			(flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
-			devinfo->u.array.rebuilding_progress / 100,
-			devinfo->u.array.rebuilding_progress % 100);
+			reb_prog / 100,
+			reb_prog % 100);
 	else if (flags & ARRAY_FLAG_NEEDTRANSFORM)
 		sprintf(s,"%s%s%s", "Need Expanding/Migrating",
 			(flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
@@ -915,7 +899,7 @@ static const char *get_array_status(stru
 }
 
 static void hptiop_dump_devinfo(struct hptiop_hba *hba,
-		struct hptiop_getinfo *pinfo, hpt_id_t id, int indent)
+		struct hptiop_getinfo *pinfo, __le32 id, int indent)
 {
 	struct hpt_logical_device_info_v3 devinfo;
 	int i;
@@ -933,27 +917,28 @@ static void hptiop_dump_devinfo(struct h
 	case LDT_DEVICE:
 	{
 		struct hd_driveid *driveid;
+		u32 flags = le32_to_cpu(devinfo.u.device.flags);
 
 		driveid = (struct hd_driveid *)devinfo.u.device.ident;
 		/* model[] is 40 chars long, but we just want 20 chars here */
 		driveid->model[20] = 0;
 
 		if (indent)
-			if (devinfo.u.device.flags & DEVICE_FLAG_DISABLED)
+			if (flags & DEVICE_FLAG_DISABLED)
 				hptiop_copy_info(pinfo,"Missing\n");
 			else
 				hptiop_copy_info(pinfo, "CH%d %s\n",
 					devinfo.u.device.path_id + 1,
 					driveid->model);
 		else {
-			capacity = devinfo.capacity*512;
+			capacity = le64_to_cpu(devinfo.capacity) * 512;
 			do_div(capacity, 1000000);
 			hptiop_copy_info(pinfo,
 				"CH%d %s, %lluMB, %s %s%s%s%s\n",
 				devinfo.u.device.path_id + 1,
 				driveid->model,
 				capacity,
-				(devinfo.u.device.flags & DEVICE_FLAG_DISABLED)?
+				(flags & DEVICE_FLAG_DISABLED)?
 					"Disabled" : "Normal",
 				devinfo.u.device.read_ahead_enabled?
 					"[RA]" : "",
@@ -973,7 +958,7 @@ static void hptiop_dump_devinfo(struct h
 	hptiop_copy_info(pinfo, "[DISK %d_%d] ",
 			devinfo.vbus_id, devinfo.target_id);
 
-	capacity = devinfo.capacity * 512;
+	capacity = le64_to_cpu(devinfo.capacity) * 512;
 	do_div(capacity, 1000000);
 	hptiop_copy_info(pinfo, "%s (%s), %lluMB, %s\n",
 			devinfo.u.array.name,
@@ -987,7 +972,8 @@ static void hptiop_dump_devinfo(struct h
 			get_array_status(&devinfo));
 	for (i = 0; i < devinfo.u.array.ndisk; i++) {
 		if (hpt_id_valid(devinfo.u.array.members[i])) {
-			if ((1<host = host;
 	hba->initialized = 0;
-	atomic_set(&hba->outstandingcommands, 0);
 	atomic_set(&hba->resetting, 0);
 	atomic_set(&hba->reset_count, 0);
 
@@ -1296,22 +1281,23 @@ static int __devinit hptiop_probe(struct
 		goto unmap_pci_bar;
 	}
 
-	hba->max_requests = min(iop_config.max_requests, HPTIOP_MAX_REQUESTS);
-	hba->max_devices = iop_config.max_devices;
-	hba->max_request_size = iop_config.request_size;
-	hba->max_sg_descriptors = iop_config.max_sg_count;
-	hba->firmware_version = iop_config.firmware_version;
-	hba->sdram_size = iop_config.sdram_size;
-
-	host->max_sectors = iop_config.data_transfer_length >> 9;
-	host->max_id = iop_config.max_devices;
-	host->sg_tablesize = iop_config.max_sg_count;
-	host->can_queue = iop_config.max_requests;
-	host->cmd_per_lun = iop_config.max_requests;
+	hba->max_requests = min(le32_to_cpu(iop_config.max_requests),
+				HPTIOP_MAX_REQUESTS);
+	hba->max_devices = le32_to_cpu(iop_config.max_devices);
+	hba->max_request_size = le32_to_cpu(iop_config.request_size);
+	hba->max_sg_descriptors = le32_to_cpu(iop_config.max_sg_count);
+	hba->firmware_version = le32_to_cpu(iop_config.firmware_version);
+	hba->sdram_size = le32_to_cpu(iop_config.sdram_size);
+
+	host->max_sectors = le32_to_cpu(iop_config.data_transfer_length) >> 9;
+	host->max_id = le32_to_cpu(iop_config.max_devices);
+	host->sg_tablesize = le32_to_cpu(iop_config.max_sg_count);
+	host->can_queue = le32_to_cpu(iop_config.max_requests);
+	host->cmd_per_lun = le32_to_cpu(iop_config.max_requests);
 	host->max_cmd_len = 16;
 
-	set_config.vbus_id = host->host_no;
-	set_config.iop_id = host->host_no;
+	set_config.vbus_id = cpu_to_le32(host->host_no);
+	set_config.iop_id = cpu_to_le32(host->host_no);
 
 	if (iop_set_config(hba, &set_config)) {
 		printk(KERN_ERR "scsi%d: set config failed\n",
@@ -1423,20 +1409,17 @@ static void hptiop_shutdown(struct pci_d
 
 	dprintk("hptiop_shutdown(%p)\n", hba);
 
-	/* all outstandingcommands should be finished */
-	BUG_ON(atomic_read(&hba->outstandingcommands) != 0);
-
 	/* stop the iop */
-	spin_lock_irq(hba->host->host_lock);
 	if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
 		printk(KERN_ERR "scsi%d: shutdown the iop timeout\n",
 				hba->host->host_no);
-	spin_unlock_irq(hba->host->host_lock);
 
 	/* disable all outbound interrupts */
 	int_mask = readl(&iop->outbound_intmask);
-	writel(int_mask|IOPMU_OUTBOUND_INT_MSG0|IOPMU_OUTBOUND_INT_POSTQUEUE,
-		&iop->outbound_intmask);
+	writel(int_mask |
+		IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE,
+		&iop->outbound_intmask);
+	hptiop_pci_posting_flush(iop);
 }
 
 static void hptiop_remove(struct pci_dev *pcidev)
@@ -1446,7 +1429,9 @@ static void hptiop_remove(struct pci_dev
 
 	dprintk("scsi%d: hptiop_remove\n", hba->host->host_no);
 
+	spin_lock(&hptiop_hba_list_lock);
 	list_del_init(&hba->link);
+	spin_unlock(&hptiop_hba_list_lock);
 
 	hptiop_shutdown(pcidev);
 
diff -puN drivers/scsi/hptiop.h~hptiop-highpoint-rocketraid-3xxx-controller-driver-redone drivers/scsi/hptiop.h
--- devel/drivers/scsi/hptiop.h~hptiop-highpoint-rocketraid-3xxx-controller-driver-redone	2006-05-17 22:48:16.000000000 -0700
+++ devel-akpm/drivers/scsi/hptiop.h	2006-05-17 22:48:16.000000000 -0700
@@ -18,8 +18,6 @@
 #ifndef _HPTIOP_H_
 #define _HPTIOP_H_
-typedef u32 hpt_id_t;
-
 /*
  * logical device type.
  * Identify array (logical device) and physical device.
  */
@@ -88,13 +86,13 @@ typedef u32 hpt_id_t;
  * Controller information.
  */
 struct hpt_controller_info {
-	u8 chip_type; /* chip type */
-	u8 interrupt_level; /* IRQ level */
-	u8 num_buses; /* bus count */
-	u8 chip_flags;
+	u8 chip_type;			/* chip type */
+	u8 interrupt_level;		/* IRQ level */
+	u8 num_buses;			/* bus count */
+	u8 chip_flags;
 
-	u8 product_id[MAX_NAME_LENGTH];/* product name */
-	u8 vendor_id[MAX_NAME_LENGTH]; /* vendor name */
+	u8 product_id[MAX_NAME_LENGTH];	/* product name */
+	u8 vendor_id[MAX_NAME_LENGTH];	/* vendor name */
 }
 __attribute__((packed));
 
@@ -102,10 +100,9 @@ __attribute__((packed));
 /*
  * Channel information.
  */
 struct hpt_channel_info {
-	u32 io_port; /* IDE Base Port Address */
-	u32 control_port; /* IDE Control Port Address */
-
-	hpt_id_t devices[2]; /* device connected to this channel */
+	__le32 io_port; /* IDE Base Port Address */
+	__le32 control_port; /* IDE Control Port Address */
+	__le32 devices[2]; /* device connected to this channel */
 }
 __attribute__((packed));
@@ -116,26 +113,26 @@ struct hpt_array_info_v3 {
 	u8 name[MAX_ARRAYNAME_LEN]; /* array name */
 	u8 description[64]; /* array description */
 	u8 create_manager[16]; /* who created it */
-	u32 create_time; /* when created it */
+	__le32 create_time; /* when created it */
 
 	u8 array_type; /* array type */
 	u8 block_size_shift; /* stripe size */
 	u8 ndisk; /* Number of ID in Members[] */
 	u8 reserved;
 
-	u32 flags; /* working flags, see ARRAY_FLAG_XXX */
-	u32 members[MAX_ARRAY_MEMBERS_V2]; /* member array/disks */
+	__le32 flags; /* working flags, see ARRAY_FLAG_XXX */
+	__le32 members[MAX_ARRAY_MEMBERS_V2]; /* member array/disks */
 
-	u32 rebuilding_progress;
-	u64 rebuilt_sectors; /* rebuilding point (LBA) for single member */
+	__le32 rebuilding_progress;
+	__le64 rebuilt_sectors; /* rebuilding point (LBA) for single member */
 
-	hpt_id_t transform_source;
-	hpt_id_t transform_target; /* destination device ID */
-	u32 transforming_progress;
-	u32 signature; /* persistent identification*/
-	u16 critical_members; /* bit mask of critical members */
-	u16 reserve2;
-	u32 reserve;
+	__le32 transform_source;
+	__le32 transform_target; /* destination device ID */
+	__le32 transforming_progress;
+	__le32 signature; /* persistent identification*/
+	__le16 critical_members; /* bit mask of critical members */
+	__le16 reserve2;
+	__le32 reserve;
 }
 __attribute__((packed));
 
@@ -177,13 +174,13 @@ struct hpt_device_info_v2 {
 	u8 reserved6: 6;
 #endif
 
-	u32 flags; /* working flags, see DEVICE_FLAG_XXX */
-	u8 ident[150]; /* (partitial) Identify Data of this device */
+	__le32 flags; /* working flags, see DEVICE_FLAG_XXX */
+	u8 ident[150];	/* (partitial) Identify Data of this device */
 
-	u64 total_free;
-	u64 max_free;
-	u64 bad_sectors;
-	hpt_id_t parent_arrays[MAX_PARENTS_PER_DISK];
+	__le64 total_free;
+	__le64 max_free;
+	__le64 bad_sectors;
+	__le32 parent_arrays[MAX_PARENTS_PER_DISK];
 }
 __attribute__((packed));
 
@@ -199,15 +196,15 @@ struct hpt_logical_device_info_v3 {
 	u8 vbus_id; /* vbus sequence in vbus_list */
 	u8 target_id; /* OS target id. 0xFF is invalid */
 				/* OS name: DISK $VBusId_$TargetId */
-	u64 capacity; /* array capacity */
-	hpt_id_t parent_array; /* don't use this field for physical
+	__le64 capacity; /* array capacity */
+	__le32 parent_array; /* don't use this field for physical
 				   device. use ParentArrays field in
 				   hpt_device_info_v2 */
 	/* reserved statistic fields */
-	u32 stat1;
-	u32 stat2;
-	u32 stat3;
-	u32 stat4;
+	__le32 stat1;
+	__le32 stat2;
+	__le32 stat3;
+	__le32 stat4;
 
 	union {
 		struct hpt_array_info_v3 array;
@@ -236,20 +233,20 @@ __attribute__((packed));
 
 struct hpt_iopmu
 {
-	u32 resrved0[4];
-	u32 inbound_msgaddr0;
-	u32 inbound_msgaddr1;
-	u32 outbound_msgaddr0;
-	u32 outbound_msgaddr1;
-	u32 inbound_doorbell;
-	u32 inbound_intstatus;
-	u32 inbound_intmask;
-	u32 outbound_doorbell;
-	u32 outbound_intstatus;
-	u32 outbound_intmask;
-	u32 reserved1[2];
-	u32 inbound_queue;
-	u32 outbound_queue;
+	__le32 resrved0[4];
+	__le32 inbound_msgaddr0;
+	__le32 inbound_msgaddr1;
+	__le32 outbound_msgaddr0;
+	__le32 outbound_msgaddr1;
+	__le32 inbound_doorbell;
+	__le32 inbound_intstatus;
+	__le32 inbound_intmask;
+	__le32 outbound_doorbell;
+	__le32 outbound_intstatus;
+	__le32 outbound_intmask;
+	__le32 reserved1[2];
+	__le32 inbound_queue;
+	__le32 outbound_queue;
 };
 
 #define IOPMU_QUEUE_EMPTY 0xffffffff
@@ -288,12 +285,12 @@ enum hpt_iopmu_message {
 
 struct hpt_iop_request_header
 {
-	u32 size;
-	u32 type;
-	u32 flags;
-	u32 result;
-	u32 context; /* host context */
-	u32 context_hi32;
+	__le32 size;
+	__le32 type;
+	__le32 flags;
+	__le32 result;
+	__le32 context; /* host context */
+	__le32 context_hi32;
 };
 
 #define IOP_REQUEST_FLAG_SYNC_REQUEST 1
@@ -324,30 +321,30 @@ enum hpt_iop_result_type {
 struct hpt_iop_request_get_config
 {
 	struct hpt_iop_request_header header;
-	u32 interface_version;
-	u32 firmware_version;
-	u32 max_requests;
-	u32 request_size;
-	u32 max_sg_count;
-	u32 data_transfer_length;
-	u32 alignment_mask;
-	u32 max_devices;
-	u32 sdram_size;
+	__le32 interface_version;
+	__le32 firmware_version;
+	__le32 max_requests;
+	__le32 request_size;
+	__le32 max_sg_count;
+	__le32 data_transfer_length;
+	__le32 alignment_mask;
+	__le32 max_devices;
+	__le32 sdram_size;
 };
 
 struct hpt_iop_request_set_config
 {
 	struct hpt_iop_request_header header;
-	u32 iop_id;
-	u32 vbus_id;
-	u32 reserve[6];
+	__le32 iop_id;
+	__le32 vbus_id;
+	__le32 reserve[6];
 };
 
 struct hpt_iopsg
 {
-	u32 size;
-	u32 eot; /* non-zero: end of table */
-	u64 pci_address;
+	__le32 size;
+	__le32 eot; /* non-zero: end of table */
+	__le64 pci_address;
 };
 
 struct hpt_iop_request_block_command
@@ -357,9 +354,9 @@ struct hpt_iop_request_block_command
 	u8 target;
 	u8 lun;
 	u8 pad1;
-	u16 command; /* IOP_BLOCK_COMMAND_{READ,WRITE} */
-	u16 sectors;
-	u64 lba;
+	__le16 command; /* IOP_BLOCK_COMMAND_{READ,WRITE} */
+	__le16 sectors;
+	__le64 lba;
 	struct hpt_iopsg sg_list[1];
 };
 
@@ -377,17 +374,17 @@ struct hpt_iop_request_scsi_command
 	u8 lun;
 	u8 pad1;
 	u8 cdb[16];
-	u32 dataxfer_length;
+	__le32 dataxfer_length;
 	struct hpt_iopsg sg_list[1];
 };
 
 struct hpt_iop_request_ioctl_command
 {
 	struct hpt_iop_request_header header;
-	u32 ioctl_code;
-	u32 inbuf_size;
-	u32 outbuf_size;
-	u32 bytes_returned;
+	__le32 ioctl_code;
+	__le32 inbuf_size;
+	__le32 outbuf_size;
+	__le32 bytes_returned;
 	u8 buf[1]; /* out data should be put at buf[(inbuf_size+3)&~3] */
 };
 
@@ -436,7 +433,6 @@ struct hptiop_hba {
 	void * dma_coherent;
 	dma_addr_t dma_coherent_handle;
 
-	atomic_t outstandingcommands;
 	atomic_t reset_count;
 	atomic_t resetting;
 
_
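(A closing aside, not part of the patch: the __le16/__le32/__le64 types used
throughout hptiop.h express the convention behind the "fix endianness" item in
the changelog.  Structures shared with the IOP firmware stay little-endian in
host memory, and the CPU converts only at the boundary.  A minimal sketch, with
made-up names, assuming the usual cpu_to_le32()/le32_to_cpu() helpers:)

#include <linux/types.h>
#include <asm/byteorder.h>

/* Illustrative only; not one of the real hptiop structures. */
struct demo_fw_header {
	__le32 size;
	__le32 result;
};

static void demo_fill(struct demo_fw_header *hdr, u32 size)
{
	/* convert once, where the CPU value enters the shared structure */
	hdr->size = cpu_to_le32(size);
}

static int demo_succeeded(const struct demo_fw_header *hdr)
{
	/* convert back to CPU byte order before comparing or printing */
	return le32_to_cpu(hdr->result) == 0;
}

On a little-endian machine both conversions compile away; on a big-endian one
they byte-swap, which is what the annotations in the hunks above are meant to
guarantee (and what sparse can check once the fields are declared __le32).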