diff --git a/Makefile b/Makefile index 1f47495..c76497f 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 6 SUBLEVEL = 38 -EXTRAVERSION = -rc2 +EXTRAVERSION = -rc2-stor8 NAME = Flesh-Eating Bats with Fangs # *DOCUMENTATION* diff --git a/arch/arm/mach-omap1/Kconfig b/arch/arm/mach-omap1/Kconfig index 8d2f2da..e0a0281 100644 --- a/arch/arm/mach-omap1/Kconfig +++ b/arch/arm/mach-omap1/Kconfig @@ -9,6 +9,7 @@ config ARCH_OMAP730 depends on ARCH_OMAP1 bool "OMAP730 Based System" select CPU_ARM926T + select OMAP_MPU_TIMER select ARCH_OMAP_OTG config ARCH_OMAP850 @@ -22,6 +23,7 @@ config ARCH_OMAP15XX default y bool "OMAP15xx Based System" select CPU_ARM925T + select OMAP_MPU_TIMER config ARCH_OMAP16XX depends on ARCH_OMAP1 diff --git a/arch/arm/mach-omap1/Makefile b/arch/arm/mach-omap1/Makefile index 6ee1950..ba6009f 100644 --- a/arch/arm/mach-omap1/Makefile +++ b/arch/arm/mach-omap1/Makefile @@ -3,12 +3,11 @@ # # Common support -obj-y := io.o id.o sram.o irq.o mux.o flash.o serial.o devices.o dma.o +obj-y := io.o id.o sram.o time.o irq.o mux.o flash.o serial.o devices.o dma.o obj-y += clock.o clock_data.o opp_data.o obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o -obj-$(CONFIG_OMAP_MPU_TIMER) += time.o obj-$(CONFIG_OMAP_32K_TIMER) += timer32k.o # Power Management diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c index ed7a61f..f83fc33 100644 --- a/arch/arm/mach-omap1/time.c +++ b/arch/arm/mach-omap1/time.c @@ -44,16 +44,21 @@ #include #include #include +#include #include #include #include #include +#include + #include #include #include +#ifdef CONFIG_OMAP_MPU_TIMER + #define OMAP_MPU_TIMER_BASE OMAP_MPU_TIMER1_BASE #define OMAP_MPU_TIMER_OFFSET 0x100 @@ -67,7 +72,7 @@ typedef struct { ((volatile omap_mpu_timer_regs_t*)OMAP1_IO_ADDRESS(OMAP_MPU_TIMER_BASE + \ (n)*OMAP_MPU_TIMER_OFFSET)) -static inline unsigned long omap_mpu_timer_read(int nr) +static inline unsigned long notrace omap_mpu_timer_read(int nr) { volatile omap_mpu_timer_regs_t* timer = omap_mpu_timer_base(nr); return timer->read_tim; @@ -212,6 +217,32 @@ static struct clocksource clocksource_mpu = { .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; +static DEFINE_CLOCK_DATA(cd); + +static inline unsigned long long notrace _omap_mpu_sched_clock(void) +{ + u32 cyc = mpu_read(&clocksource_mpu); + return cyc_to_sched_clock(&cd, cyc, (u32)~0); +} + +#ifndef CONFIG_OMAP_32K_TIMER +unsigned long long notrace sched_clock(void) +{ + return _omap_mpu_sched_clock(); +} +#else +static unsigned long long notrace omap_mpu_sched_clock(void) +{ + return _omap_mpu_sched_clock(); +} +#endif + +static void notrace mpu_update_sched_clock(void) +{ + u32 cyc = mpu_read(&clocksource_mpu); + update_sched_clock(&cd, cyc, (u32)~0); +} + static void __init omap_init_clocksource(unsigned long rate) { static char err[] __initdata = KERN_ERR @@ -219,17 +250,13 @@ static void __init omap_init_clocksource(unsigned long rate) setup_irq(INT_TIMER2, &omap_mpu_timer2_irq); omap_mpu_timer_start(1, ~0, 1); + init_sched_clock(&cd, mpu_update_sched_clock, 32, rate); if (clocksource_register_hz(&clocksource_mpu, rate)) printk(err, clocksource_mpu.name); } -/* - * --------------------------------------------------------------------------- - * Timer initialization - * --------------------------------------------------------------------------- - */ -static void __init omap_timer_init(void) +static void __init omap_mpu_timer_init(void) { struct clk *ck_ref = clk_get(NULL, "ck_ref"); unsigned long rate; @@ -246,6 +273,66 @@ static void __init 
omap_timer_init(void) omap_init_clocksource(rate); } +#else +static inline void omap_mpu_timer_init(void) +{ + pr_err("Bogus timer, should not happen\n"); +} +#endif /* CONFIG_OMAP_MPU_TIMER */ + +#if defined(CONFIG_OMAP_MPU_TIMER) && defined(CONFIG_OMAP_32K_TIMER) +static unsigned long long (*preferred_sched_clock)(void); + +unsigned long long notrace sched_clock(void) +{ + if (!preferred_sched_clock) + return 0; + + return preferred_sched_clock(); +} + +static inline void preferred_sched_clock_init(bool use_32k_sched_clock) +{ + if (use_32k_sched_clock) + preferred_sched_clock = omap_32k_sched_clock; + else + preferred_sched_clock = omap_mpu_sched_clock; +} +#else +static inline void preferred_sched_clock_init(bool use_32k_sched_clock) +{ +} +#endif + +static inline int omap_32k_timer_usable(void) +{ + int res = false; + + if (cpu_is_omap730() || cpu_is_omap15xx()) + return res; + +#ifdef CONFIG_OMAP_32K_TIMER + res = omap_32k_timer_init(); +#endif + + return res; +} + +/* + * --------------------------------------------------------------------------- + * Timer initialization + * --------------------------------------------------------------------------- + */ +static void __init omap_timer_init(void) +{ + if (omap_32k_timer_usable()) { + preferred_sched_clock_init(1); + } else { + omap_mpu_timer_init(); + preferred_sched_clock_init(0); + } +} + struct sys_timer omap_timer = { .init = omap_timer_init, }; diff --git a/arch/arm/mach-omap1/timer32k.c b/arch/arm/mach-omap1/timer32k.c index 20cfbcc..13d7b8f 100644 --- a/arch/arm/mach-omap1/timer32k.c +++ b/arch/arm/mach-omap1/timer32k.c @@ -52,10 +52,9 @@ #include #include #include +#include #include -struct sys_timer omap_timer; - /* * --------------------------------------------------------------------------- * 32KHz OS timer * --------------------------------------------------------------------------- */ @@ -181,14 +180,14 @@ static __init void omap_init_32k_timer(void) * Timer initialization * --------------------------------------------------------------------------- */ -static void __init omap_timer_init(void) +bool __init omap_32k_timer_init(void) { + omap_init_clocksource_32k(); + #ifdef CONFIG_OMAP_DM_TIMER omap_dm_timer_init(); #endif omap_init_32k_timer(); -} -struct sys_timer omap_timer = { - .init = omap_timer_init, -}; + return true; +} diff --git a/arch/arm/mach-omap2/board-cm-t3517.c b/arch/arm/mach-omap2/board-cm-t3517.c index 5b0c777..8f9a64d 100644 --- a/arch/arm/mach-omap2/board-cm-t3517.c +++ b/arch/arm/mach-omap2/board-cm-t3517.c @@ -124,8 +124,9 @@ static inline void cm_t3517_init_hecc(void) {} #if defined(CONFIG_RTC_DRV_V3020) || defined(CONFIG_RTC_DRV_V3020_MODULE) #define RTC_IO_GPIO (153) #define RTC_WR_GPIO (154) -#define RTC_RD_GPIO (160) +#define RTC_RD_GPIO (53) #define RTC_CS_GPIO (163) +#define RTC_CS_EN_GPIO (160) struct v3020_platform_data cm_t3517_v3020_pdata = { .use_gpio = 1, @@ -145,6 +146,16 @@ static struct platform_device cm_t3517_rtc_device = { static void __init cm_t3517_init_rtc(void) { + int err; + + err = gpio_request(RTC_CS_EN_GPIO, "rtc cs en"); + if (err) { + pr_err("CM-T3517: rtc cs en gpio request failed: %d\n", err); + return; + } + + gpio_direction_output(RTC_CS_EN_GPIO, 1); + platform_device_register(&cm_t3517_rtc_device); } #else @@ -214,12 +225,12 @@ static struct mtd_partition cm_t3517_nand_partitions[] = { }, { .name = "linux", - .offset = MTDPART_OFS_APPEND, /* Offset = 0x280000 */ + .offset = MTDPART_OFS_APPEND, /* Offset = 0x2A0000 */ .size = 32 * NAND_BLOCK_SIZE, }, { .name = "rootfs", - .offset = MTDPART_OFS_APPEND, /* Offset = 0x680000 */ + .offset =
MTDPART_OFS_APPEND, /* Offset = 0x6A0000 */ .size = MTDPART_SIZ_FULL, }, }; @@ -256,11 +267,19 @@ static void __init cm_t3517_init_irq(void) static struct omap_board_mux board_mux[] __initdata = { /* GPIO186 - Green LED */ OMAP3_MUX(SYS_CLKOUT2, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT), - /* RTC GPIOs: IO, WR#, RD#, CS# */ + + /* RTC GPIOs: */ + /* IO - GPIO153 */ OMAP3_MUX(MCBSP4_DR, OMAP_MUX_MODE4 | OMAP_PIN_INPUT), + /* WR# - GPIO154 */ OMAP3_MUX(MCBSP4_DX, OMAP_MUX_MODE4 | OMAP_PIN_INPUT), - OMAP3_MUX(MCBSP_CLKS, OMAP_MUX_MODE4 | OMAP_PIN_INPUT), + /* RD# - GPIO53 */ + OMAP3_MUX(GPMC_NCS2, OMAP_MUX_MODE4 | OMAP_PIN_INPUT), + /* CS# - GPIO163 */ OMAP3_MUX(UART3_CTS_RCTX, OMAP_MUX_MODE4 | OMAP_PIN_INPUT), + /* CS EN - GPIO160 */ + OMAP3_MUX(MCBSP_CLKS, OMAP_MUX_MODE4 | OMAP_PIN_INPUT), + /* HSUSB1 RESET */ OMAP3_MUX(UART2_TX, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT), /* HSUSB2 RESET */ diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c index 00bb1fc..e906e05 100644 --- a/arch/arm/mach-omap2/board-devkit8000.c +++ b/arch/arm/mach-omap2/board-devkit8000.c @@ -275,8 +275,7 @@ static struct twl4030_gpio_platform_data devkit8000_gpio_data = { .irq_base = TWL4030_GPIO_IRQ_BASE, .irq_end = TWL4030_GPIO_IRQ_END, .use_leds = true, - .pullups = BIT(1), - .pulldowns = BIT(2) | BIT(6) | BIT(7) | BIT(8) | BIT(13) + .pulldowns = BIT(1) | BIT(2) | BIT(6) | BIT(8) | BIT(13) | BIT(15) | BIT(16) | BIT(17), .setup = devkit8000_twl_gpio_setup, }; diff --git a/arch/arm/mach-omap2/clock44xx_data.c b/arch/arm/mach-omap2/clock44xx_data.c index e8cb32f..de9ec8d 100644 --- a/arch/arm/mach-omap2/clock44xx_data.c +++ b/arch/arm/mach-omap2/clock44xx_data.c @@ -34,7 +34,6 @@ #include "cm2_44xx.h" #include "cm-regbits-44xx.h" #include "prm44xx.h" -#include "prm44xx.h" #include "prm-regbits-44xx.h" #include "control.h" #include "scrm44xx.h" diff --git a/arch/arm/mach-omap2/clockdomain.c b/arch/arm/mach-omap2/clockdomain.c index e20b986..58e42f7 100644 --- a/arch/arm/mach-omap2/clockdomain.c +++ b/arch/arm/mach-omap2/clockdomain.c @@ -423,6 +423,12 @@ int clkdm_add_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2) { struct clkdm_dep *cd; + if (!cpu_is_omap24xx() && !cpu_is_omap34xx()) { + pr_err("clockdomain: %s/%s: %s: not yet implemented\n", + clkdm1->name, clkdm2->name, __func__); + return -EINVAL; + } + if (!clkdm1 || !clkdm2) return -EINVAL; @@ -458,6 +464,12 @@ int clkdm_del_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2) { struct clkdm_dep *cd; + if (!cpu_is_omap24xx() && !cpu_is_omap34xx()) { + pr_err("clockdomain: %s/%s: %s: not yet implemented\n", + clkdm1->name, clkdm2->name, __func__); + return -EINVAL; + } + if (!clkdm1 || !clkdm2) return -EINVAL; @@ -500,6 +512,12 @@ int clkdm_read_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2) if (!clkdm1 || !clkdm2) return -EINVAL; + if (!cpu_is_omap24xx() && !cpu_is_omap34xx()) { + pr_err("clockdomain: %s/%s: %s: not yet implemented\n", + clkdm1->name, clkdm2->name, __func__); + return -EINVAL; + } + cd = _clkdm_deps_lookup(clkdm2, clkdm1->wkdep_srcs); if (IS_ERR(cd)) { pr_debug("clockdomain: hardware cannot set/clear wake up of " @@ -527,6 +545,12 @@ int clkdm_clear_all_wkdeps(struct clockdomain *clkdm) struct clkdm_dep *cd; u32 mask = 0; + if (!cpu_is_omap24xx() && !cpu_is_omap34xx()) { + pr_err("clockdomain: %s: %s: not yet implemented\n", + clkdm->name, __func__); + return -EINVAL; + } + if (!clkdm) return -EINVAL; @@ -830,8 +854,7 @@ void omap2_clkdm_allow_idle(struct clockdomain *clkdm) * 
dependency code and data for OMAP4. */ if (cpu_is_omap44xx()) { - WARN_ONCE(1, "clockdomain: OMAP4 wakeup/sleep dependency " - "support is not yet implemented\n"); + pr_err("clockdomain: %s: OMAP4 wakeup/sleep dependency support: not yet implemented\n", clkdm->name); } else { if (atomic_read(&clkdm->usecount) > 0) _clkdm_add_autodeps(clkdm); @@ -872,8 +895,7 @@ void omap2_clkdm_deny_idle(struct clockdomain *clkdm) * dependency code and data for OMAP4. */ if (cpu_is_omap44xx()) { - WARN_ONCE(1, "clockdomain: OMAP4 wakeup/sleep dependency " - "support is not yet implemented\n"); + pr_err("clockdomain: %s: OMAP4 wakeup/sleep dependency support: not yet implemented\n", clkdm->name); } else { if (atomic_read(&clkdm->usecount) > 0) _clkdm_del_autodeps(clkdm); diff --git a/arch/arm/mach-omap2/clockdomains44xx_data.c b/arch/arm/mach-omap2/clockdomains44xx_data.c index 51920fc..10622c9 100644 --- a/arch/arm/mach-omap2/clockdomains44xx_data.c +++ b/arch/arm/mach-omap2/clockdomains44xx_data.c @@ -30,8 +30,6 @@ #include "cm1_44xx.h" #include "cm2_44xx.h" -#include "cm1_44xx.h" -#include "cm2_44xx.h" #include "cm-regbits-44xx.h" #include "prm44xx.h" #include "prcm44xx.h" diff --git a/arch/arm/mach-omap2/powerdomain2xxx_3xxx.c b/arch/arm/mach-omap2/powerdomain2xxx_3xxx.c index d523389..cf600e2 100644 --- a/arch/arm/mach-omap2/powerdomain2xxx_3xxx.c +++ b/arch/arm/mach-omap2/powerdomain2xxx_3xxx.c @@ -19,7 +19,6 @@ #include #include "powerdomain.h" -#include "prm-regbits-34xx.h" #include "prm.h" #include "prm-regbits-24xx.h" #include "prm-regbits-34xx.h" diff --git a/arch/arm/mach-omap2/timer-gp.c b/arch/arm/mach-omap2/timer-gp.c index 4e48e78..7b7c268 100644 --- a/arch/arm/mach-omap2/timer-gp.c +++ b/arch/arm/mach-omap2/timer-gp.c @@ -42,6 +42,8 @@ #include "timer-gp.h" +#include + /* MAX_GPTIMER_ID: number of GPTIMERs on the chip */ #define MAX_GPTIMER_ID 12 @@ -176,10 +178,14 @@ static void __init omap2_gp_clockevent_init(void) /* * When 32k-timer is enabled, don't use GPTimer for clocksource * instead, just leave default clocksource which uses the 32k - * sync counter. See clocksource setup in see plat-omap/common.c. + * sync counter. See clocksource setup in plat-omap/counter_32k.c */ -static inline void __init omap2_gp_clocksource_init(void) {} +static void __init omap2_gp_clocksource_init(void) +{ + omap_init_clocksource_32k(); +} + #else /* * clocksource diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig index 18fe3cb..b6333ae 100644 --- a/arch/arm/plat-omap/Kconfig +++ b/arch/arm/plat-omap/Kconfig @@ -144,12 +144,9 @@ config OMAP_IOMMU_DEBUG config OMAP_IOMMU_IVA2 bool -choice - prompt "System timer" - default OMAP_32K_TIMER if !ARCH_OMAP15XX - config OMAP_MPU_TIMER bool "Use mpu timer" + depends on ARCH_OMAP1 help Select this option if you want to use the OMAP mpu timer. This timer provides more intra-tick resolution than the 32KHz timer, @@ -158,6 +155,7 @@ config OMAP_MPU_TIMER config OMAP_32K_TIMER bool "Use 32KHz timer" depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS + default y if (ARCH_OMAP16XX || ARCH_OMAP2PLUS) help Select this option if you want to enable the OMAP 32KHz timer. This timer saves power compared to the OMAP_MPU_TIMER, and has @@ -165,8 +163,6 @@ config OMAP_32K_TIMER intra-tick resolution than OMAP_MPU_TIMER. The 32KHz timer is currently only available for OMAP16XX, 24XX, 34XX and OMAP4. 
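With the "System timer" choice group removed above, OMAP_MPU_TIMER and OMAP_32K_TIMER become independent bool options, so a single kernel can now carry both. A hypothetical .config fragment:

CONFIG_OMAP_MPU_TIMER=y
CONFIG_OMAP_32K_TIMER=y

This combination is what the preferred_sched_clock function pointer in mach-omap1/time.c above exists for: omap_timer_init() probes the 32k counter first via omap_32k_timer_usable() and routes sched_clock() to whichever clock source actually came up.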
-endchoice - config OMAP3_L2_AUX_SECURE_SAVE_RESTORE bool "OMAP3 HS/EMU save and restore for L2 AUX control register" depends on ARCH_OMAP3 && PM diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c index ea46440..862dda9 100644 --- a/arch/arm/plat-omap/counter_32k.c +++ b/arch/arm/plat-omap/counter_32k.c @@ -36,8 +36,6 @@ #define OMAP16XX_TIMER_32K_SYNCHRONIZED 0xfffbc410 -#if !(defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP15XX)) - #include /* @@ -122,12 +120,24 @@ static DEFINE_CLOCK_DATA(cd); #define SC_MULT 4000000000u #define SC_SHIFT 17 -unsigned long long notrace sched_clock(void) +static inline unsigned long long notrace _omap_32k_sched_clock(void) { u32 cyc = clocksource_32k.read(&clocksource_32k); return cyc_to_fixed_sched_clock(&cd, cyc, (u32)~0, SC_MULT, SC_SHIFT); } +#ifndef CONFIG_OMAP_MPU_TIMER +unsigned long long notrace sched_clock(void) +{ + return _omap_32k_sched_clock(); +} +#else +unsigned long long notrace omap_32k_sched_clock(void) +{ + return _omap_32k_sched_clock(); +} +#endif + static void notrace omap_update_sched_clock(void) { u32 cyc = clocksource_32k.read(&clocksource_32k); @@ -160,7 +170,7 @@ void read_persistent_clock(struct timespec *ts) *ts = *tsp; } -static int __init omap_init_clocksource_32k(void) +int __init omap_init_clocksource_32k(void) { static char err[] __initdata = KERN_ERR "%s: can't register clocksource!\n"; @@ -195,7 +205,3 @@ static int __init omap_init_clocksource_32k(void) } return 0; } -arch_initcall(omap_init_clocksource_32k); - -#endif /* !(defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP15XX)) */ - diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c index c4b2b47..8536308 100644 --- a/arch/arm/plat-omap/dma.c +++ b/arch/arm/plat-omap/dma.c @@ -53,7 +53,7 @@ enum { DMA_CHAIN_STARTED, DMA_CHAIN_NOTSTARTED }; #endif #define OMAP_DMA_ACTIVE 0x01 -#define OMAP2_DMA_CSR_CLEAR_MASK 0xffe +#define OMAP2_DMA_CSR_CLEAR_MASK 0xffffffff #define OMAP_FUNC_MUX_ARM_BASE (0xfffe1000 + 0xec) @@ -1873,7 +1873,7 @@ static int omap2_dma_handle_ch(int ch) printk(KERN_INFO "DMA misaligned error with device %d\n", dma_chan[ch].dev_id); - p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, ch); + p->dma_write(status, CSR, ch); p->dma_write(1 << ch, IRQSTATUS_L0, ch); /* read back the register to flush the write */ p->dma_read(IRQSTATUS_L0, ch); @@ -1893,10 +1893,9 @@ static int omap2_dma_handle_ch(int ch) OMAP_DMA_CHAIN_INCQHEAD(chain_id); status = p->dma_read(CSR, ch); + p->dma_write(status, CSR, ch); } - p->dma_write(status, CSR, ch); - if (likely(dma_chan[ch].callback != NULL)) dma_chan[ch].callback(ch, status, dma_chan[ch].data); diff --git a/arch/arm/plat-omap/include/plat/common.h b/arch/arm/plat-omap/include/plat/common.h index 6b8088e..29b2afb 100644 --- a/arch/arm/plat-omap/include/plat/common.h +++ b/arch/arm/plat-omap/include/plat/common.h @@ -35,6 +35,9 @@ struct sys_timer; extern void omap_map_common_io(void); extern struct sys_timer omap_timer; +extern bool omap_32k_timer_init(void); +extern int __init omap_init_clocksource_32k(void); +extern unsigned long long notrace omap_32k_sched_clock(void); extern void omap_reserve(void); diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c index 4dcf5f8..b0dc8f7 100644 --- a/arch/powerpc/kernel/perf_event_fsl_emb.c +++ b/arch/powerpc/kernel/perf_event_fsl_emb.c @@ -596,6 +596,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val, if (left <= 0) left = period; record = 1; + 
event->hw.last_period = event->hw.sample_period; } if (left < 0x80000000LL) val = 0x80000000LL - left; diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h index 63e35ec..62f0844 100644 --- a/arch/x86/include/asm/cacheflush.h +++ b/arch/x86/include/asm/cacheflush.h @@ -1,48 +1,8 @@ #ifndef _ASM_X86_CACHEFLUSH_H #define _ASM_X86_CACHEFLUSH_H -/* Keep includes the same across arches. */ -#include - /* Caches aren't brain-dead on the intel. */ -static inline void flush_cache_all(void) { } -static inline void flush_cache_mm(struct mm_struct *mm) { } -static inline void flush_cache_dup_mm(struct mm_struct *mm) { } -static inline void flush_cache_range(struct vm_area_struct *vma, - unsigned long start, unsigned long end) { } -static inline void flush_cache_page(struct vm_area_struct *vma, - unsigned long vmaddr, unsigned long pfn) { } -#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 -static inline void flush_dcache_page(struct page *page) { } -static inline void flush_dcache_mmap_lock(struct address_space *mapping) { } -static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { } -static inline void flush_icache_range(unsigned long start, - unsigned long end) { } -static inline void flush_icache_page(struct vm_area_struct *vma, - struct page *page) { } -static inline void flush_icache_user_range(struct vm_area_struct *vma, - struct page *page, - unsigned long addr, - unsigned long len) { } -static inline void flush_cache_vmap(unsigned long start, unsigned long end) { } -static inline void flush_cache_vunmap(unsigned long start, - unsigned long end) { } - -static inline void copy_to_user_page(struct vm_area_struct *vma, - struct page *page, unsigned long vaddr, - void *dst, const void *src, - unsigned long len) -{ - memcpy(dst, src, len); -} - -static inline void copy_from_user_page(struct vm_area_struct *vma, - struct page *page, unsigned long vaddr, - void *dst, const void *src, - unsigned long len) -{ - memcpy(dst, src, len); -} +#include #ifdef CONFIG_X86_PAT /* diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h index 4fab24d..6e6e755 100644 --- a/arch/x86/include/asm/cpu.h +++ b/arch/x86/include/asm/cpu.h @@ -32,5 +32,6 @@ extern void arch_unregister_cpu(int); DECLARE_PER_CPU(int, cpu_state); +int __cpuinit mwait_usable(const struct cpuinfo_x86 *); #endif /* _ASM_X86_CPU_H */ diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h index f52d42e..574dbc2 100644 --- a/arch/x86/include/asm/jump_label.h +++ b/arch/x86/include/asm/jump_label.h @@ -14,7 +14,7 @@ do { \ asm goto("1:" \ JUMP_LABEL_INITIAL_NOP \ - ".pushsection __jump_table, \"a\" \n\t"\ + ".pushsection __jump_table, \"aw\" \n\t"\ _ASM_PTR "1b, %l[" #label "], %c0 \n\t" \ ".popsection \n\t" \ : : "i" (key) : : label); \ diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 7283e98..ec2c19a 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c @@ -45,6 +45,7 @@ static const struct _cache_table __cpuinitconst cache_table[] = { 0x0a, LVL_1_DATA, 8 }, /* 2 way set assoc, 32 byte line size */ { 0x0c, LVL_1_DATA, 16 }, /* 4-way set assoc, 32 byte line size */ { 0x0d, LVL_1_DATA, 16 }, /* 4-way set assoc, 64 byte line size */ + { 0x0e, LVL_1_DATA, 24 }, /* 6-way set assoc, 64 byte line size */ { 0x21, LVL_2, 256 }, /* 8-way set assoc, 64 byte line size */ { 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */ { 0x23, LVL_3, MB(1) }, /* 8-way set 
assoc, sectored cache, 64 byte line size */ @@ -66,6 +67,7 @@ static const struct _cache_table __cpuinitconst cache_table[] = { 0x45, LVL_2, MB(2) }, /* 4-way set assoc, 32 byte line size */ { 0x46, LVL_3, MB(4) }, /* 4-way set assoc, 64 byte line size */ { 0x47, LVL_3, MB(8) }, /* 8-way set assoc, 64 byte line size */ + { 0x48, LVL_2, MB(3) }, /* 12-way set assoc, 64 byte line size */ { 0x49, LVL_3, MB(4) }, /* 16-way set assoc, 64 byte line size */ { 0x4a, LVL_3, MB(6) }, /* 12-way set assoc, 64 byte line size */ { 0x4b, LVL_3, MB(8) }, /* 16-way set assoc, 64 byte line size */ @@ -87,6 +89,7 @@ static const struct _cache_table __cpuinitconst cache_table[] = { 0x7c, LVL_2, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x7d, LVL_2, MB(2) }, /* 8-way set assoc, 64 byte line size */ { 0x7f, LVL_2, 512 }, /* 2-way set assoc, 64 byte line size */ + { 0x80, LVL_2, 512 }, /* 8-way set assoc, 64 byte line size */ { 0x82, LVL_2, 256 }, /* 8-way set assoc, 32 byte line size */ { 0x83, LVL_2, 512 }, /* 8-way set assoc, 32 byte line size */ { 0x84, LVL_2, MB(1) }, /* 8-way set assoc, 32 byte line size */ diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index e12246f..6f8c5e9 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c @@ -59,6 +59,7 @@ struct thermal_state { /* Callback to handle core threshold interrupts */ int (*platform_thermal_notify)(__u64 msr_val); +EXPORT_SYMBOL(platform_thermal_notify); static DEFINE_PER_CPU(struct thermal_state, thermal_state); diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index d8286ed..e764fc0 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -505,7 +506,7 @@ static void poll_idle(void) #define MWAIT_ECX_EXTENDED_INFO 0x01 #define MWAIT_EDX_C1 0xf0 -static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c) +int __cpuinit mwait_usable(const struct cpuinfo_x86 *c) { u32 eax, ebx, ecx, edx; diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 763df77..0cbe8c0 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1402,8 +1402,9 @@ static inline void mwait_play_dead(void) unsigned int highest_subcstate = 0; int i; void *mwait_ptr; + struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info); - if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_MWAIT)) + if (!(cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c))) return; if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLSH)) return; diff --git a/block/blk-core.c b/block/blk-core.c index 2f4002f..72dd23b 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -149,39 +149,29 @@ EXPORT_SYMBOL(blk_rq_init); static void req_bio_endio(struct request *rq, struct bio *bio, unsigned int nbytes, int error) { - struct request_queue *q = rq->q; - - if (&q->flush_rq != rq) { - if (error) - clear_bit(BIO_UPTODATE, &bio->bi_flags); - else if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) - error = -EIO; - - if (unlikely(nbytes > bio->bi_size)) { - printk(KERN_ERR "%s: want %u bytes done, %u left\n", - __func__, nbytes, bio->bi_size); - nbytes = bio->bi_size; - } + if (error) + clear_bit(BIO_UPTODATE, &bio->bi_flags); + else if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) + error = -EIO; + + if (unlikely(nbytes > bio->bi_size)) { + printk(KERN_ERR "%s: want %u bytes done, %u left\n", + __func__, nbytes, bio->bi_size); + nbytes = bio->bi_size; + } - if 
(unlikely(rq->cmd_flags & REQ_QUIET)) - set_bit(BIO_QUIET, &bio->bi_flags); + if (unlikely(rq->cmd_flags & REQ_QUIET)) + set_bit(BIO_QUIET, &bio->bi_flags); - bio->bi_size -= nbytes; - bio->bi_sector += (nbytes >> 9); + bio->bi_size -= nbytes; + bio->bi_sector += (nbytes >> 9); - if (bio_integrity(bio)) - bio_integrity_advance(bio, nbytes); + if (bio_integrity(bio)) + bio_integrity_advance(bio, nbytes); - if (bio->bi_size == 0) - bio_endio(bio, error); - } else { - /* - * Okay, this is the sequenced flush request in - * progress, just record the error; - */ - if (error && !q->flush_err) - q->flush_err = error; - } + /* don't actually finish bio if it's part of flush sequence */ + if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ)) + bio_endio(bio, error); } void blk_dump_rq_flags(struct request *rq, char *msg) @@ -540,7 +530,9 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) init_timer(&q->unplug_timer); setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q); INIT_LIST_HEAD(&q->timeout_list); - INIT_LIST_HEAD(&q->pending_flushes); + INIT_LIST_HEAD(&q->flush_queue[0]); + INIT_LIST_HEAD(&q->flush_queue[1]); + INIT_LIST_HEAD(&q->flush_data_in_flight); INIT_WORK(&q->unplug_work, blk_unplug_work); kobject_init(&q->kobj, &blk_queue_ktype); @@ -1219,7 +1211,7 @@ static int __make_request(struct request_queue *q, struct bio *bio) spin_lock_irq(q->queue_lock); if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) { - where = ELEVATOR_INSERT_FRONT; + where = ELEVATOR_INSERT_FLUSH; goto get_rq; } @@ -1804,7 +1796,7 @@ static void blk_account_io_done(struct request *req) * normal IO on queueing nor completion. Accounting the * containing request is enough. */ - if (blk_do_io_stat(req) && req != &req->q->flush_rq) { + if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) { unsigned long duration = jiffies - req->start_time; const int rw = rq_data_dir(req); struct hd_struct *part; diff --git a/block/blk-flush.c b/block/blk-flush.c index 54b123d..a867e3f 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -1,6 +1,69 @@ /* * Functions to sequence FLUSH and FUA writes. + * + * Copyright (C) 2011 Max Planck Institute for Gravitational Physics + * Copyright (C) 2011 Tejun Heo + * + * This file is released under the GPLv2. + * + * REQ_{FLUSH|FUA} requests are decomposed to sequences consisting of three + * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request + * properties and hardware capability. + * + * If a request doesn't have data, only REQ_FLUSH makes sense, which + * indicates a simple flush request. If there is data, REQ_FLUSH indicates + * that the device cache should be flushed before the data is executed, and + * REQ_FUA means that the data must be on non-volatile media on request + * completion. + * + * If the device doesn't have a writeback cache, FLUSH and FUA don't make any + * difference. The requests are either completed immediately if there's no + * data or executed as normal requests otherwise. + * + * If the device has a writeback cache and supports FUA, REQ_FLUSH is + * translated to PREFLUSH but REQ_FUA is passed down directly with DATA. + * + * If the device has a writeback cache and doesn't support FUA, REQ_FLUSH is + * translated to PREFLUSH and REQ_FUA to POSTFLUSH. + * + * The actual execution of flush is double buffered. Whenever a request + * needs to execute PRE or POSTFLUSH, it queues at + * q->flush_queue[q->flush_pending_idx]. Once certain criteria are met, a + * flush is issued and the pending_idx is toggled. When the flush
When the flush + * completes, all the requests which were pending are proceeded to the next + * step. This allows arbitrary merging of different types of FLUSH/FUA + * requests. + * + * Currently, the following conditions are used to determine when to issue + * flush. + * + * C1. At any given time, only one flush shall be in progress. This makes + * double buffering sufficient. + * + * C2. Flush is deferred if any request is executing DATA of its sequence. + * This avoids issuing separate POSTFLUSHes for requests which shared + * PREFLUSH. + * + * C3. The second condition is ignored if there is a request which has + * waited longer than FLUSH_PENDING_TIMEOUT. This is to avoid + * starvation in the unlikely case where there are continuous stream of + * FUA (without FLUSH) requests. + * + * For devices which support FUA, it isn't clear whether C2 (and thus C3) + * is beneficial. + * + * Note that a sequenced FLUSH/FUA request with DATA is completed twice. + * Once while executing DATA and again after the whole sequence is + * complete. The first completion updates the contained bio but doesn't + * finish it so that the bio submitter is notified only after the whole + * sequence is complete. This is implemented by testing REQ_FLUSH_SEQ in + * req_bio_endio(). + * + * The above peculiarity requires that each FLUSH/FUA request has only one + * bio attached to it, which is guaranteed as they aren't allowed to be + * merged in the usual way. */ + #include #include #include @@ -11,184 +74,290 @@ /* FLUSH/FUA sequences */ enum { - QUEUE_FSEQ_STARTED = (1 << 0), /* flushing in progress */ - QUEUE_FSEQ_PREFLUSH = (1 << 1), /* pre-flushing in progress */ - QUEUE_FSEQ_DATA = (1 << 2), /* data write in progress */ - QUEUE_FSEQ_POSTFLUSH = (1 << 3), /* post-flushing in progress */ - QUEUE_FSEQ_DONE = (1 << 4), + REQ_FSEQ_PREFLUSH = (1 << 0), /* pre-flushing in progress */ + REQ_FSEQ_DATA = (1 << 1), /* data write in progress */ + REQ_FSEQ_POSTFLUSH = (1 << 2), /* post-flushing in progress */ + REQ_FSEQ_DONE = (1 << 3), + + REQ_FSEQ_ACTIONS = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA | + REQ_FSEQ_POSTFLUSH, + + /* + * If flush has been pending longer than the following timeout, + * it's issued even if flush_data requests are still in flight. 
+ */ + FLUSH_PENDING_TIMEOUT = 5 * HZ, }; -static struct request *queue_next_fseq(struct request_queue *q); +static bool blk_kick_flush(struct request_queue *q); -unsigned blk_flush_cur_seq(struct request_queue *q) +static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq) { - if (!q->flush_seq) - return 0; - return 1 << ffz(q->flush_seq); + unsigned int policy = 0; + + if (fflags & REQ_FLUSH) { + if (rq->cmd_flags & REQ_FLUSH) + policy |= REQ_FSEQ_PREFLUSH; + if (blk_rq_sectors(rq)) + policy |= REQ_FSEQ_DATA; + if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA)) + policy |= REQ_FSEQ_POSTFLUSH; + } + return policy; } -static struct request *blk_flush_complete_seq(struct request_queue *q, - unsigned seq, int error) +static unsigned int blk_flush_cur_seq(struct request *rq) { - struct request *next_rq = NULL; - - if (error && !q->flush_err) - q->flush_err = error; - - BUG_ON(q->flush_seq & seq); - q->flush_seq |= seq; - - if (blk_flush_cur_seq(q) != QUEUE_FSEQ_DONE) { - /* not complete yet, queue the next flush sequence */ - next_rq = queue_next_fseq(q); - } else { - /* complete this flush request */ - __blk_end_request_all(q->orig_flush_rq, q->flush_err); - q->orig_flush_rq = NULL; - q->flush_seq = 0; - - /* dispatch the next flush if there's one */ - if (!list_empty(&q->pending_flushes)) { - next_rq = list_entry_rq(q->pending_flushes.next); - list_move(&next_rq->queuelist, &q->queue_head); - } - } - return next_rq; + return 1 << ffz(rq->flush.seq); } -static void blk_flush_complete_seq_end_io(struct request_queue *q, - unsigned seq, int error) +static void blk_flush_restore_request(struct request *rq) { - bool was_empty = elv_queue_empty(q); - struct request *next_rq; - - next_rq = blk_flush_complete_seq(q, seq, error); - /* - * Moving a request silently to empty queue_head may stall the - * queue. Kick the queue in those cases. + * After flush data completion, @rq->bio is %NULL but we need to + * complete the bio again. @rq->biotail is guaranteed to equal the + * original @rq->bio. Restore it. */ - if (was_empty && next_rq) - __blk_run_queue(q); + rq->bio = rq->biotail; + + /* make @rq a normal request */ + rq->cmd_flags &= ~REQ_FLUSH_SEQ; + rq->end_io = NULL; } -static void pre_flush_end_io(struct request *rq, int error) +/** + * blk_flush_complete_seq - complete flush sequence + * @rq: FLUSH/FUA request being sequenced + * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero) + * @error: whether an error occurred + * + * @rq just completed @seq part of its flush sequence, record the + * completion and trigger the next step. + * + * CONTEXT: + * spin_lock_irq(q->queue_lock) + * + * RETURNS: + * %true if requests were added to the dispatch queue, %false otherwise. 
+ */ +static bool blk_flush_complete_seq(struct request *rq, unsigned int seq, + int error) { - elv_completed_request(rq->q, rq); - blk_flush_complete_seq_end_io(rq->q, QUEUE_FSEQ_PREFLUSH, error); + struct request_queue *q = rq->q; + struct list_head *pending = &q->flush_queue[q->flush_pending_idx]; + bool queued = false; + + BUG_ON(rq->flush.seq & seq); + rq->flush.seq |= seq; + + if (likely(!error)) + seq = blk_flush_cur_seq(rq); + else + seq = REQ_FSEQ_DONE; + + switch (seq) { + case REQ_FSEQ_PREFLUSH: + case REQ_FSEQ_POSTFLUSH: + /* queue for flush */ + if (list_empty(pending)) + q->flush_pending_since = jiffies; + list_move_tail(&rq->flush.list, pending); + break; + + case REQ_FSEQ_DATA: + list_move_tail(&rq->flush.list, &q->flush_data_in_flight); + list_add(&rq->queuelist, &q->queue_head); + queued = true; + break; + + case REQ_FSEQ_DONE: + /* + * @rq was previously adjusted by blk_flush_issue() for + * flush sequencing and may already have gone through the + * flush data request completion path. Restore @rq for + * normal completion and end it. + */ + BUG_ON(!list_empty(&rq->queuelist)); + list_del_init(&rq->flush.list); + blk_flush_restore_request(rq); + __blk_end_request_all(rq, error); + break; + + default: + BUG(); + } + + return blk_kick_flush(q) | queued; } -static void flush_data_end_io(struct request *rq, int error) +static void flush_end_io(struct request *flush_rq, int error) { - elv_completed_request(rq->q, rq); - blk_flush_complete_seq_end_io(rq->q, QUEUE_FSEQ_DATA, error); + struct request_queue *q = flush_rq->q; + struct list_head *running = &q->flush_queue[q->flush_running_idx]; + bool was_empty = elv_queue_empty(q); + bool queued = false; + struct request *rq, *n; + + BUG_ON(q->flush_pending_idx == q->flush_running_idx); + + /* account completion of the flush request */ + q->flush_running_idx ^= 1; + elv_completed_request(q, flush_rq); + + /* and push the waiting requests to the next stage */ + list_for_each_entry_safe(rq, n, running, flush.list) { + unsigned int seq = blk_flush_cur_seq(rq); + + BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH); + queued |= blk_flush_complete_seq(rq, seq, error); + } + + /* after populating an empty queue, kick it to avoid stall */ + if (queued && was_empty) + __blk_run_queue(q); } -static void post_flush_end_io(struct request *rq, int error) +/** + * blk_kick_flush - consider issuing flush request + * @q: request_queue being kicked + * + * Flush related states of @q have changed, consider issuing flush request. + * Please read the comment at the top of this file for more info. + * + * CONTEXT: + * spin_lock_irq(q->queue_lock) + * + * RETURNS: + * %true if flush was issued, %false otherwise. + */ +static bool blk_kick_flush(struct request_queue *q) { - elv_completed_request(rq->q, rq); - blk_flush_complete_seq_end_io(rq->q, QUEUE_FSEQ_POSTFLUSH, error); + struct list_head *pending = &q->flush_queue[q->flush_pending_idx]; + struct request *first_rq = + list_first_entry(pending, struct request, flush.list); + + /* C1 described at the top of this file */ + if (q->flush_pending_idx != q->flush_running_idx || list_empty(pending)) + return false; + + /* C2 and C3 */ + if (!list_empty(&q->flush_data_in_flight) && + time_before(jiffies, + q->flush_pending_since + FLUSH_PENDING_TIMEOUT)) + return false; + + /* + * Issue flush and toggle pending_idx. This makes pending_idx + * different from running_idx, which means flush is in flight. 
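The pending/running toggle above is the core of the double buffering. A self-contained sketch with counters standing in for the two request lists (trace output and helper names are hypothetical; only the index arithmetic mirrors the patch):

#include <stdio.h>

struct flushq {
	int pending_idx, running_idx;	/* differ exactly while a flush is in flight (C1) */
	int nwaiting[2];		/* stand-in for flush_queue[2] */
};

static void kick_flush(struct flushq *q)
{
	if (q->pending_idx != q->running_idx)	/* C1: one flush at a time */
		return;
	if (!q->nwaiting[q->pending_idx])	/* nothing waiting */
		return;
	printf("issue flush for list %d\n", q->pending_idx);
	q->pending_idx ^= 1;	/* newcomers now queue on the other list */
}

static void flush_done(struct flushq *q)
{
	int finished = q->running_idx;

	q->running_idx ^= 1;	/* catch up with pending_idx */
	printf("%d request(s) advance a step\n", q->nwaiting[finished]);
	q->nwaiting[finished] = 0;
	kick_flush(q);		/* whatever queued meanwhile goes next */
}

int main(void)
{
	struct flushq q = { 0, 0, { 0, 0 } };

	q.nwaiting[q.pending_idx] = 2;	/* two requests reach PREFLUSH */
	kick_flush(&q);			/* flush covering list 0 is in flight */
	q.nwaiting[q.pending_idx]++;	/* a third arrives, queues on list 1 */
	flush_done(&q);			/* list 0 drains, list 1 gets its flush */
	flush_done(&q);
	return 0;
}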
+ */ + blk_rq_init(q, &q->flush_rq); + q->flush_rq.cmd_type = REQ_TYPE_FS; + q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ; + q->flush_rq.rq_disk = first_rq->rq_disk; + q->flush_rq.end_io = flush_end_io; + + q->flush_pending_idx ^= 1; + elv_insert(q, &q->flush_rq, ELEVATOR_INSERT_FRONT); + return true; } -static void init_flush_request(struct request *rq, struct gendisk *disk) +static void flush_data_end_io(struct request *rq, int error) { - rq->cmd_type = REQ_TYPE_FS; - rq->cmd_flags = WRITE_FLUSH; - rq->rq_disk = disk; + struct request_queue *q = rq->q; + bool was_empty = elv_queue_empty(q); + + /* after populating an empty queue, kick it to avoid stall */ + if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error) && was_empty) + __blk_run_queue(q); } -static struct request *queue_next_fseq(struct request_queue *q) +/** + * blk_insert_flush - insert a new FLUSH/FUA request + * @rq: request to insert + * + * To be called from elv_insert() for %ELEVATOR_INSERT_FLUSH insertions. + * @rq is being submitted. Analyze what needs to be done and put it on the + * right queue. + * + * CONTEXT: + * spin_lock_irq(q->queue_lock) + */ +void blk_insert_flush(struct request *rq) { - struct request *orig_rq = q->orig_flush_rq; - struct request *rq = &q->flush_rq; + struct request_queue *q = rq->q; + unsigned int fflags = q->flush_flags; /* may change, cache */ + unsigned int policy = blk_flush_policy(fflags, rq); - blk_rq_init(q, rq); + BUG_ON(rq->end_io); + BUG_ON(!rq->bio || rq->bio != rq->biotail); - switch (blk_flush_cur_seq(q)) { - case QUEUE_FSEQ_PREFLUSH: - init_flush_request(rq, orig_rq->rq_disk); - rq->end_io = pre_flush_end_io; - break; - case QUEUE_FSEQ_DATA: - init_request_from_bio(rq, orig_rq->bio); - /* - * orig_rq->rq_disk may be different from - * bio->bi_bdev->bd_disk if orig_rq got here through - * remapping drivers. Make sure rq->rq_disk points - * to the same one as orig_rq. - */ - rq->rq_disk = orig_rq->rq_disk; - rq->cmd_flags &= ~(REQ_FLUSH | REQ_FUA); - rq->cmd_flags |= orig_rq->cmd_flags & (REQ_FLUSH | REQ_FUA); - rq->end_io = flush_data_end_io; - break; - case QUEUE_FSEQ_POSTFLUSH: - init_flush_request(rq, orig_rq->rq_disk); - rq->end_io = post_flush_end_io; - break; - default: - BUG(); + /* + * @policy now records what operations need to be done. Adjust + * REQ_FLUSH and FUA for the driver. + */ + rq->cmd_flags &= ~REQ_FLUSH; + if (!(fflags & REQ_FUA)) + rq->cmd_flags &= ~REQ_FUA; + + /* + * If there's data but flush is not necessary, the request can be + * processed directly without going through flush machinery. Queue + * for normal execution. + */ + if ((policy & REQ_FSEQ_DATA) && + !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) { + list_add(&rq->queuelist, &q->queue_head); + return; } - elv_insert(q, rq, ELEVATOR_INSERT_FRONT); - return rq; + /* + * @rq should go through flush machinery. Mark it part of flush + * sequence and submit for further processing. + */ + memset(&rq->flush, 0, sizeof(rq->flush)); + INIT_LIST_HEAD(&rq->flush.list); + rq->cmd_flags |= REQ_FLUSH_SEQ; + rq->end_io = flush_data_end_io; + + blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0); } -struct request *blk_do_flush(struct request_queue *q, struct request *rq) +/** + * blk_abort_flushes - @q is being aborted, abort flush requests + * @q: request_queue being aborted + * + * To be called from elv_abort_queue(). @q is being aborted. Prepare all + * FLUSH/FUA requests for abortion. 
+ * + * CONTEXT: + * spin_lock_irq(q->queue_lock) + */ +void blk_abort_flushes(struct request_queue *q) { - unsigned int fflags = q->flush_flags; /* may change, cache it */ - bool has_flush = fflags & REQ_FLUSH, has_fua = fflags & REQ_FUA; - bool do_preflush = has_flush && (rq->cmd_flags & REQ_FLUSH); - bool do_postflush = has_flush && !has_fua && (rq->cmd_flags & REQ_FUA); - unsigned skip = 0; + struct request *rq, *n; + int i; /* - * Special case. If there's data but flush is not necessary, - * the request can be issued directly. - * - * Flush w/o data should be able to be issued directly too but - * currently some drivers assume that rq->bio contains - * non-zero data if it isn't NULL and empty FLUSH requests - * getting here usually have bio's without data. + * Requests in flight for data are already owned by the dispatch + * queue or the device driver. Just restore for normal completion. */ - if (blk_rq_sectors(rq) && !do_preflush && !do_postflush) { - rq->cmd_flags &= ~REQ_FLUSH; - if (!has_fua) - rq->cmd_flags &= ~REQ_FUA; - return rq; + list_for_each_entry_safe(rq, n, &q->flush_data_in_flight, flush.list) { + list_del_init(&rq->flush.list); + blk_flush_restore_request(rq); } /* - * Sequenced flushes can't be processed in parallel. If - * another one is already in progress, queue for later - * processing. + * We need to give away requests on flush queues. Restore for + * normal completion and put them on the dispatch queue. */ - if (q->flush_seq) { - list_move_tail(&rq->queuelist, &q->pending_flushes); - return NULL; + for (i = 0; i < ARRAY_SIZE(q->flush_queue); i++) { + list_for_each_entry_safe(rq, n, &q->flush_queue[i], + flush.list) { + list_del_init(&rq->flush.list); + blk_flush_restore_request(rq); + list_add_tail(&rq->queuelist, &q->queue_head); + } } - - /* - * Start a new flush sequence - */ - q->flush_err = 0; - q->flush_seq |= QUEUE_FSEQ_STARTED; - - /* adjust FLUSH/FUA of the original request and stash it away */ - rq->cmd_flags &= ~REQ_FLUSH; - if (!has_fua) - rq->cmd_flags &= ~REQ_FUA; - blk_dequeue_request(rq); - q->orig_flush_rq = rq; - - /* skip unneded sequences and return the first one */ - if (!do_preflush) - skip |= QUEUE_FSEQ_PREFLUSH; - if (!blk_rq_sectors(rq)) - skip |= QUEUE_FSEQ_DATA; - if (!do_postflush) - skip |= QUEUE_FSEQ_POSTFLUSH; - return blk_flush_complete_seq(q, skip, 0); } static void bio_end_flush(struct bio *bio, int err) diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 381b09b..a89043a 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -168,7 +168,15 @@ static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td, * tree of blkg (instead of traversing through hash list all * the time. */ - tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key)); + + /* + * This is the common case when there are no blkio cgroups. 
+ * Avoid lookup in this case + */ + if (blkcg == &blkio_root_cgroup) + tg = &td->root_tg; + else + tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key)); /* Fill in device details for root group */ if (tg && !tg->blkg.dev && bdi->dev && dev_name(bdi->dev)) { diff --git a/block/blk.h b/block/blk.h index 2db8f32..284b500 100644 --- a/block/blk.h +++ b/block/blk.h @@ -51,21 +51,17 @@ static inline void blk_clear_rq_complete(struct request *rq) */ #define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash)) -struct request *blk_do_flush(struct request_queue *q, struct request *rq); +void blk_insert_flush(struct request *rq); +void blk_abort_flushes(struct request_queue *q); static inline struct request *__elv_next_request(struct request_queue *q) { struct request *rq; while (1) { - while (!list_empty(&q->queue_head)) { + if (!list_empty(&q->queue_head)) { rq = list_entry_rq(q->queue_head.next); - if (!(rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) || - rq == &q->flush_rq) - return rq; - rq = blk_do_flush(q, rq); - if (rq) - return rq; + return rq; } if (!q->elevator->ops->elevator_dispatch_fn(q, 0)) diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 501ffdf..ace1686 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -599,7 +599,7 @@ cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg) } static inline unsigned -cfq_scaled_group_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) +cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) { unsigned slice = cfq_prio_to_slice(cfqd, cfqq); if (cfqd->cfq_latency) { @@ -631,7 +631,7 @@ cfq_scaled_group_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) static inline void cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) { - unsigned slice = cfq_scaled_group_slice(cfqd, cfqq); + unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq); cfqq->slice_start = jiffies; cfqq->slice_end = jiffies + slice; @@ -1671,7 +1671,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, */ if (timed_out) { if (cfq_cfqq_slice_new(cfqq)) - cfqq->slice_resid = cfq_scaled_group_slice(cfqd, cfqq); + cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq); else cfqq->slice_resid = cfqq->slice_end - jiffies; cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid); diff --git a/block/elevator.c b/block/elevator.c index 2569512..270e097 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -673,6 +673,11 @@ void elv_insert(struct request_queue *q, struct request *rq, int where) q->elevator->ops->elevator_add_req_fn(q, rq); break; + case ELEVATOR_INSERT_FLUSH: + rq->cmd_flags |= REQ_SOFTBARRIER; + blk_insert_flush(rq); + break; + default: printk(KERN_ERR "%s: bad insertion point %d\n", __func__, where); @@ -785,6 +790,8 @@ void elv_abort_queue(struct request_queue *q) { struct request *rq; + blk_abort_flushes(q); + while (!list_empty(&q->queue_head)) { rq = list_entry_rq(q->queue_head.next); rq->cmd_flags |= REQ_QUIET; diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 3288263..b8d96ce 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -260,6 +260,7 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */ { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */ { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */ + { PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */ /* JMicron 360/1/3/5/6, match class to avoid IDE function */ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, @@ -379,6 +380,8 @@ static const struct 
pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */ { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */ { PCI_DEVICE(0x1b4b, 0x9123), + .class = PCI_CLASS_STORAGE_SATA_AHCI, + .class_mask = 0xffffff, .driver_data = board_ahci_yes_fbs }, /* 88se9128 */ /* Promise */ diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index a31fe96..d4e52e2 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4138,6 +4138,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { * device and controller are SATA. */ { "PIONEER DVD-RW DVRTD08", "1.00", ATA_HORKAGE_NOSETXFER }, + { "PIONEER DVD-RW DVR-212D", "1.28", ATA_HORKAGE_NOSETXFER }, /* End Marker */ { } diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 5defc74..600f635 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -1099,9 +1099,9 @@ static int ata_scsi_dev_config(struct scsi_device *sdev, struct request_queue *q = sdev->request_queue; void *buf; - /* set the min alignment and padding */ - blk_queue_update_dma_alignment(sdev->request_queue, - ATA_DMA_PAD_SZ - 1); + sdev->sector_size = ATA_SECT_SIZE; + + /* set DMA padding */ blk_queue_update_dma_pad(sdev->request_queue, ATA_DMA_PAD_SZ - 1); @@ -1115,13 +1115,25 @@ static int ata_scsi_dev_config(struct scsi_device *sdev, blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN); } else { - /* ATA devices must be sector aligned */ sdev->sector_size = ata_id_logical_sector_size(dev->id); - blk_queue_update_dma_alignment(sdev->request_queue, - sdev->sector_size - 1); sdev->manage_start_stop = 1; } + /* + * ata_pio_sectors() expects buffer for each sector to not cross + * page boundary. Enforce it by requiring buffers to be sector + * aligned, which works iff sector_size is not larger than + * PAGE_SIZE. ATAPI devices also need the alignment as + * IDENTIFY_PACKET is executed as ATA_PROT_PIO. 
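The alignment reasoning in the comment above can be checked numerically: sector sizes and PAGE_SIZE are powers of two, so sector_size <= PAGE_SIZE implies sector_size divides PAGE_SIZE, and a sector-aligned buffer then never straddles a page. A small check with assumed sizes:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	const unsigned page = 4096, sect = 512;	/* assumed sizes */
	unsigned off;

	/* every sector-aligned sect-byte chunk stays within one page */
	for (off = 0; off < 8 * page; off += sect)
		assert(off / page == (off + sect - 1) / page);

	printf("no aligned sector crosses a page boundary\n");
	return 0;
}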
+ */ + if (sdev->sector_size > PAGE_SIZE) + ata_dev_printk(dev, KERN_WARNING, + "sector_size=%u > PAGE_SIZE, PIO may malfunction\n", + sdev->sector_size); + + blk_queue_update_dma_alignment(sdev->request_queue, + sdev->sector_size - 1); + if (dev->flags & ATA_DFLAG_AN) set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events); diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c index d7e57db..538ec38 100644 --- a/drivers/ata/pata_hpt366.c +++ b/drivers/ata/pata_hpt366.c @@ -25,7 +25,7 @@ #include #define DRV_NAME "pata_hpt366" -#define DRV_VERSION "0.6.9" +#define DRV_VERSION "0.6.10" struct hpt_clock { u8 xfer_mode; @@ -160,8 +160,8 @@ static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, while (list[i] != NULL) { if (!strcmp(list[i], model_num)) { - printk(KERN_WARNING DRV_NAME ": %s is not supported for %s.\n", - modestr, list[i]); + pr_warning(DRV_NAME ": %s is not supported for %s.\n", + modestr, list[i]); return 1; } i++; diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c index efdd18b..4c5b518 100644 --- a/drivers/ata/pata_hpt37x.c +++ b/drivers/ata/pata_hpt37x.c @@ -24,7 +24,7 @@ #include #define DRV_NAME "pata_hpt37x" -#define DRV_VERSION "0.6.18" +#define DRV_VERSION "0.6.22" struct hpt_clock { u8 xfer_speed; @@ -229,8 +229,8 @@ static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, while (list[i] != NULL) { if (!strcmp(list[i], model_num)) { - printk(KERN_WARNING DRV_NAME ": %s is not supported for %s.\n", - modestr, list[i]); + pr_warning(DRV_NAME ": %s is not supported for %s.\n", + modestr, list[i]); return 1; } i++; @@ -642,7 +642,6 @@ static struct ata_port_operations hpt372_port_ops = { static struct ata_port_operations hpt374_fn1_port_ops = { .inherits = &hpt372_port_ops, .cable_detect = hpt374_fn1_cable_detect, - .prereset = hpt37x_pre_reset, }; /** @@ -803,7 +802,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) .udma_mask = ATA_UDMA6, .port_ops = &hpt302_port_ops }; - /* HPT374 - UDMA100, function 1 uses different prereset method */ + /* HPT374 - UDMA100, function 1 uses different cable_detect method */ static const struct ata_port_info info_hpt374_fn0 = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, @@ -838,7 +837,8 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) if (rc) return rc; - if (dev->device == PCI_DEVICE_ID_TTI_HPT366) { + switch (dev->device) { + case PCI_DEVICE_ID_TTI_HPT366: /* May be a later chip in disguise. Check */ /* Older chips are in the HPT366 driver. 
Ignore them */ if (rev < 3) @@ -863,54 +863,50 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) chip_table = &hpt372; break; default: - printk(KERN_ERR "pata_hpt37x: Unknown HPT366 subtype, " + pr_err(DRV_NAME ": Unknown HPT366 subtype, " "please report (%d).\n", rev); return -ENODEV; } - } else { - switch (dev->device) { - case PCI_DEVICE_ID_TTI_HPT372: - /* 372N if rev >= 2 */ - if (rev >= 2) - return -ENODEV; - ppi[0] = &info_hpt372; - chip_table = &hpt372a; - break; - case PCI_DEVICE_ID_TTI_HPT302: - /* 302N if rev > 1 */ - if (rev > 1) - return -ENODEV; - ppi[0] = &info_hpt302; - /* Check this */ - chip_table = &hpt302; - break; - case PCI_DEVICE_ID_TTI_HPT371: - if (rev > 1) - return -ENODEV; - ppi[0] = &info_hpt302; - chip_table = &hpt371; - /* - * Single channel device, master is not present - * but the BIOS (or us for non x86) must mark it - * absent - */ - pci_read_config_byte(dev, 0x50, &mcr1); - mcr1 &= ~0x04; - pci_write_config_byte(dev, 0x50, mcr1); - break; - case PCI_DEVICE_ID_TTI_HPT374: - chip_table = &hpt374; - if (!(PCI_FUNC(dev->devfn) & 1)) - *ppi = &info_hpt374_fn0; - else - *ppi = &info_hpt374_fn1; - break; - default: - printk(KERN_ERR - "pata_hpt37x: PCI table is bogus, please report (%d).\n", - dev->device); - return -ENODEV; - } + break; + case PCI_DEVICE_ID_TTI_HPT372: + /* 372N if rev >= 2 */ + if (rev >= 2) + return -ENODEV; + ppi[0] = &info_hpt372; + chip_table = &hpt372a; + break; + case PCI_DEVICE_ID_TTI_HPT302: + /* 302N if rev > 1 */ + if (rev > 1) + return -ENODEV; + ppi[0] = &info_hpt302; + /* Check this */ + chip_table = &hpt302; + break; + case PCI_DEVICE_ID_TTI_HPT371: + if (rev > 1) + return -ENODEV; + ppi[0] = &info_hpt302; + chip_table = &hpt371; + /* + * Single channel device, master is not present but the BIOS + * (or us for non x86) must mark it absent + */ + pci_read_config_byte(dev, 0x50, &mcr1); + mcr1 &= ~0x04; + pci_write_config_byte(dev, 0x50, mcr1); + break; + case PCI_DEVICE_ID_TTI_HPT374: + chip_table = &hpt374; + if (!(PCI_FUNC(dev->devfn) & 1)) + *ppi = &info_hpt374_fn0; + else + *ppi = &info_hpt374_fn1; + break; + default: + pr_err(DRV_NAME ": PCI table is bogus, please report (%d).\n", + dev->device); + return -ENODEV; } /* Ok so this is a chip we support */ @@ -957,8 +953,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) u8 sr; u32 total = 0; - printk(KERN_WARNING - "pata_hpt37x: BIOS has not set timing clocks.\n"); + pr_warning(DRV_NAME ": BIOS has not set timing clocks.\n"); /* This is the process the HPT371 BIOS is reported to use */ for (i = 0; i < 128; i++) { @@ -1014,7 +1009,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) (f_high << 16) | f_low | 0x100); } if (adjust == 8) { - printk(KERN_ERR "pata_hpt37x: DPLL did not stabilize!\n"); + pr_err(DRV_NAME ": DPLL did not stabilize!\n"); return -ENODEV; } if (dpll == 3) @@ -1022,8 +1017,8 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) else private_data = (void *)hpt37x_timings_50; - printk(KERN_INFO "pata_hpt37x: bus clock %dMHz, using %dMHz DPLL.\n", - MHz[clock_slot], MHz[dpll]); + pr_info(DRV_NAME ": bus clock %dMHz, using %dMHz DPLL.\n", + MHz[clock_slot], MHz[dpll]); } else { private_data = (void *)chip_table->clocks[clock_slot]; /* @@ -1036,8 +1031,9 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) ppi[0] = &info_hpt370_33; if (clock_slot < 2 && ppi[0] == &info_hpt370a) ppi[0] = &info_hpt370a_33; - 
printk(KERN_INFO "pata_hpt37x: %s using %dMHz bus clock.\n", - chip_table->name, MHz[clock_slot]); + + pr_info(DRV_NAME ": %s using %dMHz bus clock.\n", + chip_table->name, MHz[clock_slot]); } /* Now kick off ATA set up */ diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c index d2239bb..eca68ca 100644 --- a/drivers/ata/pata_hpt3x2n.c +++ b/drivers/ata/pata_hpt3x2n.c @@ -25,7 +25,7 @@ #include #define DRV_NAME "pata_hpt3x2n" -#define DRV_VERSION "0.3.13" +#define DRV_VERSION "0.3.14" enum { HPT_PCI_FAST = (1 << 31), @@ -418,7 +418,7 @@ static int hpt3x2n_pci_clock(struct pci_dev *pdev) u16 sr; u32 total = 0; - printk(KERN_WARNING "pata_hpt3x2n: BIOS clock data not set.\n"); + pr_warning(DRV_NAME ": BIOS clock data not set.\n"); /* This is the process the HPT371 BIOS is reported to use */ for (i = 0; i < 128; i++) { @@ -528,8 +528,7 @@ hpt372n: ppi[0] = &info_hpt372n; break; default: - printk(KERN_ERR - "pata_hpt3x2n: PCI table is bogus please report (%d).\n", + pr_err(DRV_NAME ": PCI table is bogus, please report (%d).\n", dev->device); return -ENODEV; } @@ -579,12 +578,11 @@ hpt372n: pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low); } if (adjust == 8) { - printk(KERN_ERR "pata_hpt3x2n: DPLL did not stabilize!\n"); + pr_err(DRV_NAME ": DPLL did not stabilize!\n"); return -ENODEV; } - printk(KERN_INFO "pata_hpt37x: bus clock %dMHz, using 66MHz DPLL.\n", - pci_mhz); + pr_info(DRV_NAME ": bus clock %dMHz, using 66MHz DPLL.\n", pci_mhz); /* * Set our private data up. We only need a few flags diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c index 8cc536e..d7d8026 100644 --- a/drivers/ata/pata_mpc52xx.c +++ b/drivers/ata/pata_mpc52xx.c @@ -610,7 +610,7 @@ static struct scsi_host_template mpc52xx_ata_sht = { }; static struct ata_port_operations mpc52xx_ata_port_ops = { - .inherits = &ata_sff_port_ops, + .inherits = &ata_bmdma_port_ops, .sff_dev_select = mpc52xx_ata_dev_select, .set_piomode = mpc52xx_ata_set_piomode, .set_dmamode = mpc52xx_ata_set_dmamode, diff --git a/drivers/block/Makefile b/drivers/block/Makefile index d7f463d..40528ba 100644 --- a/drivers/block/Makefile +++ b/drivers/block/Makefile @@ -39,4 +39,4 @@ obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o obj-$(CONFIG_BLK_DEV_DRBD) += drbd/ obj-$(CONFIG_BLK_DEV_RBD) += rbd.o -swim_mod-objs := swim.o swim_asm.o +swim_mod-y := swim.o swim_asm.o diff --git a/drivers/block/aoe/Makefile b/drivers/block/aoe/Makefile index e76d997..06ea82c 100644 --- a/drivers/block/aoe/Makefile +++ b/drivers/block/aoe/Makefile @@ -3,4 +3,4 @@ # obj-$(CONFIG_ATA_OVER_ETH) += aoe.o -aoe-objs := aoeblk.o aoechr.o aoecmd.o aoedev.o aoemain.o aoenet.o +aoe-y := aoeblk.o aoechr.o aoecmd.o aoedev.o aoemain.o aoenet.o diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 516d5bb..9279272 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -2833,7 +2833,7 @@ static int cciss_revalidate(struct gendisk *disk) sector_t total_size; InquiryData_struct *inq_buff = NULL; - for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) { + for (logvol = 0; logvol <= h->highest_lun; logvol++) { if (!h->drv[logvol]) continue; if (memcmp(h->drv[logvol]->LunID, drv->LunID, diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 44e18c0..49e6a54 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1641,6 +1641,9 @@ out: static void loop_free(struct loop_device *lo) { + if (!lo->lo_queue->queue_lock) + lo->lo_queue->queue_lock = &lo->lo_queue->__queue_lock; + blk_cleanup_queue(lo->lo_queue); 
put_disk(lo->lo_disk); list_del(&lo->lo_list); diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 826ab09..fab3d32 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -68,6 +68,7 @@ static struct _intel_private { phys_addr_t gma_bus_addr; u32 PGETBL_save; u32 __iomem *gtt; /* I915G */ + bool clear_fake_agp; /* on first access via agp, fill with scratch */ int num_dcache_entries; union { void __iomem *i9xx_flush_page; @@ -869,21 +870,12 @@ static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge) static int intel_fake_agp_configure(void) { - int i; - if (!intel_enable_gtt()) return -EIO; + intel_private.clear_fake_agp = true; agp_bridge->gart_bus_addr = intel_private.gma_bus_addr; - for (i = 0; i < intel_private.base.gtt_total_entries; i++) { - intel_private.driver->write_entry(intel_private.scratch_page_dma, - i, 0); - } - readl(intel_private.gtt+i-1); /* PCI Posting. */ - - global_cache_flush(); - return 0; } @@ -945,6 +937,13 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem, { int ret = -EINVAL; + if (intel_private.clear_fake_agp) { + int start = intel_private.base.stolen_size / PAGE_SIZE; + int end = intel_private.base.gtt_mappable_entries; + intel_gtt_clear_range(start, end - start); + intel_private.clear_fake_agp = false; + } + if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY) return i810_insert_dcache_entries(mem, pg_start, type); diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c index cfb0f52..effe797 100644 --- a/drivers/clocksource/acpi_pm.c +++ b/drivers/clocksource/acpi_pm.c @@ -202,17 +202,21 @@ static int __init init_acpi_pm_clocksource(void) printk(KERN_INFO "PM-Timer had inconsistent results:" " 0x%#llx, 0x%#llx - aborting.\n", value1, value2); + pmtmr_ioport = 0; return -EINVAL; } if (i == ACPI_PM_READ_CHECKS) { printk(KERN_INFO "PM-Timer failed consistency check " " (0x%#llx) - aborting.\n", value1); + pmtmr_ioport = 0; return -ENODEV; } } - if (verify_pmtmr_rate() != 0) + if (verify_pmtmr_rate() != 0){ + pmtmr_ioport = 0; return -ENODEV; + } return clocksource_register_hz(&clocksource_acpi_pm, PMTMR_TICKS_PER_SEC); diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index bea966f..0902d44 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -100,7 +100,10 @@ config DRM_I830 config DRM_I915 tristate "i915 driver" depends on AGP_INTEL + # we need shmfs for the swappable backing store, and in particular + # the shmem_readpage() which depends upon tmpfs select SHMEM + select TMPFS select DRM_KMS_HELPER select FB_CFB_FILLRECT select FB_CFB_COPYAREA diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 844f3c9..17bd766 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -152,7 +152,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; - struct intel_ring_buffer *ring = LP_RING(dev_priv); + int ret; master_priv->sarea = drm_getsarea(dev); if (master_priv->sarea) { @@ -163,33 +163,22 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) } if (init->ring_size != 0) { - if (ring->obj != NULL) { + if (LP_RING(dev_priv)->obj != NULL) { i915_dma_cleanup(dev); DRM_ERROR("Client tried to initialize ringbuffer in " "GEM mode\n"); return -EINVAL; } - ring->size = init->ring_size; - - ring->map.offset 
= init->ring_start; - ring->map.size = init->ring_size; - ring->map.type = 0; - ring->map.flags = 0; - ring->map.mtrr = 0; - - drm_core_ioremap_wc(&ring->map, dev); - - if (ring->map.handle == NULL) { + ret = intel_render_ring_init_dri(dev, + init->ring_start, + init->ring_size); + if (ret) { i915_dma_cleanup(dev); - DRM_ERROR("can not ioremap virtual address for" - " ring buffer\n"); - return -ENOMEM; + return ret; } } - ring->virtual_start = ring->map.handle; - dev_priv->cpp = init->cpp; dev_priv->back_offset = init->back_offset; dev_priv->front_offset = init->front_offset; @@ -1226,9 +1215,15 @@ static int i915_load_modeset_init(struct drm_device *dev) if (ret) DRM_INFO("failed to find VBIOS tables\n"); - /* if we have > 1 VGA cards, then disable the radeon VGA resources */ + /* If we have > 1 VGA cards, then we need to arbitrate access + * to the common VGA resources. + * + * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA), + * then we do not take part in VGA arbitration and the + * vga_client_register() fails with -ENODEV. + */ ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); - if (ret) + if (ret && ret != -ENODEV) goto cleanup_ringbuffer; intel_register_dsm_handler(); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 72fea2b..66796bb 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -60,7 +60,7 @@ extern int intel_agp_enabled; #define INTEL_VGA_DEVICE(id, info) { \ .class = PCI_CLASS_DISPLAY_VGA << 8, \ - .class_mask = 0xffff00, \ + .class_mask = 0xff0000, \ .vendor = 0x8086, \ .device = id, \ .subvendor = PCI_ANY_ID, \ @@ -752,6 +752,9 @@ static int __init i915_init(void) driver.driver_features &= ~DRIVER_MODESET; #endif + if (!(driver.driver_features & DRIVER_MODESET)) + driver.get_vblank_timestamp = NULL; + return drm_init(&driver); } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 5969f46..a0149c6 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -543,8 +543,11 @@ typedef struct drm_i915_private { /** List of all objects in gtt_space. 
Used to restore gtt * mappings on resume */ struct list_head gtt_list; - /** End of mappable part of GTT */ + + /** Usable portion of the GTT for GEM */ + unsigned long gtt_start; unsigned long gtt_mappable_end; + unsigned long gtt_end; struct io_mapping *gtt_mapping; int gtt_mtrr; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 3dfc848..cf4f74c 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -140,12 +140,16 @@ void i915_gem_do_init(struct drm_device *dev, { drm_i915_private_t *dev_priv = dev->dev_private; - drm_mm_init(&dev_priv->mm.gtt_space, start, - end - start); + drm_mm_init(&dev_priv->mm.gtt_space, start, end - start); + dev_priv->mm.gtt_start = start; + dev_priv->mm.gtt_mappable_end = mappable_end; + dev_priv->mm.gtt_end = end; dev_priv->mm.gtt_total = end - start; dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start; - dev_priv->mm.gtt_mappable_end = mappable_end; + + /* Take over this portion of the GTT */ + intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE); } int @@ -1857,7 +1861,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev, seqno = ring->get_seqno(ring); - for (i = 0; i < I915_NUM_RINGS; i++) + for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) if (seqno >= ring->sync_seqno[i]) ring->sync_seqno[i] = 0; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index dcfdf41..d2f445e 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -1175,7 +1175,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto err; seqno = i915_gem_next_request_seqno(dev, ring); - for (i = 0; i < I915_NUM_RINGS-1; i++) { + for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) { if (seqno < ring->sync_seqno[i]) { /* The GPU can not handle its semaphore value wrapping, * so every billion or so execbuffers, we need to stall diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 70433ae..b0abdc6 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -34,6 +34,10 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; + /* First fill our portion of the GTT with scratch pages */ + intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE, + (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE); + list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { i915_gem_clflush_object(obj); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index b8e509a..062f353 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -274,24 +274,35 @@ int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, return ret; } -int i915_get_vblank_timestamp(struct drm_device *dev, int crtc, +int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, int *max_error, struct timeval *vblank_time, unsigned flags) { - struct drm_crtc *drmcrtc; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc; - if (crtc < 0 || crtc >= dev->num_crtcs) { - DRM_ERROR("Invalid crtc %d\n", crtc); + if (pipe < 0 || pipe >= dev_priv->num_pipe) { + DRM_ERROR("Invalid crtc %d\n", pipe); return -EINVAL; } /* Get drm_crtc to timestamp: */ - drmcrtc = intel_get_crtc_for_pipe(dev, crtc); + crtc = intel_get_crtc_for_pipe(dev, pipe); + if (crtc == NULL) { + DRM_ERROR("Invalid 
crtc %d\n", pipe); + return -EINVAL; + } + + if (!crtc->enabled) { + DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); + return -EBUSY; + } /* Helper routine in DRM core does all the work: */ - return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error, - vblank_time, flags, drmcrtc); + return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, + vblank_time, flags, + crtc); } /* @@ -348,8 +359,12 @@ static void notify_ring(struct drm_device *dev, struct intel_ring_buffer *ring) { struct drm_i915_private *dev_priv = dev->dev_private; - u32 seqno = ring->get_seqno(ring); + u32 seqno; + if (ring->obj == NULL) + return; + + seqno = ring->get_seqno(ring); trace_i915_gem_request_complete(dev, seqno); ring->irq_seqno = seqno; @@ -831,6 +846,8 @@ static void i915_capture_error_state(struct drm_device *dev) i++; error->pinned_bo_count = i - error->active_bo_count; + error->active_bo = NULL; + error->pinned_bo = NULL; if (i) { error->active_bo = kmalloc(sizeof(*error->active_bo)*i, GFP_ATOMIC); @@ -1278,12 +1295,12 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) if (master_priv->sarea_priv) master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; - ret = -ENODEV; if (ring->irq_get(ring)) { DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ, READ_BREADCRUMB(dev_priv) >= irq_nr); ring->irq_put(ring); - } + } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000)) + ret = -EBUSY; if (ret == -EBUSY) { DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 40a407f..5cfc689 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -513,6 +513,10 @@ #define GEN6_BLITTER_SYNC_STATUS (1 << 24) #define GEN6_BLITTER_USER_INTERRUPT (1 << 22) +#define GEN6_BLITTER_ECOSKPD 0x221d0 +#define GEN6_BLITTER_LOCK_SHIFT 16 +#define GEN6_BLITTER_FBC_NOTIFY (1<<3) + #define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050 #define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK (1 << 16) #define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE (1 << 0) @@ -2626,6 +2630,8 @@ #define DISPLAY_PORT_PLL_BIOS_2 0x46014 #define PCH_DSPCLK_GATE_D 0x42020 +# define DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9) +# define DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8) # define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7) # define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 98967f3..d7f237d 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1213,6 +1213,26 @@ static bool g4x_fbc_enabled(struct drm_device *dev) return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; } +static void sandybridge_blit_fbc_update(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 blt_ecoskpd; + + /* Make sure blitter notifies FBC of writes */ + __gen6_force_wake_get(dev_priv); + blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD); + blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY << + GEN6_BLITTER_LOCK_SHIFT; + I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); + blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY; + I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); + blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY << + GEN6_BLITTER_LOCK_SHIFT); + I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); + POSTING_READ(GEN6_BLITTER_ECOSKPD); + __gen6_force_wake_put(dev_priv); +} + static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) { struct drm_device *dev = crtc->dev; @@ -1266,6 +1286,7 @@ static void 
ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) I915_WRITE(SNB_DPFC_CTL_SA, SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence); I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); + sandybridge_blit_fbc_update(dev); } DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); @@ -6286,7 +6307,9 @@ void intel_enable_clock_gating(struct drm_device *dev) if (IS_GEN5(dev)) { /* Required for FBC */ - dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE; + dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE | + DPFCRUNIT_CLOCK_GATE_DISABLE | + DPFDUNIT_CLOCK_GATE_DISABLE; /* Required for CxSR */ dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE; diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index f295a7a..64fd644 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -26,6 +26,7 @@ */ #include +#include #include #include "drmP.h" @@ -476,7 +477,7 @@ int intel_opregion_setup(struct drm_device *dev) return -ENOTSUPP; } - base = ioremap(asls, OPREGION_SIZE); + base = acpi_os_ioremap(asls, OPREGION_SIZE); if (!base) return -ENOMEM; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index f6b9baa..6218fa9 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -34,6 +34,14 @@ #include "i915_trace.h" #include "intel_drv.h" +static inline int ring_space(struct intel_ring_buffer *ring) +{ + int space = (ring->head & HEAD_ADDR) - (ring->tail + 8); + if (space < 0) + space += ring->size; + return space; +} + static u32 i915_gem_get_seqno(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -204,11 +212,9 @@ static int init_ring_common(struct intel_ring_buffer *ring) if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) i915_kernel_lost_context(ring->dev); else { - ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; + ring->head = I915_READ_HEAD(ring); ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; - ring->space = ring->head - (ring->tail + 8); - if (ring->space < 0) - ring->space += ring->size; + ring->space = ring_space(ring); } return 0; @@ -921,32 +927,34 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring) } ring->tail = 0; - ring->space = ring->head - 8; + ring->space = ring_space(ring); return 0; } int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) { - int reread = 0; struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; unsigned long end; u32 head; + /* If the reported head position has wrapped or hasn't advanced, + * fallback to the slow and accurate path. + */ + head = intel_read_status_page(ring, 4); + if (head > ring->head) { + ring->head = head; + ring->space = ring_space(ring); + if (ring->space >= n) + return 0; + } + trace_i915_ring_wait_begin (dev); end = jiffies + 3 * HZ; do { - /* If the reported head position has wrapped or hasn't advanced, - * fallback to the slow and accurate path. 
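The new ring_space() helper above replaces three open-coded copies of the free-space computation. In a circular ring, free space is head minus tail, less a small gap so the two pointers never fully meet, wrapped by the ring size when the result goes negative. A runnable sketch of the same arithmetic, in byte units:

#include <stdio.h>

/* Free space in a circular ring where head == tail means empty; the
 * "- 8" mirrors the driver's requirement to keep tail from fully
 * catching up with head. */
static int ring_space(int head, int tail, int size)
{
        int space = head - (tail + 8);

        if (space < 0)
                space += size;
        return space;
}

int main(void)
{
        printf("%d\n", ring_space(0, 1024, 4096));    /* wrapped: 3064 */
        printf("%d\n", ring_space(2048, 1024, 4096)); /* linear:  1016 */
        return 0;
}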
- */ - head = intel_read_status_page(ring, 4); - if (reread) - head = I915_READ_HEAD(ring); - ring->head = head & HEAD_ADDR; - ring->space = ring->head - (ring->tail + 8); - if (ring->space < 0) - ring->space += ring->size; + ring->head = I915_READ_HEAD(ring); + ring->space = ring_space(ring); if (ring->space >= n) { trace_i915_ring_wait_end(dev); return 0; @@ -961,7 +969,6 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) msleep(1); if (atomic_read(&dev_priv->mm.wedged)) return -EAGAIN; - reread = 1; } while (!time_after(jiffies, end)); trace_i915_ring_wait_end (dev); return -EBUSY; @@ -1292,6 +1299,48 @@ int intel_init_render_ring_buffer(struct drm_device *dev) return intel_init_ring_buffer(dev, ring); } +int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; + + *ring = render_ring; + if (INTEL_INFO(dev)->gen >= 6) { + ring->add_request = gen6_add_request; + ring->irq_get = gen6_render_ring_get_irq; + ring->irq_put = gen6_render_ring_put_irq; + } else if (IS_GEN5(dev)) { + ring->add_request = pc_render_add_request; + ring->get_seqno = pc_render_get_seqno; + } + + ring->dev = dev; + INIT_LIST_HEAD(&ring->active_list); + INIT_LIST_HEAD(&ring->request_list); + INIT_LIST_HEAD(&ring->gpu_write_list); + + ring->size = size; + ring->effective_size = ring->size; + if (IS_I830(ring->dev)) + ring->effective_size -= 128; + + ring->map.offset = start; + ring->map.size = size; + ring->map.type = 0; + ring->map.flags = 0; + ring->map.mtrr = 0; + + drm_core_ioremap_wc(&ring->map, dev); + if (ring->map.handle == NULL) { + DRM_ERROR("can not ioremap virtual address for" + " ring buffer\n"); + return -ENOMEM; + } + + ring->virtual_start = (void __force __iomem *)ring->map.handle; + return 0; +} + int intel_init_bsd_ring_buffer(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 5b0abfa..6d6fde8 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -166,4 +166,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev); u32 intel_ring_get_active_head(struct intel_ring_buffer *ring); void intel_ring_setup_status_page(struct intel_ring_buffer *ring); +/* DRI warts */ +int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size); + #endif /* _INTEL_RINGBUFFER_H_ */ diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index b0ab185..d3ca170 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -606,14 +606,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, args.v1.usPixelClock = cpu_to_le16(mode->clock / 10); args.v1.ucTransmitterID = radeon_encoder->encoder_id; args.v1.ucEncodeMode = encoder_mode; - if (encoder_mode == ATOM_ENCODER_MODE_DP) { - if (ss_enabled) - args.v1.ucConfig |= - ADJUST_DISPLAY_CONFIG_SS_ENABLE; - } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) { + if (ss_enabled) args.v1.ucConfig |= ADJUST_DISPLAY_CONFIG_SS_ENABLE; - } atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); @@ -624,12 +619,12 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id; args.v3.sInput.ucEncodeMode = encoder_mode; args.v3.sInput.ucDispPllConfig = 0; + if (ss_enabled) + args.v3.sInput.ucDispPllConfig |= 
+ DISPPLL_CONFIG_SS_ENABLE; if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; if (encoder_mode == ATOM_ENCODER_MODE_DP) { - if (ss_enabled) - args.v3.sInput.ucDispPllConfig |= - DISPPLL_CONFIG_SS_ENABLE; args.v3.sInput.ucDispPllConfig |= DISPPLL_CONFIG_COHERENT_MODE; /* 16200 or 27000 */ @@ -649,18 +644,11 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, } } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { if (encoder_mode == ATOM_ENCODER_MODE_DP) { - if (ss_enabled) - args.v3.sInput.ucDispPllConfig |= - DISPPLL_CONFIG_SS_ENABLE; args.v3.sInput.ucDispPllConfig |= DISPPLL_CONFIG_COHERENT_MODE; /* 16200 or 27000 */ args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10); - } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) { - if (ss_enabled) - args.v3.sInput.ucDispPllConfig |= - DISPPLL_CONFIG_SS_ENABLE; - } else { + } else if (encoder_mode != ATOM_ENCODER_MODE_LVDS) { if (mode->clock > 165000) args.v3.sInput.ucDispPllConfig |= DISPPLL_CONFIG_DUAL_LINK; diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index a8973ac..677af91 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2201,6 +2201,9 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev) struct evergreen_mc_save save; u32 grbm_reset = 0; + if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) + return 0; + dev_info(rdev->dev, "GPU softreset \n"); dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", RREG32(GRBM_STATUS)); diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 46da514..5968dde 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -3522,7 +3522,7 @@ int r100_ring_test(struct radeon_device *rdev) if (i < rdev->usec_timeout) { DRM_INFO("ring test succeeded in %d usecs\n", i); } else { - DRM_ERROR("radeon: ring test failed (sracth(0x%04X)=0x%08X)\n", + DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp); r = -EINVAL; } diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index aca2236..1e10e3e 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -1287,6 +1287,9 @@ int r600_gpu_soft_reset(struct radeon_device *rdev) S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1); u32 tmp; + if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) + return 0; + dev_info(rdev->dev, "GPU softreset \n"); dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n", RREG32(R_008010_GRBM_STATUS)); diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 1573202..5277790 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -387,15 +387,11 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, *line_mux = 0x90; } - /* mac rv630 */ - if ((dev->pdev->device == 0x9588) && - (dev->pdev->subsystem_vendor == 0x106b) && - (dev->pdev->subsystem_device == 0x00a6)) { - if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) && - (*connector_type == DRM_MODE_CONNECTOR_DVII)) { - *connector_type = DRM_MODE_CONNECTOR_9PinDIN; - *line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1; - } + /* mac rv630, rv730, others */ + if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) && + (*connector_type == DRM_MODE_CONNECTOR_DVII)) { + *connector_type = DRM_MODE_CONNECTOR_9PinDIN; + *line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1; } /* ASUS HD 3600 XT board lists the DVI port as HDMI */ diff --git a/drivers/gpu/drm/radeon/radeon_drv.c 
b/drivers/gpu/drm/radeon/radeon_drv.c index d5680a0..275b26a 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -48,7 +48,7 @@ * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500) * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs - * 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK + * 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query */ #define KMS_DRIVER_MAJOR 2 #define KMS_DRIVER_MINOR 8 diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index a289646..9ec830c 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -110,11 +110,14 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev) int radeon_irq_kms_init(struct radeon_device *rdev) { + int i; int r = 0; INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); spin_lock_init(&rdev->irq.sw_lock); + for (i = 0; i < rdev->num_crtc; i++) + spin_lock_init(&rdev->irq.pflip_lock[i]); r = drm_vblank_init(rdev->ddev, rdev->num_crtc); if (r) { return r; diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 28a53e4..9832129 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -201,6 +201,10 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) } radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value); break; + case RADEON_INFO_CLOCK_CRYSTAL_FREQ: + /* return clock value in KHz */ + value = rdev->clock.spll.reference_freq * 10; + break; default: DRM_DEBUG_KMS("Invalid request %d\n", info->request); return -EINVAL; diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index c380c65..ace2b16 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c @@ -636,7 +636,7 @@ int vga_client_register(struct pci_dev *pdev, void *cookie, void (*irq_set_state)(void *cookie, bool state), unsigned int (*set_vga_decode)(void *cookie, bool decode)) { - int ret = -1; + int ret = -ENODEV; struct vga_device *vgadev; unsigned long flags; diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 7acb32e..1fa091e 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -263,7 +263,7 @@ static void __setup_broadcast_timer(void *arg) clockevents_notify(reason, &cpu); } -static int __cpuinit setup_broadcast_cpuhp_notify(struct notifier_block *n, +static int setup_broadcast_cpuhp_notify(struct notifier_block *n, unsigned long action, void *hcpu) { int hotcpu = (unsigned long)hcpu; @@ -273,15 +273,11 @@ static int __cpuinit setup_broadcast_cpuhp_notify(struct notifier_block *n, smp_call_function_single(hotcpu, __setup_broadcast_timer, (void *)true, 1); break; - case CPU_DOWN_PREPARE: - smp_call_function_single(hotcpu, __setup_broadcast_timer, - (void *)false, 1); - break; } return NOTIFY_OK; } -static struct notifier_block __cpuinitdata setup_broadcast_notifier = { +static struct notifier_block setup_broadcast_notifier = { .notifier_call = setup_broadcast_cpuhp_notify, }; diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c index 5decfd0..153ab97 100644 --- a/drivers/mmc/host/msm_sdcc.c +++ b/drivers/mmc/host/msm_sdcc.c @@ -383,14 +383,30 @@ static int msmsdcc_config_dma(struct msmsdcc_host *host, struct 
mmc_data *data) host->curr.user_pages = 0; box = &nc->cmd[0]; - for (i = 0; i < host->dma.num_ents; i++) { - box->cmd = CMD_MODE_BOX; - /* Initialize sg dma address */ - sg->dma_address = page_to_dma(mmc_dev(host->mmc), sg_page(sg)) - + sg->offset; + /* location of command block must be 64 bit aligned */ + BUG_ON(host->dma.cmd_busaddr & 0x07); - if (i == (host->dma.num_ents - 1)) + nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP; + host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST | + DMOV_CMD_ADDR(host->dma.cmdptr_busaddr); + host->dma.hdr.complete_func = msmsdcc_dma_complete_func; + + n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg, + host->dma.num_ents, host->dma.dir); + if (n == 0) { + printk(KERN_ERR "%s: Unable to map in all sg elements\n", + mmc_hostname(host->mmc)); + host->dma.sg = NULL; + host->dma.num_ents = 0; + return -ENOMEM; + } + + for_each_sg(host->dma.sg, sg, n, i) { + + box->cmd = CMD_MODE_BOX; + + if (i == n - 1) box->cmd |= CMD_LC; rows = (sg_dma_len(sg) % MCI_FIFOSIZE) ? (sg_dma_len(sg) / MCI_FIFOSIZE) + 1 : @@ -418,27 +434,6 @@ static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data) box->cmd |= CMD_DST_CRCI(crci); } box++; - sg++; - } - - /* location of command block must be 64 bit aligned */ - BUG_ON(host->dma.cmd_busaddr & 0x07); - - nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP; - host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST | - DMOV_CMD_ADDR(host->dma.cmdptr_busaddr); - host->dma.hdr.complete_func = msmsdcc_dma_complete_func; - - n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg, - host->dma.num_ents, host->dma.dir); -/* dsb inside dma_map_sg will write nc out to mem as well */ - - if (n != host->dma.num_ents) { - printk(KERN_ERR "%s: Unable to map in all sg elements\n", - mmc_hostname(host->mmc)); - host->dma.sg = NULL; - host->dma.num_ents = 0; - return -ENOMEM; } return 0; @@ -1331,9 +1326,6 @@ msmsdcc_probe(struct platform_device *pdev) if (host->timer.function) pr_info("%s: Polling status mode enabled\n", mmc_hostname(mmc)); -#if BUSCLK_PWRSAVE - msmsdcc_disable_clocks(host, 1); -#endif return 0; cmd_irq_free: free_irq(cmd_irqres->start, host); diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 4941cad..cdd9719 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -97,18 +97,6 @@ config RTC_INTF_DEV If unsure, say Y. -config RTC_INTF_DEV_UIE_EMUL - bool "RTC UIE emulation on dev interface" - depends on RTC_INTF_DEV - help - Provides an emulation for RTC_UIE if the underlying rtc chip - driver does not expose RTC_UIE ioctls. Those requests generate - once-per-second update interrupts, used for synchronization. - - The emulation code will read the time from the hardware - clock several times per second, please enable this option - only if you know that you really need it. 
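The msmsdcc_config_dma() rework above calls dma_map_sg() first and then walks the mapped list with for_each_sg() bounded by the returned count, rather than stepping a raw sg pointer over the original entry count; mapping may coalesce entries, so only the returned count is trustworthy. A toy model of that iteration (the macro below is a simplified linked-list walk, not the kernel's):

#include <stdio.h>

struct sg { int len; struct sg *next; };

#define for_each_sg(head, s, n, i) \
        for (i = 0, s = (head); i < (n); i++, s = s->next)

int main(void)
{
        struct sg c = {512, NULL}, b = {1024, &c}, a = {4096, &b};
        struct sg *s;
        int i, n = 3;   /* n would come back from dma_map_sg() */

        for_each_sg(&a, s, n, i)
                printf("segment %d: %d bytes%s\n", i, s->len,
                       i == n - 1 ? " (last: gets CMD_LC)" : "");
        return 0;
}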
- config RTC_DRV_TEST tristate "Test driver/device" help diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index 90384b9..925006d 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -16,6 +16,9 @@ #include #include +static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer); +static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer); + static int __rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm) { int err; @@ -120,12 +123,18 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) err = mutex_lock_interruptible(&rtc->ops_lock); if (err) return err; - alarm->enabled = rtc->aie_timer.enabled; - if (alarm->enabled) + if (rtc->ops == NULL) + err = -ENODEV; + else if (!rtc->ops->read_alarm) + err = -EINVAL; + else { + memset(alarm, 0, sizeof(struct rtc_wkalrm)); + alarm->enabled = rtc->aie_timer.enabled; alarm->time = rtc_ktime_to_tm(rtc->aie_timer.node.expires); + } mutex_unlock(&rtc->ops_lock); - return 0; + return err; } EXPORT_SYMBOL_GPL(rtc_read_alarm); @@ -175,16 +184,14 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) return err; if (rtc->aie_timer.enabled) { rtc_timer_remove(rtc, &rtc->aie_timer); - rtc->aie_timer.enabled = 0; } rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time); rtc->aie_timer.period = ktime_set(0, 0); if (alarm->enabled) { - rtc->aie_timer.enabled = 1; - rtc_timer_enqueue(rtc, &rtc->aie_timer); + err = rtc_timer_enqueue(rtc, &rtc->aie_timer); } mutex_unlock(&rtc->ops_lock); - return 0; + return err; } EXPORT_SYMBOL_GPL(rtc_set_alarm); @@ -195,15 +202,15 @@ int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled) return err; if (rtc->aie_timer.enabled != enabled) { - if (enabled) { - rtc->aie_timer.enabled = 1; - rtc_timer_enqueue(rtc, &rtc->aie_timer); - } else { + if (enabled) + err = rtc_timer_enqueue(rtc, &rtc->aie_timer); + else rtc_timer_remove(rtc, &rtc->aie_timer); - rtc->aie_timer.enabled = 0; - } } + if (err) + return err; + if (!rtc->ops) err = -ENODEV; else if (!rtc->ops->alarm_irq_enable) @@ -235,12 +242,9 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled) now = rtc_tm_to_ktime(tm); rtc->uie_rtctimer.node.expires = ktime_add(now, onesec); rtc->uie_rtctimer.period = ktime_set(1, 0); - rtc->uie_rtctimer.enabled = 1; - rtc_timer_enqueue(rtc, &rtc->uie_rtctimer); - } else { + err = rtc_timer_enqueue(rtc, &rtc->uie_rtctimer); + } else rtc_timer_remove(rtc, &rtc->uie_rtctimer); - rtc->uie_rtctimer.enabled = 0; - } out: mutex_unlock(&rtc->ops_lock); @@ -488,10 +492,13 @@ EXPORT_SYMBOL_GPL(rtc_irq_set_freq); * Enqueues a timer onto the rtc devices timerqueue and sets * the next alarm event appropriately. * + * Sets the enabled bit on the added timer. 
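The interface.c rework around this point moves ownership of the enabled flag into rtc_timer_enqueue()/rtc_timer_remove() and lets enqueue propagate __rtc_set_alarm() failures, rolling back the timerqueue insert so the flag, the queue and the hardware cannot disagree (see the hunks that follow). The shape of that pattern, with stub queue and hardware helpers:

#include <stdio.h>

struct timer { int enabled; };

static int hw_ok = 1;   /* flip to 0 to exercise the rollback path */
static void queue_add(struct timer *t) { (void)t; }
static void queue_del(struct timer *t) { (void)t; }
static int  program_hw_alarm(void) { return hw_ok ? 0 : -1; }

static int timer_enqueue(struct timer *t)
{
        t->enabled = 1;
        queue_add(t);
        if (program_hw_alarm() < 0) {   /* roll back both side effects */
                queue_del(t);
                t->enabled = 0;
                return -1;
        }
        return 0;
}

int main(void)
{
        struct timer t = {0};

        printf("enqueue: %d, enabled: %d\n", timer_enqueue(&t), t.enabled);
        return 0;
}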
+ * * Must hold ops_lock for proper serialization of timerqueue */ -void rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer) +static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer) { + timer->enabled = 1; timerqueue_add(&rtc->timerqueue, &timer->node); if (&timer->node == timerqueue_getnext(&rtc->timerqueue)) { struct rtc_wkalrm alarm; @@ -501,7 +508,13 @@ void rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer) err = __rtc_set_alarm(rtc, &alarm); if (err == -ETIME) schedule_work(&rtc->irqwork); + else if (err) { + timerqueue_del(&rtc->timerqueue, &timer->node); + timer->enabled = 0; + return err; + } } + return 0; } /** @@ -512,13 +525,15 @@ void rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer) * Removes a timer onto the rtc devices timerqueue and sets * the next alarm event appropriately. * + * Clears the enabled bit on the removed timer. + * * Must hold ops_lock for proper serialization of timerqueue */ -void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer) +static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer) { struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue); timerqueue_del(&rtc->timerqueue, &timer->node); - + timer->enabled = 0; if (next == &timer->node) { struct rtc_wkalrm alarm; int err; @@ -626,8 +641,7 @@ int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer* timer, timer->node.expires = expires; timer->period = period; - timer->enabled = 1; - rtc_timer_enqueue(rtc, timer); + ret = rtc_timer_enqueue(rtc, timer); mutex_unlock(&rtc->ops_lock); return ret; @@ -645,7 +659,6 @@ int rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer* timer) mutex_lock(&rtc->ops_lock); if (timer->enabled) rtc_timer_remove(rtc, timer); - timer->enabled = 0; mutex_unlock(&rtc->ops_lock); return ret; } diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h index 475c31a..77b26f5 100644 --- a/drivers/scsi/arcmsr/arcmsr.h +++ b/drivers/scsi/arcmsr/arcmsr.h @@ -2,7 +2,7 @@ ******************************************************************************* ** O.S : Linux ** FILE NAME : arcmsr.h -** BY : Erich Chen +** BY : Nick Cheng ** Description: SCSI RAID Device Driver for ** ARECA RAID Host adapter ******************************************************************************* @@ -46,8 +46,12 @@ struct device_attribute; /*The limit of outstanding scsi command that firmware can handle*/ #define ARCMSR_MAX_OUTSTANDING_CMD 256 -#define ARCMSR_MAX_FREECCB_NUM 320 -#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2010/02/02" +#ifdef CONFIG_XEN + #define ARCMSR_MAX_FREECCB_NUM 160 +#else + #define ARCMSR_MAX_FREECCB_NUM 320 +#endif +#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2010/08/05" #define ARCMSR_SCSI_INITIATOR_ID 255 #define ARCMSR_MAX_XFER_SECTORS 512 #define ARCMSR_MAX_XFER_SECTORS_B 4096 @@ -60,7 +64,6 @@ struct device_attribute; #define ARCMSR_MAX_HBB_POSTQUEUE 264 #define ARCMSR_MAX_XFER_LEN 0x26000 /* 152K */ #define ARCMSR_CDB_SG_PAGE_LENGTH 256 -#define SCSI_CMD_ARECA_SPECIFIC 0xE1 #ifndef PCI_DEVICE_ID_ARECA_1880 #define PCI_DEVICE_ID_ARECA_1880 0x1880 #endif diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c index a4e04c5..acdae33 100644 --- a/drivers/scsi/arcmsr/arcmsr_attr.c +++ b/drivers/scsi/arcmsr/arcmsr_attr.c @@ -2,7 +2,7 @@ ******************************************************************************* ** O.S : Linux ** FILE NAME : arcmsr_attr.c -** BY : Erich Chen +** BY : 
Nick Cheng ** Description: attributes exported to sysfs and device host ******************************************************************************* ** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 1cadcd6..984bd52 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -2,7 +2,7 @@ ******************************************************************************* ** O.S : Linux ** FILE NAME : arcmsr_hba.c -** BY : Erich Chen +** BY : Nick Cheng ** Description: SCSI RAID Device Driver for ** ARECA RAID Host adapter ******************************************************************************* @@ -76,7 +76,7 @@ MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/16xx/1880) SATA/SAS RAID Host Bus Adapte MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(ARCMSR_DRIVER_VERSION); static int sleeptime = 10; -static int retrycount = 30; +static int retrycount = 12; wait_queue_head_t wait_q; static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, struct scsi_cmnd *cmd); @@ -187,7 +187,6 @@ int arcmsr_sleep_for_bus_reset(struct scsi_cmnd *cmd) if (isleep > 0) { msleep(isleep*1000); } - printk(KERN_NOTICE "wake-up\n"); return 0; } @@ -921,7 +920,6 @@ static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb, } static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error) - { int id, lun; if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) { @@ -948,7 +946,7 @@ static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct Comma , pCCB->startdone , atomic_read(&acb->ccboutstandingcount)); return; - } + } arcmsr_report_ccb_state(acb, pCCB, error); } @@ -981,7 +979,7 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb) case ACB_ADAPTER_TYPE_B: { struct MessageUnit_B *reg = acb->pmuB; /*clear all outbound posted Q*/ - writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, ®->iop2drv_doorbell); /* clear doorbell interrupt */ + writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); /* clear doorbell interrupt */ for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) { if ((flag_ccb = readl(®->done_qbuffer[i])) != 0) { writel(0, ®->done_qbuffer[i]); @@ -1511,7 +1509,6 @@ static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb) arcmsr_drain_donequeue(acb, pCCB, error); } } - static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb) { uint32_t index; @@ -2106,10 +2103,6 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd, if (atomic_read(&acb->ccboutstandingcount) >= ARCMSR_MAX_OUTSTANDING_CMD) return SCSI_MLQUEUE_HOST_BUSY; - if ((scsicmd == SCSI_CMD_ARECA_SPECIFIC)) { - printk(KERN_NOTICE "Receiveing SCSI_CMD_ARECA_SPECIFIC command..\n"); - return 0; - } ccb = arcmsr_get_freeccb(acb); if (!ccb) return SCSI_MLQUEUE_HOST_BUSY; @@ -2393,6 +2386,7 @@ static int arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb, int index, rtn; bool error; polling_hbb_ccb_retry: + poll_count++; /* clear doorbell interrupt */ writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); @@ -2663,6 +2657,7 @@ static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb) { struct MessageUnit_A __iomem *reg = acb->pmuA; if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){ + mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); 
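The arcmsr_request_*_device_map() hunks above and below re-arm eternal_timer on the early-return paths as well; a self-rearming poll routine that misses mod_timer() on any exit simply stops polling the firmware for device-map changes. The invariant in miniature, with rearm() standing in for mod_timer():

#include <stdio.h>

static int resetting = 1;
static int armed;

static void rearm(void) { armed = 1; }  /* stand-in for mod_timer() */

static void request_device_map(void)
{
        armed = 0;
        if (resetting) {
                rearm();        /* early exit must still re-arm */
                return;
        }
        /* ... issue GET_CONFIG message to the firmware ... */
        rearm();
}

int main(void)
{
        request_device_map();
        printf("timer re-armed: %s\n", armed ? "yes" : "no");
        return 0;
}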
return; } else { acb->fw_flag = FW_NORMAL; @@ -2670,8 +2665,10 @@ static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb) atomic_set(&acb->rq_map_token, 16); } atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token)); - if (atomic_dec_and_test(&acb->rq_map_token)) + if (atomic_dec_and_test(&acb->rq_map_token)) { + mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); return; + } writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0); mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); } @@ -2682,15 +2679,18 @@ static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb) { struct MessageUnit_B __iomem *reg = acb->pmuB; if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){ + mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); return; } else { acb->fw_flag = FW_NORMAL; if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) { - atomic_set(&acb->rq_map_token,16); + atomic_set(&acb->rq_map_token, 16); } atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token)); - if(atomic_dec_and_test(&acb->rq_map_token)) + if (atomic_dec_and_test(&acb->rq_map_token)) { + mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); return; + } writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell); mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); } @@ -2701,6 +2701,7 @@ static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb) { struct MessageUnit_C __iomem *reg = acb->pmuC; if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0) || ((acb->acb_flags & ACB_F_ABORT) != 0)) { + mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); return; } else { acb->fw_flag = FW_NORMAL; @@ -2708,8 +2709,10 @@ static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb) atomic_set(&acb->rq_map_token, 16); } atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token)); - if (atomic_dec_and_test(&acb->rq_map_token)) + if (atomic_dec_and_test(&acb->rq_map_token)) { + mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); return; + } writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0); writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell); mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); @@ -2897,6 +2900,8 @@ static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb) uint32_t intmask_org; uint8_t rtnval = 0x00; int i = 0; + unsigned long flags; + if (atomic_read(&acb->ccboutstandingcount) != 0) { /* disable all outbound interrupt */ intmask_org = arcmsr_disable_outbound_ints(acb); @@ -2907,7 +2912,12 @@ static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb) for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { ccb = acb->pccb_pool[i]; if (ccb->startdone == ARCMSR_CCB_START) { - arcmsr_ccb_complete(ccb); + scsi_dma_unmap(ccb->pcmd); + ccb->startdone = ARCMSR_CCB_DONE; + ccb->ccb_flags = 0; + spin_lock_irqsave(&acb->ccblist_lock, flags); + list_add_tail(&ccb->list, &acb->ccb_free_list); + spin_unlock_irqrestore(&acb->ccblist_lock, flags); } } atomic_set(&acb->ccboutstandingcount, 0); @@ -2920,8 +2930,7 @@ static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb) static int arcmsr_bus_reset(struct scsi_cmnd *cmd) { - struct AdapterControlBlock *acb = - (struct AdapterControlBlock *)cmd->device->host->hostdata; + struct AdapterControlBlock 
*acb; uint32_t intmask_org, outbound_doorbell; int retry_count = 0; int rtn = FAILED; @@ -2971,31 +2980,16 @@ sleep_again: atomic_set(&acb->rq_map_token, 16); atomic_set(&acb->ante_token_value, 16); acb->fw_flag = FW_NORMAL; - init_timer(&acb->eternal_timer); - acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ); - acb->eternal_timer.data = (unsigned long) acb; - acb->eternal_timer.function = &arcmsr_request_device_map; - add_timer(&acb->eternal_timer); + mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); acb->acb_flags &= ~ACB_F_BUS_RESET; rtn = SUCCESS; printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n"); } else { acb->acb_flags &= ~ACB_F_BUS_RESET; - if (atomic_read(&acb->rq_map_token) == 0) { - atomic_set(&acb->rq_map_token, 16); - atomic_set(&acb->ante_token_value, 16); - acb->fw_flag = FW_NORMAL; - init_timer(&acb->eternal_timer); - acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ); - acb->eternal_timer.data = (unsigned long) acb; - acb->eternal_timer.function = &arcmsr_request_device_map; - add_timer(&acb->eternal_timer); - } else { - atomic_set(&acb->rq_map_token, 16); - atomic_set(&acb->ante_token_value, 16); - acb->fw_flag = FW_NORMAL; - mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ)); - } + atomic_set(&acb->rq_map_token, 16); + atomic_set(&acb->ante_token_value, 16); + acb->fw_flag = FW_NORMAL; + mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ)); rtn = SUCCESS; } break; @@ -3007,21 +3001,10 @@ sleep_again: rtn = FAILED; } else { acb->acb_flags &= ~ACB_F_BUS_RESET; - if (atomic_read(&acb->rq_map_token) == 0) { - atomic_set(&acb->rq_map_token, 16); - atomic_set(&acb->ante_token_value, 16); - acb->fw_flag = FW_NORMAL; - init_timer(&acb->eternal_timer); - acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ); - acb->eternal_timer.data = (unsigned long) acb; - acb->eternal_timer.function = &arcmsr_request_device_map; - add_timer(&acb->eternal_timer); - } else { - atomic_set(&acb->rq_map_token, 16); - atomic_set(&acb->ante_token_value, 16); - acb->fw_flag = FW_NORMAL; - mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ)); - } + atomic_set(&acb->rq_map_token, 16); + atomic_set(&acb->ante_token_value, 16); + acb->fw_flag = FW_NORMAL; + mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); rtn = SUCCESS; } break; @@ -3067,31 +3050,16 @@ sleep: atomic_set(&acb->rq_map_token, 16); atomic_set(&acb->ante_token_value, 16); acb->fw_flag = FW_NORMAL; - init_timer(&acb->eternal_timer); - acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ); - acb->eternal_timer.data = (unsigned long) acb; - acb->eternal_timer.function = &arcmsr_request_device_map; - add_timer(&acb->eternal_timer); + mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); acb->acb_flags &= ~ACB_F_BUS_RESET; rtn = SUCCESS; printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n"); } else { acb->acb_flags &= ~ACB_F_BUS_RESET; - if (atomic_read(&acb->rq_map_token) == 0) { - atomic_set(&acb->rq_map_token, 16); - atomic_set(&acb->ante_token_value, 16); - acb->fw_flag = FW_NORMAL; - init_timer(&acb->eternal_timer); - acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ); - acb->eternal_timer.data = (unsigned long) acb; - acb->eternal_timer.function = &arcmsr_request_device_map; - add_timer(&acb->eternal_timer); - } else { - atomic_set(&acb->rq_map_token, 16); - atomic_set(&acb->ante_token_value, 16); - acb->fw_flag = FW_NORMAL; - mod_timer(&acb->eternal_timer, jiffies + 
msecs_to_jiffies(6*HZ)); - } + atomic_set(&acb->rq_map_token, 16); + atomic_set(&acb->ante_token_value, 16); + acb->fw_flag = FW_NORMAL; + mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ)); rtn = SUCCESS; } break; diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index 5815cbe..9a7aaf5 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -646,6 +646,7 @@ void sas_scsi_recover_host(struct Scsi_Host *shost) spin_lock_irqsave(shost->host_lock, flags); list_splice_init(&shost->eh_cmd_q, &eh_work_q); + shost->host_eh_scheduled = 0; spin_unlock_irqrestore(shost->host_lock, flags); SAS_DPRINTK("Enter %s\n", __func__); diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 36008ab..e8a6f1c 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -2200,9 +2200,9 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) /* adjust hba_queue_depth, reply_free_queue_depth, * and queue_size */ - ioc->hba_queue_depth -= queue_diff; - ioc->reply_free_queue_depth -= queue_diff; - queue_size -= queue_diff; + ioc->hba_queue_depth -= (queue_diff / 2); + ioc->reply_free_queue_depth -= (queue_diff / 2); + queue_size = facts->MaxReplyDescriptorPostQueueDepth; } ioc->reply_post_queue_depth = queue_size; @@ -3965,6 +3965,8 @@ mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc) static void _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase) { + mpt2sas_scsih_reset_handler(ioc, reset_phase); + mpt2sas_ctl_reset_handler(ioc, reset_phase); switch (reset_phase) { case MPT2_IOC_PRE_RESET: dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: " @@ -3995,8 +3997,6 @@ _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase) "MPT2_IOC_DONE_RESET\n", ioc->name, __func__)); break; } - mpt2sas_scsih_reset_handler(ioc, reset_phase); - mpt2sas_ctl_reset_handler(ioc, reset_phase); } /** @@ -4050,6 +4050,7 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag, { int r; unsigned long flags; + u8 pe_complete = ioc->wait_for_port_enable_to_complete; dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name, __func__)); @@ -4092,6 +4093,14 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag, if (r) goto out; _base_reset_handler(ioc, MPT2_IOC_AFTER_RESET); + + /* If this hard reset is called while port enable is active, then + * there is no reason to call make_ioc_operational + */ + if (pe_complete) { + r = -EFAULT; + goto out; + } r = _base_make_ioc_operational(ioc, sleep_flag); if (!r) _base_reset_handler(ioc, MPT2_IOC_DONE_RESET); diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index eda347c..5ded3db 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -819,7 +819,7 @@ _scsih_is_end_device(u32 device_info) } /** - * mptscsih_get_scsi_lookup - returns scmd entry + * _scsih_scsi_lookup_get - returns scmd entry * @ioc: per adapter object * @smid: system request message index * @@ -832,6 +832,28 @@ _scsih_scsi_lookup_get(struct MPT2SAS_ADAPTER *ioc, u16 smid) } /** + * _scsih_scsi_lookup_get_clear - returns scmd entry + * @ioc: per adapter object + * @smid: system request message index + * + * Returns the smid stored scmd pointer. + * Then clears the stored scmd pointer.
+ */ +static inline struct scsi_cmnd * +_scsih_scsi_lookup_get_clear(struct MPT2SAS_ADAPTER *ioc, u16 smid) +{ + unsigned long flags; + struct scsi_cmnd *scmd; + + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + scmd = ioc->scsi_lookup[smid - 1].scmd; + ioc->scsi_lookup[smid - 1].scmd = NULL; + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + + return scmd; +} + +/** * _scsih_scsi_lookup_find_by_scmd - scmd lookup * @ioc: per adapter object * @smid: system request message index @@ -2981,9 +3003,6 @@ _scsih_check_topo_delete_events(struct MPT2SAS_ADAPTER *ioc, u16 handle; for (i = 0 ; i < event_data->NumEntries; i++) { - if (event_data->PHY[i].PhyStatus & - MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) - continue; handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); if (!handle) continue; @@ -3210,7 +3229,7 @@ _scsih_flush_running_cmds(struct MPT2SAS_ADAPTER *ioc) u16 count = 0; for (smid = 1; smid <= ioc->scsiio_depth; smid++) { - scmd = _scsih_scsi_lookup_get(ioc, smid); + scmd = _scsih_scsi_lookup_get_clear(ioc, smid); if (!scmd) continue; count++; @@ -3804,7 +3823,7 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) u32 response_code = 0; mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); - scmd = _scsih_scsi_lookup_get(ioc, smid); + scmd = _scsih_scsi_lookup_get_clear(ioc, smid); if (scmd == NULL) return 1; @@ -5005,6 +5024,12 @@ _scsih_sas_device_status_change_event(struct MPT2SAS_ADAPTER *ioc, event_data); #endif + /* In MPI Revision K (0xC), the internal device reset complete was + * implemented, so avoid setting tm_busy flag for older firmware. + */ + if ((ioc->facts.HeaderVersion >> 8) < 0xC) + return; + if (event_data->ReasonCode != MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET && event_data->ReasonCode != @@ -5099,6 +5124,7 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work *fw_event) { struct scsi_cmnd *scmd; + struct scsi_device *sdev; u16 smid, handle; u32 lun; struct MPT2SAS_DEVICE *sas_device_priv_data; @@ -5109,12 +5135,17 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc, Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data; #endif u16 ioc_status; + unsigned long flags; + int r; + dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "broadcast primative: " "phy number(%d), width(%d)\n", ioc->name, event_data->PhyNum, event_data->PortWidth)); dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name, __func__)); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + ioc->broadcast_aen_busy = 0; termination_count = 0; query_count = 0; mpi_reply = ioc->tm_cmds.reply; @@ -5122,7 +5153,8 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc, scmd = _scsih_scsi_lookup_get(ioc, smid); if (!scmd) continue; - sas_device_priv_data = scmd->device->hostdata; + sdev = scmd->device; + sas_device_priv_data = sdev->hostdata; if (!sas_device_priv_data || !sas_device_priv_data->sas_target) continue; /* skip hidden raid components */ @@ -5138,6 +5170,7 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc, lun = sas_device_priv_data->lun; query_count++; + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun, MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, NULL); ioc->tm_cmds.status = MPT2_CMD_NOT_USED; @@ -5147,14 +5180,20 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc, (mpi_reply->ResponseCode == MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED || mpi_reply->ResponseCode == - 
MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC)) + MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC)) { + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); continue; - - mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun, - MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, 0, 30, NULL); + } + r = mpt2sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id, + sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, + scmd); + if (r == FAILED) + sdev_printk(KERN_WARNING, sdev, "task abort: FAILED " + "scmd(%p)\n", scmd); termination_count += le32_to_cpu(mpi_reply->TerminationCount); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); } - ioc->broadcast_aen_busy = 0; + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s - exit, query_count = %d termination_count = %d\n", @@ -6626,6 +6665,7 @@ _scsih_remove(struct pci_dev *pdev) destroy_workqueue(wq); /* release all the volumes */ + _scsih_ir_shutdown(ioc); list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list, list) { if (raid_device->starget) { diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c index 7ed3653..f1c6862 100644 --- a/fs/cifs/cifs_dfs_ref.c +++ b/fs/cifs/cifs_dfs_ref.c @@ -297,7 +297,6 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt) cifs_sb = CIFS_SB(mntpt->d_inode->i_sb); tlink = cifs_sb_tlink(cifs_sb); - mnt = ERR_PTR(-EINVAL); if (IS_ERR(tlink)) { mnt = ERR_CAST(tlink); goto free_full_path; diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 5bfb753..edd5b29 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -166,6 +166,9 @@ struct TCP_Server_Info { struct socket *ssocket; struct sockaddr_storage dstaddr; struct sockaddr_storage srcaddr; /* locally bind to this IP */ +#ifdef CONFIG_NET_NS + struct net *net; +#endif wait_queue_head_t response_q; wait_queue_head_t request_q; /* if more than maxmpx to srvr must block*/ struct list_head pending_mid_q; @@ -217,6 +220,36 @@ struct TCP_Server_Info { }; /* + * Macros to allow the TCP_Server_Info->net field and related code to drop out + * when CONFIG_NET_NS isn't set. + */ + +#ifdef CONFIG_NET_NS + +static inline struct net *cifs_net_ns(struct TCP_Server_Info *srv) +{ + return srv->net; +} + +static inline void cifs_set_net_ns(struct TCP_Server_Info *srv, struct net *net) +{ + srv->net = net; +} + +#else + +static inline struct net *cifs_net_ns(struct TCP_Server_Info *srv) +{ + return &init_net; +} + +static inline void cifs_set_net_ns(struct TCP_Server_Info *srv, struct net *net) +{ +} + +#endif + +/* * Session structure. 
One of these for each uid session with a particular host */ struct cifsSesInfo { diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 18d3c77..0cc3b81 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -1568,6 +1568,9 @@ cifs_find_tcp_session(struct sockaddr *addr, struct smb_vol *vol) spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { + if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns)) + continue; + if (!match_address(server, addr, (struct sockaddr *)&vol->srcaddr)) continue; @@ -1598,6 +1601,8 @@ cifs_put_tcp_session(struct TCP_Server_Info *server) return; } + put_net(cifs_net_ns(server)); + list_del_init(&server->tcp_ses_list); spin_unlock(&cifs_tcp_ses_lock); @@ -1672,6 +1677,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info) goto out_err; } + cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns)); tcp_ses->hostname = extract_hostname(volume_info->UNC); if (IS_ERR(tcp_ses->hostname)) { rc = PTR_ERR(tcp_ses->hostname); @@ -1752,6 +1758,8 @@ cifs_get_tcp_session(struct smb_vol *volume_info) out_err_crypto_release: cifs_crypto_shash_release(tcp_ses); + put_net(cifs_net_ns(tcp_ses)); + out_err: if (tcp_ses) { if (!IS_ERR(tcp_ses->hostname)) @@ -2263,8 +2271,8 @@ generic_ip_connect(struct TCP_Server_Info *server) } if (socket == NULL) { - rc = sock_create_kern(sfamily, SOCK_STREAM, - IPPROTO_TCP, &socket); + rc = __sock_create(cifs_net_ns(server), sfamily, SOCK_STREAM, + IPPROTO_TCP, &socket, 1); if (rc < 0) { cERROR(1, "Error %d creating socket", rc); server->ssocket = NULL; diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h index e95a86b..e5c607a 100644 --- a/include/drm/radeon_drm.h +++ b/include/drm/radeon_drm.h @@ -907,6 +907,7 @@ struct drm_radeon_cs { #define RADEON_INFO_TILING_CONFIG 0x06 #define RADEON_INFO_WANT_HYPERZ 0x07 #define RADEON_INFO_WANT_CMASK 0x08 /* get access to CMASK on r300 */ +#define RADEON_INFO_CLOCK_CRYSTAL_FREQ 0x09 /* clock crystal frequency */ struct drm_radeon_info { uint32_t request; diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 46ad519..dddedfc 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -148,6 +148,7 @@ enum rq_flag_bits { __REQ_ALLOCED, /* request came from our alloc pool */ __REQ_COPY_USER, /* contains copies of user pages */ __REQ_FLUSH, /* request for cache flush */ + __REQ_FLUSH_SEQ, /* request for flush sequence */ __REQ_IO_STAT, /* account I/O stat */ __REQ_MIXED_MERGE, /* merge of different types, fail separately */ __REQ_SECURE, /* secure discard (used with __REQ_DISCARD) */ @@ -188,6 +189,7 @@ enum rq_flag_bits { #define REQ_ALLOCED (1 << __REQ_ALLOCED) #define REQ_COPY_USER (1 << __REQ_COPY_USER) #define REQ_FLUSH (1 << __REQ_FLUSH) +#define REQ_FLUSH_SEQ (1 << __REQ_FLUSH_SEQ) #define REQ_IO_STAT (1 << __REQ_IO_STAT) #define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE) #define REQ_SECURE (1 << __REQ_SECURE) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 4d18ff3..8a082a5 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -99,13 +99,18 @@ struct request { /* * The rb_node is only used inside the io scheduler, requests * are pruned when moved to the dispatch queue. So let the - * completion_data share space with the rb_node. + * flush fields share space with the rb_node. 
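The cifs_net_ns()/cifs_set_net_ns() helpers above follow the usual kernel pattern for a config-dependent field: with CONFIG_NET_NS the accessors touch the real member, and without it the setter is a no-op and the getter returns the global default, so callers such as cifs_find_tcp_session() and generic_ip_connect() build unchanged either way. A self-contained rendition of the pattern:

#include <stdio.h>

struct net { const char *name; };
static struct net init_net = { "init_net" };

#define CONFIG_NET_NS 1

struct server {
#ifdef CONFIG_NET_NS
        struct net *net;
#endif
};

#ifdef CONFIG_NET_NS
static struct net *server_net(struct server *s) { return s->net; }
static void server_set_net(struct server *s, struct net *n) { s->net = n; }
#else
/* Feature off: the field does not exist, callers get the default. */
static struct net *server_net(struct server *s) { (void)s; return &init_net; }
static void server_set_net(struct server *s, struct net *n) { (void)s; (void)n; }
#endif

int main(void)
{
        struct server s;

        server_set_net(&s, &init_net);
        printf("%s\n", server_net(&s)->name);
        return 0;
}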
*/ union { struct rb_node rb_node; /* sort/lookup */ - void *completion_data; + struct { + unsigned int seq; + struct list_head list; + } flush; }; + void *completion_data; + /* * Three pointers are available for the IO schedulers, if they need * more they have to dynamically allocate it. @@ -363,11 +368,12 @@ struct request_queue * for flush operations */ unsigned int flush_flags; - unsigned int flush_seq; - int flush_err; + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; struct request flush_rq; - struct request *orig_flush_rq; - struct list_head pending_flushes; struct mutex sysfs_lock; diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 4d85797..39b68ed 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -167,6 +167,7 @@ extern struct request *elv_rb_find(struct rb_root *, sector_t); #define ELEVATOR_INSERT_BACK 2 #define ELEVATOR_INSERT_SORT 3 #define ELEVATOR_INSERT_REQUEUE 4 +#define ELEVATOR_INSERT_FLUSH 5 /* * return values from elevator_may_queue_fn diff --git a/include/linux/rtc.h b/include/linux/rtc.h index 3c995b4..a0b639f 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -235,8 +235,6 @@ extern int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq); extern int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled); extern int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled); -extern int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, - unsigned int enabled); void rtc_aie_update_irq(void *private); void rtc_uie_update_irq(void *private); @@ -246,8 +244,6 @@ int rtc_register(rtc_task_t *task); int rtc_unregister(rtc_task_t *task); int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg); -void rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer); -void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer); void rtc_timer_init(struct rtc_timer *timer, void (*f)(void* p), void* data); int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer* timer, ktime_t expires, ktime_t period); diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h index 648d233..b76d400 100644 --- a/include/scsi/scsi.h +++ b/include/scsi/scsi.h @@ -9,6 +9,7 @@ #define _SCSI_SCSI_H #include +#include struct scsi_cmnd; diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 84522c7..126a302 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -2201,13 +2201,6 @@ find_lively_task_by_vpid(pid_t vpid) if (!task) return ERR_PTR(-ESRCH); - /* - * Can't attach events to a dying task. - */ - err = -ESRCH; - if (task->flags & PF_EXITING) - goto errout; - /* Reuse ptrace permission checks for now. */ err = -EACCES; if (!ptrace_may_access(task, PTRACE_MODE_READ)) @@ -2268,14 +2261,27 @@ retry: get_ctx(ctx); - if (cmpxchg(&task->perf_event_ctxp[ctxn], NULL, ctx)) { - /* - * We raced with some other task; use - * the context they set. - */ + err = 0; + mutex_lock(&task->perf_event_mutex); + /* + * If it has already passed perf_event_exit_task(). + * we must see PF_EXITING, it takes this mutex too. 
+	 */
+	if (task->flags & PF_EXITING)
+		err = -ESRCH;
+	else if (task->perf_event_ctxp[ctxn])
+		err = -EAGAIN;
+	else
+		rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
+	mutex_unlock(&task->perf_event_mutex);
+
+	if (unlikely(err)) {
 		put_task_struct(task);
 		kfree(ctx);
-		goto retry;
+
+		if (err == -EAGAIN)
+			goto retry;
+		goto errout;
 	}
 }
@@ -5374,6 +5380,8 @@ free_dev:
 	goto out;
 }
 
+static struct lock_class_key cpuctx_mutex;
+
 int perf_pmu_register(struct pmu *pmu, char *name, int type)
 {
 	int cpu, ret;
@@ -5422,6 +5430,7 @@ skip_type:
 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
 		__perf_event_init_context(&cpuctx->ctx);
+		lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
 		cpuctx->ctx.type = cpu_context;
 		cpuctx->ctx.pmu = pmu;
 		cpuctx->jiffies_interval = 1;
@@ -6127,7 +6136,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 	 * scheduled, so we are now safe from rescheduling changing
 	 * our context.
 	 */
-	child_ctx = child->perf_event_ctxp[ctxn];
+	child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
 	task_ctx_sched_out(child_ctx, EVENT_ALL);
 
 	/*
@@ -6440,11 +6449,6 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
 	unsigned long flags;
 	int ret = 0;
 
-	child->perf_event_ctxp[ctxn] = NULL;
-
-	mutex_init(&child->perf_event_mutex);
-	INIT_LIST_HEAD(&child->perf_event_list);
-
 	if (likely(!parent->perf_event_ctxp[ctxn]))
 		return 0;
@@ -6533,6 +6537,10 @@ int perf_event_init_task(struct task_struct *child)
 {
 	int ctxn, ret;
 
+	memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
+	mutex_init(&child->perf_event_mutex);
+	INIT_LIST_HEAD(&child->perf_event_list);
+
 	for_each_task_context_nr(ctxn) {
 		ret = perf_event_init_context(child, ctxn);
 		if (ret)
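The find_get_context() hunk above replaces the old lock-free cmpxchg() publication of task->perf_event_ctxp[ctxn] with publication under task->perf_event_mutex, the same mutex the task-exit path takes, which is what makes the PF_EXITING test race-free. A minimal user-space sketch of this publish-once-under-a-mutex pattern; struct task_slot, publish_ctx() and the exiting flag are invented names for illustration, not part of the patch:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

struct task_slot {
	pthread_mutex_t lock;	/* also taken by the teardown path */
	bool exiting;		/* set under lock when the task dies */
	void *ctx;		/* published at most once */
};

/*
 * Returns 0 on success, -ESRCH if the slot is past teardown, or
 * -EAGAIN if another thread won the race and the caller should
 * re-read the slot and reuse the winner's context.
 */
static int publish_ctx(struct task_slot *slot, void *ctx)
{
	int err = 0;

	pthread_mutex_lock(&slot->lock);
	if (slot->exiting)
		err = -ESRCH;		/* too late, teardown already ran */
	else if (slot->ctx)
		err = -EAGAIN;		/* lost the race to another thread */
	else
		slot->ctx = ctx;	/* we won, publish exactly once */
	pthread_mutex_unlock(&slot->lock);

	return err;
}

As in the hunk above, -EAGAIN is the retry case (the winner's pointer is reused), while -ESRCH is a hard failure because the task is already past its exit teardown.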
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 77e9166..3547699 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -699,7 +699,8 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	cfs_rq->nr_running--;
 }
 
-#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_FAIR_GROUP_SCHED
+# ifdef CONFIG_SMP
 static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq,
 					    int global_update)
 {
@@ -762,6 +763,51 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 		list_del_leaf_cfs_rq(cfs_rq);
 }
 
+static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg,
+			    long weight_delta)
+{
+	long load_weight, load, shares;
+
+	load = cfs_rq->load.weight + weight_delta;
+
+	load_weight = atomic_read(&tg->load_weight);
+	load_weight -= cfs_rq->load_contribution;
+	load_weight += load;
+
+	shares = (tg->shares * load);
+	if (load_weight)
+		shares /= load_weight;
+
+	if (shares < MIN_SHARES)
+		shares = MIN_SHARES;
+	if (shares > tg->shares)
+		shares = tg->shares;
+
+	return shares;
+}
+
+static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
+{
+	if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
+		update_cfs_load(cfs_rq, 0);
+		update_cfs_shares(cfs_rq, 0);
+	}
+}
+# else /* CONFIG_SMP */
+static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
+{
+}
+
+static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg,
+				   long weight_delta)
+{
+	return tg->shares;
+}
+
+static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
+{
+}
+# endif /* CONFIG_SMP */
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 			    unsigned long weight)
 {
@@ -782,7 +828,7 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
 {
 	struct task_group *tg;
 	struct sched_entity *se;
-	long load_weight, load, shares;
+	long shares;
 
 	if (!cfs_rq)
 		return;
@@ -791,32 +837,14 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
 	se = tg->se[cpu_of(rq_of(cfs_rq))];
 	if (!se)
 		return;
-
-	load = cfs_rq->load.weight + weight_delta;
-
-	load_weight = atomic_read(&tg->load_weight);
-	load_weight -= cfs_rq->load_contribution;
-	load_weight += load;
-
-	shares = (tg->shares * load);
-	if (load_weight)
-		shares /= load_weight;
-
-	if (shares < MIN_SHARES)
-		shares = MIN_SHARES;
-	if (shares > tg->shares)
-		shares = tg->shares;
+#ifndef CONFIG_SMP
+	if (likely(se->load.weight == tg->shares))
+		return;
+#endif
+	shares = calc_cfs_shares(cfs_rq, tg, weight_delta);
 
 	reweight_entity(cfs_rq_of(se), se, shares);
 }
-
-static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
-{
-	if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
-		update_cfs_load(cfs_rq, 0);
-		update_cfs_shares(cfs_rq, 0);
-	}
-}
 #else /* CONFIG_FAIR_GROUP_SCHED */
 static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 {
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 3e216e0..c55ea24 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -642,8 +642,7 @@ static void tick_nohz_switch_to_nohz(void)
 	}
 	local_irq_enable();
 
-	printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n",
-	       smp_processor_id());
+	printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n", smp_processor_id());
 }
 
 /*
@@ -795,8 +794,10 @@ void tick_setup_sched_timer(void)
 	}
 
 #ifdef CONFIG_NO_HZ
-	if (tick_nohz_enabled)
+	if (tick_nohz_enabled) {
 		ts->nohz_mode = NOHZ_MODE_HIGHRES;
+		printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n", smp_processor_id());
+	}
 #endif
 }
 #endif /* HIGH_RES_TIMERS */
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 153562d..d95721f 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -138,6 +138,13 @@ void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
 		     !blk_tracer_enabled))
 		return;
 
+	/*
+	 * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
+	 * message to the trace.
+ */ + if (!(bt->act_mask & BLK_TC_NOTIFY)) + return; + local_irq_save(flags); buf = per_cpu_ptr(bt->msg_data, smp_processor_id()); va_start(args, fmt); diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 2b5387d..7141c42 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -204,13 +204,11 @@ EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wshadow EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Winit-self EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wpacked EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wredundant-decls -EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstack-protector EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstrict-aliasing=3 EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wswitch-default EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wswitch-enum EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wno-system-headers EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wundef -EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wvolatile-register-var EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wwrite-strings EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wbad-function-cast EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wmissing-declarations @@ -294,6 +292,13 @@ ifeq ($(call try-cc,$(SOURCE_HELLO),-Werror -fstack-protector-all),y) CFLAGS := $(CFLAGS) -fstack-protector-all endif +ifeq ($(call try-cc,$(SOURCE_HELLO),-Werror -Wstack-protector),y) + CFLAGS := $(CFLAGS) -Wstack-protector +endif + +ifeq ($(call try-cc,$(SOURCE_HELLO),-Werror -Wvolatile-register-var),y) + CFLAGS := $(CFLAGS) -Wvolatile-register-var +endif ### --- END CONFIGURATION SECTION --- diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index c056cdc..8879463 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -212,7 +212,7 @@ get_source_line(struct hist_entry *he, int len, const char *filename) continue; offset = start + i; - sprintf(cmd, "addr2line -e %s %016llx", filename, offset); + sprintf(cmd, "addr2line -e %s %016" PRIx64, filename, offset); fp = popen(cmd, "r"); if (!fp) continue; @@ -270,9 +270,9 @@ static void hist_entry__print_hits(struct hist_entry *self) for (offset = 0; offset < len; ++offset) if (h->ip[offset] != 0) - printf("%*Lx: %Lu\n", BITS_PER_LONG / 2, + printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2, sym->start + offset, h->ip[offset]); - printf("%*s: %Lu\n", BITS_PER_LONG / 2, "h->sum", h->sum); + printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->sum", h->sum); } static int hist_entry__tty_annotate(struct hist_entry *he) diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index def7ddc..d97256d 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c @@ -371,10 +371,10 @@ static void __print_result(struct rb_root *root, struct perf_session *session, addr = data->ptr; if (sym != NULL) - snprintf(buf, sizeof(buf), "%s+%Lx", sym->name, + snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name, addr - map->unmap_ip(map, sym->start)); else - snprintf(buf, sizeof(buf), "%#Lx", addr); + snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr); printf(" %-34s |", buf); printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %8lu | %6.3f%%\n", diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c index b9c6e54..2b36def 100644 --- a/tools/perf/builtin-lock.c +++ b/tools/perf/builtin-lock.c @@ -782,9 +782,9 @@ static void print_result(void) pr_info("%10u ", st->nr_acquired); pr_info("%10u ", st->nr_contended); - pr_info("%15llu ", st->wait_time_total); - pr_info("%15llu ", st->wait_time_max); - pr_info("%15llu ", st->wait_time_min == ULLONG_MAX ? 
+ pr_info("%15" PRIu64 " ", st->wait_time_total); + pr_info("%15" PRIu64 " ", st->wait_time_max); + pr_info("%15" PRIu64 " ", st->wait_time_min == ULLONG_MAX ? 0 : st->wait_time_min); pr_info("\n"); } diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index fcd29e8..b2f729f 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -817,7 +817,7 @@ static int __cmd_record(int argc, const char **argv) * Approximate RIP event size: 24 bytes. */ fprintf(stderr, - "[ perf record: Captured and wrote %.3f MB %s (~%lld samples) ]\n", + "[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n", (double)bytes_written / 1024.0 / 1024.0, output_name, bytes_written / 24); diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 75183a4..c27e31f 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -197,7 +197,7 @@ static int process_read_event(event_t *event, struct sample_data *sample __used, event->read.value); } - dump_printf(": %d %d %s %Lu\n", event->read.pid, event->read.tid, + dump_printf(": %d %d %s %" PRIu64 "\n", event->read.pid, event->read.tid, attr ? __event_name(attr->type, attr->config) : "FAIL", event->read.value); diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 29e7ffd..29acb89 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -193,7 +193,7 @@ static void calibrate_run_measurement_overhead(void) } run_measurement_overhead = min_delta; - printf("run measurement overhead: %Ld nsecs\n", min_delta); + printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta); } static void calibrate_sleep_measurement_overhead(void) @@ -211,7 +211,7 @@ static void calibrate_sleep_measurement_overhead(void) min_delta -= 10000; sleep_measurement_overhead = min_delta; - printf("sleep measurement overhead: %Ld nsecs\n", min_delta); + printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta); } static struct sched_atom * @@ -617,13 +617,13 @@ static void test_calibrations(void) burn_nsecs(1e6); T1 = get_nsecs(); - printf("the run test took %Ld nsecs\n", T1-T0); + printf("the run test took %" PRIu64 " nsecs\n", T1 - T0); T0 = get_nsecs(); sleep_nsecs(1e6); T1 = get_nsecs(); - printf("the sleep test took %Ld nsecs\n", T1-T0); + printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0); } #define FILL_FIELD(ptr, field, event, data) \ @@ -816,10 +816,10 @@ replay_switch_event(struct trace_switch_event *switch_event, delta = 0; if (delta < 0) - die("hm, delta: %Ld < 0 ?\n", delta); + die("hm, delta: %" PRIu64 " < 0 ?\n", delta); if (verbose) { - printf(" ... switch from %s/%d to %s/%d [ran %Ld nsecs]\n", + printf(" ... 
switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n", switch_event->prev_comm, switch_event->prev_pid, switch_event->next_comm, switch_event->next_pid, delta); @@ -1048,7 +1048,7 @@ latency_switch_event(struct trace_switch_event *switch_event, delta = 0; if (delta < 0) - die("hm, delta: %Ld < 0 ?\n", delta); + die("hm, delta: %" PRIu64 " < 0 ?\n", delta); sched_out = perf_session__findnew(session, switch_event->prev_pid); @@ -1221,7 +1221,7 @@ static void output_lat_thread(struct work_atoms *work_list) avg = work_list->total_lat / work_list->nb_atoms; - printf("|%11.3f ms |%9llu | avg:%9.3f ms | max:%9.3f ms | max at: %9.6f s\n", + printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %9.6f s\n", (double)work_list->total_runtime / 1e6, work_list->nb_atoms, (double)avg / 1e6, (double)work_list->max_lat / 1e6, @@ -1423,7 +1423,7 @@ map_switch_event(struct trace_switch_event *switch_event, delta = 0; if (delta < 0) - die("hm, delta: %Ld < 0 ?\n", delta); + die("hm, delta: %" PRIu64 " < 0 ?\n", delta); sched_out = perf_session__findnew(session, switch_event->prev_pid); @@ -1713,7 +1713,7 @@ static void __cmd_lat(void) } printf(" -----------------------------------------------------------------------------------------\n"); - printf(" TOTAL: |%11.3f ms |%9Ld |\n", + printf(" TOTAL: |%11.3f ms |%9" PRIu64 " |\n", (double)all_runtime/1e6, all_count); printf(" ---------------------------------------------------\n"); diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index 150a606..b766c2a 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -77,8 +77,8 @@ static int process_sample_event(event_t *event, struct sample_data *sample, if (session->sample_type & PERF_SAMPLE_RAW) { if (debug_mode) { if (sample->time < last_timestamp) { - pr_err("Samples misordered, previous: %llu " - "this: %llu\n", last_timestamp, + pr_err("Samples misordered, previous: %" PRIu64 + " this: %" PRIu64 "\n", last_timestamp, sample->time); nr_unordered++; } @@ -126,7 +126,7 @@ static int __cmd_script(struct perf_session *session) ret = perf_session__process_events(session, &event_ops); if (debug_mode) - pr_err("Misordered timestamps: %llu\n", nr_unordered); + pr_err("Misordered timestamps: %" PRIu64 "\n", nr_unordered); return ret; } diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 0ff11d9..a482a19 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -206,8 +206,8 @@ static int read_counter_aggr(struct perf_evsel *counter) update_stats(&ps->res_stats[i], count[i]); if (verbose) { - fprintf(stderr, "%s: %Ld %Ld %Ld\n", event_name(counter), - count[0], count[1], count[2]); + fprintf(stderr, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n", + event_name(counter), count[0], count[1], count[2]); } /* diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c index ed56961..5dcdba6 100644 --- a/tools/perf/builtin-test.c +++ b/tools/perf/builtin-test.c @@ -146,7 +146,7 @@ next_pair: if (llabs(skew) < page_size) continue; - pr_debug("%#Lx: diff end addr for %s v: %#Lx k: %#Lx\n", + pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n", sym->start, sym->name, sym->end, pair->end); } else { struct rb_node *nnd; @@ -168,11 +168,11 @@ detour: goto detour; } - pr_debug("%#Lx: diff name v: %s k: %s\n", + pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n", sym->start, sym->name, pair->name); } } else - pr_debug("%#Lx: %s not on kallsyms\n", sym->start, sym->name); + pr_debug("%#" PRIx64 ": %s not on 
kallsyms\n", sym->start, sym->name); err = -1; } @@ -211,10 +211,10 @@ detour: if (pair->start == pos->start) { pair->priv = 1; - pr_info(" %Lx-%Lx %Lx %s in kallsyms as", + pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as", pos->start, pos->end, pos->pgoff, pos->dso->name); if (pos->pgoff != pair->pgoff || pos->end != pair->end) - pr_info(": \n*%Lx-%Lx %Lx", + pr_info(": \n*%" PRIx64 "-%" PRIx64 " %" PRIx64 "", pair->start, pair->end, pair->pgoff); pr_info(" %s\n", pair->dso->name); pair->priv = 1; @@ -307,7 +307,7 @@ static int test__open_syscall_event(void) } if (evsel->counts->cpu[0].val != nr_open_calls) { - pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %Ld\n", + pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n", nr_open_calls, evsel->counts->cpu[0].val); goto out_close_fd; } @@ -332,8 +332,7 @@ static int test__open_syscall_event_on_all_cpus(void) struct perf_evsel *evsel; struct perf_event_attr attr; unsigned int nr_open_calls = 111, i; - cpu_set_t *cpu_set; - size_t cpu_set_size; + cpu_set_t cpu_set; int id = trace_event__id("sys_enter_open"); if (id < 0) { @@ -353,13 +352,8 @@ static int test__open_syscall_event_on_all_cpus(void) return -1; } - cpu_set = CPU_ALLOC(cpus->nr); - if (cpu_set == NULL) - goto out_thread_map_delete; - - cpu_set_size = CPU_ALLOC_SIZE(cpus->nr); - CPU_ZERO_S(cpu_set_size, cpu_set); + CPU_ZERO(&cpu_set); memset(&attr, 0, sizeof(attr)); attr.type = PERF_TYPE_TRACEPOINT; @@ -367,7 +361,7 @@ static int test__open_syscall_event_on_all_cpus(void) evsel = perf_evsel__new(&attr, 0); if (evsel == NULL) { pr_debug("perf_evsel__new\n"); - goto out_cpu_free; + goto out_thread_map_delete; } if (perf_evsel__open(evsel, cpus, threads) < 0) { @@ -379,14 +373,29 @@ static int test__open_syscall_event_on_all_cpus(void) for (cpu = 0; cpu < cpus->nr; ++cpu) { unsigned int ncalls = nr_open_calls + cpu; + /* + * XXX eventually lift this restriction in a way that + * keeps perf building on older glibc installations + * without CPU_ALLOC. 
+		 * a reasonable upper limit tho :-)
+		 */
+		if (cpus->map[cpu] >= CPU_SETSIZE) {
+			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
+			continue;
+		}
-		CPU_SET(cpu, cpu_set);
-		sched_setaffinity(0, cpu_set_size, cpu_set);
+		CPU_SET(cpus->map[cpu], &cpu_set);
+		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
+			pr_debug("sched_setaffinity() failed on CPU %d: %s ",
+				 cpus->map[cpu],
+				 strerror(errno));
+			goto out_close_fd;
+		}
 
 		for (i = 0; i < ncalls; ++i) {
 			fd = open("/etc/passwd", O_RDONLY);
 			close(fd);
 		}
-		CPU_CLR(cpu, cpu_set);
+		CPU_CLR(cpus->map[cpu], &cpu_set);
 	}
 
 	/*
@@ -402,6 +411,9 @@
 	for (cpu = 0; cpu < cpus->nr; ++cpu) {
 		unsigned int expected;
 
+		if (cpus->map[cpu] >= CPU_SETSIZE)
+			continue;
+
 		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
 			pr_debug("perf_evsel__open_read_on_cpu\n");
 			goto out_close_fd;
@@ -409,8 +421,8 @@
 		expected = nr_open_calls + cpu;
 		if (evsel->counts->cpu[cpu].val != expected) {
-			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %Ld\n",
-				 expected, cpu, evsel->counts->cpu[cpu].val);
+			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
+				 expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);
 			goto out_close_fd;
 		}
 	}
@@ -420,8 +432,6 @@ out_close_fd:
 	perf_evsel__close_fd(evsel, 1, threads->nr);
 out_evsel_delete:
 	perf_evsel__delete(evsel);
-out_cpu_free:
-	CPU_FREE(cpu_set);
 out_thread_map_delete:
 	thread_map__delete(threads);
 	return err;
 }
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 05344c6..b6998e0 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -40,6 +40,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -214,7 +215,7 @@ static int parse_source(struct sym_entry *syme)
 	len = sym->end - sym->start;
 
 	sprintf(command,
-		"objdump --start-address=%#0*Lx --stop-address=%#0*Lx -dS %s",
+		"objdump --start-address=%#0*" PRIx64 " --stop-address=%#0*" PRIx64 " -dS %s",
 		BITS_PER_LONG / 4, map__rip_2objdump(map, sym->start),
 		BITS_PER_LONG / 4, map__rip_2objdump(map, sym->end),
 		path);
@@ -308,7 +309,7 @@ static void lookup_sym_source(struct sym_entry *syme)
 	struct source_line *line;
 	char pattern[PATTERN_LEN + 1];
 
-	sprintf(pattern, "%0*Lx <", BITS_PER_LONG / 4,
+	sprintf(pattern, "%0*" PRIx64 " <", BITS_PER_LONG / 4,
 		map__rip_2objdump(syme->map, symbol->start));
 
 	pthread_mutex_lock(&syme->src->lock);
@@ -537,7 +538,7 @@ static void print_sym_table(void)
 		if (nr_counters == 1 || !display_weighted) {
 			struct perf_evsel *first;
 			first = list_entry(evsel_list.next, struct perf_evsel, node);
-			printf("%Ld", first->attr.sample_period);
+			printf("%" PRIu64, (uint64_t)first->attr.sample_period);
 			if (freq)
 				printf("Hz ");
 			else
@@ -640,7 +641,7 @@ static void print_sym_table(void)
 		percent_color_fprintf(stdout, "%4.1f%%", pcnt);
 		if (verbose)
-			printf(" %016llx", sym->start);
+			printf(" %016" PRIx64, sym->start);
 		printf(" %-*.*s", sym_width, sym_width, sym->name);
 		printf(" %-*.*s\n", dso_width, dso_width,
		       dso_width >= syme->map->dso->long_name_len ?
		       syme->map->dso->long_name : syme->map->dso->short_name);
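Nearly all of the tools/perf hunks in this series are the same mechanical conversion: printf formats such as %Lu, %Ld and %016llx applied to u64 values are swapped for the C99 <inttypes.h> PRI* macros, because u64 is unsigned long on 64-bit glibc but unsigned long long on 32-bit, so no single hard-coded length modifier is portable. A stand-alone sketch of the idiom; the sample values are made up:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t ip = 0xffffffff81000000ULL;	/* sample address */
	uint64_t period = 4096;			/* sample period */

	/*
	 * Adjacent string literals concatenate at compile time, so
	 * "%#018" PRIx64 becomes "%#018lx" or "%#018llx" depending on
	 * the ABI, and -Wformat stays quiet on both 32- and 64-bit.
	 */
	printf("ip: %#018" PRIx64 " period: %" PRIu64 "\n", ip, period);
	return 0;
}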
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index 2302ec0..1478ab4 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -459,7 +459,8 @@ int event__process_comm(event_t *self, struct sample_data *sample __used, int event__process_lost(event_t *self, struct sample_data *sample __used, struct perf_session *session) { - dump_printf(": id:%Ld: lost:%Ld\n", self->lost.id, self->lost.lost); + dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n", + self->lost.id, self->lost.lost); session->hists.stats.total_lost += self->lost.lost; return 0; } @@ -575,7 +576,7 @@ int event__process_mmap(event_t *self, struct sample_data *sample __used, u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; int ret = 0; - dump_printf(" %d/%d: [%#Lx(%#Lx) @ %#Lx]: %s\n", + dump_printf(" %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n", self->mmap.pid, self->mmap.tid, self->mmap.start, self->mmap.len, self->mmap.pgoff, self->mmap.filename); diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 989fa2d..f6a929e 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -798,8 +798,8 @@ static int perf_file_section__process(struct perf_file_section *self, int feat, int fd) { if (lseek(fd, self->offset, SEEK_SET) == (off_t)-1) { - pr_debug("Failed to lseek to %Ld offset for feature %d, " - "continuing...\n", self->offset, feat); + pr_debug("Failed to lseek to %" PRIu64 " offset for feature " + "%d, continuing...\n", self->offset, feat); return 0; } diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index c749ba6..32f4f1f 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -636,13 +636,13 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size, } } } else - ret = snprintf(s, size, sep ? "%lld" : "%12lld ", period); + ret = snprintf(s, size, sep ? 
"%" PRIu64 : "%12" PRIu64 " ", period); if (symbol_conf.show_nr_samples) { if (sep) - ret += snprintf(s + ret, size - ret, "%c%lld", *sep, period); + ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period); else - ret += snprintf(s + ret, size - ret, "%11lld", period); + ret += snprintf(s + ret, size - ret, "%11" PRIu64, period); } if (pair_hists) { @@ -971,7 +971,7 @@ int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip) sym_size = sym->end - sym->start; offset = ip - sym->start; - pr_debug3("%s: ip=%#Lx\n", __func__, self->ms.map->unmap_ip(self->ms.map, ip)); + pr_debug3("%s: ip=%#" PRIx64 "\n", __func__, self->ms.map->unmap_ip(self->ms.map, ip)); if (offset >= sym_size) return 0; @@ -980,8 +980,9 @@ int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip) h->sum++; h->ip[offset]++; - pr_debug3("%#Lx %s: period++ [ip: %#Lx, %#Lx] => %Ld\n", self->ms.sym->start, - self->ms.sym->name, ip, ip - self->ms.sym->start, h->ip[offset]); + pr_debug3("%#" PRIx64 " %s: period++ [ip: %#" PRIx64 ", %#" PRIx64 + "] => %" PRIu64 "\n", self->ms.sym->start, self->ms.sym->name, + ip, ip - self->ms.sym->start, h->ip[offset]); return 0; } @@ -1132,7 +1133,7 @@ fallback: goto out_free_filename; } - pr_debug("%s: filename=%s, sym=%s, start=%#Lx, end=%#Lx\n", __func__, + pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__, filename, sym->name, map->unmap_ip(map, sym->start), map->unmap_ip(map, sym->end)); @@ -1142,7 +1143,7 @@ fallback: dso, dso->long_name, sym, sym->name); snprintf(command, sizeof(command), - "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS -C %s|grep -v %s|expand", + "objdump --start-address=0x%016" PRIx64 " --stop-address=0x%016" PRIx64 " -dS -C %s|grep -v %s|expand", map__rip_2objdump(map, sym->start), map__rip_2objdump(map, sym->end), symfs_filename, filename); diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h index 8be0b96..305c848 100644 --- a/tools/perf/util/include/linux/bitops.h +++ b/tools/perf/util/include/linux/bitops.h @@ -2,6 +2,7 @@ #define _PERF_LINUX_BITOPS_H_ #include +#include #include #define BITS_PER_LONG __WORDSIZE diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 3a7eb6e..a16ecab 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -1,5 +1,6 @@ #include "symbol.h" #include +#include #include #include #include @@ -195,7 +196,7 @@ int map__overlap(struct map *l, struct map *r) size_t map__fprintf(struct map *self, FILE *fp) { - return fprintf(fp, " %Lx-%Lx %Lx %s\n", + return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n", self->start, self->end, self->pgoff, self->dso->name); } diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index bc2732e..135f69b 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -279,7 +279,7 @@ const char *__event_name(int type, u64 config) static char buf[32]; if (type == PERF_TYPE_RAW) { - sprintf(buf, "raw 0x%llx", config); + sprintf(buf, "raw 0x%" PRIx64, config); return buf; } diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h index b82cafb..458e3ec 100644 --- a/tools/perf/util/parse-events.h +++ b/tools/perf/util/parse-events.h @@ -23,7 +23,7 @@ struct tracepoint_path { }; extern struct tracepoint_path *tracepoint_id_to_path(u64 config); -extern bool have_tracepoints(struct list_head *evsel_list); +extern bool have_tracepoints(struct list_head *evlist); extern int nr_counters; diff --git 
a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 128aaab..6e29d9c 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -172,7 +172,7 @@ static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp, sym = __find_kernel_function_by_name(tp->symbol, &map); if (sym) { addr = map->unmap_ip(map, sym->start + tp->offset); - pr_debug("try to find %s+%ld@%llx\n", tp->symbol, + pr_debug("try to find %s+%ld@%" PRIx64 "\n", tp->symbol, tp->offset, addr); ret = find_perf_probe_point((unsigned long)addr, pp); } diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 313dac2..105f00b 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -652,10 +652,11 @@ static void callchain__printf(struct sample_data *sample) { unsigned int i; - printf("... chain: nr:%Lu\n", sample->callchain->nr); + printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr); for (i = 0; i < sample->callchain->nr; i++) - printf("..... %2d: %016Lx\n", i, sample->callchain->ips[i]); + printf("..... %2d: %016" PRIx64 "\n", + i, sample->callchain->ips[i]); } static void perf_session__print_tstamp(struct perf_session *session, @@ -672,7 +673,7 @@ static void perf_session__print_tstamp(struct perf_session *session, printf("%u ", sample->cpu); if (session->sample_type & PERF_SAMPLE_TIME) - printf("%Lu ", sample->time); + printf("%" PRIu64 " ", sample->time); } static void dump_event(struct perf_session *session, event_t *event, @@ -681,16 +682,16 @@ static void dump_event(struct perf_session *session, event_t *event, if (!dump_trace) return; - printf("\n%#Lx [%#x]: event: %d\n", file_offset, event->header.size, - event->header.type); + printf("\n%#" PRIx64 " [%#x]: event: %d\n", + file_offset, event->header.size, event->header.type); trace_event(event); if (sample) perf_session__print_tstamp(session, event, sample); - printf("%#Lx [%#x]: PERF_RECORD_%s", file_offset, event->header.size, - event__get_event_name(event->header.type)); + printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset, + event->header.size, event__get_event_name(event->header.type)); } static void dump_sample(struct perf_session *session, event_t *event, @@ -699,8 +700,9 @@ static void dump_sample(struct perf_session *session, event_t *event, if (!dump_trace) return; - printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc, - sample->pid, sample->tid, sample->ip, sample->period); + printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 "\n", + event->header.misc, sample->pid, sample->tid, sample->ip, + sample->period); if (session->sample_type & PERF_SAMPLE_CALLCHAIN) callchain__printf(sample); @@ -843,8 +845,8 @@ static void perf_session__warn_about_errors(const struct perf_session *session, { if (ops->lost == event__process_lost && session->hists.stats.total_lost != 0) { - ui__warning("Processed %Lu events and LOST %Lu!\n\n" - "Check IO/CPU overload!\n\n", + ui__warning("Processed %" PRIu64 " events and LOST %" PRIu64 + "!\n\nCheck IO/CPU overload!\n\n", session->hists.stats.total_period, session->hists.stats.total_lost); } @@ -918,7 +920,7 @@ more: if (size == 0 || (skip = perf_session__process_event(self, &event, ops, head)) < 0) { - dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n", + dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n", head, event.header.size, event.header.type); /* * assume we lost track of the stream, check alignment, and @@ -1023,7 +1025,7 @@ more: if (size == 0 || perf_session__process_event(session, event, 
ops, file_pos) < 0) {
-		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
+		dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
 			    file_offset + head, event->header.size,
 			    event->header.type);
 		/*
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
index b3637db..fb737fe 100644
--- a/tools/perf/util/svghelper.c
+++ b/tools/perf/util/svghelper.c
@@ -12,6 +12,7 @@
  * of the License.
  */
 
+#include
 #include
 #include
 #include
@@ -43,11 +44,11 @@ static double cpu2y(int cpu)
 	return cpu2slot(cpu) * SLOT_MULT;
 }
 
-static double time2pixels(u64 time)
+static double time2pixels(u64 __time)
 {
 	double X;
 
-	X = 1.0 * svg_page_width * (time - first_time) / (last_time - first_time);
+	X = 1.0 * svg_page_width * (__time - first_time) / (last_time - first_time);
 	return X;
 }
 
@@ -94,7 +95,7 @@ void open_svg(const char *filename, int cpus, int rows, u64 start, u64 end)
 
 	total_height = (1 + rows + cpu2slot(cpus)) * SLOT_MULT;
 	fprintf(svgfile, "<?xml version=\"1.0\" standalone=\"no\"?> \n");
-	fprintf(svgfile, "<svg width=\"%i\" height=\"%llu\" version=\"1.1\" xmlns=\"http://www.w3.org/2000/svg\">\n", svg_page_width, total_height);
+	fprintf(svgfile, "<svg width=\"%i\" height=\"%" PRIu64 "\" version=\"1.1\" xmlns=\"http://www.w3.org/2000/svg\">\n", svg_page_width, total_height);
 	fprintf(svgfile, "\n