{set,clear,test}_bit() related cleanup

From: mita@miraclelinux.com (Akinobu Mita)

While working on this patch set, I found several possible cleanups
on x86-64 and ia64.  The common pattern behind these conversions is
sketched after the diff.

Signed-off-by: Akinobu Mita
Signed-off-by: Andi Kleen

Index: linux/arch/x86_64/kernel/mce.c
===================================================================
--- linux.orig/arch/x86_64/kernel/mce.c
+++ linux/arch/x86_64/kernel/mce.c
@@ -139,8 +139,7 @@ static void mce_panic(char *msg, struct 
 
 static int mce_available(struct cpuinfo_x86 *c)
 {
-	return test_bit(X86_FEATURE_MCE, &c->x86_capability) &&
-	       test_bit(X86_FEATURE_MCA, &c->x86_capability);
+	return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
 }
 
 static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
Index: linux/arch/x86_64/kernel/setup.c
===================================================================
--- linux.orig/arch/x86_64/kernel/setup.c
+++ linux/arch/x86_64/kernel/setup.c
@@ -1340,8 +1340,7 @@ static int show_cpuinfo(struct seq_file 
 	{
 		int i;
 		for ( i = 0 ; i < 32*NCAPINTS ; i++ )
-			if ( test_bit(i, &c->x86_capability) &&
-			     x86_cap_flags[i] != NULL )
+			if (cpu_has(c, i) && x86_cap_flags[i] != NULL )
 				seq_printf(m, " %s", x86_cap_flags[i]);
 	}
Index: linux/include/asm-x86_64/mmu_context.h
===================================================================
--- linux.orig/include/asm-x86_64/mmu_context.h
+++ linux/include/asm-x86_64/mmu_context.h
@@ -34,12 +34,12 @@ static inline void switch_mm(struct mm_s
 	unsigned cpu = smp_processor_id();
 	if (likely(prev != next)) {
 		/* stop flush ipis for the previous mm */
-		clear_bit(cpu, &prev->cpu_vm_mask);
+		cpu_clear(cpu, prev->cpu_vm_mask);
 #ifdef CONFIG_SMP
 		write_pda(mmu_state, TLBSTATE_OK);
 		write_pda(active_mm, next);
 #endif
-		set_bit(cpu, &next->cpu_vm_mask);
+		cpu_set(cpu, next->cpu_vm_mask);
 		load_cr3(next->pgd);
 
 		if (unlikely(next->context.ldt != prev->context.ldt))
@@ -50,7 +50,7 @@ static inline void switch_mm(struct mm_s
 			write_pda(mmu_state, TLBSTATE_OK);
 			if (read_pda(active_mm) != next)
 				out_of_line_bug();
-			if(!test_and_set_bit(cpu, &next->cpu_vm_mask)) {
+			if(!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
 				/* We were in lazy tlb mode and leave_mm disabled
 				 * tlb flush IPI delivery. We must reload CR3
 				 * to make sure to use no freed page tables.
Index: linux/arch/x86_64/pci/mmconfig.c
===================================================================
--- linux.orig/arch/x86_64/pci/mmconfig.c
+++ linux/arch/x86_64/pci/mmconfig.c
@@ -46,7 +46,7 @@ static char __iomem *get_virt(unsigned i
 static char __iomem *pci_dev_base(unsigned int seg, unsigned int bus, unsigned int devfn)
 {
 	char __iomem *addr;
-	if (seg == 0 && bus == 0 && test_bit(PCI_SLOT(devfn), &fallback_slots))
+	if (seg == 0 && bus == 0 && test_bit(PCI_SLOT(devfn), fallback_slots))
 		return NULL;
 	addr = get_virt(seg, bus);
 	if (!addr)
@@ -134,7 +134,7 @@ static __init void unreachable_devices(v
 			continue;
 		addr = pci_dev_base(0, 0, PCI_DEVFN(i, 0));
 		if (addr == NULL|| readl(addr) != val1) {
-			set_bit(i, &fallback_slots);
+			set_bit(i, fallback_slots);
 		}
 	}
 }
Index: linux/include/asm-x86_64/pgtable.h
===================================================================
--- linux.orig/include/asm-x86_64/pgtable.h
+++ linux/include/asm-x86_64/pgtable.h
@@ -293,19 +293,19 @@ static inline int ptep_test_and_clear_di
 {
 	if (!pte_dirty(*ptep))
 		return 0;
-	return test_and_clear_bit(_PAGE_BIT_DIRTY, ptep);
+	return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte);
 }
 
 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
 	if (!pte_young(*ptep))
 		return 0;
-	return test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep);
+	return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte);
 }
 
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
-	clear_bit(_PAGE_BIT_RW, ptep);
+	clear_bit(_PAGE_BIT_RW, &ptep->pte);
 }
 
 /*
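
The common thread in these hunks is that the generic bitops (set_bit(),
clear_bit(), test_bit(), test_and_*_bit()) operate on an array of
unsigned long, so callers should either hand them a correctly typed
bitmap (fallback_slots, &ptep->pte) or go through the typed wrappers
(cpu_has(), cpu_set(), cpu_clear(), cpu_test_and_set()) instead of
passing the address of some other field.  The stand-alone sketch below
only illustrates that pattern outside the kernel; the helper names
(bm_set_bit, bm_test_bit, FEATURE_HAS) and the struct cpuinfo layout
are illustrative stand-ins, not the kernel's own definitions.

#include <limits.h>
#include <stdio.h>

/*
 * Toy stand-ins for the generic set_bit()/test_bit(): like the kernel
 * bitops, they operate on an array of unsigned long.
 */
#define BITS_PER_LONG   (CHAR_BIT * sizeof(unsigned long))
#define BITMAP_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static void bm_set_bit(int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static int bm_test_bit(int nr, const unsigned long *map)
{
	return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

/* Hypothetical per-CPU feature structure, analogous to cpuinfo_x86. */
#define NFEATURES 64
struct cpuinfo {
	unsigned long features[BITMAP_LONGS(NFEATURES)];
};

/*
 * Typed accessor in the spirit of cpu_has(): callers never touch the
 * raw bitmap, so they cannot pass a pointer of the wrong type.
 */
#define FEATURE_HAS(c, bit) bm_test_bit((bit), (c)->features)

enum { FEATURE_MCE = 7, FEATURE_MCA = 14 };

int main(void)
{
	struct cpuinfo c = { { 0 } };

	bm_set_bit(FEATURE_MCE, c.features);
	bm_set_bit(FEATURE_MCA, c.features);

	/* Mirrors the mce_available() change: test via the accessor. */
	if (FEATURE_HAS(&c, FEATURE_MCE) && FEATURE_HAS(&c, FEATURE_MCA))
		printf("machine check support available\n");
	return 0;
}

Built with any C compiler this prints the availability message; the
point is only that all bitmap traffic goes through one unsigned long
array and one accessor macro, mirroring the conversions above.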