Index: linux-2.6.16-mm2/include/asm-ia64/bitops.h
===================================================================
--- linux-2.6.16-mm2.orig/include/asm-ia64/bitops.h	2006-03-30 21:21:04.000000000 -0800
+++ linux-2.6.16-mm2/include/asm-ia64/bitops.h	2006-03-31 11:17:53.000000000 -0800
@@ -63,10 +63,13 @@ __set_bit (int nr, volatile void *addr)
 }
 
 /*
- * clear_bit() has "acquire" semantics.
+ * The current Linux locking paradigm is oriented towards
+ * barriers. We cannot rely on acquire or release semantics
+ * to synchronize, but must implement full memory barriers
+ * to avoid potential subtle breakage.
  */
 #define smp_mb__before_clear_bit()	smp_mb()
-#define smp_mb__after_clear_bit()	do { /* skip */; } while (0)
+#define smp_mb__after_clear_bit()	smp_mb()
 
 /**
  * clear_bit - Clears a bit in memory
Index: linux-2.6.16-mm2/mm/vmscan.c
===================================================================
--- linux-2.6.16-mm2.orig/mm/vmscan.c	2006-03-30 21:21:06.000000000 -0800
+++ linux-2.6.16-mm2/mm/vmscan.c	2006-03-31 11:23:00.000000000 -0800
@@ -174,7 +174,7 @@ unsigned long shrink_slab(unsigned long
 	if (scanned == 0)
 		scanned = SWAP_CLUSTER_MAX;
 
-	if (!down_read_trylock(&shrinker_rwsem))
+	if (!down_write_trylock(&shrinker_rwsem))
 		return 1;	/* Assume we'll be able to shrink next time */
 
 	list_for_each_entry(shrinker, &shrinker_list, list) {
@@ -222,7 +222,7 @@ unsigned long shrink_slab(unsigned long
 		shrinker->nr += total_scan;
 	}
 
-	up_read(&shrinker_rwsem);
+	up_write(&shrinker_rwsem);
 
 	return ret;
 }
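
Why the full barrier matters: clear_bit() is commonly used as an unlock/release
step that must be ordered against a subsequent check for waiters. Below is a
minimal userspace C11 sketch of that pattern, not kernel code; the names
`flags`, `waiters`, `MY_BIT`, and `unlock_and_wake()` are hypothetical
stand-ins invented for illustration.

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint flags;	/* stand-in for the kernel bit field */
static atomic_int waiters;	/* stand-in for waitqueue_active() */

#define MY_BIT 0

static void unlock_and_wake(void)
{
	/* clear_bit() analogue: atomically clear one bit */
	atomic_fetch_and_explicit(&flags, ~(1u << MY_BIT),
				  memory_order_relaxed);
	/*
	 * smp_mb__after_clear_bit() analogue: a full fence so the
	 * cleared bit is visible before we load `waiters`.  If this
	 * were a no-op (the old ia64 definition), the load could be
	 * satisfied before the clear and a sleeper could be missed.
	 */
	atomic_thread_fence(memory_order_seq_cst);
	if (atomic_load_explicit(&waiters, memory_order_relaxed))
		printf("wake up sleeper\n");
}

int main(void)
{
	atomic_store(&flags, 1u << MY_BIT);	/* bit set: "locked" */
	atomic_store(&waiters, 1);		/* one sleeper waiting */
	unlock_and_wake();
	return 0;
}

Compiles with any C11 compiler, e.g. cc -std=c11.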
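
The vmscan.c hunks swap shared for exclusive acquisition of shrinker_rwsem
while keeping the trylock-or-bail shape of shrink_slab(). A userspace pthreads
sketch of that shape, assuming POSIX rwlocks as a rough analogue of kernel
rwsems; `shrink_slab_sketch()` is a hypothetical stand-in, not the kernel
function.

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t shrinker_rwsem = PTHREAD_RWLOCK_INITIALIZER;

static int shrink_slab_sketch(void)
{
	/* down_write_trylock() analogue: bail out instead of sleeping */
	if (pthread_rwlock_trywrlock(&shrinker_rwsem) != 0)
		return 1;	/* assume we'll be able to shrink next time */

	/* ... walk the shrinker list with exclusive access ... */

	pthread_rwlock_unlock(&shrinker_rwsem);	/* up_write() analogue */
	return 0;
}

int main(void)
{
	printf("shrink_slab_sketch() returned %d\n", shrink_slab_sketch());
	return 0;
}

Build with cc -pthread. The trylock keeps the reclaim path from blocking on
the semaphore; the switch to the write side makes the list walk exclusive.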