From: Roel Kluin <12o3l@tiscali.nl>

Because the _lock routines are faster and provide a better example to follow.

Signed-off-by: Roel Kluin <12o3l@tiscali.nl>
Signed-off-by: Andrew Morton
---

 lib/likely_prof.c |    5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff -puN lib/likely_prof.c~likely_prof-update-to-test_and_set_bit_lock-clear_bit_unlock lib/likely_prof.c
--- a/lib/likely_prof.c~likely_prof-update-to-test_and_set_bit_lock-clear_bit_unlock
+++ a/lib/likely_prof.c
@@ -36,7 +36,7 @@ int do_check_likely(struct likeliness *l
 	 * disable and it was a bit cleaner then using internal __raw
 	 * spinlock calls.
 	 */
-	if (!test_and_set_bit(0, &likely_lock)) {
+	if (!test_and_set_bit_lock(0, &likely_lock)) {
 		if (likeliness->label & LP_UNSEEN) {
 			likeliness->label &= (~LP_UNSEEN);
 			likeliness->next = likeliness_head;
@@ -44,8 +44,7 @@ int do_check_likely(struct likeliness *l
 			likeliness->caller = (unsigned long)
 					__builtin_return_address(0);
 		}
-		smp_mb__before_clear_bit();
-		clear_bit(0, &likely_lock);
+		clear_bit_unlock(0, &likely_lock);
 	}
 }
_
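
For reference, a minimal sketch of the acquire/release pattern the patch switches to. The function name update_stats() and the stats_lock variable below are illustrative only, not part of the patch:

	#include <linux/bitops.h>

	static unsigned long stats_lock;	/* bit 0 used as a simple lock */

	static void update_stats(void)
	{
		/*
		 * test_and_set_bit_lock() has acquire semantics: a return of 0
		 * means the bit was clear and the caller now holds the "lock",
		 * and accesses in the critical section cannot be reordered
		 * before it.
		 */
		if (!test_and_set_bit_lock(0, &stats_lock)) {
			/* ... critical section ... */

			/*
			 * clear_bit_unlock() has release semantics, so the
			 * explicit smp_mb__before_clear_bit() + clear_bit()
			 * pair is no longer needed.
			 */
			clear_bit_unlock(0, &stats_lock);
		}
	}

As in the original do_check_likely() code, this is a trylock: a contending caller simply skips the critical section instead of spinning.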