---
 kernel/trace/ring_buffer.c |  104 ++++++++++++++++++++++----------------------
 1 file changed, 52 insertions(+), 52 deletions(-)

Index: linux-2.6/kernel/trace/ring_buffer.c
===================================================================
--- linux-2.6.orig/kernel/trace/ring_buffer.c	2009-10-07 17:17:48.000000000 -0500
+++ linux-2.6/kernel/trace/ring_buffer.c	2009-10-07 17:31:01.000000000 -0500
@@ -311,7 +311,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data
 
 struct buffer_data_page {
 	u64		 time_stamp;	/* page time stamp */
-	local_t		 commit;	/* write committed index */
+	int		 commit;	/* write committed index */
 	unsigned char	 data[];	/* data of buffer page */
 };
 
@@ -325,9 +325,9 @@ struct buffer_data_page {
  */
 struct buffer_page {
 	struct list_head list;		/* list of buffer pages */
-	local_t		 write;		/* index for next write */
+	int		 write;		/* index for next write */
 	unsigned	 read;		/* index for next read */
-	local_t		 entries;	/* entries on this page */
+	int		 entries;	/* entries on this page */
 	struct buffer_data_page *page;	/* Actual data page */
 };
 
@@ -348,7 +348,7 @@ struct buffer_page {
 
 static void rb_init_page(struct buffer_data_page *bpage)
 {
-	local_set(&bpage->commit, 0);
+	bpage->commit = 0;
 }
 
 /**
@@ -359,7 +359,7 @@ static void rb_init_page(struct buffer_d
  */
 size_t ring_buffer_page_len(void *page)
 {
-	return local_read(&((struct buffer_data_page *)page)->commit)
+	return ((struct buffer_data_page *)page)->commit
 			+ BUF_PAGE_HDR_SIZE;
 }
 
@@ -427,11 +427,11 @@ struct ring_buffer_per_cpu {
 	struct buffer_page		*tail_page;	/* write to tail */
 	struct buffer_page		*commit_page;	/* committed pages */
 	struct buffer_page		*reader_page;
-	local_t				commit_overrun;
-	local_t				overrun;
-	local_t				entries;
-	local_t				committing;
-	local_t				commits;
+	int				commit_overrun;
+	int				overrun;
+	int				entries;
+	int				committing;
+	int				commits;
 	unsigned long			read;
 	u64				write_stamp;
 	u64				read_stamp;
@@ -857,7 +857,7 @@ static int rb_tail_page_update(struct ri
 	 * it only can increment when a commit takes place. But that
 	 * only happens in the outer most nested commit.
 	 */
-	local_set(&next_page->page->commit, 0);
+	next_page->page->commit = 0;
 
 	old_tail = cmpxchg(&cpu_buffer->tail_page,
 			   tail_page, next_page);
@@ -1395,17 +1395,17 @@ rb_iter_head_event(struct ring_buffer_it
 
 static inline unsigned long rb_page_write(struct buffer_page *bpage)
 {
-	return local_read(&bpage->write) & RB_WRITE_MASK;
+	return bpage->write & RB_WRITE_MASK;
 }
 
 static inline unsigned rb_page_commit(struct buffer_page *bpage)
 {
-	return local_read(&bpage->page->commit);
+	return bpage->page->commit;
 }
 
 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
 {
-	return local_read(&bpage->entries) & RB_WRITE_MASK;
+	return bpage->entries & RB_WRITE_MASK;
 }
 
 /* Size is determined by what has been commited */
@@ -1464,8 +1464,8 @@ rb_set_commit_to_write(struct ring_buffe
 	if (RB_WARN_ON(cpu_buffer,
 		       rb_is_reader_page(cpu_buffer->tail_page)))
 		return;
-	local_set(&cpu_buffer->commit_page->page->commit,
-		  rb_page_write(cpu_buffer->commit_page));
+	cpu_buffer->commit_page->page->commit =
+		rb_page_write(cpu_buffer->commit_page);
 	rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
 	cpu_buffer->write_stamp =
 		cpu_buffer->commit_page->page->time_stamp;
@@ -1475,10 +1475,10 @@ rb_set_commit_to_write(struct ring_buffe
 	while (rb_commit_index(cpu_buffer) !=
 	       rb_page_write(cpu_buffer->commit_page)) {
 
-		local_set(&cpu_buffer->commit_page->page->commit,
-			  rb_page_write(cpu_buffer->commit_page));
+		cpu_buffer->commit_page->page->commit =
+			rb_page_write(cpu_buffer->commit_page);
 		RB_WARN_ON(cpu_buffer,
-			   local_read(&cpu_buffer->commit_page->page->commit) &
+			   cpu_buffer->commit_page->page->commit &
 			   ~RB_WRITE_MASK);
 		barrier();
 	}
@@ -1947,7 +1947,7 @@ rb_try_to_discard(struct ring_buffer_per
 
 	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
 		unsigned long write_mask =
-			local_read(&bpage->write) & ~RB_WRITE_MASK;
+			bpage->write & ~RB_WRITE_MASK;
 		/*
 		 * This is on the tail page. It is possible that
 		 * a write could come in and move the tail page
@@ -2043,14 +2043,14 @@ static void rb_end_commit(struct ring_bu
 	unsigned long commits;
 
 	if (RB_WARN_ON(cpu_buffer,
-		       !local_read(&cpu_buffer->committing)))
+		       !cpu_buffer->committing))
 		return;
 
  again:
-	commits = local_read(&cpu_buffer->commits);
+	commits = cpu_buffer->commits;
 	/* synchronize with interrupts */
 	barrier();
-	if (local_read(&cpu_buffer->committing) == 1)
+	if (cpu_buffer->committing == 1)
 		rb_set_commit_to_write(cpu_buffer);
 
 	local_dec(&cpu_buffer->committing);
@@ -2063,8 +2063,8 @@ static void rb_end_commit(struct ring_bu
 	 * updating of the commit page and the clearing of the
 	 * committing counter.
 	 */
-	if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
-	    !local_read(&cpu_buffer->committing)) {
+	if (unlikely(cpu_buffer->commits != commits) &&
+	    !cpu_buffer->committing) {
 		local_inc(&cpu_buffer->committing);
 		goto again;
 	}
@@ -2419,7 +2419,7 @@ void ring_buffer_discard_commit(struct r
 	 * committed yet. Thus we can assume that preemption
 	 * is still disabled.
 	 */
-	RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
+	RB_WARN_ON(buffer, !cpu_buffer->committing);
 
 	rb_decrement_entry(cpu_buffer, event);
 	if (rb_try_to_discard(cpu_buffer, event))
@@ -2608,7 +2608,7 @@ unsigned long ring_buffer_entries_cpu(st
 		return 0;
 
 	cpu_buffer = buffer->buffers[cpu];
-	ret = (local_read(&cpu_buffer->entries) - local_read(&cpu_buffer->overrun))
+	ret = cpu_buffer->entries - cpu_buffer->overrun
 		- cpu_buffer->read;
 
 	return ret;
@@ -2629,7 +2629,7 @@ unsigned long ring_buffer_overrun_cpu(st
 		return 0;
 
 	cpu_buffer = buffer->buffers[cpu];
-	ret = local_read(&cpu_buffer->overrun);
+	ret = cpu_buffer->overrun;
 
 	return ret;
 }
@@ -2650,7 +2650,7 @@ ring_buffer_commit_overrun_cpu(struct ri
 		return 0;
 
 	cpu_buffer = buffer->buffers[cpu];
-	ret = local_read(&cpu_buffer->commit_overrun);
+	ret = cpu_buffer->commit_overrun;
 
 	return ret;
 }
@@ -2672,8 +2672,8 @@ unsigned long ring_buffer_entries(struct
 	/* if you care about this being correct, lock the buffer */
 	for_each_buffer_cpu(buffer, cpu) {
 		cpu_buffer = buffer->buffers[cpu];
-		entries += (local_read(&cpu_buffer->entries) -
-			    local_read(&cpu_buffer->overrun)) - cpu_buffer->read;
+		entries += cpu_buffer->entries -
+			cpu_buffer->overrun - cpu_buffer->read;
 	}
 
 	return entries;
@@ -2696,7 +2696,7 @@ unsigned long ring_buffer_overruns(struc
 	/* if you care about this being correct, lock the buffer */
 	for_each_buffer_cpu(buffer, cpu) {
 		cpu_buffer = buffer->buffers[cpu];
-		overruns += local_read(&cpu_buffer->overrun);
+		overruns += cpu_buffer->overrun;
 	}
 
 	return overruns;
@@ -2865,9 +2865,9 @@ rb_get_reader_page(struct ring_buffer_pe
 	/*
 	 * Reset the reader page to size zero.
 	 */
-	local_set(&cpu_buffer->reader_page->write, 0);
-	local_set(&cpu_buffer->reader_page->entries, 0);
-	local_set(&cpu_buffer->reader_page->page->commit, 0);
+	cpu_buffer->reader_page->write = 0;
+	cpu_buffer->reader_page->entries = 0;
+	cpu_buffer->reader_page->page->commit = 0;
 
  spin:
 	/*
@@ -3358,9 +3358,9 @@ rb_reset_cpu(struct ring_buffer_per_cpu
 	cpu_buffer->head_page
 		= list_entry(cpu_buffer->pages, struct buffer_page, list);
-	local_set(&cpu_buffer->head_page->write, 0);
-	local_set(&cpu_buffer->head_page->entries, 0);
-	local_set(&cpu_buffer->head_page->page->commit, 0);
+	cpu_buffer->head_page->write = 0;
+	cpu_buffer->head_page->entries = 0;
+	cpu_buffer->head_page->page->commit = 0;
 
 	cpu_buffer->head_page->read = 0;
 
@@ -3368,16 +3368,16 @@ rb_reset_cpu(struct ring_buffer_per_cpu
 	cpu_buffer->commit_page = cpu_buffer->head_page;
 
 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
-	local_set(&cpu_buffer->reader_page->write, 0);
-	local_set(&cpu_buffer->reader_page->entries, 0);
-	local_set(&cpu_buffer->reader_page->page->commit, 0);
+	cpu_buffer->reader_page->write = 0;
+	cpu_buffer->reader_page->entries = 0;
+	cpu_buffer->reader_page->page->commit = 0;
 	cpu_buffer->reader_page->read = 0;
 
-	local_set(&cpu_buffer->commit_overrun, 0);
-	local_set(&cpu_buffer->overrun, 0);
-	local_set(&cpu_buffer->entries, 0);
-	local_set(&cpu_buffer->committing, 0);
-	local_set(&cpu_buffer->commits, 0);
+	cpu_buffer->commit_overrun = 0;
+	cpu_buffer->overrun = 0;
+	cpu_buffer->entries = 0;
+	cpu_buffer->committing = 0;
+	cpu_buffer->commits = 0;
 	cpu_buffer->read = 0;
 
 	cpu_buffer->write_stamp = 0;
@@ -3403,7 +3403,7 @@ void ring_buffer_reset_cpu(struct ring_b
 
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
-	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
+	if (RB_WARN_ON(cpu_buffer, cpu_buffer->committing))
 		goto out;
 
 	__raw_spin_lock(&cpu_buffer->lock);
@@ -3551,9 +3551,9 @@ int ring_buffer_swap_cpu(struct ring_buf
 	atomic_inc(&cpu_buffer_b->record_disabled);
 
 	ret = -EBUSY;
-	if (local_read(&cpu_buffer_a->committing))
+	if (cpu_buffer_a->committing)
 		goto out_dec;
-	if (local_read(&cpu_buffer_b->committing))
+	if (cpu_buffer_b->committing)
 		goto out_dec;
 
 	buffer_a->buffers[cpu] = cpu_buffer_b;
@@ -3737,7 +3737,7 @@ int ring_buffer_read_page(struct ring_bu
 		} while (len > size);
 
 		/* update bpage */
-		local_set(&bpage->commit, pos);
+		bpage->commit = pos;
 		bpage->time_stamp = save_timestamp;
 
 		/* we copied everything to the beginning */
@@ -3750,8 +3750,8 @@ int ring_buffer_read_page(struct ring_bu
 		rb_init_page(bpage);
 		bpage = reader->page;
 		reader->page = *data_page;
-		local_set(&reader->write, 0);
-		local_set(&reader->entries, 0);
+		reader->write = 0;
+		reader->entries = 0;
 		reader->read = 0;
 		*data_page = bpage;
 	}
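
For reference, a minimal sketch (not part of the patch; the struct and function
names below are made up for illustration) of what the conversion changes at each
call site: the local_t fields go through the local_set()/local_read()/local_inc()
accessors, while the plain-int fields use ordinary loads and stores. That is only
safe under the assumption this patch relies on, namely that every update to these
counters happens on the CPU that owns the per-cpu buffer.

#include <asm/local.h>

/* Hypothetical per-cpu counter, not taken from ring_buffer.c */
struct demo_counter {
	local_t	as_local;	/* before: updated via local_t accessors */
	int	as_int;		/* after:  plain C load/store */
};

static void demo_reset_and_count(struct demo_counter *c)
{
	/* local_t API: atomic only with respect to the owning CPU */
	local_set(&c->as_local, 0);
	local_inc(&c->as_local);

	/* plain int: ordinary assignment and increment */
	c->as_int = 0;
	c->as_int++;
}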