percpu: remove per_cpu__ prefix.

Now that the return value of alloc_percpu() is compatible with the
address of per-cpu variables, it makes sense to hand around the
addresses of per-cpu variables. To make this sane, we remove the
per_cpu__ prefix we created to stop people from accidentally using
these variables directly.

Now that we have sparse, we can use that instead (next patch).

tj: * Updated to convert code which was missed by or added after the
      original patch.
    * Kill per_cpu_var() macro.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
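
As context for the diff below, here is a minimal sketch of the name-mangling
scheme this patch removes. This is a userspace mock-up: the macro names echo
the kernel's, but their bodies are simplified stand-ins for illustration, not
the kernel's actual definitions.

/*
 * Sketch only: OLD_DEFINE_PER_CPU/NEW_DEFINE_PER_CPU are hypothetical
 * stand-ins for the kernel's DEFINE_PER_CPU before and after this patch.
 */
#include <stdio.h>

/* Old scheme: the definition mangles the name with a per_cpu__ prefix,
 * so a plain use of "old_counter" will not compile ... */
#define OLD_DEFINE_PER_CPU(type, name)	type per_cpu__##name
/* ... and every access site must paste the prefix back on. */
#define per_cpu_var(name)		per_cpu__##name

OLD_DEFINE_PER_CPU(int, old_counter);

/* New scheme: the variable keeps its plain name, so its address can be
 * handed around like a pointer obtained from alloc_percpu(). */
#define NEW_DEFINE_PER_CPU(type, name)	type name

NEW_DEFINE_PER_CPU(int, new_counter);

int main(void)
{
	per_cpu_var(old_counter)++;	/* old: prefix pasted at each use */
	new_counter++;			/* new: plain identifier */

	/* Taking the address now works like any other variable; in the
	 * kernel, the sparse __percpu annotation (next patch) takes over
	 * the job of catching accidental direct accesses. */
	int *p = &new_counter;
	printf("%d %d\n", per_cpu_var(old_counter), *p);
	return 0;
}

With the prefix gone, per_cpu_var() has no job left, which is why the patch
kills it and call sites shrink from __this_cpu_inc(per_cpu_var(x)) to
__this_cpu_inc(x), as the hunks below show.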
@@ -731,13 +731,13 @@ static void rcu_torture_timer(unsigned long unused)
 		/* Should not happen, but... */
 		pipe_count = RCU_TORTURE_PIPE_LEN;
 	}
-	__this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]);
+	__this_cpu_inc(rcu_torture_count[pipe_count]);
 	completed = cur_ops->completed() - completed;
 	if (completed > RCU_TORTURE_PIPE_LEN) {
 		/* Should not happen, but... */
 		completed = RCU_TORTURE_PIPE_LEN;
 	}
-	__this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]);
+	__this_cpu_inc(rcu_torture_batch[completed]);
 	preempt_enable();
 	cur_ops->readunlock(idx);
 }
@@ -786,13 +786,13 @@ rcu_torture_reader(void *arg)
 			/* Should not happen, but... */
 			pipe_count = RCU_TORTURE_PIPE_LEN;
 		}
-		__this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]);
+		__this_cpu_inc(rcu_torture_count[pipe_count]);
 		completed = cur_ops->completed() - completed;
 		if (completed > RCU_TORTURE_PIPE_LEN) {
 			/* Should not happen, but... */
 			completed = RCU_TORTURE_PIPE_LEN;
 		}
-		__this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]);
+		__this_cpu_inc(rcu_torture_batch[completed]);
 		preempt_enable();
 		cur_ops->readunlock(idx);
 		schedule();
@@ -91,12 +91,12 @@ DEFINE_PER_CPU(int, ftrace_cpu_disabled);
 static inline void ftrace_disable_cpu(void)
 {
 	preempt_disable();
-	__this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_inc(ftrace_cpu_disabled);
 }
 
 static inline void ftrace_enable_cpu(void)
 {
-	__this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_dec(ftrace_cpu_disabled);
 	preempt_enable();
 }
 
@@ -1085,7 +1085,7 @@ trace_function(struct trace_array *tr,
 	struct ftrace_entry *entry;
 
 	/* If we are reading the ring buffer, don't trace */
-	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
 		return;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
@@ -176,7 +176,7 @@ static int __trace_graph_entry(struct trace_array *tr,
 	struct ring_buffer *buffer = tr->buffer;
 	struct ftrace_graph_ent_entry *entry;
 
-	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
 		return 0;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
@@ -240,7 +240,7 @@ static void __trace_graph_return(struct trace_array *tr,
 	struct ring_buffer *buffer = tr->buffer;
 	struct ftrace_graph_ret_entry *entry;
 
-	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
 		return;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,