Merge branch 'idle-release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-idle-2.6

* 'idle-release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-idle-2.6:
  cpuidle/x86/perf: fix power:cpu_idle double end events and throw cpu_idle events from the cpuidle layer
  intel_idle: open broadcast clock event
  cpuidle: CPUIDLE_FLAG_CHECK_BM is omap3_idle specific
  cpuidle: CPUIDLE_FLAG_TLB_FLUSHED is specific to intel_idle
  cpuidle: delete unused CPUIDLE_FLAG_SHALLOW, BALANCED, DEEP definitions
  SH, cpuidle: delete use of NOP CPUIDLE_FLAGS_SHALLOW
  cpuidle: delete NOP CPUIDLE_FLAG_POLL
  ACPI: processor_idle: delete use of NOP CPUIDLE_FLAGs
  cpuidle: Rename X86 specific idle poll state[0] from C0 to POLL
  ACPI, intel_idle: Cleanup idle= internal variables
  cpuidle: Make cpuidle_enable_device() call poll_idle_init()
  intel_idle: update Sandy Bridge core C-state residency targets
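The recurring theme in the 'ACPI, intel_idle: Cleanup idle= internal variables' part of the diff below is that the scattered idle_halt / idle_nomwait / force_mwait flags are folded into a single enum-valued boot_option_idle_override. A minimal standalone C sketch of that pattern follows (not kernel code; the check_mwait_usable() helper and the user-space main() are illustrative assumptions, only the enum values mirror the x86 definition in the diff):

#include <stdio.h>

/* Enum values mirror the x86 definition added in the diff below. */
enum idle_boot_override { IDLE_NO_OVERRIDE = 0, IDLE_HALT, IDLE_NOMWAIT,
			  IDLE_POLL, IDLE_FORCE_MWAIT };

/* Single variable replacing the old idle_halt/idle_nomwait/force_mwait flags. */
static enum idle_boot_override boot_option_idle_override = IDLE_NO_OVERRIDE;

/* Hypothetical consumer: decide whether an MWAIT-based idle loop may be used. */
static int check_mwait_usable(void)
{
	if (boot_option_idle_override == IDLE_FORCE_MWAIT)
		return 1;	/* idle=mwait forces MWAIT */
	if (boot_option_idle_override == IDLE_NOMWAIT)
		return 0;	/* idle=nomwait forbids it */
	return 1;		/* otherwise left to normal CPU feature detection */
}

int main(void)
{
	boot_option_idle_override = IDLE_NOMWAIT;	/* as if booted with idle=nomwait */
	printf("mwait usable: %d\n", check_mwait_usable());
	return 0;
}
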
@@ -47,6 +47,8 @@
 #define OMAP3_STATE_MAX OMAP3_STATE_C7
 
+#define CPUIDLE_FLAG_CHECK_BM 0x10000 /* use omap3_enter_idle_bm() */
+
 struct omap3_processor_cx {
 	u8 valid;
 	u8 type;

@@ -717,8 +717,9 @@ prefetchw (const void *x)
 #define spin_lock_prefetch(x)	prefetchw(x)
 
 extern unsigned long boot_option_idle_override;
-extern unsigned long idle_halt;
-extern unsigned long idle_nomwait;
+
+enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_FORCE_MWAIT,
+			 IDLE_NOMWAIT, IDLE_POLL};
 
 #endif /* !__ASSEMBLY__ */

@@ -53,12 +53,8 @@
 void (*ia64_mark_idle)(int);
 
-unsigned long boot_option_idle_override = 0;
+unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
 EXPORT_SYMBOL(boot_option_idle_override);
-unsigned long idle_halt;
-EXPORT_SYMBOL(idle_halt);
-unsigned long idle_nomwait;
-EXPORT_SYMBOL(idle_nomwait);
 void (*pm_idle) (void);
 EXPORT_SYMBOL(pm_idle);
 void (*pm_power_off) (void);

@@ -81,7 +81,6 @@ void sh_mobile_setup_cpuidle(void)
 	state->target_residency = 1 * 2;
 	state->power_usage = 3;
 	state->flags = 0;
-	state->flags |= CPUIDLE_FLAG_SHALLOW;
 	state->flags |= CPUIDLE_FLAG_TIME_VALID;
 	state->enter = cpuidle_sleep_enter;

@@ -761,10 +761,11 @@ extern void select_idle_routine(const struct cpuinfo_x86 *c);
 extern void init_c1e_mask(void);
 
 extern unsigned long boot_option_idle_override;
-extern unsigned long idle_halt;
-extern unsigned long idle_nomwait;
 extern bool c1e_detected;
 
+enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
+			 IDLE_POLL, IDLE_FORCE_MWAIT};
+
 extern void enable_sep_cpu(void);
 extern int sysenter_setup(void);

@@ -22,11 +22,6 @@
 #include <asm/i387.h>
 #include <asm/debugreg.h>
 
-unsigned long idle_halt;
-EXPORT_SYMBOL(idle_halt);
-unsigned long idle_nomwait;
-EXPORT_SYMBOL(idle_nomwait);
-
 struct kmem_cache *task_xstate_cachep;
 EXPORT_SYMBOL_GPL(task_xstate_cachep);

@@ -327,7 +322,7 @@ long sys_execve(const char __user *name,
 /*
  * Idle related variables and functions
  */
-unsigned long boot_option_idle_override = 0;
+unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
 EXPORT_SYMBOL(boot_option_idle_override);
 
 /*

@@ -386,6 +381,8 @@ void default_idle(void)
 		else
 			local_irq_enable();
 		current_thread_info()->status |= TS_POLLING;
+		trace_power_end(smp_processor_id());
+		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 	} else {
 		local_irq_enable();
 		/* loop is done by the caller */

@@ -443,8 +440,6 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait);
  */
 void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
 {
-	trace_power_start(POWER_CSTATE, (ax>>4)+1, smp_processor_id());
-	trace_cpu_idle((ax>>4)+1, smp_processor_id());
 	if (!need_resched()) {
 		if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR))
 			clflush((void *)&current_thread_info()->flags);

@@ -471,6 +466,8 @@ static void mwait_idle(void)
 			__sti_mwait(0, 0);
 		else
 			local_irq_enable();
+		trace_power_end(smp_processor_id());
+		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 	} else
 		local_irq_enable();
 }

@@ -503,7 +500,6 @@ static void poll_idle(void)
  *
  * idle=mwait overrides this decision and forces the usage of mwait.
  */
-static int __cpuinitdata force_mwait;
 
 #define MWAIT_INFO 0x05
 #define MWAIT_ECX_EXTENDED_INFO 0x01

@@ -513,7 +509,7 @@ static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
 {
 	u32 eax, ebx, ecx, edx;
 
-	if (force_mwait)
+	if (boot_option_idle_override == IDLE_FORCE_MWAIT)
 		return 1;
 
 	if (c->cpuid_level < MWAIT_INFO)

@@ -633,9 +629,10 @@ static int __init idle_setup(char *str)
 	if (!strcmp(str, "poll")) {
 		printk("using polling idle threads.\n");
 		pm_idle = poll_idle;
-	} else if (!strcmp(str, "mwait"))
-		force_mwait = 1;
-	else if (!strcmp(str, "halt")) {
+		boot_option_idle_override = IDLE_POLL;
+	} else if (!strcmp(str, "mwait")) {
+		boot_option_idle_override = IDLE_FORCE_MWAIT;
+	} else if (!strcmp(str, "halt")) {
 		/*
 		 * When the boot option of idle=halt is added, halt is
 		 * forced to be used for CPU idle. In such case CPU C2/C3

@@ -644,8 +641,7 @@ static int __init idle_setup(char *str)
 		 * the boot_option_idle_override.
 		 */
 		pm_idle = default_idle;
-		idle_halt = 1;
-		return 0;
+		boot_option_idle_override = IDLE_HALT;
 	} else if (!strcmp(str, "nomwait")) {
 		/*
 		 * If the boot option of "idle=nomwait" is added,

@@ -653,12 +649,10 @@ static int __init idle_setup(char *str)
 		 * states. In such case it won't touch the variable
 		 * of boot_option_idle_override.
 		 */
-		idle_nomwait = 1;
-		return 0;
+		boot_option_idle_override = IDLE_NOMWAIT;
 	} else
 		return -1;
 
-	boot_option_idle_override = 1;
 	return 0;
 }
 early_param("idle", idle_setup);

@@ -57,8 +57,6 @@
 #include <asm/syscalls.h>
 #include <asm/debugreg.h>
 
-#include <trace/events/power.h>
-
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
 /*

@@ -113,8 +111,6 @@ void cpu_idle(void)
 			stop_critical_timings();
 			pm_idle();
 			start_critical_timings();
-			trace_power_end(smp_processor_id());
-			trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 		}
 		tick_nohz_restart_sched_tick();
 		preempt_enable_no_resched();

@@ -51,8 +51,6 @@
 #include <asm/syscalls.h>
 #include <asm/debugreg.h>
 
-#include <trace/events/power.h>
-
 asmlinkage extern void ret_from_fork(void);
 
 DEFINE_PER_CPU(unsigned long, old_rsp);

@@ -141,10 +139,6 @@ void cpu_idle(void)
 			pm_idle();
 			start_critical_timings();
 
-			trace_power_end(smp_processor_id());
-			trace_cpu_idle(PWR_EVENT_EXIT,
-				       smp_processor_id());
-
 			/* In many cases the interrupt that ended idle
 			   has already called exit_idle. But some idle
 			   loops can be woken up without interrupt. */