Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched-devel
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched-devel: (62 commits)
  sched: build fix
  sched: better rt-group documentation
  sched: features fix
  sched: /debug/sched_features
  sched: add SCHED_FEAT_DEADLINE
  sched: debug: show a weight tree
  sched: fair: weight calculations
  sched: fair-group: de-couple load-balancing from the rb-trees
  sched: fair-group scheduling vs latency
  sched: rt-group: optimize dequeue_rt_stack
  sched: debug: add some debug code to handle the full hierarchy
  sched: fair-group: SMP-nice for group scheduling
  sched, cpuset: customize sched domains, core
  sched, cpuset: customize sched domains, docs
  sched: prepatory code movement
  sched: rt: multi level group constraints
  sched: task_group hierarchy
  sched: fix the task_group hierarchy for UID grouping
  sched: allow the group scheduler to have multiple levels
  sched: mix tasks and groups
  ...
kernel/compat.c
@@ -445,7 +445,7 @@ asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid,
 	if (retval)
 		return retval;
 
-	return sched_setaffinity(pid, new_mask);
+	return sched_setaffinity(pid, &new_mask);
 }
 
 asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
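The change above only affects how the compat wrapper hands the mask to sched_setaffinity() (by pointer rather than by value). As an aside, and not part of the patch, here is a minimal userspace sketch of the affinity API that this syscall path ultimately serves:

```c
/* Minimal userspace demo of the affinity syscalls; not part of the patch.
 * Build with: gcc -D_GNU_SOURCE affinity_demo.c */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(0, &mask);		/* allow CPU 0 only for this thread */
	if (sched_setaffinity(0, sizeof(mask), &mask) < 0) {
		perror("sched_setaffinity");
		return EXIT_FAILURE;
	}
	if (sched_getaffinity(0, sizeof(mask), &mask) < 0) {
		perror("sched_getaffinity");
		return EXIT_FAILURE;
	}
	printf("CPU 0 in current mask: %s\n", CPU_ISSET(0, &mask) ? "yes" : "no");
	return 0;
}
```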
kernel/cpu.c
@@ -232,9 +232,9 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen)
 
 	/* Ensure that we are not runnable on dying cpu */
 	old_allowed = current->cpus_allowed;
-	tmp = CPU_MASK_ALL;
+	cpus_setall(tmp);
 	cpu_clear(cpu, tmp);
-	set_cpus_allowed(current, tmp);
+	set_cpus_allowed_ptr(current, &tmp);
 
 	p = __stop_machine_run(take_cpu_down, &tcd_param, cpu);
 
@@ -268,7 +268,7 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen)
 out_thread:
 	err = kthread_stop(p);
 out_allowed:
-	set_cpus_allowed(current, old_allowed);
+	set_cpus_allowed_ptr(current, &old_allowed);
 out_release:
 	cpu_hotplug_done();
 	return err;
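The pattern above (build a cpumask_t on the stack, then hand the scheduler a pointer with set_cpus_allowed_ptr()) recurs throughout this merge. The motivation is mask size: with NR_CPUS up to 4096 a cpumask_t is 512 bytes, so passing it by value copies half a kilobyte per call. Below is a standalone userspace illustration of that trade-off; the two functions are local stand-ins named after the kernel ones, not the kernel implementations:

```c
/* Standalone illustration of by-value vs. by-pointer cpumask passing.
 * The struct and both functions are mock-ups, not kernel code. */
#include <stdio.h>
#include <string.h>

#define NR_CPUS 4096

typedef struct {
	unsigned long bits[NR_CPUS / (8 * sizeof(unsigned long))];
} cpumask_t;

/* old style: the whole mask is copied into the callee's frame */
static int set_cpus_allowed(cpumask_t mask)		{ return (int)sizeof(mask); }
/* new style: only a pointer crosses the call */
static int set_cpus_allowed_ptr(const cpumask_t *mask)	{ return (int)sizeof(mask); }

int main(void)
{
	cpumask_t tmp;

	memset(&tmp, 0xff, sizeof(tmp));	/* roughly: cpus_setall(tmp)   */
	tmp.bits[0] &= ~1UL;			/* roughly: cpu_clear(0, tmp)  */

	printf("by value  : %d bytes cross the call\n", set_cpus_allowed(tmp));
	printf("by pointer: %d bytes cross the call\n", set_cpus_allowed_ptr(&tmp));
	return 0;
}
```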
kernel/cpuset.c (100 lines changed)
@@ -98,6 +98,9 @@ struct cpuset {
 	/* partition number for rebuild_sched_domains() */
 	int pn;
 
+	/* for custom sched domain */
+	int relax_domain_level;
+
 	/* used for walking a cpuset heirarchy */
 	struct list_head stack_list;
 };
@@ -478,6 +481,16 @@ static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
 	return cpus_intersects(a->cpus_allowed, b->cpus_allowed);
 }
 
+static void
+update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
+{
+	if (!dattr)
+		return;
+	if (dattr->relax_domain_level < c->relax_domain_level)
+		dattr->relax_domain_level = c->relax_domain_level;
+	return;
+}
+
 /*
  * rebuild_sched_domains()
  *
@@ -553,12 +566,14 @@ static void rebuild_sched_domains(void)
 	int csn;		/* how many cpuset ptrs in csa so far */
 	int i, j, k;		/* indices for partition finding loops */
 	cpumask_t *doms;	/* resulting partition; i.e. sched domains */
+	struct sched_domain_attr *dattr;  /* attributes for custom domains */
 	int ndoms;		/* number of sched domains in result */
 	int nslot;		/* next empty doms[] cpumask_t slot */
 
 	q = NULL;
 	csa = NULL;
 	doms = NULL;
+	dattr = NULL;
 
 	/* Special case for the 99% of systems with one, full, sched domain */
 	if (is_sched_load_balance(&top_cpuset)) {
@@ -566,6 +581,11 @@ static void rebuild_sched_domains(void)
 		doms = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
 		if (!doms)
 			goto rebuild;
+		dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
+		if (dattr) {
+			*dattr = SD_ATTR_INIT;
+			update_domain_attr(dattr, &top_cpuset);
+		}
 		*doms = top_cpuset.cpus_allowed;
 		goto rebuild;
 	}
@@ -622,6 +642,7 @@ restart:
 	doms = kmalloc(ndoms * sizeof(cpumask_t), GFP_KERNEL);
 	if (!doms)
 		goto rebuild;
+	dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);
 
 	for (nslot = 0, i = 0; i < csn; i++) {
 		struct cpuset *a = csa[i];
@@ -644,12 +665,15 @@ restart:
 			}
 
 			cpus_clear(*dp);
+			if (dattr)
+				*(dattr + nslot) = SD_ATTR_INIT;
 			for (j = i; j < csn; j++) {
 				struct cpuset *b = csa[j];
 
 				if (apn == b->pn) {
 					cpus_or(*dp, *dp, b->cpus_allowed);
 					b->pn = -1;
+					update_domain_attr(dattr, b);
 				}
 			}
 			nslot++;
@@ -660,7 +684,7 @@ restart:
 rebuild:
 	/* Have scheduler rebuild sched domains */
 	get_online_cpus();
-	partition_sched_domains(ndoms, doms);
+	partition_sched_domains(ndoms, doms, dattr);
 	put_online_cpus();
 
 done:
@@ -668,6 +692,7 @@ done:
 		kfifo_free(q);
 	kfree(csa);
 	/* Don't kfree(doms) -- partition_sched_domains() does that. */
+	/* Don't kfree(dattr) -- partition_sched_domains() does that. */
 }
 
 static inline int started_after_time(struct task_struct *t1,
@@ -729,7 +754,7 @@ int cpuset_test_cpumask(struct task_struct *tsk, struct cgroup_scanner *scan)
  */
 void cpuset_change_cpumask(struct task_struct *tsk, struct cgroup_scanner *scan)
 {
-	set_cpus_allowed(tsk, (cgroup_cs(scan->cg))->cpus_allowed);
+	set_cpus_allowed_ptr(tsk, &((cgroup_cs(scan->cg))->cpus_allowed));
 }
 
 /**
@@ -1011,6 +1036,21 @@ static int update_memory_pressure_enabled(struct cpuset *cs, char *buf)
 	return 0;
 }
 
+static int update_relax_domain_level(struct cpuset *cs, char *buf)
+{
+	int val = simple_strtol(buf, NULL, 10);
+
+	if (val < 0)
+		val = -1;
+
+	if (val != cs->relax_domain_level) {
+		cs->relax_domain_level = val;
+		rebuild_sched_domains();
+	}
+
+	return 0;
+}
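update_relax_domain_level() above is what runs when userspace writes to the new per-cpuset sched_relax_domain_level file. A small userspace illustration follows; it is not from the patch, and it assumes cpusets are mounted at /dev/cpuset and that a cpuset named "rt_set" already exists:

```c
/* Illustration only: drive the new sched_relax_domain_level knob.
 * The mount point and cpuset name are assumptions, not kernel defaults. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *path = "/dev/cpuset/rt_set/sched_relax_domain_level";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}
	/* Negative values are clamped to -1 ("use the system default"),
	 * as update_relax_domain_level() above shows. */
	fprintf(f, "%d\n", 1);
	fclose(f);
	return 0;
}
```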
+
 /*
  * update_flag - read a 0 or a 1 in a file and update associated flag
  * bit:	the bit to update (CS_CPU_EXCLUSIVE, CS_MEM_EXCLUSIVE,
@@ -1178,7 +1218,7 @@ static void cpuset_attach(struct cgroup_subsys *ss,
 
 	mutex_lock(&callback_mutex);
 	guarantee_online_cpus(cs, &cpus);
-	set_cpus_allowed(tsk, cpus);
+	set_cpus_allowed_ptr(tsk, &cpus);
 	mutex_unlock(&callback_mutex);
 
 	from = oldcs->mems_allowed;
@@ -1202,6 +1242,7 @@ typedef enum {
 	FILE_CPU_EXCLUSIVE,
 	FILE_MEM_EXCLUSIVE,
 	FILE_SCHED_LOAD_BALANCE,
+	FILE_SCHED_RELAX_DOMAIN_LEVEL,
 	FILE_MEMORY_PRESSURE_ENABLED,
 	FILE_MEMORY_PRESSURE,
 	FILE_SPREAD_PAGE,
@@ -1256,6 +1297,9 @@ static ssize_t cpuset_common_file_write(struct cgroup *cont,
 	case FILE_SCHED_LOAD_BALANCE:
 		retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, buffer);
 		break;
+	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
+		retval = update_relax_domain_level(cs, buffer);
+		break;
 	case FILE_MEMORY_MIGRATE:
 		retval = update_flag(CS_MEMORY_MIGRATE, cs, buffer);
 		break;
@@ -1354,6 +1398,9 @@ static ssize_t cpuset_common_file_read(struct cgroup *cont,
 	case FILE_SCHED_LOAD_BALANCE:
 		*s++ = is_sched_load_balance(cs) ? '1' : '0';
 		break;
+	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
+		s += sprintf(s, "%d", cs->relax_domain_level);
+		break;
 	case FILE_MEMORY_MIGRATE:
 		*s++ = is_memory_migrate(cs) ? '1' : '0';
 		break;
@@ -1424,6 +1471,13 @@ static struct cftype cft_sched_load_balance = {
 	.private = FILE_SCHED_LOAD_BALANCE,
 };
 
+static struct cftype cft_sched_relax_domain_level = {
+	.name = "sched_relax_domain_level",
+	.read = cpuset_common_file_read,
+	.write = cpuset_common_file_write,
+	.private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
+};
+
 static struct cftype cft_memory_migrate = {
 	.name = "memory_migrate",
 	.read = cpuset_common_file_read,
@@ -1475,6 +1529,9 @@ static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont)
 		return err;
 	if ((err = cgroup_add_file(cont, ss, &cft_sched_load_balance)) < 0)
 		return err;
+	if ((err = cgroup_add_file(cont, ss,
+					&cft_sched_relax_domain_level)) < 0)
+		return err;
 	if ((err = cgroup_add_file(cont, ss, &cft_memory_pressure)) < 0)
 		return err;
 	if ((err = cgroup_add_file(cont, ss, &cft_spread_page)) < 0)
@@ -1555,10 +1612,11 @@ static struct cgroup_subsys_state *cpuset_create(
 	if (is_spread_slab(parent))
 		set_bit(CS_SPREAD_SLAB, &cs->flags);
 	set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
-	cs->cpus_allowed = CPU_MASK_NONE;
-	cs->mems_allowed = NODE_MASK_NONE;
+	cpus_clear(cs->cpus_allowed);
+	nodes_clear(cs->mems_allowed);
 	cs->mems_generation = cpuset_mems_generation++;
 	fmeter_init(&cs->fmeter);
+	cs->relax_domain_level = -1;
 
 	cs->parent = parent;
 	number_of_cpusets++;
@@ -1625,12 +1683,13 @@ int __init cpuset_init(void)
 {
 	int err = 0;
 
-	top_cpuset.cpus_allowed = CPU_MASK_ALL;
-	top_cpuset.mems_allowed = NODE_MASK_ALL;
+	cpus_setall(top_cpuset.cpus_allowed);
+	nodes_setall(top_cpuset.mems_allowed);
 
 	fmeter_init(&top_cpuset.fmeter);
 	top_cpuset.mems_generation = cpuset_mems_generation++;
 	set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
+	top_cpuset.relax_domain_level = -1;
 
 	err = register_filesystem(&cpuset_fs_type);
 	if (err < 0)
@@ -1844,6 +1903,7 @@ void __init cpuset_init_smp(void)
 
  * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
  * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
+ * @pmask: pointer to cpumask_t variable to receive cpus_allowed set.
  *
  * Description: Returns the cpumask_t cpus_allowed of the cpuset
  * attached to the specified @tsk. Guaranteed to return some non-empty
@@ -1851,35 +1911,27 @@ void __init cpuset_init_smp(void)
  * tasks cpuset.
  **/
 
-cpumask_t cpuset_cpus_allowed(struct task_struct *tsk)
+void cpuset_cpus_allowed(struct task_struct *tsk, cpumask_t *pmask)
 {
-	cpumask_t mask;
-
 	mutex_lock(&callback_mutex);
-	mask = cpuset_cpus_allowed_locked(tsk);
+	cpuset_cpus_allowed_locked(tsk, pmask);
 	mutex_unlock(&callback_mutex);
-
-	return mask;
 }
 
 /**
  * cpuset_cpus_allowed_locked - return cpus_allowed mask from a tasks cpuset.
  * Must be called with callback_mutex held.
  **/
-cpumask_t cpuset_cpus_allowed_locked(struct task_struct *tsk)
+void cpuset_cpus_allowed_locked(struct task_struct *tsk, cpumask_t *pmask)
 {
-	cpumask_t mask;
-
 	task_lock(tsk);
-	guarantee_online_cpus(task_cs(tsk), &mask);
+	guarantee_online_cpus(task_cs(tsk), pmask);
 	task_unlock(tsk);
-
-	return mask;
 }
 
 void cpuset_init_current_mems_allowed(void)
 {
-	current->mems_allowed = NODE_MASK_ALL;
+	nodes_setall(current->mems_allowed);
 }
 
 /**
@@ -2261,8 +2313,16 @@ void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
 	m->count += cpumask_scnprintf(m->buf + m->count, m->size - m->count,
 					task->cpus_allowed);
 	seq_printf(m, "\n");
+	seq_printf(m, "Cpus_allowed_list:\t");
+	m->count += cpulist_scnprintf(m->buf + m->count, m->size - m->count,
+					task->cpus_allowed);
+	seq_printf(m, "\n");
 	seq_printf(m, "Mems_allowed:\t");
 	m->count += nodemask_scnprintf(m->buf + m->count, m->size - m->count,
 					task->mems_allowed);
 	seq_printf(m, "\n");
+	seq_printf(m, "Mems_allowed_list:\t");
+	m->count += nodelist_scnprintf(m->buf + m->count, m->size - m->count,
+					task->mems_allowed);
+	seq_printf(m, "\n");
 }
kernel/irq/chip.c
@@ -47,7 +47,7 @@ void dynamic_irq_init(unsigned int irq)
 	desc->irq_count = 0;
 	desc->irqs_unhandled = 0;
 #ifdef CONFIG_SMP
-	desc->affinity = CPU_MASK_ALL;
+	cpus_setall(desc->affinity);
 #endif
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
kernel/kmod.c
@@ -165,7 +165,7 @@ static int ____call_usermodehelper(void *data)
 	}
 
 	/* We can run anywhere, unlike our parent keventd(). */
-	set_cpus_allowed(current, CPU_MASK_ALL);
+	set_cpus_allowed_ptr(current, CPU_MASK_ALL_PTR);
 
 	/*
 	 * Our parent is keventd, which runs with elevated scheduling priority.
kernel/kthread.c
@@ -180,6 +180,7 @@ void kthread_bind(struct task_struct *k, unsigned int cpu)
 	wait_task_inactive(k);
 	set_task_cpu(k, cpu);
 	k->cpus_allowed = cpumask_of_cpu(cpu);
+	k->rt.nr_cpus_allowed = 1;
 }
 EXPORT_SYMBOL(kthread_bind);
@@ -64,8 +64,8 @@ account_global_scheduler_latency(struct task_struct *tsk, struct latency_record
|
||||
return;
|
||||
|
||||
for (i = 0; i < MAXLR; i++) {
|
||||
int q;
|
||||
int same = 1;
|
||||
int q, same = 1;
|
||||
|
||||
/* Nothing stored: */
|
||||
if (!latency_record[i].backtrace[0]) {
|
||||
if (firstnonnull > i)
|
||||
@@ -73,12 +73,15 @@ account_global_scheduler_latency(struct task_struct *tsk, struct latency_record
|
||||
continue;
|
||||
}
|
||||
for (q = 0 ; q < LT_BACKTRACEDEPTH ; q++) {
|
||||
if (latency_record[i].backtrace[q] !=
|
||||
lat->backtrace[q])
|
||||
unsigned long record = lat->backtrace[q];
|
||||
|
||||
if (latency_record[i].backtrace[q] != record) {
|
||||
same = 0;
|
||||
if (same && lat->backtrace[q] == 0)
|
||||
break;
|
||||
if (same && lat->backtrace[q] == ULONG_MAX)
|
||||
}
|
||||
|
||||
/* 0 and ULONG_MAX entries mean end of backtrace: */
|
||||
if (record == 0 || record == ULONG_MAX)
|
||||
break;
|
||||
}
|
||||
if (same) {
|
||||
@@ -143,14 +146,18 @@ account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
|
||||
for (i = 0; i < LT_SAVECOUNT ; i++) {
|
||||
struct latency_record *mylat;
|
||||
int same = 1;
|
||||
|
||||
mylat = &tsk->latency_record[i];
|
||||
for (q = 0 ; q < LT_BACKTRACEDEPTH ; q++) {
|
||||
if (mylat->backtrace[q] !=
|
||||
lat.backtrace[q])
|
||||
unsigned long record = lat.backtrace[q];
|
||||
|
||||
if (mylat->backtrace[q] != record) {
|
||||
same = 0;
|
||||
if (same && lat.backtrace[q] == 0)
|
||||
break;
|
||||
if (same && lat.backtrace[q] == ULONG_MAX)
|
||||
}
|
||||
|
||||
/* 0 and ULONG_MAX entries mean end of backtrace: */
|
||||
if (record == 0 || record == ULONG_MAX)
|
||||
break;
|
||||
}
|
||||
if (same) {
|
||||
|
||||
kernel/rcupreempt.c
@@ -1007,10 +1007,10 @@ void __synchronize_sched(void)
 	if (sched_getaffinity(0, &oldmask) < 0)
 		oldmask = cpu_possible_map;
 	for_each_online_cpu(cpu) {
-		sched_setaffinity(0, cpumask_of_cpu(cpu));
+		sched_setaffinity(0, &cpumask_of_cpu(cpu));
 		schedule();
 	}
-	sched_setaffinity(0, oldmask);
+	sched_setaffinity(0, &oldmask);
 }
 EXPORT_SYMBOL_GPL(__synchronize_sched);
@@ -723,9 +723,10 @@ static int rcu_idle_cpu; /* Force all torture tasks off this CPU */
|
||||
*/
|
||||
static void rcu_torture_shuffle_tasks(void)
|
||||
{
|
||||
cpumask_t tmp_mask = CPU_MASK_ALL;
|
||||
cpumask_t tmp_mask;
|
||||
int i;
|
||||
|
||||
cpus_setall(tmp_mask);
|
||||
get_online_cpus();
|
||||
|
||||
/* No point in shuffling if there is only one online CPU (ex: UP) */
|
||||
@@ -737,25 +738,27 @@ static void rcu_torture_shuffle_tasks(void)
|
||||
if (rcu_idle_cpu != -1)
|
||||
cpu_clear(rcu_idle_cpu, tmp_mask);
|
||||
|
||||
set_cpus_allowed(current, tmp_mask);
|
||||
set_cpus_allowed_ptr(current, &tmp_mask);
|
||||
|
||||
if (reader_tasks) {
|
||||
for (i = 0; i < nrealreaders; i++)
|
||||
if (reader_tasks[i])
|
||||
set_cpus_allowed(reader_tasks[i], tmp_mask);
|
||||
set_cpus_allowed_ptr(reader_tasks[i],
|
||||
&tmp_mask);
|
||||
}
|
||||
|
||||
if (fakewriter_tasks) {
|
||||
for (i = 0; i < nfakewriters; i++)
|
||||
if (fakewriter_tasks[i])
|
||||
set_cpus_allowed(fakewriter_tasks[i], tmp_mask);
|
||||
set_cpus_allowed_ptr(fakewriter_tasks[i],
|
||||
&tmp_mask);
|
||||
}
|
||||
|
||||
if (writer_task)
|
||||
set_cpus_allowed(writer_task, tmp_mask);
|
||||
set_cpus_allowed_ptr(writer_task, &tmp_mask);
|
||||
|
||||
if (stats_task)
|
||||
set_cpus_allowed(stats_task, tmp_mask);
|
||||
set_cpus_allowed_ptr(stats_task, &tmp_mask);
|
||||
|
||||
if (rcu_idle_cpu == -1)
|
||||
rcu_idle_cpu = num_online_cpus() - 1;
|
||||
|
||||
kernel/sched.c (1914 lines changed; diff not shown here because it is too large)
@@ -67,14 +67,24 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
|
||||
(long long)(p->nvcsw + p->nivcsw),
|
||||
p->prio);
|
||||
#ifdef CONFIG_SCHEDSTATS
|
||||
SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld\n",
|
||||
SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
|
||||
SPLIT_NS(p->se.vruntime),
|
||||
SPLIT_NS(p->se.sum_exec_runtime),
|
||||
SPLIT_NS(p->se.sum_sleep_runtime));
|
||||
#else
|
||||
SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld\n",
|
||||
SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
|
||||
0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_CGROUP_SCHED
|
||||
{
|
||||
char path[64];
|
||||
|
||||
cgroup_path(task_group(p)->css.cgroup, path, sizeof(path));
|
||||
SEQ_printf(m, " %s", path);
|
||||
}
|
||||
#endif
|
||||
SEQ_printf(m, "\n");
|
||||
}
|
||||
|
||||
static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
|
||||
@@ -109,7 +119,21 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
|
||||
struct sched_entity *last;
|
||||
unsigned long flags;
|
||||
|
||||
SEQ_printf(m, "\ncfs_rq\n");
|
||||
#if !defined(CONFIG_CGROUP_SCHED) || !defined(CONFIG_USER_SCHED)
|
||||
SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
|
||||
#else
|
||||
char path[128] = "";
|
||||
struct cgroup *cgroup = NULL;
|
||||
struct task_group *tg = cfs_rq->tg;
|
||||
|
||||
if (tg)
|
||||
cgroup = tg->css.cgroup;
|
||||
|
||||
if (cgroup)
|
||||
cgroup_path(cgroup, path, sizeof(path));
|
||||
|
||||
SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, path);
|
||||
#endif
|
||||
|
||||
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock",
|
||||
SPLIT_NS(cfs_rq->exec_clock));
|
||||
@@ -143,6 +167,11 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
|
||||
#endif
|
||||
SEQ_printf(m, " .%-30s: %ld\n", "nr_spread_over",
|
||||
cfs_rq->nr_spread_over);
|
||||
#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
#ifdef CONFIG_SMP
|
||||
SEQ_printf(m, " .%-30s: %lu\n", "shares", cfs_rq->shares);
|
||||
#endif
|
||||
#endif
|
||||
}
|
||||
|
||||
static void print_cpu(struct seq_file *m, int cpu)
|
||||
@@ -214,7 +243,6 @@ static int sched_debug_show(struct seq_file *m, void *v)
|
||||
PN(sysctl_sched_latency);
|
||||
PN(sysctl_sched_min_granularity);
|
||||
PN(sysctl_sched_wakeup_granularity);
|
||||
PN(sysctl_sched_batch_wakeup_granularity);
|
||||
PN(sysctl_sched_child_runs_first);
|
||||
P(sysctl_sched_features);
|
||||
#undef PN
|
||||
|
||||
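The sched_debug hunks above extend the /proc/sched_debug output with per-cfs_rq cgroup paths and, on SMP, group shares. A quick userspace reader for that file (illustrative only, not part of the patch):

```c
/* Print just the per-runqueue cfs_rq sections of /proc/sched_debug,
 * whose format the hunks above extend with cgroup paths and shares. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/sched_debug", "r");

	if (!f) {
		perror("/proc/sched_debug");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "cfs_rq"))
			fputs(line, stdout);
	fclose(f);
	return 0;
}
```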
@@ -62,24 +62,14 @@ const_debug unsigned int sysctl_sched_child_runs_first = 1;
|
||||
unsigned int __read_mostly sysctl_sched_compat_yield;
|
||||
|
||||
/*
|
||||
* SCHED_BATCH wake-up granularity.
|
||||
* SCHED_OTHER wake-up granularity.
|
||||
* (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
|
||||
*
|
||||
* This option delays the preemption effects of decoupled workloads
|
||||
* and reduces their over-scheduling. Synchronous workloads will still
|
||||
* have immediate wakeup/sleep latencies.
|
||||
*/
|
||||
unsigned int sysctl_sched_batch_wakeup_granularity = 10000000UL;
|
||||
|
||||
/*
|
||||
* SCHED_OTHER wake-up granularity.
|
||||
* (default: 5 msec * (1 + ilog(ncpus)), units: nanoseconds)
|
||||
*
|
||||
* This option delays the preemption effects of decoupled workloads
|
||||
* and reduces their over-scheduling. Synchronous workloads will still
|
||||
* have immediate wakeup/sleep latencies.
|
||||
*/
|
||||
unsigned int sysctl_sched_wakeup_granularity = 5000000UL;
|
||||
unsigned int sysctl_sched_wakeup_granularity = 10000000UL;
|
||||
|
||||
const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
|
||||
|
||||
@@ -87,6 +77,11 @@ const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
|
||||
* CFS operations on generic schedulable entities:
|
||||
*/
|
||||
|
||||
static inline struct task_struct *task_of(struct sched_entity *se)
|
||||
{
|
||||
return container_of(se, struct task_struct, se);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
|
||||
/* cpu runqueue to which this cfs_rq is attached */
|
||||
@@ -98,6 +93,54 @@ static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
|
||||
/* An entity is a task if it doesn't "own" a runqueue */
|
||||
#define entity_is_task(se) (!se->my_q)
|
||||
|
||||
/* Walk up scheduling entities hierarchy */
|
||||
#define for_each_sched_entity(se) \
|
||||
for (; se; se = se->parent)
|
||||
|
||||
static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
|
||||
{
|
||||
return p->se.cfs_rq;
|
||||
}
|
||||
|
||||
/* runqueue on which this entity is (to be) queued */
|
||||
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
|
||||
{
|
||||
return se->cfs_rq;
|
||||
}
|
||||
|
||||
/* runqueue "owned" by this group */
|
||||
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
|
||||
{
|
||||
return grp->my_q;
|
||||
}
|
||||
|
||||
/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
|
||||
* another cpu ('this_cpu')
|
||||
*/
|
||||
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
|
||||
{
|
||||
return cfs_rq->tg->cfs_rq[this_cpu];
|
||||
}
|
||||
|
||||
/* Iterate thr' all leaf cfs_rq's on a runqueue */
|
||||
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
|
||||
list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
|
||||
|
||||
/* Do the two (enqueued) entities belong to the same group ? */
|
||||
static inline int
|
||||
is_same_group(struct sched_entity *se, struct sched_entity *pse)
|
||||
{
|
||||
if (se->cfs_rq == pse->cfs_rq)
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline struct sched_entity *parent_entity(struct sched_entity *se)
|
||||
{
|
||||
return se->parent;
|
||||
}
|
||||
|
||||
#else /* CONFIG_FAIR_GROUP_SCHED */
|
||||
|
||||
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
|
||||
@@ -107,13 +150,49 @@ static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
|
||||
|
||||
#define entity_is_task(se) 1
|
||||
|
||||
#endif /* CONFIG_FAIR_GROUP_SCHED */
|
||||
#define for_each_sched_entity(se) \
|
||||
for (; se; se = NULL)
|
||||
|
||||
static inline struct task_struct *task_of(struct sched_entity *se)
|
||||
static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
|
||||
{
|
||||
return container_of(se, struct task_struct, se);
|
||||
return &task_rq(p)->cfs;
|
||||
}
|
||||
|
||||
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
|
||||
{
|
||||
struct task_struct *p = task_of(se);
|
||||
struct rq *rq = task_rq(p);
|
||||
|
||||
return &rq->cfs;
|
||||
}
|
||||
|
||||
/* runqueue "owned" by this group */
|
||||
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
|
||||
{
|
||||
return &cpu_rq(this_cpu)->cfs;
|
||||
}
|
||||
|
||||
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
|
||||
for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
|
||||
|
||||
static inline int
|
||||
is_same_group(struct sched_entity *se, struct sched_entity *pse)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
|
||||
static inline struct sched_entity *parent_entity(struct sched_entity *se)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_FAIR_GROUP_SCHED */
|
||||
|
||||
|
||||
/**************************************************************
|
||||
* Scheduling class tree data structure manipulation methods:
|
||||
@@ -254,6 +333,34 @@ int sched_nr_latency_handler(struct ctl_table *table, int write,
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* delta *= w / rw
|
||||
*/
|
||||
static inline unsigned long
|
||||
calc_delta_weight(unsigned long delta, struct sched_entity *se)
|
||||
{
|
||||
for_each_sched_entity(se) {
|
||||
delta = calc_delta_mine(delta,
|
||||
se->load.weight, &cfs_rq_of(se)->load);
|
||||
}
|
||||
|
||||
return delta;
|
||||
}
|
||||
|
||||
/*
|
||||
* delta *= rw / w
|
||||
*/
|
||||
static inline unsigned long
|
||||
calc_delta_fair(unsigned long delta, struct sched_entity *se)
|
||||
{
|
||||
for_each_sched_entity(se) {
|
||||
delta = calc_delta_mine(delta,
|
||||
cfs_rq_of(se)->load.weight, &se->load);
|
||||
}
|
||||
|
||||
return delta;
|
||||
}
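calc_delta_weight() and calc_delta_fair() above scale a time delta by w/rw and rw/w respectively, where w is the entity's load weight and rw the runqueue's total weight. A plain-integer sketch of that arithmetic follows; it is standalone and deliberately simpler than the kernel's fixed-point calc_delta_mine():

```c
/* Plain-integer sketch of the two scalings named in the comments above:
 * delta *= w/rw (calc_delta_weight) and delta *= rw/w (calc_delta_fair). */
#include <stdio.h>

static unsigned long long scale(unsigned long long delta,
				unsigned long long num, unsigned long long den)
{
	return delta * num / den;
}

int main(void)
{
	unsigned long long w  = 1024;		/* one entity's weight (NICE_0_LOAD)    */
	unsigned long long rw = 3072;		/* total runqueue weight (3 such tasks)  */
	unsigned long long period = 20000000;	/* 20 ms scheduling period, in ns        */

	printf("delta * w/rw = %llu ns\n", scale(period, w, rw));  /* ~6.67 ms */
	printf("delta * rw/w = %llu ns\n", scale(period, rw, w));  /* 60 ms    */
	return 0;
}
```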
|
||||
|
||||
/*
|
||||
* The idea is to set a period in which each task runs once.
|
||||
*
|
||||
@@ -283,29 +390,54 @@ static u64 __sched_period(unsigned long nr_running)
|
||||
*/
|
||||
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
|
||||
{
|
||||
return calc_delta_mine(__sched_period(cfs_rq->nr_running),
|
||||
se->load.weight, &cfs_rq->load);
|
||||
return calc_delta_weight(__sched_period(cfs_rq->nr_running), se);
|
||||
}
|
||||
|
||||
/*
|
||||
* We calculate the vruntime slice.
|
||||
* We calculate the vruntime slice of a to be inserted task
|
||||
*
|
||||
* vs = s/w = p/rw
|
||||
* vs = s*rw/w = p
|
||||
*/
|
||||
static u64 __sched_vslice(unsigned long rq_weight, unsigned long nr_running)
|
||||
{
|
||||
u64 vslice = __sched_period(nr_running);
|
||||
|
||||
vslice *= NICE_0_LOAD;
|
||||
do_div(vslice, rq_weight);
|
||||
|
||||
return vslice;
|
||||
}
|
||||
|
||||
static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
|
||||
{
|
||||
return __sched_vslice(cfs_rq->load.weight + se->load.weight,
|
||||
cfs_rq->nr_running + 1);
|
||||
unsigned long nr_running = cfs_rq->nr_running;
|
||||
|
||||
if (!se->on_rq)
|
||||
nr_running++;
|
||||
|
||||
return __sched_period(nr_running);
|
||||
}
|
||||
|
||||
/*
|
||||
* The goal of calc_delta_asym() is to be asymmetrically around NICE_0_LOAD, in
|
||||
* that it favours >=0 over <0.
|
||||
*
|
||||
* -20 |
|
||||
* |
|
||||
* 0 --------+-------
|
||||
* .'
|
||||
* 19 .'
|
||||
*
|
||||
*/
|
||||
static unsigned long
|
||||
calc_delta_asym(unsigned long delta, struct sched_entity *se)
|
||||
{
|
||||
struct load_weight lw = {
|
||||
.weight = NICE_0_LOAD,
|
||||
.inv_weight = 1UL << (WMULT_SHIFT-NICE_0_SHIFT)
|
||||
};
|
||||
|
||||
for_each_sched_entity(se) {
|
||||
struct load_weight *se_lw = &se->load;
|
||||
|
||||
if (se->load.weight < NICE_0_LOAD)
|
||||
se_lw = &lw;
|
||||
|
||||
delta = calc_delta_mine(delta,
|
||||
cfs_rq_of(se)->load.weight, se_lw);
|
||||
}
|
||||
|
||||
return delta;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -322,11 +454,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
|
||||
|
||||
curr->sum_exec_runtime += delta_exec;
|
||||
schedstat_add(cfs_rq, exec_clock, delta_exec);
|
||||
delta_exec_weighted = delta_exec;
|
||||
if (unlikely(curr->load.weight != NICE_0_LOAD)) {
|
||||
delta_exec_weighted = calc_delta_fair(delta_exec_weighted,
|
||||
&curr->load);
|
||||
}
|
||||
delta_exec_weighted = calc_delta_fair(delta_exec, curr);
|
||||
curr->vruntime += delta_exec_weighted;
|
||||
}
|
||||
|
||||
@@ -413,20 +541,43 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
|
||||
* Scheduling class queueing methods:
|
||||
*/
|
||||
|
||||
#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
|
||||
static void
|
||||
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
|
||||
{
|
||||
cfs_rq->task_weight += weight;
|
||||
}
|
||||
#else
|
||||
static inline void
|
||||
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
static void
|
||||
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
|
||||
{
|
||||
update_load_add(&cfs_rq->load, se->load.weight);
|
||||
if (!parent_entity(se))
|
||||
inc_cpu_load(rq_of(cfs_rq), se->load.weight);
|
||||
if (entity_is_task(se))
|
||||
add_cfs_task_weight(cfs_rq, se->load.weight);
|
||||
cfs_rq->nr_running++;
|
||||
se->on_rq = 1;
|
||||
list_add(&se->group_node, &cfs_rq->tasks);
|
||||
}
|
||||
|
||||
static void
|
||||
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
|
||||
{
|
||||
update_load_sub(&cfs_rq->load, se->load.weight);
|
||||
if (!parent_entity(se))
|
||||
dec_cpu_load(rq_of(cfs_rq), se->load.weight);
|
||||
if (entity_is_task(se))
|
||||
add_cfs_task_weight(cfs_rq, -se->load.weight);
|
||||
cfs_rq->nr_running--;
|
||||
se->on_rq = 0;
|
||||
list_del_init(&se->group_node);
|
||||
}
|
||||
|
||||
static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
|
||||
@@ -510,8 +661,12 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
|
||||
|
||||
if (!initial) {
|
||||
/* sleeps upto a single latency don't count. */
|
||||
if (sched_feat(NEW_FAIR_SLEEPERS))
|
||||
vruntime -= sysctl_sched_latency;
|
||||
if (sched_feat(NEW_FAIR_SLEEPERS)) {
|
||||
if (sched_feat(NORMALIZED_SLEEPER))
|
||||
vruntime -= calc_delta_weight(sysctl_sched_latency, se);
|
||||
else
|
||||
vruntime -= sysctl_sched_latency;
|
||||
}
|
||||
|
||||
/* ensure we never gain time by being placed backwards. */
|
||||
vruntime = max_vruntime(se->vruntime, vruntime);
|
||||
@@ -627,20 +782,16 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
|
||||
se->prev_sum_exec_runtime = se->sum_exec_runtime;
|
||||
}
|
||||
|
||||
static int
|
||||
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
|
||||
|
||||
static struct sched_entity *
|
||||
pick_next(struct cfs_rq *cfs_rq, struct sched_entity *se)
|
||||
{
|
||||
s64 diff, gran;
|
||||
|
||||
if (!cfs_rq->next)
|
||||
return se;
|
||||
|
||||
diff = cfs_rq->next->vruntime - se->vruntime;
|
||||
if (diff < 0)
|
||||
return se;
|
||||
|
||||
gran = calc_delta_fair(sysctl_sched_wakeup_granularity, &cfs_rq->load);
|
||||
if (diff > gran)
|
||||
if (wakeup_preempt_entity(cfs_rq->next, se) != 0)
|
||||
return se;
|
||||
|
||||
return cfs_rq->next;
|
||||
@@ -708,101 +859,6 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
|
||||
* CFS operations on tasks:
|
||||
*/
|
||||
|
||||
#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
|
||||
/* Walk up scheduling entities hierarchy */
|
||||
#define for_each_sched_entity(se) \
|
||||
for (; se; se = se->parent)
|
||||
|
||||
static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
|
||||
{
|
||||
return p->se.cfs_rq;
|
||||
}
|
||||
|
||||
/* runqueue on which this entity is (to be) queued */
|
||||
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
|
||||
{
|
||||
return se->cfs_rq;
|
||||
}
|
||||
|
||||
/* runqueue "owned" by this group */
|
||||
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
|
||||
{
|
||||
return grp->my_q;
|
||||
}
|
||||
|
||||
/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
|
||||
* another cpu ('this_cpu')
|
||||
*/
|
||||
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
|
||||
{
|
||||
return cfs_rq->tg->cfs_rq[this_cpu];
|
||||
}
|
||||
|
||||
/* Iterate thr' all leaf cfs_rq's on a runqueue */
|
||||
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
|
||||
list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
|
||||
|
||||
/* Do the two (enqueued) entities belong to the same group ? */
|
||||
static inline int
|
||||
is_same_group(struct sched_entity *se, struct sched_entity *pse)
|
||||
{
|
||||
if (se->cfs_rq == pse->cfs_rq)
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline struct sched_entity *parent_entity(struct sched_entity *se)
|
||||
{
|
||||
return se->parent;
|
||||
}
|
||||
|
||||
#else /* CONFIG_FAIR_GROUP_SCHED */
|
||||
|
||||
#define for_each_sched_entity(se) \
|
||||
for (; se; se = NULL)
|
||||
|
||||
static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
|
||||
{
|
||||
return &task_rq(p)->cfs;
|
||||
}
|
||||
|
||||
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
|
||||
{
|
||||
struct task_struct *p = task_of(se);
|
||||
struct rq *rq = task_rq(p);
|
||||
|
||||
return &rq->cfs;
|
||||
}
|
||||
|
||||
/* runqueue "owned" by this group */
|
||||
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
|
||||
{
|
||||
return &cpu_rq(this_cpu)->cfs;
|
||||
}
|
||||
|
||||
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
|
||||
for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
|
||||
|
||||
static inline int
|
||||
is_same_group(struct sched_entity *se, struct sched_entity *pse)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
|
||||
static inline struct sched_entity *parent_entity(struct sched_entity *se)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_FAIR_GROUP_SCHED */
|
||||
|
||||
#ifdef CONFIG_SCHED_HRTICK
|
||||
static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
|
||||
{
|
||||
@@ -916,7 +972,7 @@ static void yield_task_fair(struct rq *rq)
|
||||
/*
|
||||
* Already in the rightmost position?
|
||||
*/
|
||||
if (unlikely(rightmost->vruntime < se->vruntime))
|
||||
if (unlikely(!rightmost || rightmost->vruntime < se->vruntime))
|
||||
return;
|
||||
|
||||
/*
|
||||
@@ -955,7 +1011,9 @@ static int wake_idle(int cpu, struct task_struct *p)
|
||||
return cpu;
|
||||
|
||||
for_each_domain(cpu, sd) {
|
||||
if (sd->flags & SD_WAKE_IDLE) {
|
||||
if ((sd->flags & SD_WAKE_IDLE)
|
||||
|| ((sd->flags & SD_WAKE_IDLE_FAR)
|
||||
&& !task_hot(p, task_rq(p)->clock, sd))) {
|
||||
cpus_and(tmp, sd->span, p->cpus_allowed);
|
||||
for_each_cpu_mask(i, tmp) {
|
||||
if (idle_cpu(i)) {
|
||||
@@ -1099,6 +1157,58 @@ out:
|
||||
}
|
||||
#endif /* CONFIG_SMP */
|
||||
|
||||
static unsigned long wakeup_gran(struct sched_entity *se)
|
||||
{
|
||||
unsigned long gran = sysctl_sched_wakeup_granularity;
|
||||
|
||||
/*
|
||||
* More easily preempt - nice tasks, while not making it harder for
|
||||
* + nice tasks.
|
||||
*/
|
||||
gran = calc_delta_asym(sysctl_sched_wakeup_granularity, se);
|
||||
|
||||
return gran;
|
||||
}
|
||||
|
||||
/*
|
||||
* Should 'se' preempt 'curr'.
|
||||
*
|
||||
* |s1
|
||||
* |s2
|
||||
* |s3
|
||||
* g
|
||||
* |<--->|c
|
||||
*
|
||||
* w(c, s1) = -1
|
||||
* w(c, s2) = 0
|
||||
* w(c, s3) = 1
|
||||
*
|
||||
*/
|
||||
static int
|
||||
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
|
||||
{
|
||||
s64 gran, vdiff = curr->vruntime - se->vruntime;
|
||||
|
||||
if (vdiff < 0)
|
||||
return -1;
|
||||
|
||||
gran = wakeup_gran(curr);
|
||||
if (vdiff > gran)
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
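wakeup_preempt_entity() above returns -1, 0 or 1 depending on how far the waking entity's vruntime trails the current one relative to the wakeup granularity. A standalone mirror of that decision, using a fixed granularity in place of wakeup_gran():

```c
/* Standalone mirror of the -1/0/1 decision above; 'gran' stands in for
 * the kernel's wakeup_gran(). */
#include <stdio.h>

static int wakeup_preempt(long long curr_vruntime, long long se_vruntime,
			  long long gran)
{
	long long vdiff = curr_vruntime - se_vruntime;

	if (vdiff < 0)
		return -1;	/* waking entity already has more vruntime: don't preempt */
	if (vdiff > gran)
		return 1;	/* it trails by more than one granularity: preempt        */
	return 0;		/* within the granularity window: no decision             */
}

int main(void)
{
	long long gran = 5000000;	/* 5 ms, in ns */

	printf("%d %d %d\n",
	       wakeup_preempt(100, 200, gran),		/* -1 */
	       wakeup_preempt(200, 100, gran),		/*  0 */
	       wakeup_preempt(10000000, 100, gran));	/*  1 */
	return 0;
}
```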
|
||||
|
||||
/* return depth at which a sched entity is present in the hierarchy */
|
||||
static inline int depth_se(struct sched_entity *se)
|
||||
{
|
||||
int depth = 0;
|
||||
|
||||
for_each_sched_entity(se)
|
||||
depth++;
|
||||
|
||||
return depth;
|
||||
}
|
||||
|
||||
/*
|
||||
* Preempt the current task with a newly woken task if needed:
|
||||
@@ -1108,7 +1218,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
|
||||
struct task_struct *curr = rq->curr;
|
||||
struct cfs_rq *cfs_rq = task_cfs_rq(curr);
|
||||
struct sched_entity *se = &curr->se, *pse = &p->se;
|
||||
unsigned long gran;
|
||||
int se_depth, pse_depth;
|
||||
|
||||
if (unlikely(rt_prio(p->prio))) {
|
||||
update_rq_clock(rq);
|
||||
@@ -1133,20 +1243,33 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
|
||||
if (!sched_feat(WAKEUP_PREEMPT))
|
||||
return;
|
||||
|
||||
/*
|
||||
* preemption test can be made between sibling entities who are in the
|
||||
* same cfs_rq i.e who have a common parent. Walk up the hierarchy of
|
||||
* both tasks until we find their ancestors who are siblings of common
|
||||
* parent.
|
||||
*/
|
||||
|
||||
/* First walk up until both entities are at same depth */
|
||||
se_depth = depth_se(se);
|
||||
pse_depth = depth_se(pse);
|
||||
|
||||
while (se_depth > pse_depth) {
|
||||
se_depth--;
|
||||
se = parent_entity(se);
|
||||
}
|
||||
|
||||
while (pse_depth > se_depth) {
|
||||
pse_depth--;
|
||||
pse = parent_entity(pse);
|
||||
}
|
||||
|
||||
while (!is_same_group(se, pse)) {
|
||||
se = parent_entity(se);
|
||||
pse = parent_entity(pse);
|
||||
}
|
||||
|
||||
gran = sysctl_sched_wakeup_granularity;
|
||||
/*
|
||||
* More easily preempt - nice tasks, while not making
|
||||
* it harder for + nice tasks.
|
||||
*/
|
||||
if (unlikely(se->load.weight > NICE_0_LOAD))
|
||||
gran = calc_delta_fair(gran, &se->load);
|
||||
|
||||
if (pse->vruntime + gran < se->vruntime)
|
||||
if (wakeup_preempt_entity(se, pse) == 1)
|
||||
resched_task(curr);
|
||||
}
|
||||
|
||||
@@ -1197,15 +1320,27 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
|
||||
* the current task:
|
||||
*/
|
||||
static struct task_struct *
|
||||
__load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr)
|
||||
__load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)
|
||||
{
|
||||
struct task_struct *p;
|
||||
struct task_struct *p = NULL;
|
||||
struct sched_entity *se;
|
||||
|
||||
if (!curr)
|
||||
if (next == &cfs_rq->tasks)
|
||||
return NULL;
|
||||
|
||||
p = rb_entry(curr, struct task_struct, se.run_node);
|
||||
cfs_rq->rb_load_balance_curr = rb_next(curr);
|
||||
/* Skip over entities that are not tasks */
|
||||
do {
|
||||
se = list_entry(next, struct sched_entity, group_node);
|
||||
next = next->next;
|
||||
} while (next != &cfs_rq->tasks && !entity_is_task(se));
|
||||
|
||||
if (next == &cfs_rq->tasks)
|
||||
return NULL;
|
||||
|
||||
cfs_rq->balance_iterator = next;
|
||||
|
||||
if (entity_is_task(se))
|
||||
p = task_of(se);
|
||||
|
||||
return p;
|
||||
}
|
||||
@@ -1214,85 +1349,100 @@ static struct task_struct *load_balance_start_fair(void *arg)
|
||||
{
|
||||
struct cfs_rq *cfs_rq = arg;
|
||||
|
||||
return __load_balance_iterator(cfs_rq, first_fair(cfs_rq));
|
||||
return __load_balance_iterator(cfs_rq, cfs_rq->tasks.next);
|
||||
}
|
||||
|
||||
static struct task_struct *load_balance_next_fair(void *arg)
|
||||
{
|
||||
struct cfs_rq *cfs_rq = arg;
|
||||
|
||||
return __load_balance_iterator(cfs_rq, cfs_rq->rb_load_balance_curr);
|
||||
return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator);
|
||||
}
|
||||
|
||||
static unsigned long
|
||||
__load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
|
||||
unsigned long max_load_move, struct sched_domain *sd,
|
||||
enum cpu_idle_type idle, int *all_pinned, int *this_best_prio,
|
||||
struct cfs_rq *cfs_rq)
|
||||
{
|
||||
struct rq_iterator cfs_rq_iterator;
|
||||
|
||||
cfs_rq_iterator.start = load_balance_start_fair;
|
||||
cfs_rq_iterator.next = load_balance_next_fair;
|
||||
cfs_rq_iterator.arg = cfs_rq;
|
||||
|
||||
return balance_tasks(this_rq, this_cpu, busiest,
|
||||
max_load_move, sd, idle, all_pinned,
|
||||
this_best_prio, &cfs_rq_iterator);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
static int cfs_rq_best_prio(struct cfs_rq *cfs_rq)
|
||||
{
|
||||
struct sched_entity *curr;
|
||||
struct task_struct *p;
|
||||
|
||||
if (!cfs_rq->nr_running || !first_fair(cfs_rq))
|
||||
return MAX_PRIO;
|
||||
|
||||
curr = cfs_rq->curr;
|
||||
if (!curr)
|
||||
curr = __pick_next_entity(cfs_rq);
|
||||
|
||||
p = task_of(curr);
|
||||
|
||||
return p->prio;
|
||||
}
|
||||
#endif
|
||||
|
||||
static unsigned long
|
||||
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
|
||||
unsigned long max_load_move,
|
||||
struct sched_domain *sd, enum cpu_idle_type idle,
|
||||
int *all_pinned, int *this_best_prio)
|
||||
{
|
||||
struct cfs_rq *busy_cfs_rq;
|
||||
long rem_load_move = max_load_move;
|
||||
struct rq_iterator cfs_rq_iterator;
|
||||
int busiest_cpu = cpu_of(busiest);
|
||||
struct task_group *tg;
|
||||
|
||||
cfs_rq_iterator.start = load_balance_start_fair;
|
||||
cfs_rq_iterator.next = load_balance_next_fair;
|
||||
|
||||
for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
|
||||
#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
struct cfs_rq *this_cfs_rq;
|
||||
rcu_read_lock();
|
||||
list_for_each_entry(tg, &task_groups, list) {
|
||||
long imbalance;
|
||||
unsigned long maxload;
|
||||
unsigned long this_weight, busiest_weight;
|
||||
long rem_load, max_load, moved_load;
|
||||
|
||||
this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu);
|
||||
|
||||
imbalance = busy_cfs_rq->load.weight - this_cfs_rq->load.weight;
|
||||
/* Don't pull if this_cfs_rq has more load than busy_cfs_rq */
|
||||
if (imbalance <= 0)
|
||||
/*
|
||||
* empty group
|
||||
*/
|
||||
if (!aggregate(tg, sd)->task_weight)
|
||||
continue;
|
||||
|
||||
/* Don't pull more than imbalance/2 */
|
||||
imbalance /= 2;
|
||||
maxload = min(rem_load_move, imbalance);
|
||||
rem_load = rem_load_move * aggregate(tg, sd)->rq_weight;
|
||||
rem_load /= aggregate(tg, sd)->load + 1;
|
||||
|
||||
*this_best_prio = cfs_rq_best_prio(this_cfs_rq);
|
||||
#else
|
||||
# define maxload rem_load_move
|
||||
#endif
|
||||
/*
|
||||
* pass busy_cfs_rq argument into
|
||||
* load_balance_[start|next]_fair iterators
|
||||
*/
|
||||
cfs_rq_iterator.arg = busy_cfs_rq;
|
||||
rem_load_move -= balance_tasks(this_rq, this_cpu, busiest,
|
||||
maxload, sd, idle, all_pinned,
|
||||
this_best_prio,
|
||||
&cfs_rq_iterator);
|
||||
this_weight = tg->cfs_rq[this_cpu]->task_weight;
|
||||
busiest_weight = tg->cfs_rq[busiest_cpu]->task_weight;
|
||||
|
||||
if (rem_load_move <= 0)
|
||||
imbalance = (busiest_weight - this_weight) / 2;
|
||||
|
||||
if (imbalance < 0)
|
||||
imbalance = busiest_weight;
|
||||
|
||||
max_load = max(rem_load, imbalance);
|
||||
moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
|
||||
max_load, sd, idle, all_pinned, this_best_prio,
|
||||
tg->cfs_rq[busiest_cpu]);
|
||||
|
||||
if (!moved_load)
|
||||
continue;
|
||||
|
||||
move_group_shares(tg, sd, busiest_cpu, this_cpu);
|
||||
|
||||
moved_load *= aggregate(tg, sd)->load;
|
||||
moved_load /= aggregate(tg, sd)->rq_weight + 1;
|
||||
|
||||
rem_load_move -= moved_load;
|
||||
if (rem_load_move < 0)
|
||||
break;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
return max_load_move - rem_load_move;
|
||||
}
|
||||
#else
|
||||
static unsigned long
|
||||
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
|
||||
unsigned long max_load_move,
|
||||
struct sched_domain *sd, enum cpu_idle_type idle,
|
||||
int *all_pinned, int *this_best_prio)
|
||||
{
|
||||
return __load_balance_fair(this_rq, this_cpu, busiest,
|
||||
max_load_move, sd, idle, all_pinned,
|
||||
this_best_prio, &busiest->cfs);
|
||||
}
|
||||
#endif
|
||||
|
||||
static int
|
||||
move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
|
||||
@@ -1461,16 +1611,40 @@ static const struct sched_class fair_sched_class = {
|
||||
};
|
||||
|
||||
#ifdef CONFIG_SCHED_DEBUG
|
||||
static void
|
||||
print_cfs_rq_tasks(struct seq_file *m, struct cfs_rq *cfs_rq, int depth)
|
||||
{
|
||||
struct sched_entity *se;
|
||||
|
||||
if (!cfs_rq)
|
||||
return;
|
||||
|
||||
list_for_each_entry_rcu(se, &cfs_rq->tasks, group_node) {
|
||||
int i;
|
||||
|
||||
for (i = depth; i; i--)
|
||||
seq_puts(m, " ");
|
||||
|
||||
seq_printf(m, "%lu %s %lu\n",
|
||||
se->load.weight,
|
||||
entity_is_task(se) ? "T" : "G",
|
||||
calc_delta_weight(SCHED_LOAD_SCALE, se)
|
||||
);
|
||||
if (!entity_is_task(se))
|
||||
print_cfs_rq_tasks(m, group_cfs_rq(se), depth + 1);
|
||||
}
|
||||
}
|
||||
|
||||
static void print_cfs_stats(struct seq_file *m, int cpu)
|
||||
{
|
||||
struct cfs_rq *cfs_rq;
|
||||
|
||||
#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
print_cfs_rq(m, cpu, &cpu_rq(cpu)->cfs);
|
||||
#endif
|
||||
rcu_read_lock();
|
||||
for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
|
||||
print_cfs_rq(m, cpu, cfs_rq);
|
||||
|
||||
seq_printf(m, "\nWeight tree:\n");
|
||||
print_cfs_rq_tasks(m, &cpu_rq(cpu)->cfs, 1);
|
||||
rcu_read_unlock();
|
||||
}
|
||||
#endif
|
||||
|
||||
kernel/sched_features.h (new file, 10 lines)
@@ -0,0 +1,10 @@
+SCHED_FEAT(NEW_FAIR_SLEEPERS, 1)
+SCHED_FEAT(WAKEUP_PREEMPT, 1)
+SCHED_FEAT(START_DEBIT, 1)
+SCHED_FEAT(AFFINE_WAKEUPS, 1)
+SCHED_FEAT(CACHE_HOT_BUDDY, 1)
+SCHED_FEAT(SYNC_WAKEUPS, 1)
+SCHED_FEAT(HRTICK, 1)
+SCHED_FEAT(DOUBLE_TICK, 0)
+SCHED_FEAT(NORMALIZED_SLEEPER, 1)
+SCHED_FEAT(DEADLINE, 1)
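These SCHED_FEAT() entries back the sched_features debugfs file mentioned in the shortlog ("sched: /debug/sched_features"). An illustration of toggling a feature bit from userspace follows; the mount point is an assumption (debugfs is commonly at /sys/kernel/debug, older setups used a bare /debug mount):

```c
/* Illustration only: clear one of the feature bits defined above by writing
 * its name with a NO_ prefix; writing the bare name sets it again. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/sched_features";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}
	fputs("NO_NORMALIZED_SLEEPER", f);
	fclose(f);
	return 0;
}
```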
@@ -62,7 +62,12 @@ static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
|
||||
if (!rt_rq->tg)
|
||||
return RUNTIME_INF;
|
||||
|
||||
return rt_rq->tg->rt_runtime;
|
||||
return rt_rq->rt_runtime;
|
||||
}
|
||||
|
||||
static inline u64 sched_rt_period(struct rt_rq *rt_rq)
|
||||
{
|
||||
return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
|
||||
}
|
||||
|
||||
#define for_each_leaf_rt_rq(rt_rq, rq) \
|
||||
@@ -127,14 +132,39 @@ static int rt_se_boosted(struct sched_rt_entity *rt_se)
|
||||
return p->prio != p->normal_prio;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
static inline cpumask_t sched_rt_period_mask(void)
|
||||
{
|
||||
return cpu_rq(smp_processor_id())->rd->span;
|
||||
}
|
||||
#else
|
||||
static inline cpumask_t sched_rt_period_mask(void)
|
||||
{
|
||||
return cpu_online_map;
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline
|
||||
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
|
||||
{
|
||||
return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
|
||||
}
|
||||
|
||||
static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
|
||||
{
|
||||
return &rt_rq->tg->rt_bandwidth;
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
|
||||
{
|
||||
if (sysctl_sched_rt_runtime == -1)
|
||||
return RUNTIME_INF;
|
||||
return rt_rq->rt_runtime;
|
||||
}
|
||||
|
||||
return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
|
||||
static inline u64 sched_rt_period(struct rt_rq *rt_rq)
|
||||
{
|
||||
return ktime_to_ns(def_rt_bandwidth.rt_period);
|
||||
}
|
||||
|
||||
#define for_each_leaf_rt_rq(rt_rq, rq) \
|
||||
@@ -173,6 +203,102 @@ static inline int rt_rq_throttled(struct rt_rq *rt_rq)
|
||||
{
|
||||
return rt_rq->rt_throttled;
|
||||
}
|
||||
|
||||
static inline cpumask_t sched_rt_period_mask(void)
|
||||
{
|
||||
return cpu_online_map;
|
||||
}
|
||||
|
||||
static inline
|
||||
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
|
||||
{
|
||||
return &cpu_rq(cpu)->rt;
|
||||
}
|
||||
|
||||
static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
|
||||
{
|
||||
return &def_rt_bandwidth;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
|
||||
{
|
||||
int i, idle = 1;
|
||||
cpumask_t span;
|
||||
|
||||
if (rt_b->rt_runtime == RUNTIME_INF)
|
||||
return 1;
|
||||
|
||||
span = sched_rt_period_mask();
|
||||
for_each_cpu_mask(i, span) {
|
||||
int enqueue = 0;
|
||||
struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
|
||||
struct rq *rq = rq_of_rt_rq(rt_rq);
|
||||
|
||||
spin_lock(&rq->lock);
|
||||
if (rt_rq->rt_time) {
|
||||
u64 runtime;
|
||||
|
||||
spin_lock(&rt_rq->rt_runtime_lock);
|
||||
runtime = rt_rq->rt_runtime;
|
||||
rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
|
||||
if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
|
||||
rt_rq->rt_throttled = 0;
|
||||
enqueue = 1;
|
||||
}
|
||||
if (rt_rq->rt_time || rt_rq->rt_nr_running)
|
||||
idle = 0;
|
||||
spin_unlock(&rt_rq->rt_runtime_lock);
|
||||
}
|
||||
|
||||
if (enqueue)
|
||||
sched_rt_rq_enqueue(rt_rq);
|
||||
spin_unlock(&rq->lock);
|
||||
}
|
||||
|
||||
return idle;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
static int balance_runtime(struct rt_rq *rt_rq)
|
||||
{
|
||||
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
|
||||
struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
|
||||
int i, weight, more = 0;
|
||||
u64 rt_period;
|
||||
|
||||
weight = cpus_weight(rd->span);
|
||||
|
||||
spin_lock(&rt_b->rt_runtime_lock);
|
||||
rt_period = ktime_to_ns(rt_b->rt_period);
|
||||
for_each_cpu_mask(i, rd->span) {
|
||||
struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
|
||||
s64 diff;
|
||||
|
||||
if (iter == rt_rq)
|
||||
continue;
|
||||
|
||||
spin_lock(&iter->rt_runtime_lock);
|
||||
diff = iter->rt_runtime - iter->rt_time;
|
||||
if (diff > 0) {
|
||||
do_div(diff, weight);
|
||||
if (rt_rq->rt_runtime + diff > rt_period)
|
||||
diff = rt_period - rt_rq->rt_runtime;
|
||||
iter->rt_runtime -= diff;
|
||||
rt_rq->rt_runtime += diff;
|
||||
more = 1;
|
||||
if (rt_rq->rt_runtime == rt_period) {
|
||||
spin_unlock(&iter->rt_runtime_lock);
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock(&iter->rt_runtime_lock);
|
||||
}
|
||||
spin_unlock(&rt_b->rt_runtime_lock);
|
||||
|
||||
return more;
|
||||
}
|
||||
#endif
|
||||
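balance_runtime() above lets a throttled rt_rq borrow an equal share of each peer runqueue's unused runtime, never exceeding the period. A standalone sketch of that redistribution arithmetic with made-up numbers:

```c
/* Standalone sketch of the borrowing arithmetic in balance_runtime():
 * take 1/weight of each peer's spare runtime, capped at the period.
 * All values are nanoseconds and purely illustrative. */
#include <stdio.h>

int main(void)
{
	long long period     = 1000000000;	/* 1 s RT period                 */
	long long my_runtime =  950000000;	/* this rt_rq's current budget   */
	long long spare[3]   = { 60000000, 0, 300000000 }; /* peers' unused budget */
	int weight = 4;				/* CPUs in the root domain span  */

	for (int i = 0; i < 3; i++) {
		long long diff = spare[i];

		if (diff <= 0)
			continue;
		diff /= weight;			/* only our 1/weight share       */
		if (my_runtime + diff > period)
			diff = period - my_runtime;
		my_runtime += diff;
	}
	printf("runtime after borrowing: %lld ns\n", my_runtime);
	return 0;
}
```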
|
||||
static inline int rt_se_prio(struct sched_rt_entity *rt_se)
|
||||
@@ -197,12 +323,24 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
|
||||
if (rt_rq->rt_throttled)
|
||||
return rt_rq_throttled(rt_rq);
|
||||
|
||||
if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
|
||||
return 0;
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
if (rt_rq->rt_time > runtime) {
|
||||
struct rq *rq = rq_of_rt_rq(rt_rq);
|
||||
int more;
|
||||
|
||||
rq->rt_throttled = 1;
|
||||
spin_unlock(&rt_rq->rt_runtime_lock);
|
||||
more = balance_runtime(rt_rq);
|
||||
spin_lock(&rt_rq->rt_runtime_lock);
|
||||
|
||||
if (more)
|
||||
runtime = sched_rt_runtime(rt_rq);
|
||||
}
|
||||
#endif
|
||||
|
||||
if (rt_rq->rt_time > runtime) {
|
||||
rt_rq->rt_throttled = 1;
|
||||
|
||||
if (rt_rq_throttled(rt_rq)) {
|
||||
sched_rt_rq_dequeue(rt_rq);
|
||||
return 1;
|
||||
@@ -212,29 +350,6 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void update_sched_rt_period(struct rq *rq)
|
||||
{
|
||||
struct rt_rq *rt_rq;
|
||||
u64 period;
|
||||
|
||||
while (rq->clock > rq->rt_period_expire) {
|
||||
period = (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
|
||||
rq->rt_period_expire += period;
|
||||
|
||||
for_each_leaf_rt_rq(rt_rq, rq) {
|
||||
u64 runtime = sched_rt_runtime(rt_rq);
|
||||
|
||||
rt_rq->rt_time -= min(rt_rq->rt_time, runtime);
|
||||
if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
|
||||
rt_rq->rt_throttled = 0;
|
||||
sched_rt_rq_enqueue(rt_rq);
|
||||
}
|
||||
}
|
||||
|
||||
rq->rt_throttled = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Update the current task's runtime statistics. Skip current tasks that
|
||||
* are not in our scheduling class.
|
||||
@@ -259,9 +374,15 @@ static void update_curr_rt(struct rq *rq)
|
||||
curr->se.exec_start = rq->clock;
|
||||
cpuacct_charge(curr, delta_exec);
|
||||
|
||||
rt_rq->rt_time += delta_exec;
|
||||
if (sched_rt_runtime_exceeded(rt_rq))
|
||||
resched_task(curr);
|
||||
for_each_sched_rt_entity(rt_se) {
|
||||
rt_rq = rt_rq_of_se(rt_se);
|
||||
|
||||
spin_lock(&rt_rq->rt_runtime_lock);
|
||||
rt_rq->rt_time += delta_exec;
|
||||
if (sched_rt_runtime_exceeded(rt_rq))
|
||||
resched_task(curr);
|
||||
spin_unlock(&rt_rq->rt_runtime_lock);
|
||||
}
|
||||
}
|
||||
|
||||
static inline
|
||||
@@ -284,6 +405,11 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
|
||||
#ifdef CONFIG_RT_GROUP_SCHED
|
||||
if (rt_se_boosted(rt_se))
|
||||
rt_rq->rt_nr_boosted++;
|
||||
|
||||
if (rt_rq->tg)
|
||||
start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
|
||||
#else
|
||||
start_rt_bandwidth(&def_rt_bandwidth);
|
||||
#endif
|
||||
}
|
||||
|
||||
@@ -353,27 +479,21 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
|
||||
/*
|
||||
* Because the prio of an upper entry depends on the lower
|
||||
* entries, we must remove entries top - down.
|
||||
*
|
||||
* XXX: O(1/2 h^2) because we can only walk up, not down the chain.
|
||||
* doesn't matter much for now, as h=2 for GROUP_SCHED.
|
||||
*/
|
||||
static void dequeue_rt_stack(struct task_struct *p)
|
||||
{
|
||||
struct sched_rt_entity *rt_se, *top_se;
|
||||
struct sched_rt_entity *rt_se, *back = NULL;
|
||||
|
||||
/*
|
||||
* dequeue all, top - down.
|
||||
*/
|
||||
do {
|
||||
rt_se = &p->rt;
|
||||
top_se = NULL;
|
||||
for_each_sched_rt_entity(rt_se) {
|
||||
if (on_rt_rq(rt_se))
|
||||
top_se = rt_se;
|
||||
}
|
||||
if (top_se)
|
||||
dequeue_rt_entity(top_se);
|
||||
} while (top_se);
|
||||
rt_se = &p->rt;
|
||||
for_each_sched_rt_entity(rt_se) {
|
||||
rt_se->back = back;
|
||||
back = rt_se;
|
||||
}
|
||||
|
||||
for (rt_se = back; rt_se; rt_se = rt_se->back) {
|
||||
if (on_rt_rq(rt_se))
|
||||
dequeue_rt_entity(rt_se);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -393,6 +513,8 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
|
||||
*/
|
||||
for_each_sched_rt_entity(rt_se)
|
||||
enqueue_rt_entity(rt_se);
|
||||
|
||||
inc_cpu_load(rq, p->se.load.weight);
|
||||
}
|
||||
|
||||
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
|
||||
@@ -412,6 +534,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
|
||||
if (rt_rq && rt_rq->rt_nr_running)
|
||||
enqueue_rt_entity(rt_se);
|
||||
}
|
||||
|
||||
dec_cpu_load(rq, p->se.load.weight);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1001,7 +1125,8 @@ move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
|
||||
static void set_cpus_allowed_rt(struct task_struct *p,
|
||||
const cpumask_t *new_mask)
|
||||
{
|
||||
int weight = cpus_weight(*new_mask);
|
||||
|
||||
|
||||
kernel/sched_stats.h
@@ -9,6 +9,11 @@
 static int show_schedstat(struct seq_file *seq, void *v)
 {
 	int cpu;
+	int mask_len = NR_CPUS/32 * 9;
+	char *mask_str = kmalloc(mask_len, GFP_KERNEL);
+
+	if (mask_str == NULL)
+		return -ENOMEM;
 
 	seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
 	seq_printf(seq, "timestamp %lu\n", jiffies);
@@ -36,9 +41,8 @@ static int show_schedstat(struct seq_file *seq, void *v)
 		preempt_disable();
 		for_each_domain(cpu, sd) {
 			enum cpu_idle_type itype;
-			char mask_str[NR_CPUS];
 
-			cpumask_scnprintf(mask_str, NR_CPUS, sd->span);
+			cpumask_scnprintf(mask_str, mask_len, sd->span);
 			seq_printf(seq, "domain%d %s", dcount++, mask_str);
 			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
 					itype++) {
@@ -356,7 +356,8 @@ void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
|
||||
/* Tasklets */
|
||||
struct tasklet_head
|
||||
{
|
||||
struct tasklet_struct *list;
|
||||
struct tasklet_struct *head;
|
||||
struct tasklet_struct **tail;
|
||||
};
|
||||
|
||||
/* Some compilers disobey section attribute on statics when not
|
||||
@@ -369,8 +370,9 @@ void __tasklet_schedule(struct tasklet_struct *t)
|
||||
unsigned long flags;
|
||||
|
||||
local_irq_save(flags);
|
||||
t->next = __get_cpu_var(tasklet_vec).list;
|
||||
__get_cpu_var(tasklet_vec).list = t;
|
||||
t->next = NULL;
|
||||
*__get_cpu_var(tasklet_vec).tail = t;
|
||||
__get_cpu_var(tasklet_vec).tail = &(t->next);
|
||||
raise_softirq_irqoff(TASKLET_SOFTIRQ);
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
@@ -382,8 +384,9 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
|
||||
unsigned long flags;
|
||||
|
||||
local_irq_save(flags);
|
||||
t->next = __get_cpu_var(tasklet_hi_vec).list;
|
||||
__get_cpu_var(tasklet_hi_vec).list = t;
|
||||
t->next = NULL;
|
||||
*__get_cpu_var(tasklet_hi_vec).tail = t;
|
||||
__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
|
||||
raise_softirq_irqoff(HI_SOFTIRQ);
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
@@ -395,8 +398,9 @@ static void tasklet_action(struct softirq_action *a)
|
||||
struct tasklet_struct *list;
|
||||
|
||||
local_irq_disable();
|
||||
list = __get_cpu_var(tasklet_vec).list;
|
||||
__get_cpu_var(tasklet_vec).list = NULL;
|
||||
list = __get_cpu_var(tasklet_vec).head;
|
||||
__get_cpu_var(tasklet_vec).head = NULL;
|
||||
__get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
|
||||
local_irq_enable();
|
||||
|
||||
while (list) {
|
||||
@@ -416,8 +420,9 @@ static void tasklet_action(struct softirq_action *a)
|
||||
}
|
||||
|
||||
local_irq_disable();
|
||||
t->next = __get_cpu_var(tasklet_vec).list;
|
||||
__get_cpu_var(tasklet_vec).list = t;
|
||||
t->next = NULL;
|
||||
*__get_cpu_var(tasklet_vec).tail = t;
|
||||
__get_cpu_var(tasklet_vec).tail = &(t->next);
|
||||
__raise_softirq_irqoff(TASKLET_SOFTIRQ);
|
||||
local_irq_enable();
|
||||
}
|
||||
@@ -428,8 +433,9 @@ static void tasklet_hi_action(struct softirq_action *a)
|
||||
struct tasklet_struct *list;
|
||||
|
||||
local_irq_disable();
|
||||
list = __get_cpu_var(tasklet_hi_vec).list;
|
||||
__get_cpu_var(tasklet_hi_vec).list = NULL;
|
||||
list = __get_cpu_var(tasklet_hi_vec).head;
|
||||
__get_cpu_var(tasklet_hi_vec).head = NULL;
|
||||
__get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
|
||||
local_irq_enable();
|
||||
|
||||
while (list) {
|
||||
@@ -449,8 +455,9 @@ static void tasklet_hi_action(struct softirq_action *a)
|
||||
}
|
||||
|
||||
local_irq_disable();
|
||||
t->next = __get_cpu_var(tasklet_hi_vec).list;
|
||||
__get_cpu_var(tasklet_hi_vec).list = t;
|
||||
t->next = NULL;
|
||||
*__get_cpu_var(tasklet_hi_vec).tail = t;
|
||||
__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
|
||||
__raise_softirq_irqoff(HI_SOFTIRQ);
|
||||
local_irq_enable();
|
||||
}
|
||||
@@ -487,6 +494,15 @@ EXPORT_SYMBOL(tasklet_kill);
|
||||
|
||||
void __init softirq_init(void)
|
||||
{
|
||||
int cpu;
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
per_cpu(tasklet_vec, cpu).tail =
|
||||
&per_cpu(tasklet_vec, cpu).head;
|
||||
per_cpu(tasklet_hi_vec, cpu).tail =
|
||||
&per_cpu(tasklet_hi_vec, cpu).head;
|
||||
}
|
||||
|
||||
open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
|
||||
open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
|
||||
}
|
||||
@@ -555,9 +571,12 @@ void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
|
||||
return;
|
||||
|
||||
/* CPU is dead, so no lock needed. */
|
||||
for (i = &per_cpu(tasklet_vec, cpu).list; *i; i = &(*i)->next) {
|
||||
for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
|
||||
if (*i == t) {
|
||||
*i = t->next;
|
||||
/* If this was the tail element, move the tail ptr */
|
||||
if (*i == NULL)
|
||||
per_cpu(tasklet_vec, cpu).tail = i;
|
||||
return;
|
||||
}
|
||||
}
|
||||
@@ -566,20 +585,20 @@ void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
|
||||
|
||||
static void takeover_tasklets(unsigned int cpu)
|
||||
{
|
||||
struct tasklet_struct **i;
|
||||
|
||||
/* CPU is dead, so no lock needed. */
|
||||
local_irq_disable();
|
||||
|
||||
/* Find end, append list for that CPU. */
|
||||
for (i = &__get_cpu_var(tasklet_vec).list; *i; i = &(*i)->next);
|
||||
*i = per_cpu(tasklet_vec, cpu).list;
|
||||
per_cpu(tasklet_vec, cpu).list = NULL;
|
||||
*__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).head;
|
||||
__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
|
||||
per_cpu(tasklet_vec, cpu).head = NULL;
|
||||
per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
|
||||
raise_softirq_irqoff(TASKLET_SOFTIRQ);
|
||||
|
||||
for (i = &__get_cpu_var(tasklet_hi_vec).list; *i; i = &(*i)->next);
|
||||
*i = per_cpu(tasklet_hi_vec, cpu).list;
|
||||
per_cpu(tasklet_hi_vec, cpu).list = NULL;
|
||||
*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
|
||||
__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
|
||||
per_cpu(tasklet_hi_vec, cpu).head = NULL;
|
||||
per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
|
||||
raise_softirq_irqoff(HI_SOFTIRQ);
|
||||
|
||||
local_irq_enable();
|
||||
|
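Note: the tasklet hunks above convert struct tasklet_head from a single list pointer (new tasklets were pushed on the front, so a batch ran in reverse submission order) to a head pointer plus a tail pointer-to-pointer, giving O(1) append and strict FIFO execution; takeover_tasklets() uses the same representation to splice a dead CPU's pending list onto the local tail in constant time. A minimal user-space sketch of that head/tail singly-linked list follows; the names (struct node, list_head_tail, and so on) are illustrative, not the kernel's.

    #include <stdio.h>
    #include <stddef.h>

    struct node {
            struct node *next;
            int id;
    };

    /* Same shape as the new tasklet_head: head pointer plus tail link address. */
    struct list_head_tail {
            struct node *head;
            struct node **tail;
    };

    static void list_init(struct list_head_tail *l)
    {
            l->head = NULL;
            l->tail = &l->head;     /* empty list: tail points at the head slot */
    }

    /* O(1) append at the tail, preserving submission (FIFO) order. */
    static void list_append(struct list_head_tail *l, struct node *n)
    {
            n->next = NULL;
            *l->tail = n;
            l->tail = &n->next;
    }

    /* Detach the whole list for processing and leave it empty. */
    static struct node *list_take_all(struct list_head_tail *l)
    {
            struct node *n = l->head;

            l->head = NULL;
            l->tail = &l->head;
            return n;
    }

    int main(void)
    {
            struct list_head_tail l;
            struct node a = { NULL, 1 }, b = { NULL, 2 }, c = { NULL, 3 };

            list_init(&l);
            list_append(&l, &a);
            list_append(&l, &b);
            list_append(&l, &c);

            for (struct node *n = list_take_all(&l); n; n = n->next)
                    printf("run %d\n", n->id);      /* 1, 2, 3: FIFO order */
            return 0;
    }

The trick is that tail always holds the address of the last next slot (or of head when the list is empty), so appending and splicing never require walking the list.
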
@@ -35,7 +35,7 @@ static int stopmachine(void *cpu)
	int irqs_disabled = 0;
	int prepared = 0;

	set_cpus_allowed(current, cpumask_of_cpu((int)(long)cpu));
	set_cpus_allowed_ptr(current, &cpumask_of_cpu((int)(long)cpu));

	/* Ack: we are alive */
	smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */

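Note: this hunk switches from set_cpus_allowed(task, mask), which takes a cpumask_t by value, to set_cpus_allowed_ptr(task, &mask), which takes a pointer; with a large NR_CPUS a cpumask_t is a sizeable structure, so the by-pointer form avoids copying it on every call. A toy illustration of the two calling conventions (simplified types, not the kernel API):

    #include <stdio.h>
    #include <string.h>

    #define TOY_NR_CPUS 1024

    /* A large fixed-size bitmap, like cpumask_t with a big CONFIG_NR_CPUS. */
    struct toy_cpumask {
            unsigned long bits[TOY_NR_CPUS / (8 * sizeof(unsigned long))];
    };

    /* Old style: the whole bitmap is copied for every call. */
    static void set_allowed_by_value(struct toy_cpumask mask)
    {
            printf("by value: %zu bytes copied per call\n", sizeof(mask));
    }

    /* New style: only a pointer crosses the call boundary. */
    static void set_allowed_by_ptr(const struct toy_cpumask *mask)
    {
            printf("by pointer: %zu bytes copied per call\n", sizeof(mask));
    }

    int main(void)
    {
            struct toy_cpumask mask;

            memset(&mask, 0xff, sizeof(mask));
            set_allowed_by_value(mask);     /* 128 bytes with the sizes above */
            set_allowed_by_ptr(&mask);      /* pointer-sized, e.g. 8 bytes on 64-bit */
            return 0;
    }
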
@@ -268,17 +268,6 @@ static struct ctl_table kern_table[] = {
		.extra1		= &min_wakeup_granularity_ns,
		.extra2		= &max_wakeup_granularity_ns,
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "sched_batch_wakeup_granularity_ns",
		.data		= &sysctl_sched_batch_wakeup_granularity,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &min_wakeup_granularity_ns,
		.extra2		= &max_wakeup_granularity_ns,
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "sched_child_runs_first",
@@ -318,7 +307,7 @@ static struct ctl_table kern_table[] = {
		.data		= &sysctl_sched_rt_period,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
		.proc_handler	= &sched_rt_handler,
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
@@ -326,7 +315,7 @@ static struct ctl_table kern_table[] = {
		.data		= &sysctl_sched_rt_runtime,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
		.proc_handler	= &sched_rt_handler,
	},
	{
		.ctl_name	= CTL_UNNUMBERED,

@@ -191,7 +191,6 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
void tick_nohz_stop_sched_tick(void)
{
	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
	unsigned long rt_jiffies;
	struct tick_sched *ts;
	ktime_t last_update, expires, now;
	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
@@ -243,10 +242,6 @@ void tick_nohz_stop_sched_tick(void)
	next_jiffies = get_next_timer_interrupt(last_jiffies);
	delta_jiffies = next_jiffies - last_jiffies;

	rt_jiffies = rt_needs_cpu(cpu);
	if (rt_jiffies && rt_jiffies < delta_jiffies)
		delta_jiffies = rt_jiffies;

	if (rcu_needs_cpu(cpu))
		delta_jiffies = 1;
	/*

@@ -101,7 +101,7 @@ static int sched_create_user(struct user_struct *up)
{
	int rc = 0;

	up->tg = sched_create_group();
	up->tg = sched_create_group(&root_task_group);
	if (IS_ERR(up->tg))
		rc = -ENOMEM;

@@ -193,6 +193,33 @@ static ssize_t cpu_rt_runtime_store(struct kobject *kobj,

static struct kobj_attribute cpu_rt_runtime_attr =
	__ATTR(cpu_rt_runtime, 0644, cpu_rt_runtime_show, cpu_rt_runtime_store);

static ssize_t cpu_rt_period_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_rt_period(up->tg));
}

static ssize_t cpu_rt_period_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long rt_period;
	int rc;

	sscanf(buf, "%lu", &rt_period);

	rc = sched_group_set_rt_period(up->tg, rt_period);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_period_attr =
	__ATTR(cpu_rt_period, 0644, cpu_rt_period_show, cpu_rt_period_store);
#endif

/* default attributes per uid directory */
@@ -202,6 +229,7 @@ static struct attribute *uids_attributes[] = {
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	&cpu_rt_runtime_attr.attr,
	&cpu_rt_period_attr.attr,
#endif
	NULL
};