Skip to content

Commit

Permalink
sched/fair: Update blocked load from NEWIDLE
Browse files Browse the repository at this point in the history
Since we already iterate CPUs looking for work on NEWIDLE, use this
iteration to age the blocked load. If the domain for which this is
done completely spans the idle set, we can push the ILB-based aging
forward.

Suggested-by: Brendan Jackman <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Signed-off-by: Ingo Molnar <[email protected]>
  • Loading branch information
Peter Zijlstra authored and Ingo Molnar committed Mar 9, 2018
1 parent a4064fb commit e022e0d
Show file tree
Hide file tree
Showing 3 changed files with 45 additions and 6 deletions.
1 change: 1 addition & 0 deletions kernel/sched/core.c
Original file line number Diff line number Diff line change
Expand Up @@ -6074,6 +6074,7 @@ void __init sched_init(void)
rq_attach_root(rq, &def_root_domain);
#ifdef CONFIG_NO_HZ_COMMON
rq->last_load_update_tick = jiffies;
rq->last_blocked_load_update_tick = jiffies;
atomic_set(&rq->nohz_flags, 0);
#endif
#endif /* CONFIG_SMP */
Expand Down
49 changes: 43 additions & 6 deletions kernel/sched/fair.c
Original file line number Diff line number Diff line change
Expand Up @@ -5376,6 +5376,14 @@ decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
}
return load;
}

/*
 * Shared nohz-idle load-balancing state; cacheline-aligned since it is
 * touched from many CPUs on the idle entry/exit and balance paths.
 */
static struct {
cpumask_var_t idle_cpus_mask;	/* CPUs currently in nohz-idle state */
atomic_t nr_cpus;	/* presumably the weight of idle_cpus_mask — confirm at the update sites */
unsigned long next_balance; /* in jiffy units */
unsigned long next_stats;	/* jiffies deadline for the next ILB blocked-load aging pass */
} nohz ____cacheline_aligned;

#endif /* CONFIG_NO_HZ_COMMON */

/**
Expand Down Expand Up @@ -7022,6 +7030,7 @@ enum fbq_type { regular, remote, all };
#define LBF_NEED_BREAK 0x02
#define LBF_DST_PINNED 0x04
#define LBF_SOME_PINNED 0x08
#define LBF_NOHZ_STATS 0x10

struct lb_env {
struct sched_domain *sd;
Expand Down Expand Up @@ -7460,6 +7469,10 @@ static void update_blocked_averages(int cpu)
if (cfs_rq_is_decayed(cfs_rq))
list_del_leaf_cfs_rq(cfs_rq);
}

#ifdef CONFIG_NO_HZ_COMMON
rq->last_blocked_load_update_tick = jiffies;
#endif
rq_unlock_irqrestore(rq, &rf);
}

Expand Down Expand Up @@ -7519,6 +7532,9 @@ static inline void update_blocked_averages(int cpu)
rq_lock_irqsave(rq, &rf);
update_rq_clock(rq);
update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
#ifdef CONFIG_NO_HZ_COMMON
rq->last_blocked_load_update_tick = jiffies;
#endif
rq_unlock_irqrestore(rq, &rf);
}

Expand Down Expand Up @@ -7853,6 +7869,21 @@ group_type group_classify(struct sched_group *group,
return group_other;
}

/*
 * Opportunistically age the blocked load of @rq while we are already
 * iterating CPUs: only acts on CPUs that are in nohz idle and whose
 * blocked-load stats have not been refreshed this jiffy.  Compiles to
 * a no-op without CONFIG_NO_HZ_COMMON.
 */
static void update_nohz_stats(struct rq *rq)
{
#ifdef CONFIG_NO_HZ_COMMON
	unsigned int this_cpu = rq->cpu;

	/* Skip busy CPUs and CPUs already updated during this jiffy. */
	if (cpumask_test_cpu(this_cpu, nohz.idle_cpus_mask) &&
	    time_after(jiffies, rq->last_blocked_load_update_tick))
		update_blocked_averages(this_cpu);
#endif
}

/**
* update_sg_lb_stats - Update sched_group's statistics for load balancing.
* @env: The load balancing environment.
Expand All @@ -7875,6 +7906,9 @@ static inline void update_sg_lb_stats(struct lb_env *env,
for_each_cpu_and(i, sched_group_span(group), env->cpus) {
struct rq *rq = cpu_rq(i);

if (env->flags & LBF_NOHZ_STATS)
update_nohz_stats(rq);

/* Bias balancing toward CPUs of our domain: */
if (local_group)
load = target_load(i, load_idx);
Expand Down Expand Up @@ -8030,6 +8064,15 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
if (child && child->flags & SD_PREFER_SIBLING)
prefer_sibling = 1;

#ifdef CONFIG_NO_HZ_COMMON
if (env->idle == CPU_NEWLY_IDLE) {
env->flags |= LBF_NOHZ_STATS;

if (cpumask_subset(nohz.idle_cpus_mask, sched_domain_span(env->sd)))
nohz.next_stats = jiffies + msecs_to_jiffies(LOAD_AVG_PERIOD);
}
#endif

load_idx = get_sd_load_idx(env->sd, env->idle);

do {
Expand Down Expand Up @@ -9049,12 +9092,6 @@ static inline int on_null_domain(struct rq *rq)
* needed, they will kick the idle load balancer, which then does idle
* load balancing for all the idle CPUs.
*/
/*
 * Shared nohz-idle load-balancing state; cacheline-aligned since it is
 * touched from many CPUs on the idle entry/exit and balance paths.
 */
static struct {
cpumask_var_t idle_cpus_mask;	/* CPUs currently in nohz-idle state */
atomic_t nr_cpus;	/* presumably the weight of idle_cpus_mask — confirm at the update sites */
unsigned long next_balance; /* in jiffy units */
unsigned long next_stats;	/* jiffies deadline for the next ILB blocked-load aging pass */
} nohz ____cacheline_aligned;

static inline int find_new_ilb(void)
{
Expand Down
1 change: 1 addition & 0 deletions kernel/sched/sched.h
Original file line number Diff line number Diff line change
Expand Up @@ -762,6 +762,7 @@ struct rq {
#ifdef CONFIG_NO_HZ_COMMON
#ifdef CONFIG_SMP
unsigned long last_load_update_tick;
unsigned long last_blocked_load_update_tick;
#endif /* CONFIG_SMP */
atomic_t nohz_flags;
#endif /* CONFIG_NO_HZ_COMMON */
Expand Down

0 comments on commit e022e0d

Please sign in to comment.