sched/cpufreq: Provide migration hint
It was suggested that a migration hint might be useful for the
CPU-freq governors.

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Juri Lelli <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Rafael J. Wysocki <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Viresh Kumar <[email protected]>
Cc: [email protected]
Signed-off-by: Ingo Molnar <[email protected]>
Peter Zijlstra authored and Ingo Molnar committed Mar 9, 2018
1 parent 00357f5 commit ea14b57
Showing 2 changed files with 20 additions and 12 deletions.
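A consumer's-eye view before the diff: the hint arrives in whatever callback a governor registered through cpufreq_add_update_util_hook(), so a governor can simply test the bit in its flags argument. A minimal sketch, assuming a hypothetical my_gov_update() callback (the real in-tree consumers of these flags are schedutil's sugov_update_single()/sugov_update_shared()):

    #include <linux/sched/cpufreq.h>

    /* Hypothetical governor callback; signature matches update_util_data->func. */
    static void my_gov_update(struct update_util_data *data, u64 time,
                              unsigned int flags)
    {
            if (flags & SCHED_CPUFREQ_MIGRATION) {
                    /*
                     * A task was just enqueued on this CPU after migrating
                     * here: re-evaluate the frequency now rather than
                     * waiting for the next tick-driven update.
                     */
            }
    }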
include/linux/sched/cpufreq.h (1 addition & 0 deletions)
@@ -9,6 +9,7 @@
  */

 #define SCHED_CPUFREQ_IOWAIT (1U << 0)
+#define SCHED_CPUFREQ_MIGRATION (1U << 1)

 #ifdef CONFIG_CPU_FREQ
 struct update_util_data {
kernel/sched/fair.c (19 additions & 12 deletions)
@@ -772,7 +772,7 @@ void post_init_entity_util_avg(struct sched_entity *se)
          * For !fair tasks do:
          *
         update_cfs_rq_load_avg(now, cfs_rq);
-        attach_entity_load_avg(cfs_rq, se);
+        attach_entity_load_avg(cfs_rq, se, 0);
         switched_from_fair(rq, p);
          *
          * such that the next switched_to_fair() has the
@@ -3009,11 +3009,11 @@ static inline void update_cfs_group(struct sched_entity *se)
 }
 #endif /* CONFIG_FAIR_GROUP_SCHED */

-static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
+static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
 {
         struct rq *rq = rq_of(cfs_rq);

-        if (&rq->cfs == cfs_rq) {
+        if (&rq->cfs == cfs_rq || (flags & SCHED_CPUFREQ_MIGRATION)) {
                 /*
                  * There are a few boundary cases this might miss but it should
                  * get called often enough that that should (hopefully) not be
@@ -3028,7 +3028,7 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
                  *
                  * See cpu_util().
                  */
-                cpufreq_update_util(rq, 0);
+                cpufreq_update_util(rq, flags);
         }
 }
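For orientation (not part of this diff): cpufreq_update_util() just forwards the flags word to the per-CPU hook the governor registered. Roughly, paraphrasing kernel/sched/sched.h of this era:

    static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
    {
            struct update_util_data *data;

            data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
                                                      cpu_of(rq)));
            if (data)
                    data->func(data, rq_clock(rq), flags);
    }

So adding SCHED_CPUFREQ_MIGRATION to the flags passed here is all it takes for the hint to reach the governor.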

@@ -3686,7 +3686,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 #endif

         if (decayed)
-                cfs_rq_util_change(cfs_rq);
+                cfs_rq_util_change(cfs_rq, 0);

         return decayed;
 }
@@ -3699,7 +3699,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
  * Must call update_cfs_rq_load_avg() before this, since we rely on
  * cfs_rq->avg.last_update_time being current.
  */
-static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
         u32 divider = LOAD_AVG_MAX - 1024 + cfs_rq->avg.period_contrib;

@@ -3735,7 +3735,7 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)

         add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);

-        cfs_rq_util_change(cfs_rq);
+        cfs_rq_util_change(cfs_rq, flags);
 }

/**
@@ -3754,7 +3754,7 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)

         add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);

-        cfs_rq_util_change(cfs_rq);
+        cfs_rq_util_change(cfs_rq, 0);
 }

/*
@@ -3784,7 +3784,14 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)

         if (!se->avg.last_update_time && (flags & DO_ATTACH)) {

-                attach_entity_load_avg(cfs_rq, se);
+                /*
+                 * DO_ATTACH means we're here from enqueue_entity().
+                 * !last_update_time means we've passed through
+                 * migrate_task_rq_fair() indicating we migrated.
+                 *
+                 * IOW we're enqueueing a task on a new CPU.
+                 */
+                attach_entity_load_avg(cfs_rq, se, SCHED_CPUFREQ_MIGRATION);
                 update_tg_load_avg(cfs_rq, 0);

         } else if (decayed && (flags & UPDATE_TG))
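Putting the hunks together, the path that raises the new hint looks roughly like this; a sketch of the usual CFS enqueue-after-migration flow, with call sites other than those touched above recalled from this kernel era rather than taken from the diff:

    migrate_task_rq_fair(p)                  /* zeroes se->avg.last_update_time */
    /* ...task is then woken/enqueued on the new CPU... */
    enqueue_entity()
      update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH)
        attach_entity_load_avg(cfs_rq, se, SCHED_CPUFREQ_MIGRATION)
          cfs_rq_util_change(cfs_rq, SCHED_CPUFREQ_MIGRATION)
            cpufreq_update_util(rq, SCHED_CPUFREQ_MIGRATION)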
@@ -3880,13 +3887,13 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)

 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
 {
-        cfs_rq_util_change(cfs_rq);
+        cfs_rq_util_change(cfs_rq, 0);
 }

 static inline void remove_entity_load_avg(struct sched_entity *se) {}

 static inline void
-attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
+attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) {}
 static inline void
 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}

@@ -9726,7 +9733,7 @@ static void attach_entity_cfs_rq(struct sched_entity *se)

         /* Synchronize entity with its cfs_rq */
         update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
-        attach_entity_load_avg(cfs_rq, se);
+        attach_entity_load_avg(cfs_rq, se, 0);
         update_tg_load_avg(cfs_rq, false);
         propagate_entity_cfs_rq(se);
 }
