sched: qhmp: fix compilation after 6f09f3d.
Commit 6f09f3d applied these changes to the generic scheduler code only, not to
qhmp. Since santoni uses the qhmp scheduler by default, mirror the same changes
in qhmp_core.c to fix the build.

Fixes: 6f09f3d ("sched: Add commits left out during CAF's 3.18 stable merge")

Signed-off-by: Ryan Andri <[email protected]>
Ryan Andri authored and bitrvmpd committed Feb 3, 2018
1 parent 51d7c49 commit 51a4677
1 changed file: kernel/sched/qhmp_core.c (27 additions, 22 deletions)

--- a/kernel/sched/qhmp_core.c
+++ b/kernel/sched/qhmp_core.c
@@ -6266,34 +6266,23 @@ EXPORT_SYMBOL_GPL(yield_to);
  * This task is about to go to sleep on IO. Increment rq->nr_iowait so
  * that process accounting knows that this is a task in IO wait state.
  */
-void __sched io_schedule(void)
-{
-	struct rq *rq = raw_rq();
-
-	delayacct_blkio_start();
-	atomic_inc(&rq->nr_iowait);
-	blk_flush_plug(current);
-	current->in_iowait = 1;
-	schedule();
-	current->in_iowait = 0;
-	atomic_dec(&rq->nr_iowait);
-	delayacct_blkio_end();
-}
-EXPORT_SYMBOL(io_schedule);
-
 long __sched io_schedule_timeout(long timeout)
 {
-	struct rq *rq = raw_rq();
+	int old_iowait = current->in_iowait;
+	struct rq *rq;
 	long ret;
 
+	current->in_iowait = 1;
+	blk_schedule_flush_plug(current);
+
 	delayacct_blkio_start();
+	rq = raw_rq();
 	atomic_inc(&rq->nr_iowait);
-	blk_flush_plug(current);
-	current->in_iowait = 1;
 	ret = schedule_timeout(timeout);
-	current->in_iowait = 0;
+	current->in_iowait = old_iowait;
 	atomic_dec(&rq->nr_iowait);
 	delayacct_blkio_end();
+
 	return ret;
 }
 EXPORT_SYMBOL(io_schedule_timeout);
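
Note: with io_schedule() deleted from qhmp_core.c, its callers still need a
definition; that is presumably supplied by the header-side part of commit
6f09f3d, matching the upstream change where io_schedule() became a static
inline wrapper. A sketch of that wrapper, for context only (it is not part of
this diff):

static inline void io_schedule(void)
{
	/* MAX_SCHEDULE_TIMEOUT means "no timeout", so this keeps plain
	 * io_schedule() semantics while sharing one implementation. */
	io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
}

Two details of the reordering are worth noting: the plug is now flushed via
blk_schedule_flush_plug() before the nr_iowait/delayacct accounting window
opens, and in_iowait is restored to its saved old_iowait value rather than
reset to 0, so a nested call cannot clear the flag out from under an outer one.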
@@ -7722,6 +7711,9 @@ enum s_alloc {
  * Build an iteration mask that can exclude certain CPUs from the upwards
  * domain traversal.
  *
+ * Only CPUs that can arrive at this group should be considered to continue
+ * balancing.
+ *
  * Asymmetric node setups can result in situations where the domain tree is of
  * unequal depth, make sure to skip domains that already cover the entire
  * range.
@@ -7733,18 +7725,31 @@ enum s_alloc {
  */
 static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
 {
-	const struct cpumask *span = sched_domain_span(sd);
+	const struct cpumask *sg_span = sched_group_cpus(sg);
 	struct sd_data *sdd = sd->private;
 	struct sched_domain *sibling;
 	int i;
 
-	for_each_cpu(i, span) {
+	for_each_cpu(i, sg_span) {
 		sibling = *per_cpu_ptr(sdd->sd, i);
-		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
+
+		/*
+		 * Can happen in the asymmetric case, where these siblings are
+		 * unused. The mask will not be empty because those CPUs that
+		 * do have the top domain _should_ span the domain.
+		 */
+		if (!sibling->child)
+			continue;
+
+		/* If we would not end up here, we can't continue from here */
+		if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
			continue;
 
 		cpumask_set_cpu(i, sched_group_mask(sg));
 	}
+
+	/* We must not have empty masks here */
+	WARN_ON_ONCE(cpumask_empty(sched_group_mask(sg)));
 }
 
 /*
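
The new filter is easiest to see outside the kernel. Below is a minimal
userspace model of the rewritten loop — an illustration only, with cpumasks
reduced to plain unsigned ints and the per-CPU sibling domains reduced to an
array (struct sibling, child_span, and NCPUS are hypothetical names, not kernel
API). A CPU in the group's span contributes to the balance mask only when its
sibling domain has a child and that child spans exactly this group, i.e. only
when balancing started on that CPU can actually arrive at this group:

#include <stdio.h>

#define NCPUS 4

/* Per-CPU sibling domain: span of its child domain, or 0 if the
 * sibling is unused (the asymmetric case in the kernel code). */
struct sibling {
	unsigned int child_span;
};

/* Model of the new build_group_mask(): walk the group's own span
 * (not the whole domain span, as before) and keep only CPUs whose
 * sibling's child domain spans exactly this group. */
static unsigned int build_group_mask(unsigned int sg_span,
				     const struct sibling *sib)
{
	unsigned int mask = 0;
	int i;

	for (i = 0; i < NCPUS; i++) {
		if (!(sg_span & (1u << i)))
			continue;	/* for_each_cpu(i, sg_span) */
		if (!sib[i].child_span)
			continue;	/* unused sibling, skip */
		if (sib[i].child_span != sg_span)
			continue;	/* can't arrive at this group from i */
		mask |= 1u << i;
	}
	return mask;	/* the kernel warns if this ends up empty */
}

int main(void)
{
	/* Group spans CPUs {0,1}. CPU 0's sibling child also spans {0,1},
	 * but CPU 1's spans {2,3}, so only CPU 0 makes the mask. */
	struct sibling sib[NCPUS] = {
		{ .child_span = 0x3 },
		{ .child_span = 0xc },
		{ .child_span = 0 },
		{ .child_span = 0 },
	};

	printf("group mask = 0x%x\n", build_group_mask(0x3, sib));
	return 0;
}

Compiled with cc, this prints "group mask = 0x1": CPU 1 is excluded because its
child domain spans {2,3} rather than the group's {0,1}, which is exactly the
situation the WARN_ON_ONCE() guards against going all the way to empty.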
