@@ -82,7 +82,7 @@ index bdedde199504..18d2b5d41b36 100644

wait_bit_init();
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
- index f153abf6d077..f3b9ccdc0b41 100644
+ index 6298e519d4f0..85d51d68eaff 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -776,6 +776,10 @@ static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *_se)
@@ -187,23 +187,7 @@ index f153abf6d077..f3b9ccdc0b41 100644

dequeue_throttle:
util_est_update(&rq->cfs, p, task_sleep);
- @@ -6193,6 +6215,7 @@ static int wake_wide(struct task_struct *p)
- }
- #endif /* CONFIG_CACULE_SCHED */
-
- + #if !defined(CONFIG_CACULE_RDB)
- /*
- * The purpose of wake_affine() is to quickly determine on which CPU we can run
- * soonest. For the purpose of speed we only consider the waking and previous
- @@ -6294,6 +6317,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
- schedstat_inc(p->se.statistics.nr_wakeups_affine);
- return target;
- }
- + #endif
-
- static struct sched_group *
- find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu);
- @@ -7580,11 +7604,23 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
+ @@ -7580,11 +7602,23 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
if (prev)
put_prev_task(rq, prev);

@@ -227,7 +211,7 @@ index f153abf6d077..f3b9ccdc0b41 100644

p = task_of(se);

- @@ -7606,6 +7642,10 @@ done: __maybe_unused;
+ @@ -7606,6 +7640,10 @@ done: __maybe_unused;
return p;

idle:
@@ -238,151 +222,151 @@ index f153abf6d077..f3b9ccdc0b41 100644
if (!rf)
return NULL;

- @@ -7912,6 +7952,7 @@ struct lb_env {
+ @@ -7912,6 +7950,7 @@ struct lb_env {
struct list_head tasks;
};

+ #if !defined(CONFIG_CACULE_RDB)
/*
* Is this task likely cache-hot:
*/
- @@ -8333,6 +8374,7 @@ static void attach_tasks(struct lb_env *env)
+ @@ -8333,6 +8372,7 @@ static void attach_tasks(struct lb_env *env)

rq_unlock(env->dst_rq, &rf);
}
+ #endif

#ifdef CONFIG_NO_HZ_COMMON
static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
- @@ -8382,6 +8424,7 @@ static inline void update_blocked_load_tick(struct rq *rq) {}
+ @@ -8382,6 +8422,7 @@ static inline void update_blocked_load_tick(struct rq *rq) {}
static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {}
#endif

+ #if !defined(CONFIG_CACULE_RDB)
static bool __update_blocked_others(struct rq *rq, bool *done)
{
const struct sched_class *curr_class;
- @@ -8407,6 +8450,7 @@ static bool __update_blocked_others(struct rq *rq, bool *done)
+ @@ -8407,6 +8448,7 @@ static bool __update_blocked_others(struct rq *rq, bool *done)

return decayed;
}
+ #endif

#ifdef CONFIG_FAIR_GROUP_SCHED

- @@ -8497,6 +8541,7 @@ static unsigned long task_h_load(struct task_struct *p)
+ @@ -8497,6 +8539,7 @@ static unsigned long task_h_load(struct task_struct *p)
cfs_rq_load_avg(cfs_rq) + 1);
}
#else
+ #if !defined(CONFIG_CACULE_RDB)
static bool __update_blocked_fair(struct rq *rq, bool *done)
{
struct cfs_rq *cfs_rq = &rq->cfs;
- @@ -8508,6 +8553,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
+ @@ -8508,6 +8551,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)

return decayed;
}
+ #endif

static unsigned long task_h_load(struct task_struct *p)
{
- @@ -8515,6 +8561,7 @@ static unsigned long task_h_load(struct task_struct *p)
+ @@ -8515,6 +8559,7 @@ static unsigned long task_h_load(struct task_struct *p)
}
#endif

+ #if !defined(CONFIG_CACULE_RDB)
static void update_blocked_averages(int cpu)
{
bool decayed = false, done = true;
- @@ -8533,6 +8580,7 @@ static void update_blocked_averages(int cpu)
+ @@ -8533,6 +8578,7 @@ static void update_blocked_averages(int cpu)
cpufreq_update_util(rq, 0);
rq_unlock_irqrestore(rq, &rf);
}
+ #endif

/********** Helpers for find_busiest_group ************************/

- @@ -9636,6 +9684,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
+ @@ -9636,6 +9682,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
* different in groups.
*/

+ #if !defined(CONFIG_CACULE_RDB)
/**
* find_busiest_group - Returns the busiest group within the sched_domain
* if there is an imbalance.
- @@ -9904,6 +9953,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
+ @@ -9904,6 +9951,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,

return busiest;
}
+ #endif

/*
* Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
- @@ -9940,6 +9990,7 @@ imbalanced_active_balance(struct lb_env *env)
+ @@ -9940,6 +9988,7 @@ imbalanced_active_balance(struct lb_env *env)
return 0;
}

+ #if !defined(CONFIG_CACULE_RDB)
static int need_active_balance(struct lb_env *env)
{
struct sched_domain *sd = env->sd;
- @@ -10272,6 +10323,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
+ @@ -10272,6 +10321,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
out:
return ld_moved;
}
+ #endif

static inline unsigned long
get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
- @@ -10310,6 +10362,7 @@ update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
+ @@ -10310,6 +10360,7 @@ update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
*next_balance = next;
}

+ #if !defined(CONFIG_CACULE_RDB)
/*
* active_load_balance_cpu_stop is run by the CPU stopper. It pushes
* running tasks off the busiest CPU onto idle CPUs. It requires at
- @@ -10395,6 +10448,7 @@ static int active_load_balance_cpu_stop(void *data)
+ @@ -10395,6 +10446,7 @@ static int active_load_balance_cpu_stop(void *data)
}

static DEFINE_SPINLOCK(balancing);
+ #endif

/*
* Scale the max load_balance interval with the number of CPUs in the system.
- @@ -10405,6 +10459,7 @@ void update_max_interval(void)
+ @@ -10405,6 +10457,7 @@ void update_max_interval(void)
max_load_balance_interval = HZ*num_online_cpus()/10;
}

+ #if !defined(CONFIG_CACULE_RDB)
/*
* It checks each scheduling domain to see if it is due to be balanced,
* and initiates a balancing operation if so.
- @@ -10497,6 +10552,7 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
+ @@ -10497,6 +10550,7 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
rq->next_balance = next_balance;

}
+ #endif

static inline int on_null_domain(struct rq *rq)
{
- @@ -10530,6 +10586,7 @@ static inline int find_new_ilb(void)
+ @@ -10530,6 +10584,7 @@ static inline int find_new_ilb(void)
return nr_cpu_ids;
}

+ #if !defined(CONFIG_CACULE_RDB)
/*
* Kick a CPU to do the nohz balancing, if it is time for it. We pick any
* idle CPU in the HK_FLAG_MISC housekeeping set (if there is one).
- @@ -10680,6 +10737,7 @@ static void nohz_balancer_kick(struct rq *rq)
+ @@ -10680,6 +10735,7 @@ static void nohz_balancer_kick(struct rq *rq)
if (flags)
kick_ilb(flags);
}
+ #endif /* CONFIG_CACULE_RDB */

static void set_cpu_sd_state_busy(int cpu)
{
- @@ -10800,11 +10858,17 @@ static bool update_nohz_stats(struct rq *rq)
+ @@ -10800,11 +10856,17 @@ static bool update_nohz_stats(struct rq *rq)
if (!time_after(jiffies, READ_ONCE(rq->last_blocked_load_update_tick)))
return true;

@@ -400,7 +384,7 @@ index f153abf6d077..f3b9ccdc0b41 100644
/*
* Internal function that runs load balance for all idle cpus. The load balance
* can be a simple update of blocked load or a complete load balance with
- @@ -10874,7 +10938,11 @@ static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
+ @@ -10874,7 +10936,11 @@ static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
rq_unlock_irqrestore(rq, &rf);

if (flags & NOHZ_BALANCE_KICK)
@@ -412,31 +396,31 @@ index f153abf6d077..f3b9ccdc0b41 100644
}

if (time_after(next_balance, rq->next_balance)) {
- @@ -10900,6 +10968,7 @@ static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
+ @@ -10900,6 +10966,7 @@ static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
WRITE_ONCE(nohz.has_blocked, 1);
}

+ #if !defined(CONFIG_CACULE_RDB)
/*
* In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
* rebalancing for all the cpus for whom scheduler ticks are stopped.
- @@ -10920,6 +10989,7 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
+ @@ -10920,6 +10987,7 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)

return true;
}
+ #endif

/*
* Check if we need to run the ILB for updating blocked load before entering
- @@ -10969,6 +11039,7 @@ static void nohz_newidle_balance(struct rq *this_rq)
+ @@ -10969,6 +11037,7 @@ static void nohz_newidle_balance(struct rq *this_rq)
}

#else /* !CONFIG_NO_HZ_COMMON */
+ #if !defined(CONFIG_CACULE_RDB)
static inline void nohz_balancer_kick(struct rq *rq) { }

static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
- @@ -10977,8 +11048,130 @@ static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle
+ @@ -10977,8 +11046,130 @@ static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle
}

static inline void nohz_newidle_balance(struct rq *this_rq) { }
@@ -567,7 +551,7 @@ index f153abf6d077..f3b9ccdc0b41 100644
/*
* newidle_balance is called by schedule() if this_cpu is about to become
* idle. Attempts to pull tasks from other CPUs.
- @@ -10989,6 +11182,107 @@ static inline void nohz_newidle_balance(struct rq *this_rq) { }
+ @@ -10989,6 +11180,107 @@ static inline void nohz_newidle_balance(struct rq *this_rq) { }
* > 0 - success, new (fair) tasks present
*/
static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
@@ -675,7 +659,7 @@ index f153abf6d077..f3b9ccdc0b41 100644
{
unsigned long next_balance = jiffies + HZ;
int this_cpu = this_rq->cpu;
- @@ -11145,6 +11439,214 @@ void trigger_load_balance(struct rq *rq)
+ @@ -11145,6 +11437,214 @@ void trigger_load_balance(struct rq *rq)

nohz_balancer_kick(rq);
}
@@ -890,7 +874,7 @@ index f153abf6d077..f3b9ccdc0b41 100644

static void rq_online_fair(struct rq *rq)
{
- @@ -11785,7 +12287,9 @@ void show_numa_stats(struct task_struct *p, struct seq_file *m)
+ @@ -11785,7 +12285,9 @@ void show_numa_stats(struct task_struct *p, struct seq_file *m)
__init void init_sched_fair_class(void)
{
#ifdef CONFIG_SMP
@@ -918,7 +902,7 @@ index 7ca3d3d86c2a..a7422dea8a9f 100644
/*
* If the arch has a polling bit, we maintain an invariant:
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
- index 0affe3be7c21..635a32027496 100644
+ index 09a8290fc883..4d46ef7190bf 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -544,6 +544,10 @@ struct cfs_rq {