This repository was archived by the owner on Sep 2, 2021. It is now read-only.

Commit 3551db5

Fix: #42
1 parent 3abff81 commit 3551db5

File tree

1 file changed: +31 -47 lines changed

patches/CacULE/v5.13/rdb-5.13.patch

Lines changed: 31 additions & 47 deletions
@@ -82,7 +82,7 @@ index bdedde199504..18d2b5d41b36 100644
 
 wait_bit_init();
 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index f153abf6d077..f3b9ccdc0b41 100644
+index 6298e519d4f0..85d51d68eaff 100644
 --- a/kernel/sched/fair.c
 +++ b/kernel/sched/fair.c
 @@ -776,6 +776,10 @@ static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *_se)
@@ -187,23 +187,7 @@ index f153abf6d077..f3b9ccdc0b41 100644
 
 dequeue_throttle:
 util_est_update(&rq->cfs, p, task_sleep);
-@@ -6193,6 +6215,7 @@ static int wake_wide(struct task_struct *p)
-}
-#endif /* CONFIG_CACULE_SCHED */
-
-+#if !defined(CONFIG_CACULE_RDB)
-/*
-* The purpose of wake_affine() is to quickly determine on which CPU we can run
-* soonest. For the purpose of speed we only consider the waking and previous
-@@ -6294,6 +6317,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
-schedstat_inc(p->se.statistics.nr_wakeups_affine);
-return target;
-}
-+#endif
-
-static struct sched_group *
-find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu);
-@@ -7580,11 +7604,23 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
+@@ -7580,11 +7602,23 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
 if (prev)
 put_prev_task(rq, prev);
 
@@ -227,7 +211,7 @@ index f153abf6d077..f3b9ccdc0b41 100644
 
 p = task_of(se);
 
-@@ -7606,6 +7642,10 @@ done: __maybe_unused;
+@@ -7606,6 +7640,10 @@ done: __maybe_unused;
 return p;
 
 idle:
@@ -238,151 +222,151 @@ index f153abf6d077..f3b9ccdc0b41 100644
 if (!rf)
 return NULL;
 
-@@ -7912,6 +7952,7 @@ struct lb_env {
+@@ -7912,6 +7950,7 @@ struct lb_env {
 struct list_head tasks;
 };
 
 +#if !defined(CONFIG_CACULE_RDB)
 /*
 * Is this task likely cache-hot:
 */
-@@ -8333,6 +8374,7 @@ static void attach_tasks(struct lb_env *env)
+@@ -8333,6 +8372,7 @@ static void attach_tasks(struct lb_env *env)
 
 rq_unlock(env->dst_rq, &rf);
 }
 +#endif
 
 #ifdef CONFIG_NO_HZ_COMMON
 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
-@@ -8382,6 +8424,7 @@ static inline void update_blocked_load_tick(struct rq *rq) {}
+@@ -8382,6 +8422,7 @@ static inline void update_blocked_load_tick(struct rq *rq) {}
 static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {}
 #endif
 
 +#if !defined(CONFIG_CACULE_RDB)
 static bool __update_blocked_others(struct rq *rq, bool *done)
 {
 const struct sched_class *curr_class;
-@@ -8407,6 +8450,7 @@ static bool __update_blocked_others(struct rq *rq, bool *done)
+@@ -8407,6 +8448,7 @@ static bool __update_blocked_others(struct rq *rq, bool *done)
 
 return decayed;
 }
 +#endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-@@ -8497,6 +8541,7 @@ static unsigned long task_h_load(struct task_struct *p)
+@@ -8497,6 +8539,7 @@ static unsigned long task_h_load(struct task_struct *p)
 cfs_rq_load_avg(cfs_rq) + 1);
 }
 #else
 +#if !defined(CONFIG_CACULE_RDB)
 static bool __update_blocked_fair(struct rq *rq, bool *done)
 {
 struct cfs_rq *cfs_rq = &rq->cfs;
-@@ -8508,6 +8553,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
+@@ -8508,6 +8551,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
 
 return decayed;
 }
 +#endif
 
 static unsigned long task_h_load(struct task_struct *p)
 {
-@@ -8515,6 +8561,7 @@ static unsigned long task_h_load(struct task_struct *p)
+@@ -8515,6 +8559,7 @@ static unsigned long task_h_load(struct task_struct *p)
 }
 #endif
 
 +#if !defined(CONFIG_CACULE_RDB)
 static void update_blocked_averages(int cpu)
 {
 bool decayed = false, done = true;
-@@ -8533,6 +8580,7 @@ static void update_blocked_averages(int cpu)
+@@ -8533,6 +8578,7 @@ static void update_blocked_averages(int cpu)
 cpufreq_update_util(rq, 0);
 rq_unlock_irqrestore(rq, &rf);
 }
 +#endif
 
 /********** Helpers for find_busiest_group ************************/
 
-@@ -9636,6 +9684,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
+@@ -9636,6 +9682,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
 * different in groups.
 */
 
 +#if !defined(CONFIG_CACULE_RDB)
 /**
 * find_busiest_group - Returns the busiest group within the sched_domain
 * if there is an imbalance.
-@@ -9904,6 +9953,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
+@@ -9904,6 +9951,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
 
 return busiest;
 }
 +#endif
 
 /*
 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
-@@ -9940,6 +9990,7 @@ imbalanced_active_balance(struct lb_env *env)
+@@ -9940,6 +9988,7 @@ imbalanced_active_balance(struct lb_env *env)
 return 0;
 }
 
 +#if !defined(CONFIG_CACULE_RDB)
 static int need_active_balance(struct lb_env *env)
 {
 struct sched_domain *sd = env->sd;
-@@ -10272,6 +10323,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
+@@ -10272,6 +10321,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 out:
 return ld_moved;
 }
 +#endif
 
 static inline unsigned long
 get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
-@@ -10310,6 +10362,7 @@ update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
+@@ -10310,6 +10360,7 @@ update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
 *next_balance = next;
 }
 
 +#if !defined(CONFIG_CACULE_RDB)
 /*
 * active_load_balance_cpu_stop is run by the CPU stopper. It pushes
 * running tasks off the busiest CPU onto idle CPUs. It requires at
-@@ -10395,6 +10448,7 @@ static int active_load_balance_cpu_stop(void *data)
+@@ -10395,6 +10446,7 @@ static int active_load_balance_cpu_stop(void *data)
 }
 
 static DEFINE_SPINLOCK(balancing);
 +#endif
 
 /*
 * Scale the max load_balance interval with the number of CPUs in the system.
-@@ -10405,6 +10459,7 @@ void update_max_interval(void)
+@@ -10405,6 +10457,7 @@ void update_max_interval(void)
 max_load_balance_interval = HZ*num_online_cpus()/10;
 }
 
 +#if !defined(CONFIG_CACULE_RDB)
 /*
 * It checks each scheduling domain to see if it is due to be balanced,
 * and initiates a balancing operation if so.
-@@ -10497,6 +10552,7 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
+@@ -10497,6 +10550,7 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
 rq->next_balance = next_balance;
 
 }
 +#endif
 
 static inline int on_null_domain(struct rq *rq)
 {
-@@ -10530,6 +10586,7 @@ static inline int find_new_ilb(void)
+@@ -10530,6 +10584,7 @@ static inline int find_new_ilb(void)
 return nr_cpu_ids;
 }
 
 +#if !defined(CONFIG_CACULE_RDB)
 /*
 * Kick a CPU to do the nohz balancing, if it is time for it. We pick any
 * idle CPU in the HK_FLAG_MISC housekeeping set (if there is one).
-@@ -10680,6 +10737,7 @@ static void nohz_balancer_kick(struct rq *rq)
+@@ -10680,6 +10735,7 @@ static void nohz_balancer_kick(struct rq *rq)
 if (flags)
 kick_ilb(flags);
 }
 +#endif /* CONFIG_CACULE_RDB */
 
 static void set_cpu_sd_state_busy(int cpu)
 {
-@@ -10800,11 +10858,17 @@ static bool update_nohz_stats(struct rq *rq)
+@@ -10800,11 +10856,17 @@ static bool update_nohz_stats(struct rq *rq)
 if (!time_after(jiffies, READ_ONCE(rq->last_blocked_load_update_tick)))
 return true;
 
@@ -400,7 +384,7 @@ index f153abf6d077..f3b9ccdc0b41 100644
 /*
 * Internal function that runs load balance for all idle cpus. The load balance
 * can be a simple update of blocked load or a complete load balance with
-@@ -10874,7 +10938,11 @@ static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
+@@ -10874,7 +10936,11 @@ static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
 rq_unlock_irqrestore(rq, &rf);
 
 if (flags & NOHZ_BALANCE_KICK)
@@ -412,31 +396,31 @@ index f153abf6d077..f3b9ccdc0b41 100644
 }
 
 if (time_after(next_balance, rq->next_balance)) {
-@@ -10900,6 +10968,7 @@ static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
+@@ -10900,6 +10966,7 @@ static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
 WRITE_ONCE(nohz.has_blocked, 1);
 }
 
 +#if !defined(CONFIG_CACULE_RDB)
 /*
 * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
 * rebalancing for all the cpus for whom scheduler ticks are stopped.
-@@ -10920,6 +10989,7 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
+@@ -10920,6 +10987,7 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
 
 return true;
 }
 +#endif
 
 /*
 * Check if we need to run the ILB for updating blocked load before entering
-@@ -10969,6 +11039,7 @@ static void nohz_newidle_balance(struct rq *this_rq)
+@@ -10969,6 +11037,7 @@ static void nohz_newidle_balance(struct rq *this_rq)
 }
 
 #else /* !CONFIG_NO_HZ_COMMON */
 +#if !defined(CONFIG_CACULE_RDB)
 static inline void nohz_balancer_kick(struct rq *rq) { }
 
 static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
-@@ -10977,8 +11048,130 @@ static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle
+@@ -10977,8 +11046,130 @@ static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle
 }
 
 static inline void nohz_newidle_balance(struct rq *this_rq) { }
@@ -567,7 +551,7 @@ index f153abf6d077..f3b9ccdc0b41 100644
 /*
 * newidle_balance is called by schedule() if this_cpu is about to become
 * idle. Attempts to pull tasks from other CPUs.
-@@ -10989,6 +11182,107 @@ static inline void nohz_newidle_balance(struct rq *this_rq) { }
+@@ -10989,6 +11180,107 @@ static inline void nohz_newidle_balance(struct rq *this_rq) { }
 * > 0 - success, new (fair) tasks present
 */
 static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
@@ -675,7 +659,7 @@ index f153abf6d077..f3b9ccdc0b41 100644
 {
 unsigned long next_balance = jiffies + HZ;
 int this_cpu = this_rq->cpu;
-@@ -11145,6 +11439,214 @@ void trigger_load_balance(struct rq *rq)
+@@ -11145,6 +11437,214 @@ void trigger_load_balance(struct rq *rq)
 
 nohz_balancer_kick(rq);
 }
@@ -890,7 +874,7 @@ index f153abf6d077..f3b9ccdc0b41 100644
 
 static void rq_online_fair(struct rq *rq)
 {
-@@ -11785,7 +12287,9 @@ void show_numa_stats(struct task_struct *p, struct seq_file *m)
+@@ -11785,7 +12285,9 @@ void show_numa_stats(struct task_struct *p, struct seq_file *m)
 __init void init_sched_fair_class(void)
 {
 #ifdef CONFIG_SMP
@@ -918,7 +902,7 @@ index 7ca3d3d86c2a..a7422dea8a9f 100644
 /*
 * If the arch has a polling bit, we maintain an invariant:
 diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index 0affe3be7c21..635a32027496 100644
+index 09a8290fc883..4d46ef7190bf 100644
 --- a/kernel/sched/sched.h
 +++ b/kernel/sched/sched.h
 @@ -544,6 +544,10 @@ struct cfs_rq {

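Note on the pattern above: the commit re-syncs rdb-5.13.patch with a newer kernel/sched/fair.c (the inner hunk offsets shift by two lines, new index hashes, and the wake_affine() guard hunk is dropped), while the patch itself keeps wrapping the stock CFS load-balancing paths in #if !defined(CONFIG_CACULE_RDB) ... #endif so they compile out when the RDB balancer is enabled. Below is a minimal sketch of that conditional-compilation pattern, assuming a standalone C file with placeholder functions rather than the real scheduler code:

/* guard_sketch.c - hypothetical illustration of the
 * #if !defined(CONFIG_CACULE_RDB) guard used throughout the patch.
 * The function below is a placeholder, not kernel code.
 */
#include <stdio.h>

#if !defined(CONFIG_CACULE_RDB)
/* stand-in for the stock load-balancing path kept under the guard */
static const char *active_balancer(void)
{
	return "stock CFS load balancing";
}
#else
/* stand-in for the path used when CONFIG_CACULE_RDB is defined */
static const char *active_balancer(void)
{
	return "CacULE RDB balancing";
}
#endif

int main(void)
{
	/* cc guard_sketch.c                      -> stock path
	 * cc -DCONFIG_CACULE_RDB guard_sketch.c  -> RDB path */
	printf("active path: %s\n", active_balancer());
	return 0;
}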