/*
 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

14 #ifndef __WALT_H
15 #define __WALT_H
16 
17 #ifdef CONFIG_SCHED_WALT
18 
19 void walt_update_task_ravg(struct task_struct *p, struct rq *rq, int event,
20 		u64 wallclock, u64 irqtime);
21 void walt_inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p);
22 void walt_dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p);
23 
24 void walt_fixup_busy_time(struct task_struct *p, int new_cpu);
25 void walt_init_new_task_load(struct task_struct *p);
26 void walt_mark_task_starting(struct task_struct *p);
27 void walt_set_window_start(struct rq *rq, struct rq_flags *rf);
28 void walt_migrate_sync_cpu(int cpu);
29 u64 walt_ktime_clock(void);
30 void walt_account_irqtime(int cpu, struct task_struct *curr, u64 delta,
31                                   u64 wallclock);
32 
33 u64 walt_irqload(int cpu);
34 int walt_cpu_high_irqload(int cpu);
35 
36 #else /* CONFIG_SCHED_WALT */
37 
walt_update_task_ravg(struct task_struct * p,struct rq * rq,int event,u64 wallclock,u64 irqtime)38 static inline void walt_update_task_ravg(struct task_struct *p, struct rq *rq,
39 		int event, u64 wallclock, u64 irqtime) { }
walt_inc_cumulative_runnable_avg(struct rq * rq,struct task_struct * p)40 static inline void walt_inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p) { }
walt_dec_cumulative_runnable_avg(struct rq * rq,struct task_struct * p)41 static inline void walt_dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p) { }
walt_fixup_busy_time(struct task_struct * p,int new_cpu)42 static inline void walt_fixup_busy_time(struct task_struct *p, int new_cpu) { }
walt_init_new_task_load(struct task_struct * p)43 static inline void walt_init_new_task_load(struct task_struct *p) { }
walt_mark_task_starting(struct task_struct * p)44 static inline void walt_mark_task_starting(struct task_struct *p) { }
walt_set_window_start(struct rq * rq,struct rq_flags * rf)45 static inline void walt_set_window_start(struct rq *rq, struct rq_flags *rf) { }
walt_migrate_sync_cpu(int cpu)46 static inline void walt_migrate_sync_cpu(int cpu) { }
walt_ktime_clock(void)47 static inline u64 walt_ktime_clock(void) { return 0; }
48 
49 #define walt_cpu_high_irqload(cpu) false
50 
51 #endif /* CONFIG_SCHED_WALT */
52 
53 #if defined(CONFIG_CFS_BANDWIDTH) && defined(CONFIG_SCHED_WALT)
54 void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
55 		struct task_struct *p);
56 void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
57 		struct task_struct *p);
58 #else
walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq * rq,struct task_struct * p)59 static inline void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
60 		struct task_struct *p) { }
walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq * rq,struct task_struct * p)61 static inline void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
62 		struct task_struct *p) { }
63 #endif
64 
65 extern bool walt_disabled;
66 
67 #endif
68