// SPDX-License-Identifier: GPL-2.0
/*
 * Frame-based load tracking for rt_frame and RTG
 *
 * Copyright (c) 2022-2023 Huawei Technologies Co., Ltd.
 */

#include "frame_rtg.h"
#include "rtg.h"

#include <linux/sched.h>
#include <trace/events/rtg.h>
#include <../kernel/sched/sched.h>
#include <uapi/linux/sched/types.h>

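/*
 * Frame RTG bookkeeping: g_id_manager hands out slots in id_map under a
 * rwlock (slot i corresponds to RTG id MULTI_FRAME_ID + i), and
 * g_multi_frame_info holds the per-slot frame state.
 */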
static struct multi_frame_id_manager g_id_manager = {
	.id_map = {0},
	.offset = 0,
	.lock = __RW_LOCK_UNLOCKED(g_id_manager.lock)
};

static struct frame_info g_multi_frame_info[MULTI_FRAME_NUM];

static bool is_rtg_rt_task(struct task_struct *task)
{
	bool ret = false;

	if (!task)
		return ret;

	ret = ((task->prio < MAX_RT_PRIO) &&
	       (task->rtg_depth == STATIC_RTG_DEPTH));

	return ret;
}

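/*
 * Optional global RT-thread budget: g_rtg_rt_thread_num caches how many
 * frame-RTG tasks currently run as RT. The cached value is resynced from
 * the group task lists once it reaches RTG_MAX_RT_THREAD_NUM, see
 * test_and_read_rtg_rt_thread_num() below.
 */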
#ifdef CONFIG_SCHED_RTG_RT_THREAD_LIMIT
static atomic_t g_rtg_rt_thread_num = ATOMIC_INIT(0);

static unsigned int _get_rtg_rt_thread_num(struct related_thread_group *grp)
{
	unsigned int rtg_rt_thread_num = 0;
	struct task_struct *p = NULL;

	if (list_empty(&grp->tasks))
		goto out;

	list_for_each_entry(p, &grp->tasks, grp_list) {
		if (is_rtg_rt_task(p))
			++rtg_rt_thread_num;
	}

out:
	return rtg_rt_thread_num;
}

static unsigned int get_rtg_rt_thread_num(void)
{
	struct related_thread_group *grp = NULL;
	unsigned int total_rtg_rt_thread_num = 0;
	unsigned long flag;
	unsigned int i;

	for (i = MULTI_FRAME_ID; i < MULTI_FRAME_ID + MULTI_FRAME_NUM; i++) {
		grp = lookup_related_thread_group(i);
		if (grp == NULL)
			continue;
		raw_spin_lock_irqsave(&grp->lock, flag);
		total_rtg_rt_thread_num += _get_rtg_rt_thread_num(grp);
		raw_spin_unlock_irqrestore(&grp->lock, flag);
	}

	return total_rtg_rt_thread_num;
}

static void inc_rtg_rt_thread_num(void)
{
	atomic_inc(&g_rtg_rt_thread_num);
}

static void dec_rtg_rt_thread_num(void)
{
	atomic_dec_if_positive(&g_rtg_rt_thread_num);
}

static int test_and_read_rtg_rt_thread_num(void)
{
	if (atomic_read(&g_rtg_rt_thread_num) >= RTG_MAX_RT_THREAD_NUM)
		atomic_set(&g_rtg_rt_thread_num, get_rtg_rt_thread_num());

	return atomic_read(&g_rtg_rt_thread_num);
}

int read_rtg_rt_thread_num(void)
{
	return atomic_read(&g_rtg_rt_thread_num);
}
#else
static inline void inc_rtg_rt_thread_num(void) { }
static inline void dec_rtg_rt_thread_num(void) { }
static inline int test_and_read_rtg_rt_thread_num(void)
{
	return 0;
}
#endif

bool is_frame_rtg(int id)
{
	return (id >= MULTI_FRAME_ID) &&
		(id < (MULTI_FRAME_ID + MULTI_FRAME_NUM));
}

static struct related_thread_group *frame_rtg(int id)
{
	if (!is_frame_rtg(id))
		return NULL;

	return lookup_related_thread_group(id);
}

struct frame_info *rtg_frame_info(int id)
{
	if (!is_frame_rtg(id))
		return NULL;

	return rtg_active_multi_frame_info(id);
}

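/*
 * Allocate a frame RTG id: scan id_map from the last allocated offset,
 * wrap around to the first free bit, and return MULTI_FRAME_ID + offset,
 * or -EINVAL when all MULTI_FRAME_NUM slots are in use.
 */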
static int alloc_rtg_id(void)
{
	unsigned int id_offset;
	int id;

	write_lock(&g_id_manager.lock);
	id_offset = find_next_zero_bit(g_id_manager.id_map, MULTI_FRAME_NUM,
				       g_id_manager.offset);
	if (id_offset >= MULTI_FRAME_NUM) {
		id_offset = find_first_zero_bit(g_id_manager.id_map,
						MULTI_FRAME_NUM);
		if (id_offset >= MULTI_FRAME_NUM) {
			write_unlock(&g_id_manager.lock);
			return -EINVAL;
		}
	}

	set_bit(id_offset, g_id_manager.id_map);
	g_id_manager.offset = id_offset;
	id = id_offset + MULTI_FRAME_ID;
	write_unlock(&g_id_manager.lock);
	pr_debug("[FRAME_RTG] %s id_offset=%u, id=%d\n", __func__, id_offset, id);

	return id;
}

static void free_rtg_id(int id)
{
	unsigned int id_offset = id - MULTI_FRAME_ID;

	if (id_offset >= MULTI_FRAME_NUM) {
		pr_err("[FRAME_RTG] %s id_offset is invalid, id=%d, id_offset=%u.\n",
		       __func__, id, id_offset);
		return;
	}

	pr_debug("[FRAME_RTG] %s id=%d id_offset=%u\n", __func__, id, id_offset);
	write_lock(&g_id_manager.lock);
	clear_bit(id_offset, g_id_manager.id_map);
	write_unlock(&g_id_manager.lock);
}

int set_frame_rate(struct frame_info *frame_info, int rate)
{
	int id;

	if ((rate < MIN_FRAME_RATE) || (rate > MAX_FRAME_RATE)) {
		pr_err("[FRAME_RTG]: %s invalid QOS(rate) value\n",
			__func__);
		return -EINVAL;
	}

	if (!frame_info || !frame_info->rtg)
		return -EINVAL;

	frame_info->frame_rate = (unsigned int)rate;
	frame_info->frame_time = div_u64(NSEC_PER_SEC, rate);
	frame_info->max_vload_time =
		div_u64(frame_info->frame_time, NSEC_PER_MSEC) +
		frame_info->vload_margin;
	id = frame_info->rtg->id;
	trace_rtg_frame_sched(id, "FRAME_QOS", rate);
	trace_rtg_frame_sched(id, "FRAME_MAX_TIME", frame_info->max_vload_time);

	return 0;
}

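/*
 * Typical lifecycle of a multi-frame group, as an illustrative sketch
 * (not taken from an in-tree caller):
 *
 *	int id = alloc_multi_frame_info();
 *	if (id >= 0) {
 *		struct frame_info *fi = rtg_frame_info(id);
 *
 *		set_frame_rate(fi, DEFAULT_FRAME_RATE);
 *		...
 *		release_multi_frame_info(id);
 *	}
 */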
int alloc_multi_frame_info(void)
{
	struct frame_info *frame_info = NULL;
	int id;
	int i;

	id = alloc_rtg_id();
	if (id < 0)
		return id;

	frame_info = rtg_frame_info(id);
	if (!frame_info) {
		free_rtg_id(id);
		return -EINVAL;
	}

	set_frame_rate(frame_info, DEFAULT_FRAME_RATE);
	atomic_set(&frame_info->curr_rt_thread_num, 0);
	atomic_set(&frame_info->max_rt_thread_num, DEFAULT_MAX_RT_THREAD);
	for (i = 0; i < MAX_TID_NUM; i++)
		atomic_set(&frame_info->thread_prio[i], 0);

	return id;
}

void release_multi_frame_info(int id)
{
	if ((id < MULTI_FRAME_ID) || (id >= MULTI_FRAME_ID + MULTI_FRAME_NUM)) {
		pr_err("[FRAME_RTG] %s frame(id=%d) not found.\n", __func__, id);
		return;
	}

	read_lock(&g_id_manager.lock);
	if (!test_bit(id - MULTI_FRAME_ID, g_id_manager.id_map)) {
		read_unlock(&g_id_manager.lock);
		return;
	}
	read_unlock(&g_id_manager.lock);

	pr_debug("[FRAME_RTG] %s release frame(id=%d).\n", __func__, id);
	free_rtg_id(id);
}

void clear_multi_frame_info(void)
{
	write_lock(&g_id_manager.lock);
	bitmap_zero(g_id_manager.id_map, MULTI_FRAME_NUM);
	g_id_manager.offset = 0;
	write_unlock(&g_id_manager.lock);
}

struct frame_info *rtg_active_multi_frame_info(int id)
{
	struct frame_info *frame_info = NULL;

	if ((id < MULTI_FRAME_ID) || (id >= MULTI_FRAME_ID + MULTI_FRAME_NUM))
		return NULL;

	read_lock(&g_id_manager.lock);
	if (test_bit(id - MULTI_FRAME_ID, g_id_manager.id_map))
		frame_info = &g_multi_frame_info[id - MULTI_FRAME_ID];
	read_unlock(&g_id_manager.lock);
	if (!frame_info)
		pr_debug("[FRAME_RTG] %s frame %d has been released\n",
			 __func__, id);

	return frame_info;
}

struct frame_info *rtg_multi_frame_info(int id)
{
	if ((id < MULTI_FRAME_ID) || (id >= MULTI_FRAME_ID + MULTI_FRAME_NUM))
		return NULL;

	return &g_multi_frame_info[id - MULTI_FRAME_ID];
}

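/*
 * A prio of NOT_RT_PRIO means "leave the task on CFS"; any other value is
 * mapped to an RT priority of MAX_USER_RT_PRIO - 1 - prio. The switch to
 * RT is gated by both the per-frame curr_rt_thread_num/max_rt_thread_num
 * pair and the global RT-thread budget above.
 */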
static void do_update_frame_task_prio(struct frame_info *frame_info,
				      struct task_struct *task, int prio)
{
	int policy = SCHED_NORMAL;
	struct sched_param sp = {0};
	bool is_rt_task = (prio != NOT_RT_PRIO);
	bool need_dec_flag = false;
	bool need_inc_flag = false;
	int err;

	trace_rtg_frame_sched(frame_info->rtg->id, "rtg_rt_thread_num",
			      read_rtg_rt_thread_num());
	/* change policy to RT */
	if (is_rt_task && (atomic_read(&frame_info->curr_rt_thread_num) <
			   atomic_read(&frame_info->max_rt_thread_num))) {
		/* change policy from CFS to RT */
		if (!is_rtg_rt_task(task)) {
			if (test_and_read_rtg_rt_thread_num() >= RTG_MAX_RT_THREAD_NUM)
				goto out;
			need_inc_flag = true;
		}
		/* change RT priority */
		policy = SCHED_FIFO | SCHED_RESET_ON_FORK;
		sp.sched_priority = MAX_USER_RT_PRIO - 1 - prio;
		atomic_inc(&frame_info->curr_rt_thread_num);
	} else {
		/* change policy from RT to CFS */
		if (!is_rt_task && is_rtg_rt_task(task))
			need_dec_flag = true;
	}
out:
	trace_rtg_frame_sched(frame_info->rtg->id, "rtg_rt_thread_num",
			      read_rtg_rt_thread_num());
	trace_rtg_frame_sched(frame_info->rtg->id, "curr_rt_thread_num",
			      atomic_read(&frame_info->curr_rt_thread_num));
	err = sched_setscheduler_nocheck(task, policy, &sp);
	if (err == 0) {
		if (need_dec_flag)
			dec_rtg_rt_thread_num();
		else if (need_inc_flag)
			inc_rtg_rt_thread_num();
	}
}

int list_rtg_group(struct rtg_info *rs_data)
{
	int i;
	int num = 0;

	read_lock(&g_id_manager.lock);
	for (i = MULTI_FRAME_ID; i < MULTI_FRAME_ID + MULTI_FRAME_NUM; i++) {
		if (test_bit(i - MULTI_FRAME_ID, g_id_manager.id_map)) {
			rs_data->rtgs[num] = i;
			num++;
		}
	}
	read_unlock(&g_id_manager.lock);
	rs_data->rtg_num = num;

	return num;
}

int search_rtg(int pid)
{
	struct rtg_info grp_info;
	struct frame_info *frame_info = NULL;
	int i = 0;
	int j = 0;

	grp_info.rtg_num = 0;
	read_lock(&g_id_manager.lock);
	for (i = MULTI_FRAME_ID; i < MULTI_FRAME_ID + MULTI_FRAME_NUM; i++) {
		if (test_bit(i - MULTI_FRAME_ID, g_id_manager.id_map)) {
			grp_info.rtgs[grp_info.rtg_num] = i;
			grp_info.rtg_num++;
		}
	}
	read_unlock(&g_id_manager.lock);
	for (i = 0; i < grp_info.rtg_num; i++) {
		frame_info = lookup_frame_info_by_grp_id(grp_info.rtgs[i]);
		if (!frame_info) {
			pr_err("[FRAME_RTG] unexpected grp %d find error.", i);
			return -EINVAL;
		}

		for (j = 0; j < frame_info->thread_num; j++) {
			if (frame_info->thread[j] && frame_info->thread[j]->pid == pid)
				return grp_info.rtgs[i];
		}
	}

	return 0;
}

static void update_frame_task_prio(struct frame_info *frame_info, int prio)
{
	int i;
	struct task_struct *thread = NULL;

	/* reset curr_rt_thread_num */
	atomic_set(&frame_info->curr_rt_thread_num, 0);

	for (i = 0; i < MAX_TID_NUM; i++) {
		thread = frame_info->thread[i];
		if (thread)
			do_update_frame_task_prio(frame_info, thread, prio);
	}
}

void set_frame_prio(struct frame_info *frame_info, int prio)
{
	if (!frame_info)
		return;

	mutex_lock(&frame_info->lock);
	if (frame_info->prio == prio)
		goto out;

	update_frame_task_prio(frame_info, prio);
	frame_info->prio = prio;
out:
	mutex_unlock(&frame_info->lock);
}

static int do_set_rtg_sched(struct task_struct *task, bool is_rtg,
			    int grp_id, int prio)
{
	int err;
	int policy = SCHED_NORMAL;
	int grpid = DEFAULT_RTG_GRP_ID;
	bool is_rt_task = (prio != NOT_RT_PRIO);
	struct sched_param sp = {0};

	if (is_rtg) {
		if (is_rt_task) {
			if (test_and_read_rtg_rt_thread_num() >= RTG_MAX_RT_THREAD_NUM)
				/* no RT thread budget left, keep the CFS policy */
				goto skip_setpolicy;
			policy = SCHED_FIFO | SCHED_RESET_ON_FORK;
			sp.sched_priority = MAX_USER_RT_PRIO - 1 - prio;
		}
skip_setpolicy:
		grpid = grp_id;
	}
	err = sched_setscheduler_nocheck(task, policy, &sp);
	if (err < 0) {
		pr_err("[FRAME_RTG]: %s task:%d setscheduler err:%d\n",
				__func__, task->pid, err);
		return err;
	}
	err = sched_set_group_id(task, grpid);
	if (err < 0) {
		pr_err("[FRAME_RTG]: %s task:%d set_group_id err:%d\n",
				__func__, task->pid, err);
		if (is_rtg) {
			policy = SCHED_NORMAL;
			sp.sched_priority = 0;
			sched_setscheduler_nocheck(task, policy, &sp);
		}
	}
	if (err == 0) {
		if (is_rtg) {
			if (policy != SCHED_NORMAL)
				inc_rtg_rt_thread_num();
		} else {
			dec_rtg_rt_thread_num();
		}
	}

	return err;
}

static int set_rtg_sched(struct task_struct *task, bool is_rtg,
			 int grp_id, int prio)
{
	int err = -1;
	bool is_rt_task = (prio != NOT_RT_PRIO);

	if (!task)
		return err;

	if (is_rt_task && is_rtg && ((prio < 0) ||
		(prio > MAX_USER_RT_PRIO - 1)))
		return err;
	/*
	 * The original logic denied setting a non-CFS task to RT;
	 * add !fair_policy(task->policy) here if that is needed again.
	 *
	 * If CONFIG_HW_FUTEX_PI is set, task->prio and task->sched_class
	 * may be modified by rtmutex, so task->policy is used instead.
	 */
	if (is_rtg && task->flags & PF_EXITING)
		return err;

	if (in_interrupt()) {
		pr_err("[FRAME_RTG]: %s is in interrupt\n", __func__);
		return err;
	}

	return do_set_rtg_sched(task, is_rtg, grp_id, prio);
}

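/*
 * Joining a frame RTG marks the task with STATIC_RTG_DEPTH; the previous
 * rtg_depth is restored if set_rtg_sched() fails, so a failed attach
 * leaves the task unchanged.
 */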
static bool set_frame_rtg_thread(int grp_id, struct task_struct *task,
				 bool is_rtg, int prio)
{
	int depth;

	if (!task)
		return false;
	depth = task->rtg_depth;
	if (is_rtg)
		task->rtg_depth = STATIC_RTG_DEPTH;
	else
		task->rtg_depth = 0;

	if (set_rtg_sched(task, is_rtg, grp_id, prio) < 0) {
		task->rtg_depth = depth;
		return false;
	}

	return true;
}

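/*
 * Rebind one thread slot of the frame group. Unless the old binding is
 * reused as-is, the reference held on old_task is dropped and the newly
 * looked-up task is returned with a reference taken via get_task_struct();
 * a NULL return means no task is attached to the slot any more.
 */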
struct task_struct *update_frame_thread(struct frame_info *frame_info,
					int old_prio, int prio, int pid,
					struct task_struct *old_task)
{
	struct task_struct *task = NULL;
	bool is_rt_task = (prio != NOT_RT_PRIO);
	int new_prio = prio;
	bool update_ret = false;

	if (pid > 0) {
		if (old_task && (pid == old_task->pid) && (old_prio == new_prio)) {
			if (is_rt_task && atomic_read(&frame_info->curr_rt_thread_num) <
			    atomic_read(&frame_info->max_rt_thread_num) &&
			    (atomic_read(&frame_info->frame_sched_state) == 1))
				atomic_inc(&frame_info->curr_rt_thread_num);
			trace_rtg_frame_sched(frame_info->rtg->id, "curr_rt_thread_num",
					      atomic_read(&frame_info->curr_rt_thread_num));
			return old_task;
		}
		rcu_read_lock();
		task = find_task_by_vpid(pid);
		if (task)
			get_task_struct(task);
		rcu_read_unlock();
	}
	trace_rtg_frame_sched(frame_info->rtg->id, "FRAME_SCHED_ENABLE",
			      atomic_read(&frame_info->frame_sched_state));
	if (atomic_read(&frame_info->frame_sched_state) == 1) {
		if (task && is_rt_task) {
			if (atomic_read(&frame_info->curr_rt_thread_num) <
			    atomic_read(&frame_info->max_rt_thread_num))
				atomic_inc(&frame_info->curr_rt_thread_num);
			else
				new_prio = NOT_RT_PRIO;
		}
		trace_rtg_frame_sched(frame_info->rtg->id, "curr_rt_thread_num",
				      atomic_read(&frame_info->curr_rt_thread_num));
		trace_rtg_frame_sched(frame_info->rtg->id, "rtg_rt_thread_num",
				      read_rtg_rt_thread_num());

		set_frame_rtg_thread(frame_info->rtg->id, old_task, false, NOT_RT_PRIO);
		update_ret = set_frame_rtg_thread(frame_info->rtg->id, task, true, new_prio);
	}
	if (old_task)
		put_task_struct(old_task);
	if (!update_ret)
		return NULL;

	return task;
}

void update_frame_thread_info(struct frame_info *frame_info,
			      struct frame_thread_info *frame_thread_info)
{
	int i;
	int old_prio;
	int prio;
	int thread_num;
	int real_thread;

	if (!frame_info || !frame_thread_info ||
		frame_thread_info->thread_num < 0)
		return;

	prio = frame_thread_info->prio;
	thread_num = frame_thread_info->thread_num;
	if (thread_num > MAX_TID_NUM)
		thread_num = MAX_TID_NUM;

	/* reset curr_rt_thread_num */
	atomic_set(&frame_info->curr_rt_thread_num, 0);
	mutex_lock(&frame_info->lock);
	old_prio = frame_info->prio;
	real_thread = 0;
	for (i = 0; i < thread_num; i++) {
		atomic_set(&frame_info->thread_prio[i], 0);
		frame_info->thread[i] = update_frame_thread(frame_info, old_prio, prio,
							    frame_thread_info->thread[i],
							    frame_info->thread[i]);
		if (frame_info->thread[i] && (frame_thread_info->thread[i] > 0))
			real_thread++;
	}
	frame_info->prio = prio;
	frame_info->thread_num = real_thread;
	mutex_unlock(&frame_info->lock);
}

static void do_set_frame_sched_state(struct frame_info *frame_info,
				     struct task_struct *task,
				     bool enable, int prio)
{
	int new_prio = prio;
	bool is_rt_task = (prio != NOT_RT_PRIO);

	if (enable && is_rt_task) {
		if (atomic_read(&frame_info->curr_rt_thread_num) <
		    atomic_read(&frame_info->max_rt_thread_num))
			atomic_inc(&frame_info->curr_rt_thread_num);
		else
			new_prio = NOT_RT_PRIO;
	}
	trace_rtg_frame_sched(frame_info->rtg->id, "curr_rt_thread_num",
			      atomic_read(&frame_info->curr_rt_thread_num));
	trace_rtg_frame_sched(frame_info->rtg->id, "rtg_rt_thread_num",
			      read_rtg_rt_thread_num());
	set_frame_rtg_thread(frame_info->rtg->id, task, enable, new_prio);
}

void set_frame_sched_state(struct frame_info *frame_info, bool enable)
{
	atomic_t *frame_sched_state = NULL;
	int prio;
	int i;

	if (!frame_info || !frame_info->rtg)
		return;

	frame_sched_state = &(frame_info->frame_sched_state);
	if (enable) {
		if (atomic_read(frame_sched_state) == 1)
			return;
		atomic_set(frame_sched_state, 1);
		trace_rtg_frame_sched(frame_info->rtg->id, "FRAME_SCHED_ENABLE", 1);

		frame_info->prev_fake_load_util = 0;
		frame_info->prev_frame_load_util = 0;
		frame_info->frame_vload = 0;
		frame_info_rtg_load(frame_info)->curr_window_load = 0;
	} else {
		if (atomic_read(frame_sched_state) == 0)
			return;
		atomic_set(frame_sched_state, 0);
		trace_rtg_frame_sched(frame_info->rtg->id, "FRAME_SCHED_ENABLE", 0);

		(void)sched_set_group_normalized_util(frame_info->rtg->id,
						      0, RTG_FREQ_NORMAL_UPDATE);
		trace_rtg_frame_sched(frame_info->rtg->id, "preferred_cluster",
			INVALID_PREFERRED_CLUSTER);
		frame_info->status = FRAME_END;
	}

	/* reset curr_rt_thread_num */
	atomic_set(&frame_info->curr_rt_thread_num, 0);
	mutex_lock(&frame_info->lock);
	for (i = 0; i < MAX_TID_NUM; i++) {
		if (frame_info->thread[i]) {
			prio = atomic_read(&frame_info->thread_prio[i]);
			do_set_frame_sched_state(frame_info, frame_info->thread[i],
						 enable, prio);
		}
	}
	mutex_unlock(&frame_info->lock);

	trace_rtg_frame_sched(frame_info->rtg->id, "FRAME_STATUS",
			      frame_info->status);
	trace_rtg_frame_sched(frame_info->rtg->id, "frame_status",
			      frame_info->status);
}

static inline bool check_frame_util_invalid(const struct frame_info *frame_info,
	u64 timeline)
{
	return ((frame_info_rtg(frame_info)->util_invalid_interval <= timeline) &&
		(frame_info_rtg_load(frame_info)->curr_window_exec * FRAME_UTIL_INVALID_FACTOR
		 <= timeline));
}

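/*
 * Both helpers below scale the previous window load to a utilisation value
 * (load << SCHED_CAPACITY_SHIFT / time) and clamp it into
 * [prev_min_util, prev_max_util]. The "fake" variant divides by at least
 * one full frame_time; the real one divides by frame_time and saturates
 * at FRAME_MAX_LOAD.
 */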
static u64 calc_prev_fake_load_util(const struct frame_info *frame_info)
{
	u64 prev_frame_load = frame_info->prev_frame_load;
	u64 prev_frame_time = max_t(unsigned long, frame_info->prev_frame_time,
		frame_info->frame_time);
	u64 frame_util = 0;

	if (prev_frame_time > 0)
		frame_util = div_u64((prev_frame_load << SCHED_CAPACITY_SHIFT),
			prev_frame_time);
	frame_util = clamp_t(unsigned long, frame_util,
		frame_info->prev_min_util,
		frame_info->prev_max_util);

	return frame_util;
}

static u64 calc_prev_frame_load_util(const struct frame_info *frame_info)
{
	u64 prev_frame_load = frame_info->prev_frame_load;
	u64 frame_time = frame_info->frame_time;
	u64 frame_util = 0;

	if (prev_frame_load >= frame_time)
		frame_util = FRAME_MAX_LOAD;
	else
		frame_util = div_u64((prev_frame_load << SCHED_CAPACITY_SHIFT),
			frame_info->frame_time);
	frame_util = clamp_t(unsigned long, frame_util,
		frame_info->prev_min_util,
		frame_info->prev_max_util);

	return frame_util;
}

/* last frame load tracking */
static void update_frame_prev_load(struct frame_info *frame_info, bool fake)
{
	/* last frame load tracking */
	frame_info->prev_frame_exec =
		frame_info_rtg_load(frame_info)->prev_window_exec;
	frame_info->prev_frame_time =
		frame_info_rtg(frame_info)->prev_window_time;
	frame_info->prev_frame_load =
		frame_info_rtg_load(frame_info)->prev_window_load;

	if (fake)
		frame_info->prev_fake_load_util =
			calc_prev_fake_load_util(frame_info);
	else
		frame_info->prev_frame_load_util =
			calc_prev_frame_load_util(frame_info);
}

static void do_frame_end(struct frame_info *frame_info, bool fake)
{
	unsigned long prev_util;
	int id = frame_info->rtg->id;

	frame_info->status = FRAME_END;
	trace_rtg_frame_sched(id, "frame_status", frame_info->status);

	/* last frame load tracking */
	update_frame_prev_load(frame_info, fake);

	/* reset frame_info */
	frame_info->frame_vload = 0;

	/* reset frame_min_util */
	frame_info->frame_min_util = 0;

	if (fake)
		prev_util = frame_info->prev_fake_load_util;
	else
		prev_util = frame_info->prev_frame_load_util;

	frame_info->frame_util = clamp_t(unsigned long, prev_util,
		frame_info->frame_min_util,
		frame_info->frame_max_util);

	trace_rtg_frame_sched(id, "frame_last_task_time",
		frame_info->prev_frame_exec);
	trace_rtg_frame_sched(id, "frame_last_time", frame_info->prev_frame_time);
	trace_rtg_frame_sched(id, "frame_last_load", frame_info->prev_frame_load);
	trace_rtg_frame_sched(id, "frame_last_load_util",
		frame_info->prev_frame_load_util);
	trace_rtg_frame_sched(id, "frame_util", frame_info->frame_util);
	trace_rtg_frame_sched(id, "frame_vload", frame_info->frame_vload);
}

/*
 * frame_load: calculate frame load using exec util
 */
static inline u64 calc_frame_exec(const struct frame_info *frame_info)
{
	if (frame_info->frame_time > 0)
		return div_u64((frame_info_rtg_load(frame_info)->curr_window_exec <<
			SCHED_CAPACITY_SHIFT), frame_info->frame_time);
	else
		return 0;
}

/*
 * real_util:
 * max(last_util, virtual_util, boost_util, phase_util, frame_min_util)
 */
static u64 calc_frame_util(const struct frame_info *frame_info, bool fake)
{
	unsigned long load_util;

	if (fake)
		load_util = frame_info->prev_fake_load_util;
	else
		load_util = frame_info->prev_frame_load_util;

	load_util = max_t(unsigned long, load_util, frame_info->frame_vload);
	load_util = clamp_t(unsigned long, load_util,
		frame_info->frame_min_util,
		frame_info->frame_max_util);

	return load_util;
}

/*
 * frame_vload [0~1024]
 * vtime: now - timestamp
 * max_time: frame_info->frame_time + vload_margin
 * load = F(vtime)
 *      = vtime ^ 2 - vtime * max_time + FRAME_MAX_VLOAD * vtime / max_time
 *      = vtime * (vtime + FRAME_MAX_VLOAD / max_time - max_time)
 * the curve runs from (0, 0) to (max_time, FRAME_MAX_VLOAD)
 */
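/*
 * Sanity check of the curve above (symbolic, no particular constants
 * assumed): at vtime == max_time the load is
 * max_time * (FRAME_MAX_VLOAD / max_time) == FRAME_MAX_VLOAD, and for
 * small vtime where vtime + FRAME_MAX_VLOAD / max_time <= max_time the
 * result is clipped to 0, matching the "factor <= max_time" test below.
 */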
static u64 calc_frame_vload(const struct frame_info *frame_info, u64 timeline)
{
	u64 vload;
	int vtime = div_u64(timeline, NSEC_PER_MSEC);
	int max_time = frame_info->max_vload_time;
	int factor;

	if ((max_time <= 0) || (vtime > max_time))
		return FRAME_MAX_VLOAD;

	factor = vtime + FRAME_MAX_VLOAD / max_time;
	/* the margin may be negative */
	if ((vtime <= 0) || (factor <= max_time))
		return 0;

	vload = (u64)vtime * (u64)(factor - max_time);

	return vload;
}

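/*
 * Per-tick state machine:
 *  - FRAME_END/FRAME_INVALID: once timeline exceeds frame_time, fake a
 *    frame end and roll the window over; otherwise derive the vload from
 *    the exec time of the current window.
 *  - FRAME_START: track the time-based vload unless the frame util is
 *    judged invalid, in which case the frame is ended early.
 */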
static int update_frame_info_tick_inner(int id, struct frame_info *frame_info,
	u64 timeline)
{
	switch (frame_info->status) {
	case FRAME_INVALID:
	case FRAME_END:
		if (timeline >= frame_info->frame_time) {
			/*
			 * fake a FRAME_END here to roll over the frame window.
			 */
			sched_set_group_window_rollover(id);
			do_frame_end(frame_info, true);
		} else {
			frame_info->frame_vload = calc_frame_exec(frame_info);
			frame_info->frame_util =
				calc_frame_util(frame_info, true);
		}

		/* when not in boost, start tick timer */
		break;
	case FRAME_START:
		/* check whether frame_util is invalid */
		if (!check_frame_util_invalid(frame_info, timeline)) {
			/* frame_vload statistic */
			frame_info->frame_vload = calc_frame_vload(frame_info, timeline);
			/* frame_util statistic */
			frame_info->frame_util =
				calc_frame_util(frame_info, false);
		} else {
			frame_info->status = FRAME_INVALID;
			trace_rtg_frame_sched(id, "FRAME_STATUS",
				frame_info->status);
			trace_rtg_frame_sched(id, "frame_status",
				frame_info->status);

			/*
			 * trigger a FRAME_END to roll over the frame window;
			 * FRAME_INVALID is treated as FRAME_END.
			 */
			sched_set_group_window_rollover(id);
			do_frame_end(frame_info, false);
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static inline struct frame_info *rtg_frame_info_inner(
	const struct related_thread_group *grp)
{
	return (struct frame_info *)grp->private_data;
}

static inline void frame_boost(struct frame_info *frame_info)
{
	if (frame_info->frame_util < frame_info->frame_boost_min_util)
		frame_info->frame_util = frame_info->frame_boost_min_util;
}

/*
 * Update cpufreq and task placement while a frame task is running (at tick)
 * and on migration.
 */
static void update_frame_info_tick(struct related_thread_group *grp)
{
	u64 window_start;
	u64 wallclock;
	u64 timeline;
	struct frame_info *frame_info = NULL;
	int id = grp->id;

	rcu_read_lock();
	frame_info = rtg_frame_info_inner(grp);
	window_start = grp->window_start;
	rcu_read_unlock();
	if (unlikely(!frame_info))
		return;

	if (atomic_read(&frame_info->frame_sched_state) == 0)
		return;
	trace_rtg_frame_sched(id, "frame_status", frame_info->status);

	wallclock = ktime_get_ns();
	timeline = wallclock - window_start;

	trace_rtg_frame_sched(id, "update_curr_pid", current->pid);
	trace_rtg_frame_sched(id, "frame_timeline", div_u64(timeline, NSEC_PER_MSEC));

	if (update_frame_info_tick_inner(grp->id, frame_info, timeline) == -EINVAL)
		return;

	frame_boost(frame_info);
	trace_rtg_frame_sched(id, "frame_vload", frame_info->frame_vload);
	trace_rtg_frame_sched(id, "frame_util", frame_info->frame_util);

	sched_set_group_normalized_util(grp->id,
		frame_info->frame_util, RTG_FREQ_NORMAL_UPDATE);

	if (grp->preferred_cluster)
		trace_rtg_frame_sched(id, "preferred_cluster",
			grp->preferred_cluster->id);
}

const struct rtg_class frame_rtg_class = {
	.sched_update_rtg_tick = update_frame_info_tick,
};

int set_frame_margin(struct frame_info *frame_info, int margin)
{
	int id;

	if ((margin < MIN_VLOAD_MARGIN) || (margin > MAX_VLOAD_MARGIN)) {
		pr_err("[FRAME_RTG]: %s invalid MARGIN value\n",
			__func__);
		return -EINVAL;
	}

	if (!frame_info || !frame_info->rtg)
		return -EINVAL;

	frame_info->vload_margin = margin;
	frame_info->max_vload_time =
		div_u64(frame_info->frame_time, NSEC_PER_MSEC) +
		frame_info->vload_margin;
	id = frame_info->rtg->id;
	trace_rtg_frame_sched(id, "FRAME_MARGIN", -margin);
	trace_rtg_frame_sched(id, "FRAME_MAX_TIME", frame_info->max_vload_time);

	return 0;
}

static void set_frame_start(struct frame_info *frame_info)
{
	int id = frame_info->rtg->id;

	if (likely(frame_info->status == FRAME_START)) {
		/*
		 * START -> START -> ......
		 * FRAME_START is both
		 *	the end of the last frame and
		 *	the start of the current frame
		 */
		update_frame_prev_load(frame_info, false);
	} else if ((frame_info->status == FRAME_END) ||
		(frame_info->status == FRAME_INVALID)) {
		/*
		 * START -> END -> [START]
		 * FRAME_START is only the start of the current frame;
		 * we shouldn't track the last rtg window
		 * [FRAME_END, FRAME_START] because it is not a valid
		 * frame window.
		 */
		update_frame_prev_load(frame_info, true);
		frame_info->status = FRAME_START;
	}
	trace_rtg_frame_sched(id, "FRAME_STATUS", frame_info->status);
	trace_rtg_frame_sched(id, "frame_last_task_time",
		frame_info->prev_frame_exec);
	trace_rtg_frame_sched(id, "frame_last_time", frame_info->prev_frame_time);
	trace_rtg_frame_sched(id, "frame_last_load", frame_info->prev_frame_load);
	trace_rtg_frame_sched(id, "frame_last_load_util",
		frame_info->prev_frame_load_util);

	/* new_frame_start */
	if (!frame_info->margin_imme) {
		frame_info->frame_vload = 0;
		frame_info->frame_util = clamp_t(unsigned long,
			frame_info->prev_frame_load_util,
			frame_info->frame_min_util,
			frame_info->frame_max_util);
	} else {
		frame_info->frame_vload = calc_frame_vload(frame_info, 0);
		frame_info->frame_util = calc_frame_util(frame_info, false);
	}

	trace_rtg_frame_sched(id, "frame_vload", frame_info->frame_vload);
}

static void set_frame_end(struct frame_info *frame_info)
{
	trace_rtg_frame_sched(frame_info->rtg->id, "FRAME_STATUS", FRAME_END);
	do_frame_end(frame_info, false);
}

static int update_frame_timestamp(unsigned long status,
	struct frame_info *frame_info, struct related_thread_group *grp)
{
	int id = frame_info->rtg->id;

	/* SCHED_FRAME timestamp */
	switch (status) {
	case FRAME_START:
		/* collect frame_info when the frame_end timestamp comes */
		set_frame_start(frame_info);
		break;
	case FRAME_END:
		/* FRAME_END should set status and update freq only once */
		if (unlikely(frame_info->status == FRAME_END))
			return 0;
		set_frame_end(frame_info);
		break;
	default:
		pr_err("[FRAME_RTG]: %s invalid timestamp(status)\n",
			__func__);
		return -EINVAL;
	}

	frame_boost(frame_info);
	trace_rtg_frame_sched(id, "frame_util", frame_info->frame_util);

	/* force a cpufreq update when the frame stops */
	sched_set_group_normalized_util(grp->id,
		frame_info->frame_util, RTG_FREQ_FORCE_UPDATE);
	if (grp->preferred_cluster)
		trace_rtg_frame_sched(id, "preferred_cluster",
			grp->preferred_cluster->id);

	return 0;
}

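/*
 * The status word carries a FRAME_START/FRAME_END timestamp plus optional
 * flag bits (FRAME_TIMESTAMP_SKIP_START/_END, FRAME_USE_MARGIN_IMME) that
 * are consumed and cleared here before the remaining value is handed to
 * update_frame_timestamp().
 */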
static int set_frame_status(struct frame_info *frame_info, unsigned long status)
{
	struct related_thread_group *grp = NULL;
	int id;

	if (!frame_info)
		return -EINVAL;

	grp = frame_info->rtg;
	if (unlikely(!grp))
		return -EINVAL;

	if (atomic_read(&frame_info->frame_sched_state) == 0)
		return -EINVAL;

	if (!(status & FRAME_SETTIME) ||
		(status == (unsigned long)FRAME_SETTIME_PARAM)) {
		pr_err("[FRAME_RTG]: %s invalid timestamp(status)\n",
			__func__);
		return -EINVAL;
	}

	if (status & FRAME_TIMESTAMP_SKIP_START) {
		frame_info->timestamp_skipped = true;
		status &= ~FRAME_TIMESTAMP_SKIP_START;
	} else if (status & FRAME_TIMESTAMP_SKIP_END) {
		frame_info->timestamp_skipped = false;
		status &= ~FRAME_TIMESTAMP_SKIP_END;
	} else if (frame_info->timestamp_skipped) {
		/*
		 * skip the following timestamps until
		 * timestamp_skipped is reset
		 */
		return 0;
	}
	id = grp->id;
	trace_rtg_frame_sched(id, "FRAME_TIMESTAMP_SKIPPED",
		frame_info->timestamp_skipped);
	trace_rtg_frame_sched(id, "FRAME_MAX_UTIL", frame_info->frame_max_util);

	if (status & FRAME_USE_MARGIN_IMME) {
		frame_info->margin_imme = true;
		status &= ~FRAME_USE_MARGIN_IMME;
	} else {
		frame_info->margin_imme = false;
	}
	trace_rtg_frame_sched(id, "FRAME_MARGIN_IMME", frame_info->margin_imme);
	trace_rtg_frame_sched(id, "FRAME_TIMESTAMP", status);

	return update_frame_timestamp(status, frame_info, grp);
}

int set_frame_timestamp(struct frame_info *frame_info, unsigned long timestamp)
{
	int ret;

	if (!frame_info || !frame_info->rtg)
		return -EINVAL;

	if (atomic_read(&frame_info->frame_sched_state) == 0)
		return -EINVAL;

	ret = sched_set_group_window_rollover(frame_info->rtg->id);
	if (!ret)
		ret = set_frame_status(frame_info, timestamp);

	return ret;
}

int set_frame_min_util(struct frame_info *frame_info, int min_util, bool is_boost)
{
	int id;

	if (unlikely((min_util < 0) || (min_util > SCHED_CAPACITY_SCALE))) {
		pr_err("[FRAME_RTG]: %s invalid min_util value\n",
			__func__);
		return -EINVAL;
	}

	if (!frame_info || !frame_info->rtg)
		return -EINVAL;

	id = frame_info->rtg->id;
	if (is_boost) {
		frame_info->frame_boost_min_util = min_util;
		trace_rtg_frame_sched(id, "FRAME_BOOST_MIN_UTIL", min_util);
	} else {
		frame_info->frame_min_util = min_util;

		frame_info->frame_util = calc_frame_util(frame_info, false);
		trace_rtg_frame_sched(id, "frame_util", frame_info->frame_util);
		sched_set_group_normalized_util(id,
			frame_info->frame_util, RTG_FREQ_FORCE_UPDATE);
	}

	return 0;
}

int set_frame_max_util(struct frame_info *frame_info, int max_util)
{
	int id;

	if ((max_util < 0) || (max_util > SCHED_CAPACITY_SCALE)) {
		pr_err("[FRAME_RTG]: %s invalid max_util value\n",
			__func__);
		return -EINVAL;
	}

	if (!frame_info || !frame_info->rtg)
		return -EINVAL;

	frame_info->frame_max_util = max_util;
	id = frame_info->rtg->id;
	trace_rtg_frame_sched(id, "FRAME_MAX_UTIL", frame_info->frame_max_util);

	return 0;
}

struct frame_info *lookup_frame_info_by_grp_id(int grp_id)
{
	if (grp_id >= (MULTI_FRAME_ID + MULTI_FRAME_NUM) || (grp_id <= 0))
		return NULL;
	if (grp_id >= MULTI_FRAME_ID) {
		read_lock(&g_id_manager.lock);
		if (!test_bit(grp_id - MULTI_FRAME_ID, g_id_manager.id_map)) {
			read_unlock(&g_id_manager.lock);
			return NULL;
		}
		read_unlock(&g_id_manager.lock);
		return rtg_frame_info(grp_id);
	} else
		return rtg_frame_info(grp_id);
}

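/*
 * One-time initialisation of a frame_info slot: fill in the default rate,
 * margins and util bounds, then publish the slot through grp->private_data
 * and hook frame_rtg_class into the group so that update_frame_info_tick()
 * is driven via the group's sched_update_rtg_tick hook.
 */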
static int _init_frame_info(struct frame_info *frame_info, int id)
{
	struct related_thread_group *grp = NULL;
	unsigned long flags;

	memset(frame_info, 0, sizeof(struct frame_info));
	mutex_init(&frame_info->lock);

	mutex_lock(&frame_info->lock);
	frame_info->frame_rate = DEFAULT_FRAME_RATE;
	frame_info->frame_time = div_u64(NSEC_PER_SEC, frame_info->frame_rate);
	frame_info->thread_num = 0;
	frame_info->prio = NOT_RT_PRIO;
	atomic_set(&(frame_info->curr_rt_thread_num), 0);
	atomic_set(&(frame_info->frame_sched_state), 0);
	frame_info->vload_margin = DEFAULT_VLOAD_MARGIN;
	frame_info->max_vload_time =
		div_u64(frame_info->frame_time, NSEC_PER_MSEC) +
		frame_info->vload_margin;
	frame_info->frame_min_util = FRAME_DEFAULT_MIN_UTIL;
	frame_info->frame_max_util = FRAME_DEFAULT_MAX_UTIL;
	frame_info->prev_min_util = FRAME_DEFAULT_MIN_PREV_UTIL;
	frame_info->prev_max_util = FRAME_DEFAULT_MAX_PREV_UTIL;
	frame_info->margin_imme = false;
	frame_info->timestamp_skipped = false;
	frame_info->status = FRAME_END;

	grp = frame_rtg(id);
	if (unlikely(!grp)) {
		mutex_unlock(&frame_info->lock);
		return -EINVAL;
	}

	raw_spin_lock_irqsave(&grp->lock, flags);
	grp->private_data = frame_info;
	grp->rtg_class = &frame_rtg_class;
	raw_spin_unlock_irqrestore(&grp->lock, flags);

	frame_info->rtg = grp;
	mutex_unlock(&frame_info->lock);

	return 0;
}

static int __init init_frame_info(void)
{
	int ret = 0;
	int id;

	for (id = MULTI_FRAME_ID; id < (MULTI_FRAME_ID + MULTI_FRAME_NUM); id++) {
		if (ret != 0)
			break;
		ret = _init_frame_info(rtg_multi_frame_info(id), id);
	}

	return ret;
}
late_initcall(init_frame_info);