// SPDX-License-Identifier: GPL-2.0
/*
 * Frame-based load tracking for rt_frame and RTG
 *
 * Copyright (c) 2022-2023 Huawei Technologies Co., Ltd.
 */

#include "frame_rtg.h"
#include "rtg.h"

#include <linux/sched.h>
#include <trace/events/rtg.h>
#include <../kernel/sched/sched.h>
#include <uapi/linux/sched/types.h>

static struct multi_frame_id_manager g_id_manager = {
	.id_map = {0},
	.offset = 0,
	.lock = __RW_LOCK_UNLOCKED(g_id_manager.lock)
};

static struct frame_info g_multi_frame_info[MULTI_FRAME_NUM];

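/*
 * A task counts as an RTG RT task when it already runs at an RT priority
 * (prio < MAX_RT_PRIO) and was promoted by this module, which marks it
 * with rtg_depth == STATIC_RTG_DEPTH.
 */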
static bool is_rtg_rt_task(struct task_struct *task)
{
	bool ret = false;

	if (!task)
		return ret;

	ret = ((task->prio < MAX_RT_PRIO) &&
	       (task->rtg_depth == STATIC_RTG_DEPTH));

	return ret;
}

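/*
 * With CONFIG_SCHED_RTG_RT_THREAD_LIMIT enabled, the number of threads
 * promoted to RT across all frame groups is capped at
 * RTG_MAX_RT_THREAD_NUM. g_rtg_rt_thread_num caches the current count;
 * once the cached value reaches the cap it is re-synchronized by walking
 * every frame group under its lock.
 */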
#ifdef CONFIG_SCHED_RTG_RT_THREAD_LIMIT
static atomic_t g_rtg_rt_thread_num = ATOMIC_INIT(0);

static unsigned int _get_rtg_rt_thread_num(struct related_thread_group *grp)
{
	unsigned int rtg_rt_thread_num = 0;
	struct task_struct *p = NULL;

	if (list_empty(&grp->tasks))
		goto out;

	list_for_each_entry(p, &grp->tasks, grp_list) {
		if (is_rtg_rt_task(p))
			++rtg_rt_thread_num;
	}

out:
	return rtg_rt_thread_num;
}

static unsigned int get_rtg_rt_thread_num(void)
{
	struct related_thread_group *grp = NULL;
	unsigned int total_rtg_rt_thread_num = 0;
	unsigned long flag;
	unsigned int i;

	for (i = MULTI_FRAME_ID; i < MULTI_FRAME_ID + MULTI_FRAME_NUM; i++) {
		grp = lookup_related_thread_group(i);
		if (grp == NULL)
			continue;
		raw_spin_lock_irqsave(&grp->lock, flag);
		total_rtg_rt_thread_num += _get_rtg_rt_thread_num(grp);
		raw_spin_unlock_irqrestore(&grp->lock, flag);
	}

	return total_rtg_rt_thread_num;
}

static void inc_rtg_rt_thread_num(void)
{
	atomic_inc(&g_rtg_rt_thread_num);
}

static void dec_rtg_rt_thread_num(void)
{
	atomic_dec_if_positive(&g_rtg_rt_thread_num);
}

static int test_and_read_rtg_rt_thread_num(void)
{
	if (atomic_read(&g_rtg_rt_thread_num) >= RTG_MAX_RT_THREAD_NUM)
		atomic_set(&g_rtg_rt_thread_num, get_rtg_rt_thread_num());

	return atomic_read(&g_rtg_rt_thread_num);
}

int read_rtg_rt_thread_num(void)
{
	return atomic_read(&g_rtg_rt_thread_num);
}
#else
static inline void inc_rtg_rt_thread_num(void) { }
static inline void dec_rtg_rt_thread_num(void) { }
static inline int test_and_read_rtg_rt_thread_num(void)
{
	return 0;
}
#endif

bool is_frame_rtg(int id)
{
	return (id >= MULTI_FRAME_ID) &&
		(id < (MULTI_FRAME_ID + MULTI_FRAME_NUM));
}

static struct related_thread_group *frame_rtg(int id)
{
	if (!is_frame_rtg(id))
		return NULL;

	return lookup_related_thread_group(id);
}

struct frame_info *rtg_frame_info(int id)
{
	if (!is_frame_rtg(id))
		return NULL;

	return rtg_active_multi_frame_info(id);
}

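/*
 * Allocate a frame RTG id from the bitmap, scanning forward from the
 * most recently allocated slot and wrapping around to the start once,
 * so slots are handed out roughly round-robin.
 */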
static int alloc_rtg_id(void)
{
	unsigned int id_offset;
	int id;

	write_lock(&g_id_manager.lock);
	id_offset = find_next_zero_bit(g_id_manager.id_map, MULTI_FRAME_NUM,
				       g_id_manager.offset);
	if (id_offset >= MULTI_FRAME_NUM) {
		id_offset = find_first_zero_bit(g_id_manager.id_map,
						MULTI_FRAME_NUM);
		if (id_offset >= MULTI_FRAME_NUM) {
			write_unlock(&g_id_manager.lock);
			return -EINVAL;
		}
	}

	set_bit(id_offset, g_id_manager.id_map);
	g_id_manager.offset = id_offset;
	id = id_offset + MULTI_FRAME_ID;
	write_unlock(&g_id_manager.lock);
	pr_debug("[FRAME_RTG] %s id_offset=%u, id=%d\n", __func__, id_offset, id);

	return id;
}

static void free_rtg_id(int id)
{
	unsigned int id_offset = id - MULTI_FRAME_ID;

	if (id_offset >= MULTI_FRAME_NUM) {
		pr_err("[FRAME_RTG] %s id_offset is invalid, id=%d, id_offset=%u.\n",
		       __func__, id, id_offset);
		return;
	}

	pr_debug("[FRAME_RTG] %s id=%d id_offset=%u\n", __func__, id, id_offset);
	write_lock(&g_id_manager.lock);
	clear_bit(id_offset, g_id_manager.id_map);
	write_unlock(&g_id_manager.lock);
}

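/*
 * set_frame_rate() derives the frame period from the target frame rate:
 * frame_time = NSEC_PER_SEC / rate, and max_vload_time is that period in
 * milliseconds plus the (possibly negative) vload margin. As an
 * illustration, at 60 fps the period is roughly 16.7 ms, so with a
 * margin of 0 the max_vload_time would be 16; the exact bounds depend on
 * MIN/MAX_FRAME_RATE and the configured margin.
 */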
int set_frame_rate(struct frame_info *frame_info, int rate)
{
	int id;

	if ((rate < MIN_FRAME_RATE) || (rate > MAX_FRAME_RATE)) {
		pr_err("[FRAME_RTG]: %s invalid QOS(rate) value\n",
			__func__);
		return -EINVAL;
	}

	if (!frame_info || !frame_info->rtg)
		return -EINVAL;

	frame_info->frame_rate = (unsigned int)rate;
	frame_info->frame_time = div_u64(NSEC_PER_SEC, rate);
	frame_info->max_vload_time =
		div_u64(frame_info->frame_time, NSEC_PER_MSEC) +
		frame_info->vload_margin;
	id = frame_info->rtg->id;
	trace_rtg_frame_sched(id, "FRAME_QOS", rate);
	trace_rtg_frame_sched(id, "FRAME_MAX_TIME", frame_info->max_vload_time);

	return 0;
}

int alloc_multi_frame_info(void)
{
	struct frame_info *frame_info = NULL;
	int id;
	int i;

	id = alloc_rtg_id();
	if (id < 0)
		return id;

	frame_info = rtg_frame_info(id);
	if (!frame_info) {
		free_rtg_id(id);
		return -EINVAL;
	}

	set_frame_rate(frame_info, DEFAULT_FRAME_RATE);
	atomic_set(&frame_info->curr_rt_thread_num, 0);
	atomic_set(&frame_info->max_rt_thread_num, DEFAULT_MAX_RT_THREAD);
	for (i = 0; i < MAX_TID_NUM; i++)
		atomic_set(&frame_info->thread_prio[i], 0);

	return id;
}

void release_multi_frame_info(int id)
{
	if ((id < MULTI_FRAME_ID) || (id >= MULTI_FRAME_ID + MULTI_FRAME_NUM)) {
		pr_err("[FRAME_RTG] %s frame(id=%d) not found.\n", __func__, id);
		return;
	}

	read_lock(&g_id_manager.lock);
	if (!test_bit(id - MULTI_FRAME_ID, g_id_manager.id_map)) {
		read_unlock(&g_id_manager.lock);
		return;
	}
	read_unlock(&g_id_manager.lock);

	pr_debug("[FRAME_RTG] %s release frame(id=%d).\n", __func__, id);
	free_rtg_id(id);
}

void clear_multi_frame_info(void)
{
	write_lock(&g_id_manager.lock);
	bitmap_zero(g_id_manager.id_map, MULTI_FRAME_NUM);
	g_id_manager.offset = 0;
	write_unlock(&g_id_manager.lock);
}

struct frame_info *rtg_active_multi_frame_info(int id)
{
	struct frame_info *frame_info = NULL;

	if ((id < MULTI_FRAME_ID) || (id >= MULTI_FRAME_ID + MULTI_FRAME_NUM))
		return NULL;

	read_lock(&g_id_manager.lock);
	if (test_bit(id - MULTI_FRAME_ID, g_id_manager.id_map))
		frame_info = &g_multi_frame_info[id - MULTI_FRAME_ID];
	read_unlock(&g_id_manager.lock);
	if (!frame_info)
		pr_debug("[FRAME_RTG] %s frame %d has been released\n",
			 __func__, id);

	return frame_info;
}

struct frame_info *rtg_multi_frame_info(int id)
{
	if ((id < MULTI_FRAME_ID) || (id >= MULTI_FRAME_ID + MULTI_FRAME_NUM))
		return NULL;

	return &g_multi_frame_info[id - MULTI_FRAME_ID];
}

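/*
 * Switch a single frame thread between CFS and RT. A thread is promoted
 * to SCHED_FIFO only while both the per-frame limit (max_rt_thread_num)
 * and the global RT thread limit leave room; otherwise it stays on (or
 * falls back to) SCHED_NORMAL. The global counter is adjusted only after
 * sched_setscheduler_nocheck() succeeds.
 */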
static void do_update_frame_task_prio(struct frame_info *frame_info,
				      struct task_struct *task, int prio)
{
	int policy = SCHED_NORMAL;
	struct sched_param sp = {0};
	bool is_rt_task = (prio != NOT_RT_PRIO);
	bool need_dec_flag = false;
	bool need_inc_flag = false;
	int err;

	trace_rtg_frame_sched(frame_info->rtg->id, "rtg_rt_thread_num",
			      read_rtg_rt_thread_num());
	/* change policy to RT */
	if (is_rt_task && (atomic_read(&frame_info->curr_rt_thread_num) <
			   atomic_read(&frame_info->max_rt_thread_num))) {
		/* change policy from CFS to RT */
		if (!is_rtg_rt_task(task)) {
			if (test_and_read_rtg_rt_thread_num() >= RTG_MAX_RT_THREAD_NUM)
				goto out;
			need_inc_flag = true;
		}
		/* change RT priority */
		policy = SCHED_FIFO | SCHED_RESET_ON_FORK;
		sp.sched_priority = MAX_USER_RT_PRIO - 1 - prio;
		atomic_inc(&frame_info->curr_rt_thread_num);
	} else {
		/* change policy from RT to CFS */
		if (!is_rt_task && is_rtg_rt_task(task))
			need_dec_flag = true;
	}
out:
	trace_rtg_frame_sched(frame_info->rtg->id, "rtg_rt_thread_num",
			      read_rtg_rt_thread_num());
	trace_rtg_frame_sched(frame_info->rtg->id, "curr_rt_thread_num",
			      atomic_read(&frame_info->curr_rt_thread_num));
	err = sched_setscheduler_nocheck(task, policy, &sp);
	if (err == 0) {
		if (need_dec_flag)
			dec_rtg_rt_thread_num();
		else if (need_inc_flag)
			inc_rtg_rt_thread_num();
	}
}

int list_rtg_group(struct rtg_info *rs_data)
{
	int i;
	int num = 0;

	read_lock(&g_id_manager.lock);
	for (i = MULTI_FRAME_ID; i < MULTI_FRAME_ID + MULTI_FRAME_NUM; i++) {
		if (test_bit(i - MULTI_FRAME_ID, g_id_manager.id_map)) {
			rs_data->rtgs[num] = i;
			num++;
		}
	}
	read_unlock(&g_id_manager.lock);
	rs_data->rtg_num = num;

	return num;
}

int search_rtg(int pid)
{
	struct rtg_info grp_info;
	struct frame_info *frame_info = NULL;
	int i = 0;
	int j = 0;

	grp_info.rtg_num = 0;
	read_lock(&g_id_manager.lock);
	for (i = MULTI_FRAME_ID; i < MULTI_FRAME_ID + MULTI_FRAME_NUM; i++) {
		if (test_bit(i - MULTI_FRAME_ID, g_id_manager.id_map)) {
			grp_info.rtgs[grp_info.rtg_num] = i;
			grp_info.rtg_num++;
		}
	}
	read_unlock(&g_id_manager.lock);
	for (i = 0; i < grp_info.rtg_num; i++) {
		frame_info = lookup_frame_info_by_grp_id(grp_info.rtgs[i]);
		if (!frame_info) {
			pr_err("[FRAME_RTG] unexpected lookup failure for grp %d\n",
			       grp_info.rtgs[i]);
			return -EINVAL;
		}

		for (j = 0; j < frame_info->thread_num; j++) {
			if (frame_info->thread[j] && frame_info->thread[j]->pid == pid)
				return grp_info.rtgs[i];
		}
	}

	return 0;
}

static void update_frame_task_prio(struct frame_info *frame_info, int prio)
{
	int i;
	struct task_struct *thread = NULL;

	/* reset curr_rt_thread_num */
	atomic_set(&frame_info->curr_rt_thread_num, 0);

	for (i = 0; i < MAX_TID_NUM; i++) {
		thread = frame_info->thread[i];
		if (thread)
			do_update_frame_task_prio(frame_info, thread, prio);
	}
}

void set_frame_prio(struct frame_info *frame_info, int prio)
{
	if (!frame_info)
		return;

	mutex_lock(&frame_info->lock);
	if (frame_info->prio == prio)
		goto out;

	update_frame_task_prio(frame_info, prio);
	frame_info->prio = prio;
out:
	mutex_unlock(&frame_info->lock);
}

static int do_set_rtg_sched(struct task_struct *task, bool is_rtg,
			    int grp_id, int prio)
{
	int err;
	int policy = SCHED_NORMAL;
	int grpid = DEFAULT_RTG_GRP_ID;
	bool is_rt_task = (prio != NOT_RT_PRIO);
	struct sched_param sp = {0};

	if (is_rtg) {
		if (is_rt_task) {
			if (test_and_read_rtg_rt_thread_num() >= RTG_MAX_RT_THREAD_NUM)
				// rtg_rt_thread_num is unavailable, set policy to CFS
				goto skip_setpolicy;
			policy = SCHED_FIFO | SCHED_RESET_ON_FORK;
			sp.sched_priority = MAX_USER_RT_PRIO - 1 - prio;
		}
skip_setpolicy:
		grpid = grp_id;
	}
	err = sched_setscheduler_nocheck(task, policy, &sp);
	if (err < 0) {
		pr_err("[FRAME_RTG]: %s task:%d setscheduler err:%d\n",
				__func__, task->pid, err);
		return err;
	}
	err = sched_set_group_id(task, grpid);
	if (err < 0) {
		pr_err("[FRAME_RTG]: %s task:%d set_group_id err:%d\n",
				__func__, task->pid, err);
		if (is_rtg) {
			policy = SCHED_NORMAL;
			sp.sched_priority = 0;
			sched_setscheduler_nocheck(task, policy, &sp);
		}
	}
	if (err == 0) {
		if (is_rtg) {
			if (policy != SCHED_NORMAL)
				inc_rtg_rt_thread_num();
		} else {
			dec_rtg_rt_thread_num();
		}
	}

	return err;
}

static int set_rtg_sched(struct task_struct *task, bool is_rtg,
			 int grp_id, int prio)
{
	int err = -1;
	bool is_rt_task = (prio != NOT_RT_PRIO);

	if (!task)
		return err;

	if (is_rt_task && is_rtg && ((prio < 0) ||
		(prio > MAX_USER_RT_PRIO - 1)))
		return err;
	/*
	 * if CONFIG_HW_FUTEX_PI is set, task->prio and task->sched_class
	 * may be modified by rtmutex. So we use task->policy instead.
	 */
	if (is_rtg && (!fair_policy(task->policy) || (task->flags & PF_EXITING)))
		return err;

	if (in_interrupt()) {
		pr_err("[FRAME_RTG]: %s is in interrupt\n", __func__);
		return err;
	}

	return do_set_rtg_sched(task, is_rtg, grp_id, prio);
}

static bool set_frame_rtg_thread(int grp_id, struct task_struct *task,
				 bool is_rtg, int prio)
{
	int depth;

	if (!task)
		return false;
	depth = task->rtg_depth;
	if (is_rtg)
		task->rtg_depth = STATIC_RTG_DEPTH;
	else
		task->rtg_depth = 0;

	if (set_rtg_sched(task, is_rtg, grp_id, prio) < 0) {
		task->rtg_depth = depth;
		return false;
	}

	return true;
}

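/*
 * Bind the thread identified by pid to the frame group, replacing
 * old_task. If the pid and priority are unchanged the old binding is
 * kept and only the RT-slot accounting is refreshed. Returns the new
 * task with a reference held, or NULL if the update failed; the
 * reference on old_task is dropped once it has been replaced.
 */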
struct task_struct *update_frame_thread(struct frame_info *frame_info,
					int old_prio, int prio, int pid,
					struct task_struct *old_task)
{
	struct task_struct *task = NULL;
	bool is_rt_task = (prio != NOT_RT_PRIO);
	int new_prio = prio;
	bool update_ret = false;

	if (pid > 0) {
		if (old_task && (pid == old_task->pid) && (old_prio == new_prio)) {
			if (is_rt_task && atomic_read(&frame_info->curr_rt_thread_num) <
			    atomic_read(&frame_info->max_rt_thread_num) &&
			    (atomic_read(&frame_info->frame_sched_state) == 1))
				atomic_inc(&frame_info->curr_rt_thread_num);
			trace_rtg_frame_sched(frame_info->rtg->id, "curr_rt_thread_num",
					      atomic_read(&frame_info->curr_rt_thread_num));
			return old_task;
		}
		rcu_read_lock();
		task = find_task_by_vpid(pid);
		if (task)
			get_task_struct(task);
		rcu_read_unlock();
	}
	trace_rtg_frame_sched(frame_info->rtg->id, "FRAME_SCHED_ENABLE",
			      atomic_read(&frame_info->frame_sched_state));
	if (atomic_read(&frame_info->frame_sched_state) == 1) {
		if (task && is_rt_task) {
			if (atomic_read(&frame_info->curr_rt_thread_num) <
			    atomic_read(&frame_info->max_rt_thread_num))
				atomic_inc(&frame_info->curr_rt_thread_num);
			else
				new_prio = NOT_RT_PRIO;
		}
		trace_rtg_frame_sched(frame_info->rtg->id, "curr_rt_thread_num",
				      atomic_read(&frame_info->curr_rt_thread_num));
		trace_rtg_frame_sched(frame_info->rtg->id, "rtg_rt_thread_num",
				      read_rtg_rt_thread_num());

		set_frame_rtg_thread(frame_info->rtg->id, old_task, false, NOT_RT_PRIO);
		update_ret = set_frame_rtg_thread(frame_info->rtg->id, task, true, new_prio);
	}
	if (old_task)
		put_task_struct(old_task);
	if (!update_ret)
		return NULL;

	return task;
}

void update_frame_thread_info(struct frame_info *frame_info,
			      struct frame_thread_info *frame_thread_info)
{
	int i;
	int old_prio;
	int prio;
	int thread_num;
	int real_thread;

	if (!frame_info || !frame_thread_info ||
		frame_thread_info->thread_num < 0)
		return;

	prio = frame_thread_info->prio;
	thread_num = frame_thread_info->thread_num;
	if (thread_num > MAX_TID_NUM)
		thread_num = MAX_TID_NUM;

	/* reset curr_rt_thread_num */
	atomic_set(&frame_info->curr_rt_thread_num, 0);
	mutex_lock(&frame_info->lock);
	old_prio = frame_info->prio;
	real_thread = 0;
	for (i = 0; i < thread_num; i++) {
		atomic_set(&frame_info->thread_prio[i], 0);
		frame_info->thread[i] = update_frame_thread(frame_info, old_prio, prio,
							    frame_thread_info->thread[i],
							    frame_info->thread[i]);
		if (frame_info->thread[i] && (frame_thread_info->thread[i] > 0))
			real_thread++;
	}
	frame_info->prio = prio;
	frame_info->thread_num = real_thread;
	mutex_unlock(&frame_info->lock);
}

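/*
 * Apply (or clear) frame scheduling for one thread. When enabling an RT
 * request, the thread only keeps its RT priority if the per-frame RT
 * slot budget allows it; otherwise it is attached to the group as a
 * normal CFS task.
 */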
static void do_set_frame_sched_state(struct frame_info *frame_info,
				     struct task_struct *task,
				     bool enable, int prio)
{
	int new_prio = prio;
	bool is_rt_task = (prio != NOT_RT_PRIO);

	if (enable && is_rt_task) {
		if (atomic_read(&frame_info->curr_rt_thread_num) <
		    atomic_read(&frame_info->max_rt_thread_num))
			atomic_inc(&frame_info->curr_rt_thread_num);
		else
			new_prio = NOT_RT_PRIO;
	}
	trace_rtg_frame_sched(frame_info->rtg->id, "curr_rt_thread_num",
			      atomic_read(&frame_info->curr_rt_thread_num));
	trace_rtg_frame_sched(frame_info->rtg->id, "rtg_rt_thread_num",
			      read_rtg_rt_thread_num());
	set_frame_rtg_thread(frame_info->rtg->id, task, enable, new_prio);
}

void set_frame_sched_state(struct frame_info *frame_info, bool enable)
{
	atomic_t *frame_sched_state = NULL;
	int prio;
	int i;

	if (!frame_info || !frame_info->rtg)
		return;

	frame_sched_state = &(frame_info->frame_sched_state);
	if (enable) {
		if (atomic_read(frame_sched_state) == 1)
			return;
		atomic_set(frame_sched_state, 1);
		trace_rtg_frame_sched(frame_info->rtg->id, "FRAME_SCHED_ENABLE", 1);

		frame_info->prev_fake_load_util = 0;
		frame_info->prev_frame_load_util = 0;
		frame_info->frame_vload = 0;
		frame_info_rtg_load(frame_info)->curr_window_load = 0;
	} else {
		if (atomic_read(frame_sched_state) == 0)
			return;
		atomic_set(frame_sched_state, 0);
		trace_rtg_frame_sched(frame_info->rtg->id, "FRAME_SCHED_ENABLE", 0);

		(void)sched_set_group_normalized_util(frame_info->rtg->id,
						      0, RTG_FREQ_NORMAL_UPDATE);
		trace_rtg_frame_sched(frame_info->rtg->id, "preferred_cluster",
			INVALID_PREFERRED_CLUSTER);
		frame_info->status = FRAME_END;
	}

	/* reset curr_rt_thread_num */
	atomic_set(&frame_info->curr_rt_thread_num, 0);
	mutex_lock(&frame_info->lock);
	for (i = 0; i < MAX_TID_NUM; i++) {
		if (frame_info->thread[i]) {
			prio = atomic_read(&frame_info->thread_prio[i]);
			do_set_frame_sched_state(frame_info, frame_info->thread[i],
						 enable, prio);
		}
	}
	mutex_unlock(&frame_info->lock);

	trace_rtg_frame_sched(frame_info->rtg->id, "FRAME_STATUS",
			      frame_info->status);
	trace_rtg_frame_sched(frame_info->rtg->id, "frame_status",
			      frame_info->status);
}

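/*
 * A frame's util is considered invalid once the elapsed time in the
 * current window exceeds util_invalid_interval and the actual execution
 * time accounts for less than 1/FRAME_UTIL_INVALID_FACTOR of it, i.e.
 * the frame threads have been mostly idle for too long.
 */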
static inline bool check_frame_util_invalid(const struct frame_info *frame_info,
	u64 timeline)
{
	return ((frame_info_rtg(frame_info)->util_invalid_interval <= timeline) &&
		(frame_info_rtg_load(frame_info)->curr_window_exec * FRAME_UTIL_INVALID_FACTOR
		 <= timeline));
}

static u64 calc_prev_fake_load_util(const struct frame_info *frame_info)
{
	u64 prev_frame_load = frame_info->prev_frame_load;
	u64 prev_frame_time = max_t(unsigned long, frame_info->prev_frame_time,
		frame_info->frame_time);
	u64 frame_util = 0;

	if (prev_frame_time > 0)
		frame_util = div_u64((prev_frame_load << SCHED_CAPACITY_SHIFT),
			prev_frame_time);
	frame_util = clamp_t(unsigned long, frame_util,
		frame_info->prev_min_util,
		frame_info->prev_max_util);

	return frame_util;
}

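/*
 * Previous-frame load util: scale the last window's load to capacity
 * units, util = (prev_frame_load << SCHED_CAPACITY_SHIFT) / frame_time,
 * saturating at FRAME_MAX_LOAD when the load already fills the frame
 * period, then clamp to [prev_min_util, prev_max_util].
 */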
static u64 calc_prev_frame_load_util(const struct frame_info *frame_info)
{
	u64 prev_frame_load = frame_info->prev_frame_load;
	u64 frame_time = frame_info->frame_time;
	u64 frame_util = 0;

	if (prev_frame_load >= frame_time)
		frame_util = FRAME_MAX_LOAD;
	else
		frame_util = div_u64((prev_frame_load << SCHED_CAPACITY_SHIFT),
			frame_info->frame_time);
	frame_util = clamp_t(unsigned long, frame_util,
		frame_info->prev_min_util,
		frame_info->prev_max_util);

	return frame_util;
}

/* last frame load tracking */
static void update_frame_prev_load(struct frame_info *frame_info, bool fake)
{
	/* last frame load tracking */
	frame_info->prev_frame_exec =
		frame_info_rtg_load(frame_info)->prev_window_exec;
	frame_info->prev_frame_time =
		frame_info_rtg(frame_info)->prev_window_time;
	frame_info->prev_frame_load =
		frame_info_rtg_load(frame_info)->prev_window_load;

	if (fake)
		frame_info->prev_fake_load_util =
			calc_prev_fake_load_util(frame_info);
	else
		frame_info->prev_frame_load_util =
			calc_prev_frame_load_util(frame_info);
}

static void do_frame_end(struct frame_info *frame_info, bool fake)
{
	unsigned long prev_util;
	int id = frame_info->rtg->id;

	frame_info->status = FRAME_END;
	trace_rtg_frame_sched(id, "frame_status", frame_info->status);

	/* last frame load tracking */
	update_frame_prev_load(frame_info, fake);

	/* reset frame_info */
	frame_info->frame_vload = 0;

	/* reset frame_min_util */
	frame_info->frame_min_util = 0;

	if (fake)
		prev_util = frame_info->prev_fake_load_util;
	else
		prev_util = frame_info->prev_frame_load_util;

	frame_info->frame_util = clamp_t(unsigned long, prev_util,
		frame_info->frame_min_util,
		frame_info->frame_max_util);

	trace_rtg_frame_sched(id, "frame_last_task_time",
		frame_info->prev_frame_exec);
	trace_rtg_frame_sched(id, "frame_last_time", frame_info->prev_frame_time);
	trace_rtg_frame_sched(id, "frame_last_load", frame_info->prev_frame_load);
	trace_rtg_frame_sched(id, "frame_last_load_util",
		frame_info->prev_frame_load_util);
	trace_rtg_frame_sched(id, "frame_util", frame_info->frame_util);
	trace_rtg_frame_sched(id, "frame_vload", frame_info->frame_vload);
}

/*
 * frame_load: calculate frame load using exec util
 */
static inline u64 calc_frame_exec(const struct frame_info *frame_info)
{
	if (frame_info->frame_time > 0)
		return div_u64((frame_info_rtg_load(frame_info)->curr_window_exec <<
			SCHED_CAPACITY_SHIFT), frame_info->frame_time);
	else
		return 0;
}

/*
 * real_util:
 * max(last_util, virtual_util, boost_util, phase_util, frame_min_util)
 */
static u64 calc_frame_util(const struct frame_info *frame_info, bool fake)
{
	unsigned long load_util;

	if (fake)
		load_util = frame_info->prev_fake_load_util;
	else
		load_util = frame_info->prev_frame_load_util;

	load_util = max_t(unsigned long, load_util, frame_info->frame_vload);
	load_util = clamp_t(unsigned long, load_util,
		frame_info->frame_min_util,
		frame_info->frame_max_util);

	return load_util;
}

/*
 * frame_vload [0~1024]
 * vtime: now - timestamp
 * max_time: frame_info->frame_time + vload_margin
 * load = F(vtime)
 *      = vtime ^ 2 - vtime * max_time + FRAME_MAX_VLOAD * vtime / max_time;
 *      = vtime * (vtime + FRAME_MAX_VLOAD / max_time - max_time);
 * [0, 0] -=> [max_time, FRAME_MAX_VLOAD]
 */
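/*
 * Illustrative numbers (assumed, not taken from the tuning headers):
 * with frame_time = 16 ms and vload_margin = -3, max_time = 13 and,
 * taking FRAME_MAX_VLOAD as 1024, a vtime of 10 ms gives
 * load = 10 * (10 + 1024 / 13 - 13) = 10 * 75 = 750, rising towards
 * FRAME_MAX_VLOAD as vtime approaches max_time.
 */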
static u64 calc_frame_vload(const struct frame_info *frame_info, u64 timeline)
{
	u64 vload;
	int vtime = div_u64(timeline, NSEC_PER_MSEC);
	int max_time = frame_info->max_vload_time;
	int factor;

	if ((max_time <= 0) || (vtime > max_time))
		return FRAME_MAX_VLOAD;

	factor = vtime + FRAME_MAX_VLOAD / max_time;
	/* margin may be negative */
	if ((vtime <= 0) || (factor <= max_time))
		return 0;

	vload = (u64)vtime * (u64)(factor - max_time);

	return vload;
}

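/*
 * Per-tick state machine. Outside an active frame (FRAME_END or
 * FRAME_INVALID) the window is rolled over once the frame period has
 * fully elapsed; otherwise the load is estimated from executed time.
 * Inside a frame (FRAME_START) the virtual load grows with elapsed time
 * until the frame is detected as invalid, which also ends it.
 */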
static int update_frame_info_tick_inner(int id, struct frame_info *frame_info,
	u64 timeline)
{
	switch (frame_info->status) {
	case FRAME_INVALID:
	case FRAME_END:
		if (timeline >= frame_info->frame_time) {
			/*
			 * fake FRAME_END here to rollover frame_window.
			 */
			sched_set_group_window_rollover(id);
			do_frame_end(frame_info, true);
		} else {
			frame_info->frame_vload = calc_frame_exec(frame_info);
			frame_info->frame_util =
				calc_frame_util(frame_info, true);
		}

		/* when not in boost, start tick timer */
		break;
	case FRAME_START:
		/* check frame_util invalid */
		if (!check_frame_util_invalid(frame_info, timeline)) {
			/* frame_vload statistic */
			frame_info->frame_vload = calc_frame_vload(frame_info, timeline);
			/* frame_util statistic */
			frame_info->frame_util =
				calc_frame_util(frame_info, false);
		} else {
			frame_info->status = FRAME_INVALID;
			trace_rtg_frame_sched(id, "FRAME_STATUS",
				frame_info->status);
			trace_rtg_frame_sched(id, "frame_status",
				frame_info->status);

			/*
			 * trigger FRAME_END to rollover frame_window,
			 * we treat FRAME_INVALID as FRAME_END.
			 */
			sched_set_group_window_rollover(id);
			do_frame_end(frame_info, false);
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static inline struct frame_info *rtg_frame_info_inner(
	const struct related_thread_group *grp)
{
	return (struct frame_info *)grp->private_data;
}

static inline void frame_boost(struct frame_info *frame_info)
{
	if (frame_info->frame_util < frame_info->frame_boost_min_util)
		frame_info->frame_util = frame_info->frame_boost_min_util;
}

/*
 * update CPUFREQ and PLACEMENT while a frame task is running (at tick)
 * and on migration
 */
static void update_frame_info_tick(struct related_thread_group *grp)
{
	u64 window_start;
	u64 wallclock;
	u64 timeline;
	struct frame_info *frame_info = NULL;
	int id = grp->id;

	rcu_read_lock();
	frame_info = rtg_frame_info_inner(grp);
	window_start = grp->window_start;
	rcu_read_unlock();
	if (unlikely(!frame_info))
		return;

	if (atomic_read(&frame_info->frame_sched_state) == 0)
		return;
	trace_rtg_frame_sched(id, "frame_status", frame_info->status);

	wallclock = ktime_get_ns();
	timeline = wallclock - window_start;

	trace_rtg_frame_sched(id, "update_curr_pid", current->pid);
	trace_rtg_frame_sched(id, "frame_timeline", div_u64(timeline, NSEC_PER_MSEC));

	if (update_frame_info_tick_inner(grp->id, frame_info, timeline) == -EINVAL)
		return;

	frame_boost(frame_info);
	trace_rtg_frame_sched(id, "frame_vload", frame_info->frame_vload);
	trace_rtg_frame_sched(id, "frame_util", frame_info->frame_util);

	sched_set_group_normalized_util(grp->id,
		frame_info->frame_util, RTG_FREQ_NORMAL_UPDATE);

	if (grp->preferred_cluster)
		trace_rtg_frame_sched(id, "preferred_cluster",
			grp->preferred_cluster->id);
}

const struct rtg_class frame_rtg_class = {
	.sched_update_rtg_tick = update_frame_info_tick,
};

int set_frame_margin(struct frame_info *frame_info, int margin)
{
	int id;

	if ((margin < MIN_VLOAD_MARGIN) || (margin > MAX_VLOAD_MARGIN)) {
		pr_err("[FRAME_RTG]: %s invalid MARGIN value\n",
			__func__);
		return -EINVAL;
	}

	if (!frame_info || !frame_info->rtg)
		return -EINVAL;

	frame_info->vload_margin = margin;
	frame_info->max_vload_time =
		div_u64(frame_info->frame_time, NSEC_PER_MSEC) +
		frame_info->vload_margin;
	id = frame_info->rtg->id;
	trace_rtg_frame_sched(id, "FRAME_MARGIN", -margin);
	trace_rtg_frame_sched(id, "FRAME_MAX_TIME", frame_info->max_vload_time);

	return 0;
}

static void set_frame_start(struct frame_info *frame_info)
{
	int id = frame_info->rtg->id;

	if (likely(frame_info->status == FRAME_START)) {
		/*
		 * START -=> START -=> ......
		 * FRAME_START is
		 *	the end of the last frame
		 *	the start of the current frame
		 */
		update_frame_prev_load(frame_info, false);
	} else if ((frame_info->status == FRAME_END) ||
		(frame_info->status == FRAME_INVALID)) {
		/* START -=> END -=> [START]
		 * FRAME_START is
		 *	only the start of the current frame
		 * we shouldn't track the last rtg-window
		 * [FRAME_END, FRAME_START]
		 * since it's not a valid frame window
		 */
		update_frame_prev_load(frame_info, true);
		frame_info->status = FRAME_START;
	}
	trace_rtg_frame_sched(id, "FRAME_STATUS", frame_info->status);
	trace_rtg_frame_sched(id, "frame_last_task_time",
		frame_info->prev_frame_exec);
	trace_rtg_frame_sched(id, "frame_last_time", frame_info->prev_frame_time);
	trace_rtg_frame_sched(id, "frame_last_load", frame_info->prev_frame_load);
	trace_rtg_frame_sched(id, "frame_last_load_util",
		frame_info->prev_frame_load_util);

	/* new_frame_start */
	if (!frame_info->margin_imme) {
		frame_info->frame_vload = 0;
		frame_info->frame_util = clamp_t(unsigned long,
			frame_info->prev_frame_load_util,
			frame_info->frame_min_util,
			frame_info->frame_max_util);
	} else {
		frame_info->frame_vload = calc_frame_vload(frame_info, 0);
		frame_info->frame_util = calc_frame_util(frame_info, false);
	}

	trace_rtg_frame_sched(id, "frame_vload", frame_info->frame_vload);
}

static void set_frame_end(struct frame_info *frame_info)
{
	trace_rtg_frame_sched(frame_info->rtg->id, "FRAME_STATUS", FRAME_END);
	do_frame_end(frame_info, false);
}

static int update_frame_timestamp(unsigned long status,
	struct frame_info *frame_info, struct related_thread_group *grp)
{
	int id = frame_info->rtg->id;

	/* SCHED_FRAME timestamp */
	switch (status) {
	case FRAME_START:
		/* collect the last frame's info when the new frame starts */
		set_frame_start(frame_info);
		break;
	case FRAME_END:
		/* FRAME_END should set and update freq only once */
		if (unlikely(frame_info->status == FRAME_END))
			return 0;
		set_frame_end(frame_info);
		break;
	default:
		pr_err("[FRAME_RTG]: %s invalid timestamp(status)\n",
			__func__);
		return -EINVAL;
	}

	frame_boost(frame_info);
	trace_rtg_frame_sched(id, "frame_util", frame_info->frame_util);

	/* force a cpufreq update when the frame stops */
	sched_set_group_normalized_util(grp->id,
		frame_info->frame_util, RTG_FREQ_FORCE_UPDATE);
	if (grp->preferred_cluster)
		trace_rtg_frame_sched(id, "preferred_cluster",
			grp->preferred_cluster->id);

	return 0;
}

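/*
 * Decode a user supplied timestamp word. The value must carry the
 * FRAME_SETTIME bit (and must not be the bare FRAME_SETTIME_PARAM);
 * FRAME_TIMESTAMP_SKIP_START/END toggle timestamp skipping,
 * FRAME_USE_MARGIN_IMME selects immediate-margin vload for the new
 * frame, and the remaining bits are handed to update_frame_timestamp()
 * as the FRAME_START/FRAME_END event.
 */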
static int set_frame_status(struct frame_info *frame_info, unsigned long status)
{
	struct related_thread_group *grp = NULL;
	int id;

	if (!frame_info)
		return -EINVAL;

	grp = frame_info->rtg;
	if (unlikely(!grp))
		return -EINVAL;

	if (atomic_read(&frame_info->frame_sched_state) == 0)
		return -EINVAL;

	if (!(status & FRAME_SETTIME) ||
		(status == (unsigned long)FRAME_SETTIME_PARAM)) {
		pr_err("[FRAME_RTG]: %s invalid timestamp(status)\n",
			__func__);
		return -EINVAL;
	}

	if (status & FRAME_TIMESTAMP_SKIP_START) {
		frame_info->timestamp_skipped = true;
		status &= ~FRAME_TIMESTAMP_SKIP_START;
	} else if (status & FRAME_TIMESTAMP_SKIP_END) {
		frame_info->timestamp_skipped = false;
		status &= ~FRAME_TIMESTAMP_SKIP_END;
	} else if (frame_info->timestamp_skipped) {
		/*
		 * skip the following timestamps until
		 * FRAME_TIMESTAMP_SKIPPED is reset
		 */
		return 0;
	}
	id = grp->id;
	trace_rtg_frame_sched(id, "FRAME_TIMESTAMP_SKIPPED",
		frame_info->timestamp_skipped);
	trace_rtg_frame_sched(id, "FRAME_MAX_UTIL", frame_info->frame_max_util);

	if (status & FRAME_USE_MARGIN_IMME) {
		frame_info->margin_imme = true;
		status &= ~FRAME_USE_MARGIN_IMME;
	} else {
		frame_info->margin_imme = false;
	}
	trace_rtg_frame_sched(id, "FRAME_MARGIN_IMME", frame_info->margin_imme);
	trace_rtg_frame_sched(id, "FRAME_TIMESTAMP", status);

	return update_frame_timestamp(status, frame_info, grp);
}

int set_frame_timestamp(struct frame_info *frame_info, unsigned long timestamp)
{
	int ret;

	if (!frame_info || !frame_info->rtg)
		return -EINVAL;

	if (atomic_read(&frame_info->frame_sched_state) == 0)
		return -EINVAL;

	ret = sched_set_group_window_rollover(frame_info->rtg->id);
	if (!ret)
		ret = set_frame_status(frame_info, timestamp);

	return ret;
}

int set_frame_min_util(struct frame_info *frame_info, int min_util, bool is_boost)
{
	int id;

	if (unlikely((min_util < 0) || (min_util > SCHED_CAPACITY_SCALE))) {
		pr_err("[FRAME_RTG]: %s invalid min_util value\n",
			__func__);
		return -EINVAL;
	}

	if (!frame_info || !frame_info->rtg)
		return -EINVAL;

	id = frame_info->rtg->id;
	if (is_boost) {
		frame_info->frame_boost_min_util = min_util;
		trace_rtg_frame_sched(id, "FRAME_BOOST_MIN_UTIL", min_util);
	} else {
		frame_info->frame_min_util = min_util;

		frame_info->frame_util = calc_frame_util(frame_info, false);
		trace_rtg_frame_sched(id, "frame_util", frame_info->frame_util);
		sched_set_group_normalized_util(id,
			frame_info->frame_util, RTG_FREQ_FORCE_UPDATE);
	}

	return 0;
}

int set_frame_max_util(struct frame_info *frame_info, int max_util)
{
	int id;

	if ((max_util < 0) || (max_util > SCHED_CAPACITY_SCALE)) {
		pr_err("[FRAME_RTG]: %s invalid max_util value\n",
			__func__);
		return -EINVAL;
	}

	if (!frame_info || !frame_info->rtg)
		return -EINVAL;

	frame_info->frame_max_util = max_util;
	id = frame_info->rtg->id;
	trace_rtg_frame_sched(id, "FRAME_MAX_UTIL", frame_info->frame_max_util);

	return 0;
}

struct frame_info *lookup_frame_info_by_grp_id(int grp_id)
{
	if (grp_id >= (MULTI_FRAME_ID + MULTI_FRAME_NUM) || (grp_id <= 0))
		return NULL;
	if (grp_id >= MULTI_FRAME_ID) {
		read_lock(&g_id_manager.lock);
		if (!test_bit(grp_id - MULTI_FRAME_ID, g_id_manager.id_map)) {
			read_unlock(&g_id_manager.lock);
			return NULL;
		}
		read_unlock(&g_id_manager.lock);
		return rtg_frame_info(grp_id);
	} else {
		return rtg_frame_info(grp_id);
	}
}

static int _init_frame_info(struct frame_info *frame_info, int id)
{
	struct related_thread_group *grp = NULL;
	unsigned long flags;

	memset(frame_info, 0, sizeof(struct frame_info));
	mutex_init(&frame_info->lock);

	mutex_lock(&frame_info->lock);
	frame_info->frame_rate = DEFAULT_FRAME_RATE;
	frame_info->frame_time = div_u64(NSEC_PER_SEC, frame_info->frame_rate);
	frame_info->thread_num = 0;
	frame_info->prio = NOT_RT_PRIO;
	atomic_set(&(frame_info->curr_rt_thread_num), 0);
	atomic_set(&(frame_info->frame_sched_state), 0);
	frame_info->vload_margin = DEFAULT_VLOAD_MARGIN;
	frame_info->max_vload_time =
		div_u64(frame_info->frame_time, NSEC_PER_MSEC) +
		frame_info->vload_margin;
	frame_info->frame_min_util = FRAME_DEFAULT_MIN_UTIL;
	frame_info->frame_max_util = FRAME_DEFAULT_MAX_UTIL;
	frame_info->prev_min_util = FRAME_DEFAULT_MIN_PREV_UTIL;
	frame_info->prev_max_util = FRAME_DEFAULT_MAX_PREV_UTIL;
	frame_info->margin_imme = false;
	frame_info->timestamp_skipped = false;
	frame_info->status = FRAME_END;

	grp = frame_rtg(id);
	if (unlikely(!grp)) {
		mutex_unlock(&frame_info->lock);
		return -EINVAL;
	}

	raw_spin_lock_irqsave(&grp->lock, flags);
	grp->private_data = frame_info;
	grp->rtg_class = &frame_rtg_class;
	raw_spin_unlock_irqrestore(&grp->lock, flags);

	frame_info->rtg = grp;
	mutex_unlock(&frame_info->lock);

	return 0;
}

static int __init init_frame_info(void)
{
	int ret = 0;
	int id;

	for (id = MULTI_FRAME_ID; id < (MULTI_FRAME_ID + MULTI_FRAME_NUM); id++) {
		if (ret != 0)
			break;
		ret = _init_frame_info(rtg_multi_frame_info(id), id);
	}

	return ret;
}
late_initcall(init_frame_info);