/*
 *
 * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA  02110-1301, USA.
 *
 */

/*
 * Base kernel device APIs
 */

#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/seq_file.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_platform.h>

#include <mali_kbase.h>
#include <mali_kbase_defs.h>
#include <mali_kbase_hwaccess_instr.h>
#include <mali_kbase_hw.h>
#include <mali_kbase_config_defaults.h>

#include <mali_kbase_profiling_gator_api.h>

/* NOTE: Magic - 0x45435254 ("TRCE" in ASCII).
 * Supports the tracing feature provided in the base module.
 * Please keep it in sync with the value used by the base module.
 */
#define TRACE_BUFFER_HEADER_SPECIAL 0x45435254

#if KBASE_TRACE_ENABLE
static const char *kbasep_trace_code_string[] = {
	/* IMPORTANT: USE OF SPECIAL #INCLUDE OF NON-STANDARD HEADER FILE
	 * THIS MUST BE USED AT THE START OF THE ARRAY */
#define KBASE_TRACE_CODE_MAKE_CODE(X) # X
#include "mali_kbase_trace_defs.h"
#undef  KBASE_TRACE_CODE_MAKE_CODE
};
#endif
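
/* The #include above is an X-macro expansion: mali_kbase_trace_defs.h is
 * assumed to consist of KBASE_TRACE_CODE_MAKE_CODE(name) entries, so the
 * stringifying definition turns each entry into a string literal for this
 * array, e.g. (name purely illustrative):
 *
 *   KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_IRQ)  ->  "CORE_GPU_IRQ"
 *
 * The same header can be included elsewhere with a different definition of
 * the macro to build the matching enum of trace codes.
 */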

#define DEBUG_MESSAGE_SIZE 256

static int kbasep_trace_init(struct kbase_device *kbdev);
static void kbasep_trace_term(struct kbase_device *kbdev);
static void kbasep_trace_hook_wrapper(void *param);

struct kbase_device *kbase_device_alloc(void)
{
	return kzalloc(sizeof(struct kbase_device), GFP_KERNEL);
}

static int kbase_device_as_init(struct kbase_device *kbdev, int i)
{
	const char format[] = "mali_mmu%d";
	char name[sizeof(format)];
	const char poke_format[] = "mali_mmu%d_poker";
	char poke_name[sizeof(poke_format)];

	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
		snprintf(poke_name, sizeof(poke_name), poke_format, i);

	snprintf(name, sizeof(name), format, i);

	kbdev->as[i].number = i;
	kbdev->as[i].fault_addr = 0ULL;

	kbdev->as[i].pf_wq = alloc_workqueue(name, 0, 1);
	if (!kbdev->as[i].pf_wq)
		return -EINVAL;

	INIT_WORK(&kbdev->as[i].work_pagefault, page_fault_worker);
	INIT_WORK(&kbdev->as[i].work_busfault, bus_fault_worker);

	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316)) {
		struct hrtimer *poke_timer = &kbdev->as[i].poke_timer;
		struct work_struct *poke_work = &kbdev->as[i].poke_work;

		kbdev->as[i].poke_wq = alloc_workqueue(poke_name, 0, 1);
		if (!kbdev->as[i].poke_wq) {
			destroy_workqueue(kbdev->as[i].pf_wq);
			return -EINVAL;
		}
		KBASE_DEBUG_ASSERT(!object_is_on_stack(poke_work));
		INIT_WORK(poke_work, kbasep_as_do_poke);

		hrtimer_init(poke_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

		poke_timer->function = kbasep_as_poke_timer_callback;

		kbdev->as[i].poke_refcount = 0;
		kbdev->as[i].poke_state = 0u;
	}

	return 0;
}

static void kbase_device_as_term(struct kbase_device *kbdev, int i)
{
	destroy_workqueue(kbdev->as[i].pf_wq);
	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
		destroy_workqueue(kbdev->as[i].poke_wq);
}

static int kbase_device_all_as_init(struct kbase_device *kbdev)
{
	int i, err;

	for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
		err = kbase_device_as_init(kbdev, i);
		if (err)
			goto free_workqs;
	}

	return 0;

free_workqs:
	/* Unwind only the address spaces that were successfully initialised:
	 * as[i] itself failed, so tear down as[i-1] .. as[0].
	 */
	while (i-- > 0)
		kbase_device_as_term(kbdev, i);

	return err;
}

static void kbase_device_all_as_term(struct kbase_device *kbdev)
{
	int i;

	for (i = 0; i < kbdev->nr_hw_address_spaces; i++)
		kbase_device_as_term(kbdev, i);
}

int kbase_device_init(struct kbase_device * const kbdev)
{
	int i, err;
#ifdef CONFIG_ARM64
	struct device_node *np = NULL;
#endif /* CONFIG_ARM64 */

	spin_lock_init(&kbdev->mmu_mask_change);
	mutex_init(&kbdev->mmu_hw_mutex);
#ifdef CONFIG_ARM64
	kbdev->cci_snoop_enabled = false;
	np = kbdev->dev->of_node;
	if (np != NULL) {
		if (of_property_read_u32(np, "snoop_enable_smc",
					&kbdev->snoop_enable_smc))
			kbdev->snoop_enable_smc = 0;
		if (of_property_read_u32(np, "snoop_disable_smc",
					&kbdev->snoop_disable_smc))
			kbdev->snoop_disable_smc = 0;
		/* Either both or none of the calls should be provided. */
		if (!((kbdev->snoop_disable_smc == 0
			&& kbdev->snoop_enable_smc == 0)
			|| (kbdev->snoop_disable_smc != 0
			&& kbdev->snoop_enable_smc != 0))) {
			WARN_ON(1);
			err = -EINVAL;
			goto fail;
		}
	}
#endif /* CONFIG_ARM64 */
	/* Get the list of workarounds for issues on the current HW
	 * (identified by the GPU_ID register)
	 */
	err = kbase_hw_set_issues_mask(kbdev);
	if (err)
		goto fail;

	/* Set the list of features available on the current HW
	 * (identified by the GPU_ID register)
	 */
	kbase_hw_set_features_mask(kbdev);

	kbase_gpuprops_set_features(kbdev);

	/* On Linux 4.0+, dma coherency is determined from device tree */
#if defined(CONFIG_ARM64) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
	set_dma_ops(kbdev->dev, &noncoherent_swiotlb_dma_ops);
#endif

	/* Workaround a pre-3.13 Linux issue, where dma_mask is NULL when our
	 * device structure was created by device-tree
	 */
	if (!kbdev->dev->dma_mask)
		kbdev->dev->dma_mask = &kbdev->dev->coherent_dma_mask;

	err = dma_set_mask(kbdev->dev,
			DMA_BIT_MASK(kbdev->gpu_props.mmu.pa_bits));
	if (err)
		goto dma_set_mask_failed;

	err = dma_set_coherent_mask(kbdev->dev,
			DMA_BIT_MASK(kbdev->gpu_props.mmu.pa_bits));
	if (err)
		goto dma_set_mask_failed;

	kbdev->nr_hw_address_spaces = kbdev->gpu_props.num_address_spaces;

	err = kbase_device_all_as_init(kbdev);
	if (err)
		goto as_init_failed;

	spin_lock_init(&kbdev->hwcnt.lock);

	err = kbasep_trace_init(kbdev);
	if (err)
		goto term_as;

	mutex_init(&kbdev->cacheclean_lock);

#ifdef CONFIG_MALI_TRACE_TIMELINE
	for (i = 0; i < BASE_JM_MAX_NR_SLOTS; ++i)
		kbdev->timeline.slot_atoms_submitted[i] = 0;

	for (i = 0; i <= KBASEP_TIMELINE_PM_EVENT_LAST; ++i)
		atomic_set(&kbdev->timeline.pm_event_uid[i], 0);
#endif /* CONFIG_MALI_TRACE_TIMELINE */

	/* fbdump profiling controls set to 0 - fbdump not enabled until changed by gator */
	for (i = 0; i < FBDUMP_CONTROL_MAX; i++)
		kbdev->kbase_profiling_controls[i] = 0;

	kbase_debug_assert_register_hook(&kbasep_trace_hook_wrapper, kbdev);

	atomic_set(&kbdev->ctx_num, 0);

	err = kbase_instr_backend_init(kbdev);
	if (err)
		goto term_trace;

	kbdev->pm.dvfs_period = DEFAULT_PM_DVFS_PERIOD;

	kbdev->reset_timeout_ms = DEFAULT_RESET_TIMEOUT_MS;

	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU))
		kbdev->mmu_mode = kbase_mmu_mode_get_aarch64();
	else
		kbdev->mmu_mode = kbase_mmu_mode_get_lpae();

#ifdef CONFIG_MALI_DEBUG
	init_waitqueue_head(&kbdev->driver_inactive_wait);
#endif /* CONFIG_MALI_DEBUG */

	return 0;
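	/* Error handling: unwind in the reverse order of initialisation. */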
term_trace:
	kbasep_trace_term(kbdev);
term_as:
	kbase_device_all_as_term(kbdev);
as_init_failed:
dma_set_mask_failed:
fail:
	return err;
}

void kbase_device_term(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev);

#if KBASE_TRACE_ENABLE
	kbase_debug_assert_register_hook(NULL, NULL);
#endif

	kbase_instr_backend_term(kbdev);

	kbasep_trace_term(kbdev);

	kbase_device_all_as_term(kbdev);
}

void kbase_device_free(struct kbase_device *kbdev)
{
	kfree(kbdev);
}

int kbase_device_trace_buffer_install(
		struct kbase_context *kctx, u32 *tb, size_t size)
{
	unsigned long flags;

	KBASE_DEBUG_ASSERT(kctx);
	KBASE_DEBUG_ASSERT(tb);

	/* Interface uses 16-bit value to track last accessed entry. Each entry
	 * is composed of two 32-bit words.
	 * This limits the size that can be handled without an overflow. */
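	/* That is, at most 0xFFFF entries of two u32 words each, i.e. just
	 * under 512 KiB of trace buffer.
	 */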
	if (0xFFFF * (2 * sizeof(u32)) < size)
		return -EINVAL;

	/* set up the header */
	/* magic number in the first 4 bytes */
	tb[0] = TRACE_BUFFER_HEADER_SPECIAL;
	/* Store (write offset = 0, wrap counter = 0, transaction active = no).
	 * A write offset of 0 means the buffer has never been written.
	 * Offsets 1 to (wrap_offset - 1) are used to store values once
	 * tracing has started.
	 */
	tb[1] = 0;

	/* install trace buffer */
	spin_lock_irqsave(&kctx->jctx.tb_lock, flags);
	kctx->jctx.tb_wrap_offset = size / 8;
	kctx->jctx.tb = tb;
	spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags);

	return 0;
}

void kbase_device_trace_buffer_uninstall(struct kbase_context *kctx)
{
	unsigned long flags;

	KBASE_DEBUG_ASSERT(kctx);
	spin_lock_irqsave(&kctx->jctx.tb_lock, flags);
	kctx->jctx.tb = NULL;
	kctx->jctx.tb_wrap_offset = 0;
	spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags);
}

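/* Layout of the header word tb[1], as maintained by
 * kbase_device_trace_register_access() below:
 *
 *   bit  0      transaction-in-progress flag
 *   bits 1-15   wrap counter
 *   bits 16-31  write offset of the most recently written entry
 *
 * Each entry occupies two u32 words starting at tb[write_offset * 2].
 */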
void kbase_device_trace_register_access(struct kbase_context *kctx, enum kbase_reg_access_type type, u16 reg_offset, u32 reg_value)
{
	unsigned long flags;

	spin_lock_irqsave(&kctx->jctx.tb_lock, flags);
	if (kctx->jctx.tb) {
		u16 wrap_count;
		u16 write_offset;
		u32 *tb = kctx->jctx.tb;
		u32 header_word;

		header_word = tb[1];
		KBASE_DEBUG_ASSERT(0 == (header_word & 0x1));

		wrap_count = (header_word >> 1) & 0x7FFF;
		write_offset = (header_word >> 16) & 0xFFFF;

		/* mark as transaction in progress */
		tb[1] |= 0x1;
		mb();
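		/* The barrier orders the flag write before the entry words
		 * below, presumably so a concurrent reader can detect a
		 * partially written entry via bit 0 of the header.
		 */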

		/* calculate new offset */
		write_offset++;
		if (write_offset == kctx->jctx.tb_wrap_offset) {
			/* wrap */
			write_offset = 1;
			wrap_count++;
			wrap_count &= 0x7FFF;	/* 15bit wrap counter */
		}

		/* store the trace entry at the selected offset */
		tb[write_offset * 2 + 0] = (reg_offset & ~0x3) | ((type == REG_WRITE) ? 0x1 : 0x0);
		tb[write_offset * 2 + 1] = reg_value;
		mb();

		/* new header word */
		header_word = (write_offset << 16) | (wrap_count << 1) | 0x0;	/* transaction complete */
		tb[1] = header_word;
	}
	spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags);
}

/*
 * Device trace functions
 */
#if KBASE_TRACE_ENABLE

static int kbasep_trace_init(struct kbase_device *kbdev)
{
	struct kbase_trace *rbuf;

	rbuf = kmalloc_array(KBASE_TRACE_SIZE, sizeof(*rbuf), GFP_KERNEL);

	if (!rbuf)
		return -EINVAL;

	kbdev->trace_rbuf = rbuf;
	spin_lock_init(&kbdev->trace_lock);
	return 0;
}

static void kbasep_trace_term(struct kbase_device *kbdev)
{
	kfree(kbdev->trace_rbuf);
}

static void kbasep_trace_format_msg(struct kbase_trace *trace_msg, char *buffer, int len)
{
	s32 written = 0;

	/* Initial part of message */
	written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d.%.6d,%d,%d,%s,%p,", (int)trace_msg->timestamp.tv_sec, (int)(trace_msg->timestamp.tv_nsec / 1000), trace_msg->thread_id, trace_msg->cpu, kbasep_trace_code_string[trace_msg->code], trace_msg->ctx), 0);

	if (trace_msg->katom)
		written += MAX(snprintf(buffer + written, MAX(len - written, 0), "atom %d (ud: 0x%llx 0x%llx)", trace_msg->atom_number, trace_msg->atom_udata[0], trace_msg->atom_udata[1]), 0);

	written += MAX(snprintf(buffer + written, MAX(len - written, 0), ",%.8llx,", trace_msg->gpu_addr), 0);

	/* NOTE: Could add function callbacks to handle different message types */
	/* Jobslot present */
	if (trace_msg->flags & KBASE_TRACE_FLAG_JOBSLOT)
		written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d", trace_msg->jobslot), 0);

	written += MAX(snprintf(buffer + written, MAX(len - written, 0), ","), 0);

	/* Refcount present */
	if (trace_msg->flags & KBASE_TRACE_FLAG_REFCOUNT)
		written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d", trace_msg->refcount), 0);

	written += MAX(snprintf(buffer + written, MAX(len - written, 0), ","), 0);

	/* Rest of message */
	written += MAX(snprintf(buffer + written, MAX(len - written, 0), "0x%.8lx", trace_msg->info_val), 0);
}

static void kbasep_trace_dump_msg(struct kbase_device *kbdev, struct kbase_trace *trace_msg)
{
	char buffer[DEBUG_MESSAGE_SIZE];

	kbasep_trace_format_msg(trace_msg, buffer, DEBUG_MESSAGE_SIZE);
	dev_dbg(kbdev->dev, "%s", buffer);
}

void kbasep_trace_add(struct kbase_device *kbdev, enum kbase_trace_code code, void *ctx, struct kbase_jd_atom *katom, u64 gpu_addr, u8 flags, int refcount, int jobslot, unsigned long info_val)
{
	unsigned long irqflags;
	struct kbase_trace *trace_msg;

	spin_lock_irqsave(&kbdev->trace_lock, irqflags);

	trace_msg = &kbdev->trace_rbuf[kbdev->trace_next_in];

	/* Fill the message */
	trace_msg->thread_id = task_pid_nr(current);
	trace_msg->cpu = task_cpu(current);

	ktime_get_real_ts64(&trace_msg->timestamp);

	trace_msg->code = code;
	trace_msg->ctx = ctx;

	if (NULL == katom) {
		trace_msg->katom = false;
	} else {
		trace_msg->katom = true;
		trace_msg->atom_number = kbase_jd_atom_id(katom->kctx, katom);
		trace_msg->atom_udata[0] = katom->udata.blob[0];
		trace_msg->atom_udata[1] = katom->udata.blob[1];
	}

	trace_msg->gpu_addr = gpu_addr;
	trace_msg->jobslot = jobslot;
	trace_msg->refcount = MIN((unsigned int)refcount, 0xFF);
	trace_msg->info_val = info_val;
	trace_msg->flags = flags;

	/* Update the ringbuffer indices */
	kbdev->trace_next_in = (kbdev->trace_next_in + 1) & KBASE_TRACE_MASK;
	if (kbdev->trace_next_in == kbdev->trace_first_out)
		kbdev->trace_first_out = (kbdev->trace_first_out + 1) & KBASE_TRACE_MASK;
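	/* KBASE_TRACE_MASK is assumed to be KBASE_TRACE_SIZE - 1 with a
	 * power-of-two size; when the ring is full, advancing trace_first_out
	 * silently drops the oldest entry.
	 */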

	/* Done */

	spin_unlock_irqrestore(&kbdev->trace_lock, irqflags);
}

void kbasep_trace_clear(struct kbase_device *kbdev)
{
	unsigned long flags;

	spin_lock_irqsave(&kbdev->trace_lock, flags);
	kbdev->trace_first_out = kbdev->trace_next_in;
	spin_unlock_irqrestore(&kbdev->trace_lock, flags);
}

void kbasep_trace_dump(struct kbase_device *kbdev)
{
	unsigned long flags;
	u32 start;
	u32 end;

	dev_dbg(kbdev->dev, "Dumping trace:\nsecs,nthread,cpu,code,ctx,katom,gpu_addr,jobslot,refcount,info_val");
	spin_lock_irqsave(&kbdev->trace_lock, flags);
	start = kbdev->trace_first_out;
	end = kbdev->trace_next_in;

	while (start != end) {
		struct kbase_trace *trace_msg = &kbdev->trace_rbuf[start];

		kbasep_trace_dump_msg(kbdev, trace_msg);

		start = (start + 1) & KBASE_TRACE_MASK;
	}
	dev_dbg(kbdev->dev, "TRACE_END");

	spin_unlock_irqrestore(&kbdev->trace_lock, flags);

	KBASE_TRACE_CLEAR(kbdev);
}

static void kbasep_trace_hook_wrapper(void *param)
{
	struct kbase_device *kbdev = (struct kbase_device *)param;

	kbasep_trace_dump(kbdev);
}

#ifdef CONFIG_DEBUG_FS
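/* Snapshot of the trace ring buffer, copied out under trace_lock in
 * kbasep_trace_debugfs_open() so the seq_file iterator below walks a
 * consistent copy without racing against new trace entries.
 */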
struct trace_seq_state {
	struct kbase_trace trace_buf[KBASE_TRACE_SIZE];
	u32 start;
	u32 end;
};

static void *kbasep_trace_seq_start(struct seq_file *s, loff_t *pos)
{
	struct trace_seq_state *state = s->private;
	int i;

	if (*pos > KBASE_TRACE_SIZE)
		return NULL;
	i = state->start + *pos;
	if ((state->end >= state->start && i >= state->end) ||
			i >= state->end + KBASE_TRACE_SIZE)
		return NULL;

	i &= KBASE_TRACE_MASK;

	return &state->trace_buf[i];
}

static void kbasep_trace_seq_stop(struct seq_file *s, void *data)
{
}

static void *kbasep_trace_seq_next(struct seq_file *s, void *data, loff_t *pos)
{
	struct trace_seq_state *state = s->private;
	int i;

	(*pos)++;

	i = (state->start + *pos) & KBASE_TRACE_MASK;
	if (i == state->end)
		return NULL;

	return &state->trace_buf[i];
}

static int kbasep_trace_seq_show(struct seq_file *s, void *data)
{
	struct kbase_trace *trace_msg = data;
	char buffer[DEBUG_MESSAGE_SIZE];

	kbasep_trace_format_msg(trace_msg, buffer, DEBUG_MESSAGE_SIZE);
	seq_printf(s, "%s\n", buffer);
	return 0;
}

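/* Wire the iterator into the standard seq_file interface; .show emits one
 * entry per line in the same CSV layout as kbasep_trace_dump().
 */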
static const struct seq_operations kbasep_trace_seq_ops = {
	.start = kbasep_trace_seq_start,
	.next = kbasep_trace_seq_next,
	.stop = kbasep_trace_seq_stop,
	.show = kbasep_trace_seq_show,
};

static int kbasep_trace_debugfs_open(struct inode *inode, struct file *file)
{
	struct kbase_device *kbdev = inode->i_private;
	unsigned long flags;

	struct trace_seq_state *state;

	state = __seq_open_private(file, &kbasep_trace_seq_ops, sizeof(*state));
	if (!state)
		return -ENOMEM;

	spin_lock_irqsave(&kbdev->trace_lock, flags);
	state->start = kbdev->trace_first_out;
	state->end = kbdev->trace_next_in;
	memcpy(state->trace_buf, kbdev->trace_rbuf, sizeof(state->trace_buf));
	spin_unlock_irqrestore(&kbdev->trace_lock, flags);

	return 0;
}

static const struct file_operations kbasep_trace_debugfs_fops = {
	.open = kbasep_trace_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

void kbasep_trace_debugfs_init(struct kbase_device *kbdev)
{
	debugfs_create_file("mali_trace", S_IRUGO,
			kbdev->mali_debugfs_directory, kbdev,
			&kbasep_trace_debugfs_fops);
}
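
/* The trace can then be read from debugfs, e.g. (the exact path depends on
 * where mali_debugfs_directory was created; this one is illustrative):
 *
 *   cat /sys/kernel/debug/mali0/mali_trace
 */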

#else
void kbasep_trace_debugfs_init(struct kbase_device *kbdev)
{
}
#endif				/* CONFIG_DEBUG_FS */

#else				/* KBASE_TRACE_ENABLE  */
static int kbasep_trace_init(struct kbase_device *kbdev)
{
	CSTD_UNUSED(kbdev);
	return 0;
}

static void kbasep_trace_term(struct kbase_device *kbdev)
{
	CSTD_UNUSED(kbdev);
}

static void kbasep_trace_hook_wrapper(void *param)
{
	CSTD_UNUSED(param);
}

void kbasep_trace_dump(struct kbase_device *kbdev)
{
	CSTD_UNUSED(kbdev);
}
#endif				/* KBASE_TRACE_ENABLE  */

void kbase_set_profiling_control(struct kbase_device *kbdev, u32 control, u32 value)
{
	switch (control) {
	case FBDUMP_CONTROL_ENABLE:
		/* fall through */
	case FBDUMP_CONTROL_RATE:
		/* fall through */
	case SW_COUNTER_ENABLE:
		/* fall through */
	case FBDUMP_CONTROL_RESIZE_FACTOR:
		kbdev->kbase_profiling_controls[control] = value;
		break;
	default:
		dev_err(kbdev->dev, "Profiling control %d not found\n", control);
		break;
	}
}

/*
 * Called by gator to control the production of profiling information
 * at runtime.
 */
void _mali_profiling_control(u32 action, u32 value)
{
	struct kbase_device *kbdev = NULL;

	/* find the first device, i.e. call with -1 */
	kbdev = kbase_find_device(-1);

	if (NULL != kbdev)
		kbase_set_profiling_control(kbdev, action, value);
}
KBASE_EXPORT_SYMBOL(_mali_profiling_control);
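
/* Illustrative gator-side usage, with the control/value pairs being the
 * FBDUMP_* and SW_COUNTER_* controls accepted by
 * kbase_set_profiling_control() above:
 *
 *   _mali_profiling_control(FBDUMP_CONTROL_ENABLE, 1);
 *   _mali_profiling_control(SW_COUNTER_ENABLE, 1);
 */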