/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "drmP.h"
#include "drm.h"
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include "i915_drm.h"
#include "i915_drv.h"

#define DRM_I915_RING_DEBUG 1


#if defined(CONFIG_DEBUG_FS)

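/*
 * Tokens naming the GEM object lists in drm_i915_private; passed as the
 * data argument of a debugfs entry to tell i915_gem_object_list_info()
 * which list to walk.
 */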
enum {
	ACTIVE_LIST,
	FLUSHING_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
	DEFERRED_FREE_LIST,
};

static const char *yesno(int v)
{
	return v ? "yes" : "no";
}

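/* Dump the device's static capability flags: gen, PCH type and feature bits. */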
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
	B(is_mobile);
	B(is_i85x);
	B(is_i915g);
	B(is_i945gm);
	B(is_g33);
	B(need_gfx_hws);
	B(is_g4x);
	B(is_pineview);
	B(is_broadwater);
	B(is_crestline);
	B(has_fbc);
	B(has_pipe_cxsr);
	B(has_hotplug);
	B(cursor_needs_physical);
	B(has_overlay);
	B(overlay_needs_physical);
	B(supports_tv);
	B(has_bsd_ring);
	B(has_blt_ring);
	B(has_llc);
#undef B

	return 0;
}

static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
	if (obj->user_pin_count > 0)
		return "P";
	else if (obj->pin_count > 0)
		return "p";
	else
		return " ";
}

static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return " ";
	case I915_TILING_X: return "X";
	case I915_TILING_Y: return "Y";
	}
}

static const char *cache_level_str(int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return " snooped (LLC)";
	case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
	default: return "";
	}
}

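/*
 * Print a one-line summary of a GEM object: pin/tiling flags, size,
 * read/write domains, last seqnos, cache level, and (where set) flink
 * name, fence register, GTT placement, mappability and owning ring.
 */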
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	seq_printf(m, "%pK: %s%s %8zdKiB %04x %04x %d %d%s%s%s",
		   &obj->base,
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   obj->last_rendering_seqno,
		   obj->last_fenced_seqno,
		   cache_level_str(obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	if (obj->gtt_space != NULL)
		seq_printf(m, " (gtt offset: %08x, size: %08x)",
			   obj->gtt_offset, (unsigned int)obj->gtt_space->size);
	if (obj->pin_mappable || obj->fault_mappable) {
		char s[3], *t = s;
		if (obj->pin_mappable)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->ring != NULL)
		seq_printf(m, " (%s)", obj->ring->name);
}

static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	switch (list) {
	case ACTIVE_LIST:
		seq_printf(m, "Active:\n");
		head = &dev_priv->mm.active_list;
		break;
	case INACTIVE_LIST:
		seq_printf(m, "Inactive:\n");
		head = &dev_priv->mm.inactive_list;
		break;
	case PINNED_LIST:
		seq_printf(m, "Pinned:\n");
		head = &dev_priv->mm.pinned_list;
		break;
	case FLUSHING_LIST:
		seq_printf(m, "Flushing:\n");
		head = &dev_priv->mm.flushing_list;
		break;
	case DEFERRED_FREE_LIST:
		seq_printf(m, "Deferred free:\n");
		head = &dev_priv->mm.deferred_free_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, head, mm_list) {
		seq_printf(m, " ");
		describe_obj(m, obj);
		seq_printf(m, "\n");
		total_obj_size += obj->base.size;
		total_gtt_size += obj->gtt_space->size;
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}

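/*
 * Accumulate the object count and total GTT footprint of a list,
 * tracking the mappable subset separately; expects size, count,
 * mappable_size, mappable_count and obj in the caller's scope.
 */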
#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += obj->gtt_space->size; \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += obj->gtt_space->size; \
			++mappable_count; \
		} \
	} \
} while (0)

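/* Aggregate per-list object counts and sizes for the whole GEM state. */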
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 count, mappable_count;
	size_t size, mappable_size;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %zu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.gtt_list, gtt_list);
	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.active_list, mm_list);
	count_objects(&dev_priv->mm.flushing_list, mm_list);
	seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.pinned_list, mm_list);
	seq_printf(m, " %u [%u] pinned objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.inactive_list, mm_list);
	seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.deferred_free_list, mm_list);
	seq_printf(m, " %u [%u] freed objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
		if (obj->fault_mappable) {
			size += obj->gtt_space->size;
			++count;
		}
		if (obj->pin_mappable) {
			mappable_size += obj->gtt_space->size;
			++mappable_count;
		}
	}
	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
		   mappable_count, mappable_size);
	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
		   count, size);

	seq_printf(m, "%zu [%zu] gtt total\n",
		   dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
		seq_printf(m, " ");
		describe_obj(m, obj);
		seq_printf(m, "\n");
		total_obj_size += obj->base.size;
		total_gtt_size += obj->gtt_space->size;
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}

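/* Report any page flip pending on each CRTC, under the event lock. */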
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	unsigned long flags;
	struct intel_crtc *crtc;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_unpin_work *work;

		spin_lock_irqsave(&dev->event_lock, flags);
		work = crtc->unpin_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			if (!work->pending) {
				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->enable_stall_check)
				seq_printf(m, "Stall check enabled, ");
			else
				seq_printf(m, "Stall check waiting for page flip ioctl, ");
			seq_printf(m, "%d prepares\n", work->pending);

			if (work->old_fb_obj) {
				struct drm_i915_gem_object *obj = work->old_fb_obj;
				if (obj)
					seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
			}
			if (work->pending_flip_obj) {
				struct drm_i915_gem_object *obj = work->pending_flip_obj;
				if (obj)
					seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
			}
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	return 0;
}

static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *gem_request;
	int ret, count;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	count = 0;
	if (!list_empty(&dev_priv->ring[RCS].request_list)) {
		seq_printf(m, "Render requests:\n");
		list_for_each_entry(gem_request,
				    &dev_priv->ring[RCS].request_list,
				    list) {
			seq_printf(m, " %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	if (!list_empty(&dev_priv->ring[VCS].request_list)) {
		seq_printf(m, "BSD requests:\n");
		list_for_each_entry(gem_request,
				    &dev_priv->ring[VCS].request_list,
				    list) {
			seq_printf(m, " %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	if (!list_empty(&dev_priv->ring[BCS].request_list)) {
		seq_printf(m, "BLT requests:\n");
		list_for_each_entry(gem_request,
				    &dev_priv->ring[BCS].request_list,
				    list) {
			seq_printf(m, " %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (count == 0)
		seq_printf(m, "No requests\n");

	return 0;
}

static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_ring_buffer *ring)
{
	if (ring->get_seqno) {
		seq_printf(m, "Current sequence (%s): %d\n",
			   ring->name, ring->get_seqno(ring));
		seq_printf(m, "Waiter sequence (%s): %d\n",
			   ring->name, ring->waiting_seqno);
		seq_printf(m, "IRQ sequence (%s): %d\n",
			   ring->name, ring->irq_seqno);
	}
}

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for (i = 0; i < I915_NUM_RINGS; i++)
		i915_ring_seqno_info(m, &dev_priv->ring[i]);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i, pipe;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (!HAS_PCH_SPLIT(dev)) {
		seq_printf(m, "Interrupt enable: %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity: %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask: %08x\n",
			   I915_READ(IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat: %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable: %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity: %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask: %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable: %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity: %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask: %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable: %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity: %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask: %08x\n",
			   I915_READ(GTIMR));
	}
	seq_printf(m, "Interrupts received: %d\n",
		   atomic_read(&dev_priv->irq_received));
	for (i = 0; i < I915_NUM_RINGS; i++) {
		if (IS_GEN6(dev) || IS_GEN7(dev)) {
			seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
				   dev_priv->ring[i].name,
				   I915_READ_IMR(&dev_priv->ring[i]));
		}
		i915_ring_seqno_info(m, &dev_priv->ring[i]);
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

		seq_printf(m, "Fenced object[%2d] = ", i);
		if (obj == NULL)
			seq_printf(m, "unused");
		else
			describe_obj(m, obj);
		seq_printf(m, "\n");
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

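/*
 * Hex dump of the selected ring's hardware status page, four dwords per
 * line; the loop bound covers the first 256 dwords of the 4KiB page.
 */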
static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	const volatile u32 __iomem *hws;
	int i;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	hws = (volatile u32 __iomem *)ring->status_page.page_addr;
	if (hws == NULL)
		return 0;

	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}

static int i915_ringbuffer_data(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	if (!ring->obj) {
		seq_printf(m, "No ringbuffer setup\n");
	} else {
		const u8 __iomem *virt = ring->virtual_start;
		uint32_t off;

		for (off = 0; off < ring->size; off += 4) {
			uint32_t *ptr = (uint32_t *)(virt + off);
			seq_printf(m, "%08x : %08x\n", off, *ptr);
		}
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_ringbuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	if (ring->size == 0)
		return 0;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Ring %s:\n", ring->name);
	seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
	seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
	seq_printf(m, " Size : %08x\n", ring->size);
	seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring));
	seq_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring));
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		seq_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring));
		seq_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring));
	}
	seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
	seq_printf(m, " Start : %08x\n", I915_READ_START(ring));

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static const char *ring_str(int ring)
{
	switch (ring) {
	case RCS: return "render";
	case VCS: return "bsd";
	case BCS: return "blt";
	default: return "";
	}
}

static const char *pin_flag(int pinned)
{
	if (pinned > 0)
		return " P";
	else if (pinned < 0)
		return " p";
	else
		return "";
}

static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}

static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}

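/* Print one line per buffer object captured in a GPU error state. */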
static void print_error_buffers(struct seq_file *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	seq_printf(m, "%s [%d]:\n", name, count);

	while (count--) {
		seq_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s%s",
			   err->gtt_offset,
			   err->size,
			   err->read_domains,
			   err->write_domain,
			   err->seqno,
			   pin_flag(err->pinned),
			   tiling_flag(err->tiling),
			   dirty_flag(err->dirty),
			   purgeable_flag(err->purgeable),
			   err->ring != -1 ? " " : "",
			   ring_str(err->ring),
			   cache_level_str(err->cache_level));

		if (err->name)
			seq_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			seq_printf(m, " (fence: %d)", err->fence_reg);

		seq_printf(m, "\n");
		err++;
	}
}

static void i915_ring_error_state(struct seq_file *m,
				  struct drm_device *dev,
				  struct drm_i915_error_state *error,
				  unsigned ring)
{
	seq_printf(m, "%s command stream:\n", ring_str(ring));
	seq_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
	seq_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
	seq_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
	seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
	seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
	seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
	if (ring == RCS && INTEL_INFO(dev)->gen >= 4) {
		seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
		seq_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
	}
	if (INTEL_INFO(dev)->gen >= 4)
		seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
	seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
	if (INTEL_INFO(dev)->gen >= 6) {
		seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
		seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
		seq_printf(m, " SYNC_0: 0x%08x\n",
			   error->semaphore_mboxes[ring][0]);
		seq_printf(m, " SYNC_1: 0x%08x\n",
			   error->semaphore_mboxes[ring][1]);
	}
	seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
	seq_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
	seq_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
}

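/*
 * Dump the last captured GPU error state under the error lock: global
 * registers, per-ring state, the active/pinned buffer lists, and the
 * batchbuffer and ringbuffer contents snapshotted at hang time.
 */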
static int i915_error_state(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int i, j, page, offset, elt;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	if (!dev_priv->first_error) {
		seq_printf(m, "no error state collected\n");
		goto out;
	}

	error = dev_priv->first_error;

	seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		   error->time.tv_usec);
	seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
	seq_printf(m, "EIR: 0x%08x\n", error->eir);
	seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);

	if (INTEL_INFO(dev)->gen >= 6) {
		seq_printf(m, "ERROR: 0x%08x\n", error->error);
		seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	i915_ring_error_state(m, dev, error, RCS);
	if (HAS_BLT(dev))
		i915_ring_error_state(m, dev, error, BCS);
	if (HAS_BSD(dev))
		i915_ring_error_state(m, dev, error, VCS);

	if (error->active_bo)
		print_error_buffers(m, "Active",
				    error->active_bo,
				    error->active_bo_count);

	if (error->pinned_bo)
		print_error_buffers(m, "Pinned",
				    error->pinned_bo,
				    error->pinned_bo_count);

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		struct drm_i915_error_object *obj;

		if ((obj = error->ring[i].batchbuffer)) {
			seq_printf(m, "%s --- gtt_offset = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
					offset += 4;
				}
			}
		}

		if (error->ring[i].num_requests) {
			seq_printf(m, "%s --- %d requests\n",
				   dev_priv->ring[i].name,
				   error->ring[i].num_requests);
			for (j = 0; j < error->ring[i].num_requests; j++) {
				seq_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
					   error->ring[i].requests[j].seqno,
					   error->ring[i].requests[j].jiffies,
					   error->ring[i].requests[j].tail);
			}
		}

		if ((obj = error->ring[i].ringbuffer)) {
			seq_printf(m, "%s --- ringbuffer = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					seq_printf(m, "%08x : %08x\n",
						   offset,
						   obj->pages[page][elt]);
					offset += 4;
				}
			}
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

out:
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	return 0;
}

static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 crstanddelay;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	crstanddelay = I915_READ16(CRSTANDVID);

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));

	return 0;
}

static int i915_cur_delayinfo(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (IS_GEN5(dev)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		u32 rpstat;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		int max_freq;

		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			return ret;

		gen6_gt_force_wake_get(dev_priv);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
		rpcurup = I915_READ(GEN6_RP_CUR_UP);
		rpprevup = I915_READ(GEN6_RP_PREV_UP);
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);

		gen6_gt_force_wake_put(dev_priv);
		mutex_unlock(&dev->struct_mutex);

		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & 0xff00) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
			   GEN6_CAGF_SHIFT) * 50);
		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
			   GEN6_CURICONT_MASK);
		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
			   GEN6_CURIAVG_MASK);
		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
			   GEN6_CURBSYTAVG_MASK);

		max_freq = (rp_state_cap & 0xff0000) >> 16;
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   max_freq * 50);

		max_freq = (rp_state_cap & 0xff00) >> 8;
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   max_freq * 50);

		max_freq = rp_state_cap & 0xff;
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   max_freq * 50);
	} else {
		seq_printf(m, "no P-state info available\n");
	}

	return 0;
}

static int i915_delayfreq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 delayfreq;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for (i = 0; i < 16; i++) {
		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static inline int MAP_TO_MV(int map)
{
	return 1250 - (map * 25);
}

static int i915_inttoext_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 inttoext;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for (i = 1; i <= 32; i++) {
		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
		   "yes" : "no");
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
	seq_printf(m, "SW control enabled: %s\n",
		   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
	seq_printf(m, "Gated voltage change: %s\n",
		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
	seq_printf(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_printf(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_printf(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_printf(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_printf(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_printf(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_printf(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_printf(m, "unknown\n");
		break;
	}

	return 0;
}

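/*
 * Report RC6 state on gen6+. The RC registers are only trustworthy while
 * nobody holds a forcewake reference, so briefly wait for the FORCEWAKE
 * ack to clear before sampling them.
 */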
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, gt_core_status, rcctl1;
	unsigned forcewake_count;
	int count = 0, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	spin_lock_irq(&dev_priv->gt_lock);
	forcewake_count = dev_priv->forcewake_count;
	spin_unlock_irq(&dev_priv->gt_lock);

	if (forcewake_count) {
		seq_printf(m, "RC information inaccurate because somebody "
			      "holds a forcewake reference\n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			 GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_printf(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_printf(m, "Core Power Down\n");
		else
			seq_printf(m, "on\n");
		break;
	case GEN6_RC3:
		seq_printf(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_printf(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_printf(m, "RC7\n");
		break;
	default:
		seq_printf(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	return 0;
}

static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;

	if (IS_GEN6(dev) || IS_GEN7(dev))
		return gen6_drpc_info(m);
	else
		return ironlake_drpc_info(m);
}

static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!I915_HAS_FBC(dev)) {
		seq_printf(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	if (intel_fbc_enabled(dev)) {
		seq_printf(m, "FBC enabled\n");
	} else {
		seq_printf(m, "FBC disabled: ");
		switch (dev_priv->no_fbc_reason) {
		case FBC_NO_OUTPUT:
			seq_printf(m, "no outputs");
			break;
		case FBC_STOLEN_TOO_SMALL:
			seq_printf(m, "not enough stolen memory");
			break;
		case FBC_UNSUPPORTED_MODE:
			seq_printf(m, "mode not supported");
			break;
		case FBC_MODE_TOO_LARGE:
			seq_printf(m, "mode too large");
			break;
		case FBC_BAD_PLANE:
			seq_printf(m, "FBC unsupported on plane");
			break;
		case FBC_NOT_TILED:
			seq_printf(m, "scanout buffer not tiled");
			break;
		case FBC_MULTIPLE_PIPES:
			seq_printf(m, "multiple pipes are enabled");
			break;
		case FBC_MODULE_PARAM:
			seq_printf(m, "disabled per module param (default off)");
			break;
		default:
			seq_printf(m, "unknown reason");
		}
		seq_printf(m, "\n");
	}
	return 0;
}

static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool sr_enabled = false;

	if (HAS_PCH_SPLIT(dev))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;

	seq_printf(m, "self-refresh: %s\n",
		   sr_enabled ? "enabled" : "disabled");

	return 0;
}

static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long temp, chipset, gfx;
	int ret;

	if (!IS_GEN5(dev))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}

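/*
 * On gen6/gen7, walk the pcode frequency table and print the effective
 * CPU frequency paired with each 50MHz GPU frequency step.
 */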
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;
	int gpu_freq, ia_freq;

	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
		seq_printf(m, "unsupported on this chipset\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");

	for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
	     gpu_freq++) {
		I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
			   GEN6_PCODE_READ_MIN_FREQ_TABLE);
		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
			      GEN6_PCODE_READY) == 0, 10)) {
			DRM_ERROR("pcode read of freq table timed out\n");
			continue;
		}
		ia_freq = I915_READ(GEN6_PCODE_DATA);
		seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gfxec(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_opregion *opregion = &dev_priv->opregion;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (opregion->header)
		seq_write(m, opregion->header, OPREGION_SIZE);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_fbdev *ifbdev;
	struct intel_framebuffer *fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	ifbdev = dev_priv->fbdev;
	fb = to_intel_framebuffer(ifbdev->helper.fb);

	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
		   fb->base.width,
		   fb->base.height,
		   fb->base.depth,
		   fb->base.bits_per_pixel);
	describe_obj(m, fb->obj);
	seq_printf(m, "\n");

	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
		if (&fb->base == ifbdev->helper.fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.depth,
			   fb->base.bits_per_pixel);
		describe_obj(m, fb->obj);
		seq_printf(m, "\n");
	}

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}

static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	if (dev_priv->pwrctx) {
		seq_printf(m, "power context ");
		describe_obj(m, dev_priv->pwrctx);
		seq_printf(m, "\n");
	}

	if (dev_priv->renderctx) {
		seq_printf(m, "render context ");
		describe_obj(m, dev_priv->renderctx);
		seq_printf(m, "\n");
	}

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}

static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned forcewake_count;

	spin_lock_irq(&dev_priv->gt_lock);
	forcewake_count = dev_priv->forcewake_count;
	spin_unlock_irq(&dev_priv->gt_lock);

	seq_printf(m, "forcewake count = %u\n", forcewake_count);

	return 0;
}

static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}

static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev) || IS_GEN4(dev)) {
		seq_printf(m, "DCC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		seq_printf(m, "ARB_MODE = 0x%08x\n",
			   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	if (INTEL_INFO(dev)->gen == 6)
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for (i = 0; i < I915_NUM_RINGS; i++) {
		ring = &dev_priv->ring[i];

		seq_printf(m, "%s\n", ring->name);
		if (INTEL_INFO(dev)->gen == 7)
			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_printf(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
	}
	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

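/*
 * i915_wedged: reading reports the current wedged status; writing a
 * value passes it to i915_handle_error() to simulate a GPU hang.
 */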
static ssize_t
i915_wedged_read(struct file *filp,
		 char __user *ubuf,
		 size_t max,
		 loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	char buf[80];
	int len;

	len = snprintf(buf, sizeof(buf),
		       "wedged : %d\n",
		       atomic_read(&dev_priv->mm.wedged));

	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

static ssize_t
i915_wedged_write(struct file *filp,
		  const char __user *ubuf,
		  size_t cnt,
		  loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	char buf[20];
	int val = 1;

	if (cnt > 0) {
		if (cnt > sizeof(buf) - 1)
			return -EINVAL;

		if (copy_from_user(buf, ubuf, cnt))
			return -EFAULT;
		buf[cnt] = 0;

		val = simple_strtoul(buf, NULL, 0);
	}

	DRM_INFO("Manually setting wedged to %d\n", val);
	i915_handle_error(dev, val);

	return cnt;
}

static const struct file_operations i915_wedged_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = i915_wedged_read,
	.write = i915_wedged_write,
	.llseek = default_llseek,
};

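/*
 * i915_max_freq: read or clamp the maximum RPS frequency. Values are
 * exchanged in MHz; internally they are stored in 50MHz units.
 */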
static ssize_t
i915_max_freq_read(struct file *filp,
		   char __user *ubuf,
		   size_t max,
		   loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	char buf[80];
	int len;

	len = snprintf(buf, sizeof(buf),
		       "max freq: %d\n", dev_priv->max_delay * 50);

	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

static ssize_t
i915_max_freq_write(struct file *filp,
		    const char __user *ubuf,
		    size_t cnt,
		    loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	char buf[20];
	int val = 1;

	if (cnt > 0) {
		if (cnt > sizeof(buf) - 1)
			return -EINVAL;

		if (copy_from_user(buf, ubuf, cnt))
			return -EFAULT;
		buf[cnt] = 0;

		val = simple_strtoul(buf, NULL, 0);
	}

	DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);

	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	dev_priv->max_delay = val / 50;

	gen6_set_rps(dev, val / 50);

	return cnt;
}

static const struct file_operations i915_max_freq_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = i915_max_freq_read,
	.write = i915_max_freq_write,
	.llseek = default_llseek,
};

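/*
 * i915_cache_sharing: expose the MBC snoop policy field of
 * GEN6_MBCUNIT_SNPCR; writes accept values 0-3.
 */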
static ssize_t
i915_cache_sharing_read(struct file *filp,
			char __user *ubuf,
			size_t max,
			loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	char buf[80];
	u32 snpcr;
	int len;

	mutex_lock(&dev_priv->dev->struct_mutex);
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	mutex_unlock(&dev_priv->dev->struct_mutex);

	len = snprintf(buf, sizeof(buf),
		       "%d\n", (snpcr & GEN6_MBC_SNPCR_MASK) >>
		       GEN6_MBC_SNPCR_SHIFT);

	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

static ssize_t
i915_cache_sharing_write(struct file *filp,
			 const char __user *ubuf,
			 size_t cnt,
			 loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	char buf[20];
	u32 snpcr;
	int val = 1;

	if (cnt > 0) {
		if (cnt > sizeof(buf) - 1)
			return -EINVAL;

		if (copy_from_user(buf, ubuf, cnt))
			return -EFAULT;
		buf[cnt] = 0;

		val = simple_strtoul(buf, NULL, 0);
	}

	if (val < 0 || val > 3)
		return -EINVAL;

	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %d\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	return cnt;
}

static const struct file_operations i915_cache_sharing_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = i915_cache_sharing_read,
	.write = i915_cache_sharing_write,
	.llseek = default_llseek,
};

/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent,
		       const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *) key;

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;
}

static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	gen6_gt_force_wake_get(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	/*
	 * It's bad that we can potentially hang userspace if struct_mutex gets
	 * forever stuck. However, if we cannot acquire this lock it means that
	 * almost certainly the driver has hung and is not unloadable.
	 * Therefore hanging here is probably a minor inconvenience not to be
	 * seen by almost every user.
	 */
	mutex_lock(&dev->struct_mutex);
	gen6_gt_force_wake_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};

static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_forcewake_user",
				  S_IRUSR,
				  root, dev,
				  &i915_forcewake_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}

static int i915_debugfs_create(struct dentry *root,
			       struct drm_minor *minor,
			       const char *name,
			       const struct file_operations *fops)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file(name,
				  S_IRUGO | S_IWUSR,
				  root, dev,
				  fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, fops);
}

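/* Read-only debugfs nodes, registered via drm_debugfs_create_files(). */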
static struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
	{"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
	{"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS},
	{"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS},
	{"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS},
	{"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
	{"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
	{"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
	{"i915_error_state", i915_error_state, 0},
	{"i915_rstdby_delays", i915_rstdby_delays, 0},
	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
	{"i915_delayfreq_table", i915_delayfreq_table, 0},
	{"i915_inttoext_table", i915_inttoext_table, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_gfxec", i915_gfxec, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

int i915_debugfs_init(struct drm_minor *minor)
{
	int ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_wedged",
				  &i915_wedged_fops);
	if (ret)
		return ret;

	ret = i915_forcewake_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_max_freq",
				  &i915_max_freq_fops);
	if (ret)
		return ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_cache_sharing",
				  &i915_cache_sharing_fops);
	if (ret)
		return ret;

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}

void i915_debugfs_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
				 1, minor);
}

#endif /* CONFIG_DEBUG_FS */