/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

#if WATCH_INACTIVE
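/*
 * Walk the inactive list and complain about any object that should not be
 * there: still pinned, still active on the GPU, or carrying a pending
 * write domain other than CPU/GTT.  The caller's file and line are
 * included in the error so the offending call site can be found.
 */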
void
i915_verify_inactive(struct drm_device *dev, char *file, int line)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
		obj = obj_priv->obj;
		if (obj_priv->pin_count || obj_priv->active ||
		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
					   I915_GEM_DOMAIN_GTT)))
			DRM_ERROR("inactive %p (p %d a %d w %x)  %s:%d\n",
				  obj,
				  obj_priv->pin_count, obj_priv->active,
				  obj->write_domain, file, line);
	}
}
#endif /* WATCH_INACTIVE */

#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE
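/*
 * Dump one page of an object as 32-bit words over [start, end), printing
 * each word at its GTT address (bias + offset) and flagging the word that
 * matches 'mark'.
 */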
static void
i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
		   uint32_t bias, uint32_t mark)
{
	uint32_t *mem = kmap_atomic(page, KM_USER0);
	int i;
	for (i = start; i < end; i += 4)
		DRM_INFO("%08x: %08x%s\n",
			  (int) (bias + i), mem[i / 4],
			  (bias + i == mark) ? " ********" : "");
	kunmap_atomic(mem, KM_USER0);
	/* give syslog time to catch up */
	msleep(1);
}

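/*
 * Dump the first 'len' bytes of an object's backing pages in 128-byte
 * chunks, using the object's GTT offset as the address bias.
 */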
void
i915_gem_dump_object(struct drm_gem_object *obj, int len,
		     const char *where, uint32_t mark)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int page;

	DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
	for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
		int page_len, chunk, chunk_len;

		page_len = len - page * PAGE_SIZE;
		if (page_len > PAGE_SIZE)
			page_len = PAGE_SIZE;

		for (chunk = 0; chunk < page_len; chunk += 128) {
			chunk_len = page_len - chunk;
			if (chunk_len > 128)
				chunk_len = 128;
			i915_gem_dump_page(obj_priv->page_list[page],
					   chunk, chunk + chunk_len,
					   obj_priv->gtt_offset +
					   page * PAGE_SIZE,
					   mark);
		}
	}
}
#endif

#if WATCH_LRU
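/*
 * Print the active, flushing and inactive object lists, one line per
 * object with its last rendering sequence number.
 */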
void
i915_dump_lru(struct drm_device *dev, const char *where)
{
	drm_i915_private_t		*dev_priv = dev->dev_private;
	struct drm_i915_gem_object	*obj_priv;

	DRM_INFO("active list %s {\n", where);
	list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
			    list)
	{
		DRM_INFO("    %p: %08x\n", obj_priv,
			 obj_priv->last_rendering_seqno);
	}
	DRM_INFO("}\n");
	DRM_INFO("flushing list %s {\n", where);
	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
			    list)
	{
		DRM_INFO("    %p: %08x\n", obj_priv,
			 obj_priv->last_rendering_seqno);
	}
	DRM_INFO("}\n");
	DRM_INFO("inactive %s {\n", where);
	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
		DRM_INFO("    %p: %08x\n", obj_priv,
			 obj_priv->last_rendering_seqno);
	}
	DRM_INFO("}\n");
}
#endif

#if WATCH_COHERENCY
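/*
 * Compare the object's CPU-visible backing pages against its GTT mapping
 * word by word, reporting the first few mismatches before giving up.  The
 * object is clflushed at the end so the CPU reads done here do not disturb
 * the cache state being observed.
 */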
void
i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int page;
	uint32_t *gtt_mapping;
	uint32_t *backing_map = NULL;
	int bad_count = 0;

	DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n",
		 __func__, obj, obj_priv->gtt_offset, handle,
		 obj->size / 1024);

	gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
			      obj->size);
	if (gtt_mapping == NULL) {
		DRM_ERROR("failed to map GTT space\n");
		return;
	}

	for (page = 0; page < obj->size / PAGE_SIZE; page++) {
		int i;

		backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0);

		if (backing_map == NULL) {
			DRM_ERROR("failed to map backing page\n");
			goto out;
		}

		for (i = 0; i < PAGE_SIZE / 4; i++) {
			uint32_t cpuval = backing_map[i];
			uint32_t gttval = readl(gtt_mapping +
						page * 1024 + i);

			if (cpuval != gttval) {
				DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
					 "0x%08x vs 0x%08x\n",
					 (int)(obj_priv->gtt_offset +
					       page * PAGE_SIZE + i * 4),
					 cpuval, gttval);
				if (bad_count++ >= 8) {
					DRM_INFO("...\n");
					goto out;
				}
			}
		}
		kunmap_atomic(backing_map, KM_USER0);
		backing_map = NULL;
	}

 out:
	if (backing_map != NULL)
		kunmap_atomic(backing_map, KM_USER0);
	iounmap(gtt_mapping);

	/* give syslog time to catch up */
	msleep(1);

	/* Directly flush the object, since we just loaded values with the CPU
	 * from the backing pages and we don't want to disturb the cache
	 * management that we're trying to observe.
	 */

	i915_gem_clflush_object(obj);
}
#endif