/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "../i915_selftest.h"
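/*
 * Sanity check a forcewake range table: entries must be sorted by mmio
 * offset, must never overlap or go backwards, must have positive length
 * and, when the table is expected to be watertight, must leave no gap
 * between the end of one range and the start of the next.
 */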
static int intel_fw_table_check(const struct intel_forcewake_range *ranges,
				unsigned int num_ranges,
				bool is_watertight)
{
	unsigned int i;
	s32 prev;

	for (i = 0, prev = -1; i < num_ranges; i++, ranges++) {
		/* Check that the table is watertight */
		if (is_watertight && (prev + 1) != (s32)ranges->start) {
			pr_err("%s: entry[%d]:(%x, %x) is not watertight to previous (%x)\n",
			       __func__, i, ranges->start, ranges->end, prev);
			return -EINVAL;
		}

		/* Check that the table never goes backwards */
		if (prev >= (s32)ranges->start) {
			pr_err("%s: entry[%d]:(%x, %x) is less than the previous (%x)\n",
			       __func__, i, ranges->start, ranges->end, prev);
			return -EINVAL;
		}

		/* Check that the entry is valid */
		if (ranges->start >= ranges->end) {
			pr_err("%s: entry[%d]:(%x, %x) has negative length\n",
			       __func__, i, ranges->start, ranges->end);
			return -EINVAL;
		}

		prev = ranges->end;
	}

	return 0;
}
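/*
 * Verify that the gen8_shadowed_regs table is sorted in strictly
 * ascending mmio offset order, with no duplicate entries.
 */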
static int intel_shadow_table_check(void)
{
	const i915_reg_t *reg = gen8_shadowed_regs;
	unsigned int i;
	s32 prev;

	for (i = 0, prev = -1; i < ARRAY_SIZE(gen8_shadowed_regs); i++, reg++) {
		u32 offset = i915_mmio_reg_offset(*reg);

		if (prev >= (s32)offset) {
			pr_err("%s: entry[%d]:(%x) is before previous (%x)\n",
			       __func__, i, offset, prev);
			return -EINVAL;
		}

		prev = offset;
	}

	return 0;
}
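/*
 * Mock selftests: validate the static forcewake range tables (only the
 * gen9 table is expected to be watertight) and the shadowed register
 * table, without touching any hardware.
 */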
int intel_uncore_mock_selftests(void)
{
	struct {
		const struct intel_forcewake_range *ranges;
		unsigned int num_ranges;
		bool is_watertight;
	} fw[] = {
		{ __vlv_fw_ranges, ARRAY_SIZE(__vlv_fw_ranges), false },
		{ __chv_fw_ranges, ARRAY_SIZE(__chv_fw_ranges), false },
		{ __gen9_fw_ranges, ARRAY_SIZE(__gen9_fw_ranges), true },
	};
	int err, i;

	for (i = 0; i < ARRAY_SIZE(fw); i++) {
		err = intel_fw_table_check(fw[i].ranges,
					   fw[i].num_ranges,
					   fw[i].is_watertight);
		if (err)
			return err;
	}

	err = intel_shadow_table_check();
	if (err)
		return err;

	return 0;
}
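/*
 * Probe every register offset below FW_RANGE: first read each offset with
 * all forcewake domains held to record which registers can be read without
 * an unclaimed-mmio error, then re-read those registers relying on the
 * forcewake table alone. An unclaimed mmio access on the second pass means
 * the forcewake table failed to wake the right domain for that register.
 */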
static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_priv)
{
#define FW_RANGE 0x40000
	unsigned long *valid;
	u32 offset;
	int err;

	if (!HAS_FPGA_DBG_UNCLAIMED(dev_priv) &&
	    !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv))
		return 0;

	if (IS_VALLEYVIEW(dev_priv)) /* XXX system lockup! */
		return 0;

	if (IS_BROADWELL(dev_priv)) /* XXX random GPU hang afterwards! */
		return 0;

	valid = kzalloc(BITS_TO_LONGS(FW_RANGE) * sizeof(*valid),
			GFP_KERNEL);
	if (!valid)
		return -ENOMEM;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	check_for_unclaimed_mmio(dev_priv);
	for (offset = 0; offset < FW_RANGE; offset += 4) {
		i915_reg_t reg = { offset };

		(void)I915_READ_FW(reg);
		if (!check_for_unclaimed_mmio(dev_priv))
			set_bit(offset, valid);
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	err = 0;
	for_each_set_bit(offset, valid, FW_RANGE) {
		i915_reg_t reg = { offset };

		intel_uncore_forcewake_reset(dev_priv, false);
		check_for_unclaimed_mmio(dev_priv);

		(void)I915_READ(reg);
		if (check_for_unclaimed_mmio(dev_priv)) {
			pr_err("Unclaimed mmio read to register 0x%04x\n",
			       offset);
			err = -EINVAL;
		}
	}

	kfree(valid);
	return err;
}
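/*
 * Live selftests: re-check the forcewake range table actually loaded for
 * this device (gen9+ tables must be watertight) and then exercise the
 * forcewake domain lookup against the hardware.
 */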
int intel_uncore_live_selftests(struct drm_i915_private *i915)
{
	int err;

	/* Confirm the table we load is still valid */
	err = intel_fw_table_check(i915->uncore.fw_domains_table,
				   i915->uncore.fw_domains_table_entries,
				   INTEL_GEN(i915) >= 9);
	if (err)
		return err;

	err = intel_uncore_check_forcewake_domains(i915);
	if (err)
		return err;

	return 0;
}