/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_drv.h"
#include "intel_drv.h"

/*
 * IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
 * VLV_VLV2_PUNIT_HAS_0.8.docx
 */

/* Standard MMIO read, non-posted */
#define SB_MRD_NP	0x00
/* Standard MMIO write, non-posted */
#define SB_MWR_NP	0x01
/* Private register read, double-word addressing, non-posted */
#define SB_CRRDDA_NP	0x06
/* Private register write, double-word addressing, non-posted */
#define SB_CRWRDA_NP	0x07
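
/*
 * Every VLV sideband access funnels through a single doorbell handshake:
 * wait for IOSF_SB_BUSY to clear, write the target address (and the data,
 * for a write), ring VLV_IOSF_DOORBELL_REQ with the devfn/opcode/port/
 * byte-enables/bar command word, wait for the transaction to complete,
 * then fetch the result from VLV_IOSF_DATA for reads. Callers must hold
 * dpio_lock around the whole sequence.
 */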
static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
			   u32 port, u32 opcode, u32 addr, u32 *val)
{
	u32 cmd, be = 0xf, bar = 0;
	bool is_read = (opcode == SB_MRD_NP || opcode == SB_CRRDDA_NP);

	cmd = (devfn << IOSF_DEVFN_SHIFT) | (opcode << IOSF_OPCODE_SHIFT) |
		(port << IOSF_PORT_SHIFT) | (be << IOSF_BYTE_ENABLES_SHIFT) |
		(bar << IOSF_BAR_SHIFT);

	WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));

	/* Wait for any previous transaction to finish before starting ours. */
	if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) {
		DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n",
				 is_read ? "read" : "write");
		return -EAGAIN;
	}

	I915_WRITE(VLV_IOSF_ADDR, addr);
	if (!is_read)
		I915_WRITE(VLV_IOSF_DATA, *val);
	/* Ring the doorbell to kick off the transaction. */
	I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);

	if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) {
		DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n",
				 is_read ? "read" : "write");
		return -ETIMEDOUT;
	}

	if (is_read)
		*val = I915_READ(VLV_IOSF_DATA);
	I915_WRITE(VLV_IOSF_DATA, 0);

	return 0;
}
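
/*
 * Usage sketch (this simply mirrors vlv_punit_read() below): callers take
 * dpio_lock, issue the sideband transaction, then drop the lock.
 *
 *	mutex_lock(&dev_priv->dpio_lock);
 *	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
 *			SB_CRRDDA_NP, addr, &val);
 *	mutex_unlock(&dev_priv->dpio_lock);
 */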

u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr)
{
	u32 val = 0;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	mutex_lock(&dev_priv->dpio_lock);
	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
			SB_CRRDDA_NP, addr, &val);
	mutex_unlock(&dev_priv->dpio_lock);

	return val;
}

void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	mutex_lock(&dev_priv->dpio_lock);
	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
			SB_CRWRDA_NP, addr, &val);
	mutex_unlock(&dev_priv->dpio_lock);
}

u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val = 0;

	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
			SB_CRRDDA_NP, reg, &val);

	return val;
}

void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
			SB_CRWRDA_NP, reg, &val);
}

u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
{
	u32 val = 0;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	mutex_lock(&dev_priv->dpio_lock);
	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_NC,
			SB_CRRDDA_NP, addr, &val);
	mutex_unlock(&dev_priv->dpio_lock);

	return val;
}

u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val = 0;
	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC,
			SB_CRRDDA_NP, reg, &val);
	return val;
}

void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC,
			SB_CRWRDA_NP, reg, &val);
}

u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val = 0;
	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
			SB_CRRDDA_NP, reg, &val);
	return val;
}

void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
			SB_CRWRDA_NP, reg, &val);
}

u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val = 0;
	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
			SB_CRRDDA_NP, reg, &val);
	return val;
}

void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
			SB_CRWRDA_NP, reg, &val);
}

u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val = 0;
	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE,
			SB_CRRDDA_NP, reg, &val);
	return val;
}

void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE,
			SB_CRWRDA_NP, reg, &val);
}

u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
{
	u32 val = 0;

	vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
			SB_MRD_NP, reg, &val);

	/*
	 * FIXME: There might be some registers where all 1's is a valid value,
	 * so ideally we should check the register offset instead...
	 */
	WARN(val == 0xffffffff, "DPIO read pipe %c reg 0x%x == 0x%x\n",
	     pipe_name(pipe), reg, val);

	return val;
}

void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val)
{
	vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
			SB_MWR_NP, reg, &val);
}

/* SBI access */
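/*
 * The PCH SBI port has two destinations: SBI_ICLK uses the CRRD/CRWR
 * opcodes, while SBI_MPHY uses IORD/IOWR. Both paths share SBI_ADDR,
 * SBI_DATA and the SBI_CTL_STAT busy/response-fail handshake below,
 * and callers are expected to hold dpio_lock.
 */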
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
		   enum intel_sbi_destination destination)
{
	u32 value = 0;
	WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));

	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
				100)) {
		DRM_ERROR("timeout waiting for SBI to become ready\n");
		return 0;
	}

	I915_WRITE(SBI_ADDR, (reg << 16));

	if (destination == SBI_ICLK)
		value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
	else
		value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
	I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);

	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
				100)) {
		DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
		return 0;
	}

	return I915_READ(SBI_DATA);
}

void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
		     enum intel_sbi_destination destination)
{
	u32 tmp;

	WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));

	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
				100)) {
		DRM_ERROR("timeout waiting for SBI to become ready\n");
		return;
	}

	I915_WRITE(SBI_ADDR, (reg << 16));
	I915_WRITE(SBI_DATA, value);

	if (destination == SBI_ICLK)
		tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
	else
		tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
	I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);

	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
				100)) {
		DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
		return;
	}
}
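
/*
 * Illustrative read-modify-write sketch of the expected SBI calling
 * pattern; SBI_SSCCTL6 and SBI_SSCCTL_DISABLE are used here only as
 * example register/bit names:
 *
 *	mutex_lock(&dev_priv->dpio_lock);
 *	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
 *	tmp |= SBI_SSCCTL_DISABLE;
 *	intel_sbi_write(dev_priv, SBI_SSCCTL6, tmp, SBI_ICLK);
 *	mutex_unlock(&dev_priv->dpio_lock);
 */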

u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val = 0;
	vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRRDDA_NP,
			reg, &val);
	return val;
}

void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
	vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRWRDA_NP,
			reg, &val);
}