/*
 * Copyright © 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission. The copyright holders make no representations
 * about the suitability of this software for any purpose. It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/seq_file.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fixed.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include "drm_crtc_helper_internal.h"

/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband messages.
 */
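
/*
 * As a rough orientation (a sketch, not a complete driver recipe): a driver
 * embeds a &struct drm_dp_mst_topology_mgr next to its encoder/connector
 * state and initializes it once at setup time; afterwards hotplug IRQs are
 * forwarded to the manager and the helpers below drive the sideband
 * protocol. Apart from drm_dp_mst_topology_mgr_init() itself, the names in
 * this snippet are illustrative:
 *
 *	ret = drm_dp_mst_topology_mgr_init(&dp_priv->mst_mgr, dev,
 *					   &dp_priv->aux,
 *					   max_dpcd_transaction_bytes,
 *					   max_payloads, conn_base_id);
 *	if (ret)
 *		return ret;
 */
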
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);
static int test_calc_pbn_mode(void);

static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id,
				     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int offset, int size, u8 *bytes);
static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);

#define DP_STR(x) [DP_ ## x] = #x

static const char *drm_dp_mst_req_type_str(u8 req_type)
{
	static const char * const req_type_str[] = {
		DP_STR(GET_MSG_TRANSACTION_VERSION),
		DP_STR(LINK_ADDRESS),
		DP_STR(CONNECTION_STATUS_NOTIFY),
		DP_STR(ENUM_PATH_RESOURCES),
		DP_STR(ALLOCATE_PAYLOAD),
		DP_STR(QUERY_PAYLOAD),
		DP_STR(RESOURCE_STATUS_NOTIFY),
		DP_STR(CLEAR_PAYLOAD_ID_TABLE),
		DP_STR(REMOTE_DPCD_READ),
		DP_STR(REMOTE_DPCD_WRITE),
		DP_STR(REMOTE_I2C_READ),
		DP_STR(REMOTE_I2C_WRITE),
		DP_STR(POWER_UP_PHY),
		DP_STR(POWER_DOWN_PHY),
		DP_STR(SINK_EVENT_NOTIFY),
		DP_STR(QUERY_STREAM_ENC_STATUS),
	};

	if (req_type >= ARRAY_SIZE(req_type_str) ||
	    !req_type_str[req_type])
		return "unknown";

	return req_type_str[req_type];
}

#undef DP_STR
#define DP_STR(x) [DP_NAK_ ## x] = #x

static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
{
	static const char * const nak_reason_str[] = {
		DP_STR(WRITE_FAILURE),
		DP_STR(INVALID_READ),
		DP_STR(CRC_FAILURE),
		DP_STR(BAD_PARAM),
		DP_STR(DEFER),
		DP_STR(LINK_FAILURE),
		DP_STR(NO_RESOURCES),
		DP_STR(DPCD_FAIL),
		DP_STR(I2C_NAK),
		DP_STR(ALLOCATE_FAIL),
	};

	if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
	    !nak_reason_str[nak_reason])
		return "unknown";

	return nak_reason_str[nak_reason];
}

#undef DP_STR

/* sideband msg handling */
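/*
 * Note on the two CRC helpers below: both are plain bit-serial CRC
 * implementations. The header CRC shifts each message bit into a 4-bit
 * remainder and reduces with 0x13 whenever bit 4 becomes set, i.e. the
 * CRC-4 polynomial x^4 + x + 1; the data CRC does the same with an 8-bit
 * remainder and 0xd5 (x^8 + x^7 + x^6 + x^4 + x^2 + 1). The trailing loop
 * in each helper shifts in the 4 (or 8) zero bits that flush the remainder.
 */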
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = num_nibbles * 4;
	u8 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x10) == 0x10)
			remainder ^= 0x13;
	}

	number_of_bits = 4;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x10) != 0)
			remainder ^= 0x13;
	}

	return remainder;
}

static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = number_of_bytes * 8;
	u16 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x100) == 0x100)
			remainder ^= 0xd5;
	}

	number_of_bits = 8;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x100) != 0)
			remainder ^= 0xd5;
	}

	return remainder & 0xff;
}

static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
	u8 size = 3;

	size += (hdr->lct / 2);
	return size;
}

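/*
 * Sideband message header layout as produced/consumed by the two helpers
 * below (per byte, MSB first):
 *
 *	byte 0:      LCT[3:0] << 4 | LCR[3:0]
 *	bytes 1..n:  RAD, one nibble per hop, LCT/2 bytes in total
 *	next byte:   broadcast << 7 | path_msg << 6 | msg_len[5:0]
 *	last byte:   SOMT << 7 | EOMT << 6 | seqno << 4 | CRC4[3:0]
 *
 * The CRC4 covers all header nibbles except the final one that carries the
 * CRC itself, which is why the encoder passes (idx * 2) - 1 nibbles.
 */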
static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int *len)
{
	int idx = 0;
	int i;
	u8 crc4;

	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		(hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);

	*len = idx;
}

static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
{
	u8 crc4;
	u8 len;
	int i;
	u8 idx;

	if (buf[0] == 0)
		return false;
	len = 3;
	len += ((buf[0] & 0xf0) >> 4) / 2;
	if (len > buflen)
		return false;
	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
		return false;
	}

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	idx = 1;
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	idx++;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
	idx++;
	*hdrlen = idx;
	return true;
}

static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
				       struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	int i;
	u8 *buf = raw->msg;

	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			(req->u.allocate_payload.number_sdp_streams & 0xf);
		idx++;
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		idx++;
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
			idx++;
		}
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
			idx++;
		}
		break;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
		idx++;
		break;
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_read.num_bytes);
		idx++;
		break;

	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
		break;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		idx++;
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			idx++;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			idx++;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;

			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
			idx++;
		}
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
		idx++;
		break;

	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;
		break;

	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	}
	raw->cur_len = idx;
}
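
/*
 * For illustration, an ENUM_PATH_RESOURCES request for port 2 encodes to
 * just two body bytes under the scheme above: byte 0 carries the request
 * type, byte 1 carries the port number in its high nibble:
 *
 *	buf[0] = DP_ENUM_PATH_RESOURCES & 0x7f;
 *	buf[1] = (2 & 0xf) << 4;	(i.e. 0x20)
 *
 * The header and the data CRC are added separately, when the message is
 * chunked for transmission.
 */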

static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
	u8 crc4;

	crc4 = drm_dp_msg_data_crc4(msg, len);
	msg[len] = crc4;
}

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	u8 *buf = raw->msg;

	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);

	raw->cur_len = idx;
}

/* this adds a chunk of msg to the builder to get the final msg */
static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
				      u8 *replybuf, u8 replybuflen, bool hdr)
{
	int ret;
	u8 crc4;

	if (hdr) {
		u8 hdrlen;
		struct drm_dp_sideband_msg_hdr recv_hdr;

		ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
		if (!ret) {
			print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
			return false;
		}

		/*
		 * ignore out-of-order messages or messages that are part of a
		 * failed transaction
		 */
		if (!recv_hdr.somt && !msg->have_somt)
			return false;

		/* get length contained in this portion */
		msg->curchunk_len = recv_hdr.msg_len;
		msg->curchunk_hdrlen = hdrlen;

		/* we have already gotten an somt - don't bother parsing */
		if (recv_hdr.somt && msg->have_somt)
			return false;

		if (recv_hdr.somt) {
			memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
			msg->have_somt = true;
		}
		if (recv_hdr.eomt)
			msg->have_eomt = true;

		/* copy the bytes for the remainder of this header chunk */
		msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
		memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
	} else {
		memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
		msg->curchunk_idx += replybuflen;
	}

	if (msg->curchunk_idx >= msg->curchunk_len) {
		/* do CRC */
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
	}
	return true;
}
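
/*
 * Assembly flow for received sideband messages, as implemented above: each
 * chunk arrives with its own header (SOMT set on the first chunk of a
 * transaction, EOMT on the last), and the chunk body ends with a data CRC
 * byte, which is why only curchunk_len - 1 bytes are appended to the final
 * message buffer. Note that the received data CRC is computed here but not
 * currently enforced.
 */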

static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	int i;

	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
	idx += 16;
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
		if (raw->msg[idx] & 0x80)
			repmsg->u.link_addr.ports[i].input_port = 1;

		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);

		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
		if (repmsg->u.link_addr.ports[i].input_port == 0)
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
			idx++;
			if (idx > raw->curlen)
				goto fail_len;
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
			idx += 16;
			if (idx > raw->curlen)
				goto fail_len;
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
			idx++;
		}
		if (idx > raw->curlen)
			goto fail_len;
	}

	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
						   struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote dpcd read reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
	idx++;
	/* TODO check */
	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen) {
		DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
			      idx, raw->curlen);
		return false;
	}
	return true;
}

static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];
		return false;
	}

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
	default:
		DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
			  drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
							   struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
							 struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
	default:
		DRM_ERROR("Got unknown request 0x%02x (%s)\n", msg->req_type,
			  drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg,
			    u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_WRITE;
	req.u.dpcd_write.port_number = port_num;
	req.u.dpcd_write.dpcd_address = offset;
	req.u.dpcd_write.num_bytes = num_bytes;
	req.u.dpcd_write.bytes = bytes;
	drm_dp_encode_sideband_req(&req, msg);

	return 0;
}

static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_LINK_ADDRESS;
	drm_dp_encode_sideband_req(&req, msg);
	return 0;
}

static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_ENUM_PATH_RESOURCES;
	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
				  u8 vcpi, uint16_t pbn,
				  u8 number_sdp_streams,
				  u8 *sdp_stream_sink)
{
	struct drm_dp_sideband_msg_req_body req;

	memset(&req, 0, sizeof(req));
	req.req_type = DP_ALLOCATE_PAYLOAD;
	req.u.allocate_payload.port_number = port_num;
	req.u.allocate_payload.vcpi = vcpi;
	req.u.allocate_payload.pbn = pbn;
	req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
	memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
	       number_sdp_streams);
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
				  int port_num, bool power_up)
{
	struct drm_dp_sideband_msg_req_body req;

	if (power_up)
		req.req_type = DP_POWER_UP_PHY;
	else
		req.req_type = DP_POWER_DOWN_PHY;

	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_vcpi *vcpi)
{
	int ret, vcpi_ret;

	mutex_lock(&mgr->payload_lock);
	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
	if (ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of payload ids %d\n", ret);
		goto out_unlock;
	}

	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
	if (vcpi_ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
		goto out_unlock;
	}

	set_bit(ret, &mgr->payload_mask);
	set_bit(vcpi_ret, &mgr->vcpi_mask);
	vcpi->vcpi = vcpi_ret + 1;
	mgr->proposed_vcpis[ret - 1] = vcpi;
out_unlock:
	mutex_unlock(&mgr->payload_lock);
	return ret;
}
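
/*
 * Note: payload IDs handed out above are 1-based. find_first_zero_bit() is
 * expected never to return 0 for payload_mask (bit 0 is assumed to be
 * reserved when the topology manager is initialized, hence the
 * proposed_vcpis[ret - 1] indexing), and VCPI numbers are stored as
 * vcpi_ret + 1 so that a VCPI of 0 can mean "no VCPI", as checked in
 * drm_dp_mst_put_payload_id() below.
 */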

static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
				      int vcpi)
{
	int i;

	if (vcpi == 0)
		return;

	mutex_lock(&mgr->payload_lock);
	DRM_DEBUG_KMS("putting payload %d\n", vcpi);
	clear_bit(vcpi - 1, &mgr->vcpi_mask);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i] &&
		    mgr->proposed_vcpis[i]->vcpi == vcpi) {
			mgr->proposed_vcpis[i] = NULL;
			clear_bit(i + 1, &mgr->payload_mask);
		}
	}
	mutex_unlock(&mgr->payload_lock);
}

static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
{
	unsigned int state;

	/*
	 * All updates to txmsg->state are protected by mgr->qlock, and the two
	 * cases we check here are terminal states. For those the barriers
	 * provided by the wake_up/wait_event pair are enough.
	 */
	state = READ_ONCE(txmsg->state);
	return (state == DRM_DP_SIDEBAND_TX_RX ||
		state == DRM_DP_SIDEBAND_TX_TIMEOUT);
}

static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	int ret;

	ret = wait_event_timeout(mgr->tx_waitq,
				 check_txmsg_state(mgr, txmsg),
				 (4 * HZ));
	mutex_lock(&mstb->mgr->qlock);
	if (ret > 0) {
		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
			ret = -EIO;
			goto out;
		}
	} else {
		DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);

		/* dump some state */
		ret = -EIO;

		/* remove from q */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
			list_del(&txmsg->next);
		}

		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
			mstb->tx_slots[txmsg->seqno] = NULL;
		}
	}
out:
	mutex_unlock(&mgr->qlock);

	return ret;
}

static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;

	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
	if (!mstb)
		return NULL;

	mstb->lct = lct;
	if (lct > 1)
		memcpy(mstb->rad, rad, lct / 2);
	INIT_LIST_HEAD(&mstb->ports);
	kref_init(&mstb->topology_kref);
	kref_init(&mstb->malloc_kref);
	return mstb;
}

static void drm_dp_free_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, malloc_kref);

	if (mstb->port_parent)
		drm_dp_mst_put_port_malloc(mstb->port_parent);

	kfree(mstb);
}

/**
 * DOC: Branch device and port refcounting
 *
 * Topology refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The refcounting schemes for &struct drm_dp_mst_branch and &struct
 * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have
 * two different kinds of refcounts: topology refcounts, and malloc refcounts.
 *
 * Topology refcounts are not exposed to drivers, and are handled internally
 * by the DP MST helpers. The helpers use them in order to prevent the
 * in-memory topology state from being changed in the middle of critical
 * operations like changing the internal state of payload allocations. This
 * means each branch and port will be considered to be connected to the rest
 * of the topology until its topology refcount reaches zero. Additionally,
 * for ports this means that their associated &struct drm_connector will stay
 * registered with userspace until the port's refcount reaches 0.
 *
 * Malloc refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Malloc references are used to keep a &struct drm_dp_mst_port or &struct
 * drm_dp_mst_branch allocated even after all of its topology references have
 * been dropped, so that the driver or MST helpers can safely access each
 * branch's last known state before it was disconnected from the topology.
 * When the malloc refcount of a port or branch reaches 0, the memory
 * allocation containing the &struct drm_dp_mst_branch or &struct
 * drm_dp_mst_port respectively will be freed.
 *
 * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
 * to drivers. As of writing this documentation, there are no drivers that
 * have a usecase for accessing &struct drm_dp_mst_branch outside of the MST
 * helpers. Exposing this API to drivers in a race-free manner would take more
 * tweaking of the refcounting scheme, however patches are welcome provided
 * there is a legitimate driver usecase for this.
 *
 * Refcount relationships in a topology
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Let's take a look at why the relationship between topology and malloc
 * refcounts is designed the way it is.
 *
 * .. kernel-figure:: dp-mst/topology-figure-1.dot
 *
 *    An example of topology and malloc refs in a DP MST topology with two
 *    active payloads. Topology refcount increments are indicated by solid
 *    lines, and malloc refcount increments are indicated by dashed lines.
 *    Each starts from the branch which incremented the refcount, and ends at
 *    the branch to which the refcount belongs, i.e. the arrow points the
 *    same way as the C pointers used to reference a structure.
 *
 * As you can see in the above figure, every branch increments the topology
 * refcount of its children, and increments the malloc refcount of its
 * parent. Additionally, every payload increments the malloc refcount of its
 * assigned port by 1.
 *
 * So, what would happen if MSTB #3 from the above figure was unplugged from
 * the system, but the driver hadn't yet removed payload #2 from port #3? The
 * topology would start to look like the figure below.
 *
 * .. kernel-figure:: dp-mst/topology-figure-2.dot
 *
 *    Ports and branch devices which have been released from memory are
 *    colored grey, and references which have been removed are colored red.
 *
 * Whenever a port or branch device's topology refcount reaches zero, it will
 * decrement the topology refcounts of all its children, the malloc refcount
 * of its parent, and finally its own malloc refcount. For MSTB #4 and port
 * #4, this means they both have been disconnected from the topology and freed
 * from memory. But, because payload #2 is still holding a reference to port
 * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
 * is still accessible from memory. This also means port #3 has not yet
 * decremented the malloc refcount of MSTB #3, so its &struct
 * drm_dp_mst_branch will also stay allocated in memory until port #3's
 * malloc refcount reaches 0.
 *
 * This relationship is necessary because in order to release payload #2, we
 * need to be able to figure out the last relative of port #3 that's still
 * connected to the topology. In this case, we would travel up the topology as
 * shown below.
 *
 * .. kernel-figure:: dp-mst/topology-figure-3.dot
 *
 * And finally, remove payload #2 by communicating with port #2 through
 * sideband transactions.
 */
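
/*
 * A minimal sketch of what the malloc refcounts mean for drivers (the names
 * here are illustrative, not a real driver): any driver-private structure
 * that caches a port pointer past the lifetime of a single topology callback
 * should hold a malloc reference for as long as the pointer is stored:
 *
 *	struct example_connector {
 *		struct drm_connector base;
 *		struct drm_dp_mst_port *port;
 *	};
 *
 *	example_conn->port = port;
 *	drm_dp_mst_get_port_malloc(port);		(keep allocation alive)
 *	...
 *	drm_dp_mst_put_port_malloc(example_conn->port);	(on destroy)
 *
 * Holding only a topology reference would not be enough, since the port can
 * be removed from the topology while the driver still needs to read its
 * last known state.
 */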

/**
 * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_put_mstb_malloc()
 */
static void
drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	kref_get(&mstb->malloc_kref);
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
}

/**
 * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_get_mstb_malloc()
 */
static void
drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
	kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
}

static void drm_dp_free_mst_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, malloc_kref);

	drm_dp_mst_put_mstb_malloc(port->parent);
	kfree(port);
}

/**
 * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
 * reaches 0, the memory allocation for @port will be released and @port may
 * no longer be used.
 *
 * Because @port could potentially be freed at any time by the DP MST helpers
 * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
 * function, drivers that wish to make use of &struct drm_dp_mst_port should
 * ensure that they grab at least one main malloc reference to their MST ports
 * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
 * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
 *
 * See also: drm_dp_mst_put_port_malloc()
 */
void
drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
{
	kref_get(&port->malloc_kref);
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref));
}
EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);

/**
 * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
 * reaches 0, the memory allocation for @port will be released and @port may
 * no longer be used.
 *
 * See also: drm_dp_mst_get_port_malloc()
 */
void
drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
{
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
	kref_put(&port->malloc_kref, drm_dp_free_mst_port);
}
EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);

static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, topology_kref);
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	struct drm_dp_mst_port *port, *tmp;
	bool wake_tx = false;

	mutex_lock(&mgr->lock);
	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
		list_del(&port->next);
		drm_dp_mst_topology_put_port(port);
	}
	mutex_unlock(&mgr->lock);

	/* drop any tx slots msg */
	mutex_lock(&mstb->mgr->qlock);
	if (mstb->tx_slots[0]) {
		mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[0] = NULL;
		wake_tx = true;
	}
	if (mstb->tx_slots[1]) {
		mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[1] = NULL;
		wake_tx = true;
	}
	mutex_unlock(&mstb->mgr->qlock);

	if (wake_tx)
		wake_up_all(&mstb->mgr->tx_waitq);

	drm_dp_mst_put_mstb_malloc(mstb);
}

/**
 * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
 * branch device unless it's zero
 * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Attempts to grab a topology reference to @mstb, if it hasn't yet been
 * removed from the topology (e.g. &drm_dp_mst_branch.topology_kref has
 * reached 0). Holding a topology reference implies that a malloc reference
 * will be held to @mstb as long as the user holds the topology reference.
 *
 * Care should be taken to ensure that the user has at least one malloc
 * reference to @mstb. If you already have a topology reference to @mstb, you
 * should use drm_dp_mst_topology_get_mstb() instead.
 *
 * See also:
 * drm_dp_mst_topology_get_mstb()
 * drm_dp_mst_topology_put_mstb()
 *
 * Returns:
 * * 1: A topology reference was grabbed successfully
 * * 0: @mstb is no longer in the topology, no reference was grabbed
 */
static int __must_check
drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
{
	int ret = kref_get_unless_zero(&mstb->topology_kref);

	if (ret)
		DRM_DEBUG("mstb %p (%d)\n", mstb,
			  kref_read(&mstb->topology_kref));

	return ret;
}

/**
 * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
 * branch device
 * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Increments &drm_dp_mst_branch.topology_kref without checking whether or
 * not it's already reached 0. This is only valid to use in scenarios where
 * you are already guaranteed to have at least one active topology reference
 * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
 *
 * See also:
 * drm_dp_mst_topology_try_get_mstb()
 * drm_dp_mst_topology_put_mstb()
 */
static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
{
	WARN_ON(kref_read(&mstb->topology_kref) == 0);
	kref_get(&mstb->topology_kref);
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
}

/**
 * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
 *
 * Releases a topology reference from @mstb by decrementing
 * &drm_dp_mst_branch.topology_kref.
 *
 * See also:
 * drm_dp_mst_topology_try_get_mstb()
 * drm_dp_mst_topology_get_mstb()
 */
static void
drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
{
	DRM_DEBUG("mstb %p (%d)\n",
		  mstb, kref_read(&mstb->topology_kref) - 1);
	kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
}

static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
{
	struct drm_dp_mst_branch *mstb;

	switch (old_pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* remove i2c over sideband */
		drm_dp_mst_unregister_i2c_bus(&port->aux);
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		mstb = port->mstb;
		port->mstb = NULL;
		drm_dp_mst_topology_put_mstb(mstb);
		break;
	}
}

static void drm_dp_destroy_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, topology_kref);
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;

	if (!port->input) {
		kfree(port->cached_edid);

		/*
		 * The only time we don't have a connector
		 * on an output port is if the connector init
		 * fails.
		 */
		if (port->connector) {
			/*
			 * We can't destroy the connector here, as we might be
			 * holding the mode_config.mutex from an EDID
			 * retrieval.
			 */
			mutex_lock(&mgr->destroy_connector_lock);
			list_add(&port->next, &mgr->destroy_connector_list);
			mutex_unlock(&mgr->destroy_connector_lock);
			schedule_work(&mgr->destroy_connector_work);
			return;
		}
		/*
		 * No need to clean up the VCPI, since without a connector we
		 * never set one up.
		 */
		drm_dp_port_teardown_pdt(port, port->pdt);
		port->pdt = DP_PEER_DEVICE_NONE;
	}
	drm_dp_mst_put_port_malloc(port);
}

/**
 * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
 * port unless it's zero
 * @port: &struct drm_dp_mst_port to increment the topology refcount of
 *
 * Attempts to grab a topology reference to @port, if it hasn't yet been
 * removed from the topology (e.g. &drm_dp_mst_port.topology_kref has reached
 * 0). Holding a topology reference implies that a malloc reference will be
 * held to @port as long as the user holds the topology reference.
 *
 * Care should be taken to ensure that the user has at least one malloc
 * reference to @port. If you already have a topology reference to @port, you
 * should use drm_dp_mst_topology_get_port() instead.
 *
 * See also:
 * drm_dp_mst_topology_get_port()
 * drm_dp_mst_topology_put_port()
 *
 * Returns:
 * * 1: A topology reference was grabbed successfully
 * * 0: @port is no longer in the topology, no reference was grabbed
 */
static int __must_check
drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
{
	int ret = kref_get_unless_zero(&port->topology_kref);

	if (ret)
		DRM_DEBUG("port %p (%d)\n", port,
			  kref_read(&port->topology_kref));

	return ret;
}

/**
 * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
 * @port: The &struct drm_dp_mst_port to increment the topology refcount of
 *
 * Increments &drm_dp_mst_port.topology_kref without checking whether or
 * not it's already reached 0. This is only valid to use in scenarios where
 * you are already guaranteed to have at least one active topology reference
 * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
 *
 * See also:
 * drm_dp_mst_topology_try_get_port()
 * drm_dp_mst_topology_put_port()
 */
static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
{
	WARN_ON(kref_read(&port->topology_kref) == 0);
	kref_get(&port->topology_kref);
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
}

/**
 * drm_dp_mst_topology_put_port() - release a topology reference to a port
 * @port: The &struct drm_dp_mst_port to release the topology reference from
 *
 * Releases a topology reference from @port by decrementing
 * &drm_dp_mst_port.topology_kref.
 *
 * See also:
 * drm_dp_mst_topology_try_get_port()
 * drm_dp_mst_topology_get_port()
 */
static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
{
	DRM_DEBUG("port %p (%d)\n",
		  port, kref_read(&port->topology_kref) - 1);
	kref_put(&port->topology_kref, drm_dp_destroy_port);
}
1343
1344 static struct drm_dp_mst_branch *
drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch * mstb,struct drm_dp_mst_branch * to_find)1345 drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
1346 struct drm_dp_mst_branch *to_find)
1347 {
1348 struct drm_dp_mst_port *port;
1349 struct drm_dp_mst_branch *rmstb;
1350
1351 if (to_find == mstb)
1352 return mstb;
1353
1354 list_for_each_entry(port, &mstb->ports, next) {
1355 if (port->mstb) {
1356 rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
1357 port->mstb, to_find);
1358 if (rmstb)
1359 return rmstb;
1360 }
1361 }
1362 return NULL;
1363 }
1364
1365 static struct drm_dp_mst_branch *
drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr * mgr,struct drm_dp_mst_branch * mstb)1366 drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
1367 struct drm_dp_mst_branch *mstb)
1368 {
1369 struct drm_dp_mst_branch *rmstb = NULL;
1370
1371 mutex_lock(&mgr->lock);
1372 if (mgr->mst_primary) {
1373 rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
1374 mgr->mst_primary, mstb);
1375
1376 if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
1377 rmstb = NULL;
1378 }
1379 mutex_unlock(&mgr->lock);
1380 return rmstb;
1381 }
1382
1383 static struct drm_dp_mst_port *
drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch * mstb,struct drm_dp_mst_port * to_find)1384 drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
1385 struct drm_dp_mst_port *to_find)
1386 {
1387 struct drm_dp_mst_port *port, *mport;
1388
1389 list_for_each_entry(port, &mstb->ports, next) {
1390 if (port == to_find)
1391 return port;
1392
1393 if (port->mstb) {
1394 mport = drm_dp_mst_topology_get_port_validated_locked(
1395 port->mstb, to_find);
1396 if (mport)
1397 return mport;
1398 }
1399 }
1400 return NULL;
1401 }
1402
1403 static struct drm_dp_mst_port *
drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr * mgr,struct drm_dp_mst_port * port)1404 drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
1405 struct drm_dp_mst_port *port)
1406 {
1407 struct drm_dp_mst_port *rport = NULL;
1408
1409 mutex_lock(&mgr->lock);
1410 if (mgr->mst_primary) {
1411 rport = drm_dp_mst_topology_get_port_validated_locked(
1412 mgr->mst_primary, port);
1413
1414 if (rport && !drm_dp_mst_topology_try_get_port(rport))
1415 rport = NULL;
1416 }
1417 mutex_unlock(&mgr->lock);
1418 return rport;
1419 }
1420
drm_dp_get_port(struct drm_dp_mst_branch * mstb,u8 port_num)1421 static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
1422 {
1423 struct drm_dp_mst_port *port;
1424 int ret;
1425
1426 list_for_each_entry(port, &mstb->ports, next) {
1427 if (port->port_num == port_num) {
1428 ret = drm_dp_mst_topology_try_get_port(port);
1429 return ret ? port : NULL;
1430 }
1431 }
1432
1433 return NULL;
1434 }
1435
1436 /*
1437 * calculate a new RAD for this MST branch device
1438 * if parent has an LCT of 2 then it has 1 nibble of RAD,
1439 * if parent has an LCT of 3 then it has 2 nibbles of RAD,
1440 */
drm_dp_calculate_rad(struct drm_dp_mst_port * port,u8 * rad)1441 static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
1442 u8 *rad)
1443 {
1444 int parent_lct = port->parent->lct;
1445 int shift = 4;
1446 int idx = (parent_lct - 1) / 2;
1447 if (parent_lct > 1) {
1448 memcpy(rad, port->parent->rad, idx + 1);
1449 shift = (parent_lct % 2) ? 4 : 0;
1450 } else
1451 rad[0] = 0;
1452
1453 rad[idx] |= port->port_num << shift;
1454 return parent_lct + 1;
1455 }
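
/*
 * Worked example of the RAD calculation above: a port with port_num 3 on a
 * branch directly below the primary (parent_lct == 1) yields lct 2 and
 * rad[0] == 0x30 (port 3 in the high nibble). A port with port_num 2 one
 * hop further down (parent_lct == 2) keeps that byte and packs its own
 * number into the low nibble, giving lct 3 and rad[0] == 0x32.
 */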

/*
 * return true if the caller should send a link address request for the
 * newly created mstb
 */
static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
{
	int ret;
	u8 rad[6], lct;
	bool send_link = false;

	switch (port->pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* add i2c over sideband */
		ret = drm_dp_mst_register_i2c_bus(&port->aux);
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		lct = drm_dp_calculate_rad(port, rad);

		port->mstb = drm_dp_add_mst_branch_device(lct, rad);
		if (port->mstb) {
			port->mstb->mgr = port->mgr;
			port->mstb->port_parent = port;
			/*
			 * Make sure this port's memory allocation stays
			 * around until its child MSTB releases it
			 */
			drm_dp_mst_get_port_malloc(port);

			send_link = true;
		}
		break;
	}
	return send_link;
}

/**
 * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
 * @aux: Fake sideband AUX CH
 * @offset: address of the (first) register to read
 * @buffer: buffer to store the register values
 * @size: number of bytes in @buffer
 *
 * Performs the same functionality for remote devices via
 * sideband messaging as drm_dp_dpcd_read() does for local
 * devices via actual AUX CH.
 *
 * Return: Number of bytes read, or negative error code on failure.
 */
ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
			     unsigned int offset, void *buffer, size_t size)
{
	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
						    aux);

	return drm_dp_send_dpcd_read(port->mgr, port,
				     offset, size, buffer);
}

/**
 * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
 * @aux: Fake sideband AUX CH
 * @offset: address of the (first) register to write
 * @buffer: buffer containing the values to write
 * @size: number of bytes in @buffer
 *
 * Performs the same functionality for remote devices via
 * sideband messaging as drm_dp_dpcd_write() does for local
 * devices via actual AUX CH.
 *
 * Return: 0 on success, negative error code on failure.
 */
ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
			      unsigned int offset, void *buffer, size_t size)
{
	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
						    aux);

	return drm_dp_send_dpcd_write(port->mgr, port,
				      offset, size, buffer);
}

static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
{
	int ret;

	memcpy(mstb->guid, guid, 16);

	if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
		if (mstb->port_parent) {
			ret = drm_dp_send_dpcd_write(
					mstb->mgr,
					mstb->port_parent,
					DP_GUID,
					16,
					mstb->guid);
		} else {
			ret = drm_dp_dpcd_write(
					mstb->mgr->aux,
					DP_GUID,
					mstb->guid,
					16);
		}
	}
}

static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
				int pnum,
				char *proppath,
				size_t proppath_size)
{
	int i;
	char temp[8];

	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
	for (i = 0; i < (mstb->lct - 1); i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;

		snprintf(temp, sizeof(temp), "-%d", port_num);
		strlcat(proppath, temp, proppath_size);
	}
	snprintf(temp, sizeof(temp), "-%d", pnum);
	strlcat(proppath, temp, proppath_size);
}
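
/*
 * For example, with conn_base_id 34, a port number 2 hanging off a branch
 * reached through port 1 of the primary branch device gets the connector
 * path property "mst:34-1-2": the base connector id, one "-%d" segment per
 * hop in the RAD, and finally the port number itself.
 */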
1579
1580 /**
1581 * drm_dp_mst_connector_late_register() - Late MST connector registration
1582 * @connector: The MST connector
1583 * @port: The MST port for this connector
1584 *
1585 * Helper to register the remote aux device for this MST port. Drivers should
1586 * call this from their mst connector's late_register hook to enable MST aux
1587 * devices.
1588 *
1589 * Return: 0 on success, negative error code on failure.
1590 */
1591 int drm_dp_mst_connector_late_register(struct drm_connector *connector,
1592 struct drm_dp_mst_port *port)
1593 {
1594 DRM_DEBUG_KMS("registering %s remote bus for %s\n",
1595 port->aux.name, connector->kdev->kobj.name);
1596
1597 port->aux.dev = connector->kdev;
1598 return drm_dp_aux_register_devnode(&port->aux);
1599 }
1600 EXPORT_SYMBOL(drm_dp_mst_connector_late_register);
1601
1602 /**
1603 * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
1604 * @connector: The MST connector
1605 * @port: The MST port for this connector
1606 *
1607 * Helper to unregister the remote aux device for this MST port, registered by
1608 * drm_dp_mst_connector_late_register(). Drivers should call this from their mst
1609 * connector's early_unregister hook.
1610 */
1611 void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
1612 struct drm_dp_mst_port *port)
1613 {
1614 DRM_DEBUG_KMS("unregistering %s remote bus for %s\n",
1615 port->aux.name, connector->kdev->kobj.name);
1616 drm_dp_aux_unregister_devnode(&port->aux);
1617 }
1618 EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
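
/*
 * Usage sketch (illustrative, driver-side): a driver would typically wire
 * these two helpers into its MST connector funcs, where my_mst_connector()
 * and its ->port member are hypothetical:
 *
 *	static int my_mst_late_register(struct drm_connector *connector)
 *	{
 *		return drm_dp_mst_connector_late_register(connector,
 *					my_mst_connector(connector)->port);
 *	}
 *
 *	static void my_mst_early_unregister(struct drm_connector *connector)
 *	{
 *		drm_dp_mst_connector_early_unregister(connector,
 *					my_mst_connector(connector)->port);
 *	}
 */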
1619
1620 static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
1621 struct drm_device *dev,
1622 struct drm_dp_link_addr_reply_port *port_msg)
1623 {
1624 struct drm_dp_mst_port *port;
1625 bool ret;
1626 bool created = false;
1627 int old_pdt = 0;
1628 int old_ddps = 0;
1629
1630 port = drm_dp_get_port(mstb, port_msg->port_number);
1631 if (!port) {
1632 port = kzalloc(sizeof(*port), GFP_KERNEL);
1633 if (!port)
1634 return;
1635 kref_init(&port->topology_kref);
1636 kref_init(&port->malloc_kref);
1637 port->parent = mstb;
1638 port->port_num = port_msg->port_number;
1639 port->mgr = mstb->mgr;
1640 port->aux.name = "DPMST";
1641 port->aux.dev = dev->dev;
1642 port->aux.is_remote = true;
1643
1644 /*
1645 * Make sure the memory allocation for our parent branch stays
1646 * around until our own memory allocation is released
1647 */
1648 drm_dp_mst_get_mstb_malloc(mstb);
1649
1650 created = true;
1651 } else {
1652 old_pdt = port->pdt;
1653 old_ddps = port->ddps;
1654 }
1655
1656 port->pdt = port_msg->peer_device_type;
1657 port->input = port_msg->input_port;
1658 port->mcs = port_msg->mcs;
1659 port->ddps = port_msg->ddps;
1660 port->ldps = port_msg->legacy_device_plug_status;
1661 port->dpcd_rev = port_msg->dpcd_revision;
1662 port->num_sdp_streams = port_msg->num_sdp_streams;
1663 port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
1664
1665 	/* manage mstb port lists with the mgr lock; take a reference for this list */
1667 if (created) {
1668 mutex_lock(&mstb->mgr->lock);
1669 drm_dp_mst_topology_get_port(port);
1670 list_add(&port->next, &mstb->ports);
1671 mutex_unlock(&mstb->mgr->lock);
1672 }
1673
1674 if (old_ddps != port->ddps) {
1675 if (port->ddps) {
1676 if (!port->input) {
1677 drm_dp_send_enum_path_resources(mstb->mgr,
1678 mstb, port);
1679 }
1680 } else {
1681 port->available_pbn = 0;
1682 }
1683 }
1684
1685 if (old_pdt != port->pdt && !port->input) {
1686 drm_dp_port_teardown_pdt(port, old_pdt);
1687
1688 ret = drm_dp_port_setup_pdt(port);
1689 		if (ret)
1690 drm_dp_send_link_address(mstb->mgr, port->mstb);
1691 }
1692
1693 if (created && !port->input) {
1694 char proppath[255];
1695
1696 build_mst_prop_path(mstb, port->port_num, proppath,
1697 sizeof(proppath));
1698 port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr,
1699 port,
1700 proppath);
1701 if (!port->connector) {
1702 /* remove it from the port list */
1703 mutex_lock(&mstb->mgr->lock);
1704 list_del(&port->next);
1705 mutex_unlock(&mstb->mgr->lock);
1706 /* drop port list reference */
1707 drm_dp_mst_topology_put_port(port);
1708 goto out;
1709 }
1710 if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
1711 port->pdt == DP_PEER_DEVICE_SST_SINK) &&
1712 port->port_num >= DP_MST_LOGICAL_PORT_0) {
1713 port->cached_edid = drm_get_edid(port->connector,
1714 &port->aux.ddc);
1715 drm_connector_set_tile_property(port->connector);
1716 }
1717 (*mstb->mgr->cbs->register_connector)(port->connector);
1718 }
1719
1720 out:
1721 /* put reference to this port */
1722 drm_dp_mst_topology_put_port(port);
1723 }
1724
1725 static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
1726 struct drm_dp_connection_status_notify *conn_stat)
1727 {
1728 struct drm_dp_mst_port *port;
1729 int old_pdt;
1730 int old_ddps;
1731 bool dowork = false;
1732 port = drm_dp_get_port(mstb, conn_stat->port_number);
1733 if (!port)
1734 return;
1735
1736 old_ddps = port->ddps;
1737 old_pdt = port->pdt;
1738 port->pdt = conn_stat->peer_device_type;
1739 port->mcs = conn_stat->message_capability_status;
1740 port->ldps = conn_stat->legacy_device_plug_status;
1741 port->ddps = conn_stat->displayport_device_plug_status;
1742
1743 if (old_ddps != port->ddps) {
1744 if (port->ddps) {
1745 dowork = true;
1746 } else {
1747 port->available_pbn = 0;
1748 }
1749 }
1750 if (old_pdt != port->pdt && !port->input) {
1751 drm_dp_port_teardown_pdt(port, old_pdt);
1752
1753 if (drm_dp_port_setup_pdt(port))
1754 dowork = true;
1755 }
1756
1757 drm_dp_mst_topology_put_port(port);
1758 if (dowork)
1759 queue_work(system_long_wq, &mstb->mgr->work);
1761 }
1762
1763 static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
1764 u8 lct, u8 *rad)
1765 {
1766 struct drm_dp_mst_branch *mstb;
1767 struct drm_dp_mst_port *port;
1768 int i, ret;
1769 /* find the port by iterating down */
1770
1771 mutex_lock(&mgr->lock);
1772 mstb = mgr->mst_primary;
1773
1774 if (!mstb)
1775 goto out;
1776
1777 for (i = 0; i < lct - 1; i++) {
1778 int shift = (i % 2) ? 0 : 4;
1779 int port_num = (rad[i / 2] >> shift) & 0xf;
1780
1781 list_for_each_entry(port, &mstb->ports, next) {
1782 if (port->port_num == port_num) {
1783 mstb = port->mstb;
1784 if (!mstb) {
1785 DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
1786 goto out;
1787 }
1788
1789 break;
1790 }
1791 }
1792 }
1793 ret = drm_dp_mst_topology_try_get_mstb(mstb);
1794 if (!ret)
1795 mstb = NULL;
1796 out:
1797 mutex_unlock(&mgr->lock);
1798 return mstb;
1799 }
1800
1801 static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
1802 struct drm_dp_mst_branch *mstb,
1803 uint8_t *guid)
1804 {
1805 struct drm_dp_mst_branch *found_mstb;
1806 struct drm_dp_mst_port *port;
1807
1808 if (memcmp(mstb->guid, guid, 16) == 0)
1809 return mstb;
1810 
1812 list_for_each_entry(port, &mstb->ports, next) {
1813 if (!port->mstb)
1814 continue;
1815
1816 found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
1817
1818 if (found_mstb)
1819 return found_mstb;
1820 }
1821
1822 return NULL;
1823 }
1824
1825 static struct drm_dp_mst_branch *
1826 drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
1827 uint8_t *guid)
1828 {
1829 struct drm_dp_mst_branch *mstb;
1830 int ret;
1831
1832 /* find the port by iterating down */
1833 mutex_lock(&mgr->lock);
1834
1835 mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
1836 if (mstb) {
1837 ret = drm_dp_mst_topology_try_get_mstb(mstb);
1838 if (!ret)
1839 mstb = NULL;
1840 }
1841
1842 mutex_unlock(&mgr->lock);
1843 return mstb;
1844 }
1845
1846 static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
1847 struct drm_dp_mst_branch *mstb)
1848 {
1849 struct drm_dp_mst_port *port;
1850 struct drm_dp_mst_branch *mstb_child;
1851 if (!mstb->link_address_sent)
1852 drm_dp_send_link_address(mgr, mstb);
1853
1854 list_for_each_entry(port, &mstb->ports, next) {
1855 if (port->input)
1856 continue;
1857
1858 if (!port->ddps)
1859 continue;
1860
1861 if (!port->available_pbn)
1862 drm_dp_send_enum_path_resources(mgr, mstb, port);
1863
1864 if (port->mstb) {
1865 mstb_child = drm_dp_mst_topology_get_mstb_validated(
1866 mgr, port->mstb);
1867 if (mstb_child) {
1868 drm_dp_check_and_send_link_address(mgr, mstb_child);
1869 drm_dp_mst_topology_put_mstb(mstb_child);
1870 }
1871 }
1872 }
1873 }
1874
1875 static void drm_dp_mst_link_probe_work(struct work_struct *work)
1876 {
1877 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
1878 struct drm_dp_mst_branch *mstb;
1879 int ret;
1880
1881 mutex_lock(&mgr->lock);
1882 mstb = mgr->mst_primary;
1883 if (mstb) {
1884 ret = drm_dp_mst_topology_try_get_mstb(mstb);
1885 if (!ret)
1886 mstb = NULL;
1887 }
1888 mutex_unlock(&mgr->lock);
1889 if (mstb) {
1890 drm_dp_check_and_send_link_address(mgr, mstb);
1891 drm_dp_mst_topology_put_mstb(mstb);
1892 }
1893 }
1894
1895 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
1896 u8 *guid)
1897 {
1898 u64 salt;
1899
1900 if (memchr_inv(guid, 0, 16))
1901 return true;
1902
1903 salt = get_jiffies_64();
1904
1905 memcpy(&guid[0], &salt, sizeof(u64));
1906 memcpy(&guid[8], &salt, sizeof(u64));
1907
1908 return false;
1909 }
1910
1911 static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
1912 {
1913 struct drm_dp_sideband_msg_req_body req;
1914
1915 req.req_type = DP_REMOTE_DPCD_READ;
1916 req.u.dpcd_read.port_number = port_num;
1917 req.u.dpcd_read.dpcd_address = offset;
1918 req.u.dpcd_read.num_bytes = num_bytes;
1919 drm_dp_encode_sideband_req(&req, msg);
1920
1921 return 0;
1922 }
1923
1924 static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
1925 bool up, u8 *msg, int len)
1926 {
1927 int ret;
1928 int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
1929 int tosend, total, offset;
1930 int retries = 0;
1931
1932 retry:
1933 total = len;
1934 offset = 0;
1935 do {
1936 tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
1937
1938 ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
1939 &msg[offset],
1940 tosend);
1941 if (ret != tosend) {
1942 if (ret == -EIO && retries < 5) {
1943 retries++;
1944 goto retry;
1945 }
1946 DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
1947
1948 return -EIO;
1949 }
1950 offset += tosend;
1951 total -= tosend;
1952 } while (total > 0);
1953 return 0;
1954 }
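
/*
 * Worked example for the loop above: with max_dpcd_transaction_bytes of 16,
 * a 40 byte sideband message goes out as three DPCD writes of 16, 16 and 8
 * bytes at regbase + 0, + 16 and + 32 respectively.
 */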
1955
1956 static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
1957 struct drm_dp_sideband_msg_tx *txmsg)
1958 {
1959 struct drm_dp_mst_branch *mstb = txmsg->dst;
1960 u8 req_type;
1961
1962 /* both msg slots are full */
1963 if (txmsg->seqno == -1) {
1964 if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
1965 DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
1966 return -EAGAIN;
1967 }
1968 if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
1969 txmsg->seqno = mstb->last_seqno;
1970 mstb->last_seqno ^= 1;
1971 } else if (mstb->tx_slots[0] == NULL)
1972 txmsg->seqno = 0;
1973 else
1974 txmsg->seqno = 1;
1975 mstb->tx_slots[txmsg->seqno] = txmsg;
1976 }
1977
1978 req_type = txmsg->msg[0] & 0x7f;
1979 if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
1980 req_type == DP_RESOURCE_STATUS_NOTIFY)
1981 hdr->broadcast = 1;
1982 else
1983 hdr->broadcast = 0;
1984 hdr->path_msg = txmsg->path_msg;
1985 hdr->lct = mstb->lct;
1986 hdr->lcr = mstb->lct - 1;
1987 if (mstb->lct > 1)
1988 memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
1989 hdr->seqno = txmsg->seqno;
1990 return 0;
1991 }
1992 /*
1993 * process a single block of the next message in the sideband queue
1994 */
1995 static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
1996 struct drm_dp_sideband_msg_tx *txmsg,
1997 bool up)
1998 {
1999 u8 chunk[48];
2000 struct drm_dp_sideband_msg_hdr hdr;
2001 int len, space, idx, tosend;
2002 int ret;
2003
2004 memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
2005
2006 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
2007 txmsg->seqno = -1;
2008 txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
2009 }
2010
2011 	/* make hdr from dst mst; for replies use seqno, otherwise assign one */
2013 ret = set_hdr_from_dst_qlock(&hdr, txmsg);
2014 if (ret < 0)
2015 return ret;
2016
2017 /* amount left to send in this message */
2018 len = txmsg->cur_len - txmsg->cur_offset;
2019
2020 	/* 48 byte sideband msg chunk, minus 1 byte for the data CRC and the header bytes */
2021 space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
2022
2023 tosend = min(len, space);
2024 if (len == txmsg->cur_len)
2025 hdr.somt = 1;
2026 if (space >= len)
2027 hdr.eomt = 1;
2028 
2030 hdr.msg_len = tosend + 1;
2031 drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
2032 memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
2033 /* add crc at end */
2034 drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
2035 idx += tosend + 1;
2036
2037 ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
2038 if (ret) {
2039 DRM_DEBUG_KMS("sideband msg failed to send\n");
2040 return ret;
2041 }
2042
2043 txmsg->cur_offset += tosend;
2044 if (txmsg->cur_offset == txmsg->cur_len) {
2045 txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
2046 return 1;
2047 }
2048 return 0;
2049 }
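
/*
 * Worked example for the chunking above: for a message to an lct 1 branch
 * the header is 3 bytes, so space is 48 - 1 - 3 = 44 payload bytes per
 * chunk; a 100 byte message is therefore sent as chunks of 44 (somt), 44
 * and 12 (eomt) bytes.
 */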
2050
2051 static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
2052 {
2053 struct drm_dp_sideband_msg_tx *txmsg;
2054 int ret;
2055
2056 WARN_ON(!mutex_is_locked(&mgr->qlock));
2057
2058 /* construct a chunk from the first msg in the tx_msg queue */
2059 if (list_empty(&mgr->tx_msg_downq))
2060 return;
2061
2062 txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
2063 ret = process_single_tx_qlock(mgr, txmsg, false);
2064 if (ret == 1) {
2065 		/* txmsg is sent; it should be in the slots now */
2066 list_del(&txmsg->next);
2067 } else if (ret) {
2068 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
2069 list_del(&txmsg->next);
2070 if (txmsg->seqno != -1)
2071 txmsg->dst->tx_slots[txmsg->seqno] = NULL;
2072 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
2073 wake_up_all(&mgr->tx_waitq);
2074 }
2075 }
2076
2077 /* called holding qlock */
2078 static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
2079 struct drm_dp_sideband_msg_tx *txmsg)
2080 {
2081 int ret;
2082
2083 /* construct a chunk from the first msg in the tx_msg queue */
2084 ret = process_single_tx_qlock(mgr, txmsg, true);
2085
2086 if (ret != 1)
2087 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
2088
2089 if (txmsg->seqno != -1) {
2090 WARN_ON((unsigned int)txmsg->seqno >
2091 ARRAY_SIZE(txmsg->dst->tx_slots));
2092 txmsg->dst->tx_slots[txmsg->seqno] = NULL;
2093 }
2094 }
2095
2096 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
2097 struct drm_dp_sideband_msg_tx *txmsg)
2098 {
2099 mutex_lock(&mgr->qlock);
2100 list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
2101 if (list_is_singular(&mgr->tx_msg_downq))
2102 process_single_down_tx_qlock(mgr);
2103 mutex_unlock(&mgr->qlock);
2104 }
2105
2106 static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2107 struct drm_dp_mst_branch *mstb)
2108 {
2109 int len;
2110 struct drm_dp_sideband_msg_tx *txmsg;
2111 int ret;
2112
2113 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2114 if (!txmsg)
2115 return;
2116
2117 txmsg->dst = mstb;
2118 len = build_link_address(txmsg);
2119
2120 mstb->link_address_sent = true;
2121 drm_dp_queue_down_tx(mgr, txmsg);
2122
2123 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2124 if (ret > 0) {
2125 int i;
2126
2127 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2128 DRM_DEBUG_KMS("link address nak received\n");
2129 } else {
2130 DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
2131 for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
2132 DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
2133 txmsg->reply.u.link_addr.ports[i].input_port,
2134 txmsg->reply.u.link_addr.ports[i].peer_device_type,
2135 txmsg->reply.u.link_addr.ports[i].port_number,
2136 txmsg->reply.u.link_addr.ports[i].dpcd_revision,
2137 txmsg->reply.u.link_addr.ports[i].mcs,
2138 txmsg->reply.u.link_addr.ports[i].ddps,
2139 txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
2140 txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
2141 txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
2142 }
2143
2144 drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);
2145
2146 for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
2147 drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
2148 }
2149 drm_kms_helper_hotplug_event(mgr->dev);
2150 }
2151 } else {
2152 mstb->link_address_sent = false;
2153 DRM_DEBUG_KMS("link address failed %d\n", ret);
2154 }
2155
2156 kfree(txmsg);
2157 }
2158
2159 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
2160 struct drm_dp_mst_branch *mstb,
2161 struct drm_dp_mst_port *port)
2162 {
2163 int len;
2164 struct drm_dp_sideband_msg_tx *txmsg;
2165 int ret;
2166
2167 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2168 if (!txmsg)
2169 return -ENOMEM;
2170
2171 txmsg->dst = mstb;
2172 len = build_enum_path_resources(txmsg, port->port_num);
2173
2174 drm_dp_queue_down_tx(mgr, txmsg);
2175
2176 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2177 if (ret > 0) {
2178 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2179 DRM_DEBUG_KMS("enum path resources nak received\n");
2180 } else {
2181 if (port->port_num != txmsg->reply.u.path_resources.port_number)
2182 DRM_ERROR("got incorrect port in response\n");
2183 DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
2184 txmsg->reply.u.path_resources.avail_payload_bw_number);
2185 port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
2186 }
2187 }
2188
2189 kfree(txmsg);
2190 return 0;
2191 }
2192
2193 static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
2194 {
2195 if (!mstb->port_parent)
2196 return NULL;
2197
2198 if (mstb->port_parent->mstb != mstb)
2199 return mstb->port_parent;
2200
2201 return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
2202 }
2203
2204 /*
2205 * Searches upwards in the topology starting from mstb to try to find the
2206 * closest available parent of mstb that's still connected to the rest of the
2207 * topology. This can be used in order to perform operations like releasing
2208 * payloads, where the branch device which owned the payload may no longer be
2209 * around and thus would require that the payload on the last living relative
2210 * be freed instead.
2211 */
2212 static struct drm_dp_mst_branch *
2213 drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
2214 struct drm_dp_mst_branch *mstb,
2215 int *port_num)
2216 {
2217 struct drm_dp_mst_branch *rmstb = NULL;
2218 struct drm_dp_mst_port *found_port;
2219
2220 mutex_lock(&mgr->lock);
2221 if (!mgr->mst_primary)
2222 goto out;
2223
2224 do {
2225 found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
2226 if (!found_port)
2227 break;
2228
2229 if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
2230 rmstb = found_port->parent;
2231 *port_num = found_port->port_num;
2232 } else {
2233 /* Search again, starting from this parent */
2234 mstb = found_port->parent;
2235 }
2236 } while (!rmstb);
2237 out:
2238 mutex_unlock(&mgr->lock);
2239 return rmstb;
2240 }
2241
2242 static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
2243 struct drm_dp_mst_port *port,
2244 int id,
2245 int pbn)
2246 {
2247 struct drm_dp_sideband_msg_tx *txmsg;
2248 struct drm_dp_mst_branch *mstb;
2249 int len, ret, port_num;
2250 u8 sinks[DRM_DP_MAX_SDP_STREAMS];
2251 int i;
2252
2253 port_num = port->port_num;
2254 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
2255 if (!mstb) {
2256 mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
2257 port->parent,
2258 &port_num);
2259
2260 if (!mstb)
2261 return -EINVAL;
2262 }
2263
2264 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2265 if (!txmsg) {
2266 ret = -ENOMEM;
2267 goto fail_put;
2268 }
2269
2270 for (i = 0; i < port->num_sdp_streams; i++)
2271 sinks[i] = i;
2272
2273 txmsg->dst = mstb;
2274 len = build_allocate_payload(txmsg, port_num,
2275 id,
2276 pbn, port->num_sdp_streams, sinks);
2277
2278 drm_dp_queue_down_tx(mgr, txmsg);
2279
2280 /*
2281 * FIXME: there is a small chance that between getting the last
2282 * connected mstb and sending the payload message, the last connected
2283 * mstb could also be removed from the topology. In the future, this
2284 * needs to be fixed by restarting the
2285 * drm_dp_get_last_connected_port_and_mstb() search in the event of a
2286 * timeout if the topology is still connected to the system.
2287 */
2288 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2289 if (ret > 0) {
2290 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
2291 ret = -EINVAL;
2292 else
2293 ret = 0;
2294 }
2295 kfree(txmsg);
2296 fail_put:
2297 drm_dp_mst_topology_put_mstb(mstb);
2298 return ret;
2299 }
2300
2301 int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
2302 struct drm_dp_mst_port *port, bool power_up)
2303 {
2304 struct drm_dp_sideband_msg_tx *txmsg;
2305 int len, ret;
2306
2307 port = drm_dp_mst_topology_get_port_validated(mgr, port);
2308 if (!port)
2309 return -EINVAL;
2310
2311 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2312 if (!txmsg) {
2313 drm_dp_mst_topology_put_port(port);
2314 return -ENOMEM;
2315 }
2316
2317 txmsg->dst = port->parent;
2318 len = build_power_updown_phy(txmsg, port->port_num, power_up);
2319 drm_dp_queue_down_tx(mgr, txmsg);
2320
2321 ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
2322 if (ret > 0) {
2323 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
2324 ret = -EINVAL;
2325 else
2326 ret = 0;
2327 }
2328 kfree(txmsg);
2329 drm_dp_mst_topology_put_port(port);
2330
2331 return ret;
2332 }
2333 EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
2334
2335 static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
2336 int id,
2337 struct drm_dp_payload *payload)
2338 {
2339 int ret;
2340
2341 ret = drm_dp_dpcd_write_payload(mgr, id, payload);
2342 if (ret < 0) {
2343 payload->payload_state = 0;
2344 return ret;
2345 }
2346 payload->payload_state = DP_PAYLOAD_LOCAL;
2347 return 0;
2348 }
2349
2350 static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
2351 struct drm_dp_mst_port *port,
2352 int id,
2353 struct drm_dp_payload *payload)
2354 {
2355 int ret;
2356 ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
2357 if (ret < 0)
2358 return ret;
2359 payload->payload_state = DP_PAYLOAD_REMOTE;
2360 return ret;
2361 }
2362
2363 static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
2364 struct drm_dp_mst_port *port,
2365 int id,
2366 struct drm_dp_payload *payload)
2367 {
2368 DRM_DEBUG_KMS("\n");
2369 /* it's okay for these to fail */
2370 	if (port)
2371 		drm_dp_payload_send_msg(mgr, port, id, 0);
2373
2374 drm_dp_dpcd_write_payload(mgr, id, payload);
2375 payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
2376 return 0;
2377 }
2378
2379 static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
2380 int id,
2381 struct drm_dp_payload *payload)
2382 {
2383 payload->payload_state = 0;
2384 return 0;
2385 }
2386
2387 /**
2388 * drm_dp_update_payload_part1() - Execute payload update part 1
2389 * @mgr: manager to use.
2390 *
2391 * This iterates over all proposed virtual channels, and tries to
2392 * allocate space in the link for them. For 0->slots transitions,
2393 * this step just writes the VCPI to the MST device. For slots->0
2394 * transitions, this writes the updated VCPIs and removes the
2395 * remote VC payloads.
2396 *
2397  * After calling this, the driver should generate ACT and payload
2398  * packets.
2399 */
2400 int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
2401 {
2402 struct drm_dp_payload req_payload;
2403 struct drm_dp_mst_port *port;
2404 int i, j;
2405 int cur_slots = 1;
2406
2407 mutex_lock(&mgr->payload_lock);
2408 for (i = 0; i < mgr->max_payloads; i++) {
2409 struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
2410 struct drm_dp_payload *payload = &mgr->payloads[i];
2411 bool put_port = false;
2412
2413 		/* solve the current payloads - compare to the hw ones and
2414 		 * update the hw view */
2415 req_payload.start_slot = cur_slots;
2416 if (vcpi) {
2417 port = container_of(vcpi, struct drm_dp_mst_port,
2418 vcpi);
2419
2420 /* Validated ports don't matter if we're releasing
2421 * VCPI
2422 */
2423 if (vcpi->num_slots) {
2424 port = drm_dp_mst_topology_get_port_validated(
2425 mgr, port);
2426 if (!port) {
2427 mutex_unlock(&mgr->payload_lock);
2428 return -EINVAL;
2429 }
2430 put_port = true;
2431 }
2432
2433 req_payload.num_slots = vcpi->num_slots;
2434 req_payload.vcpi = vcpi->vcpi;
2435 } else {
2436 port = NULL;
2437 req_payload.num_slots = 0;
2438 }
2439
2440 payload->start_slot = req_payload.start_slot;
2441 /* work out what is required to happen with this payload */
2442 		if (payload->num_slots != req_payload.num_slots) {
2444 			/* need to push an update for this payload */
2445 			if (req_payload.num_slots) {
2446 				drm_dp_create_payload_step1(mgr, vcpi->vcpi,
2447 							    &req_payload);
2448 				payload->num_slots = req_payload.num_slots;
2449 				payload->vcpi = req_payload.vcpi;
2451 			} else if (payload->num_slots) {
2452 payload->num_slots = 0;
2453 drm_dp_destroy_payload_step1(mgr, port,
2454 payload->vcpi,
2455 payload);
2456 req_payload.payload_state =
2457 payload->payload_state;
2458 payload->start_slot = 0;
2459 }
2460 payload->payload_state = req_payload.payload_state;
2461 }
2462 cur_slots += req_payload.num_slots;
2463
2464 if (put_port)
2465 drm_dp_mst_topology_put_port(port);
2466 }
2467
2468 for (i = 0; i < mgr->max_payloads; /* do nothing */) {
2469 if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) {
2470 i++;
2471 continue;
2472 }
2473
2474 DRM_DEBUG_KMS("removing payload %d\n", i);
2475 for (j = i; j < mgr->max_payloads - 1; j++) {
2476 mgr->payloads[j] = mgr->payloads[j + 1];
2477 mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
2478
2479 if (mgr->proposed_vcpis[j] &&
2480 mgr->proposed_vcpis[j]->num_slots) {
2481 set_bit(j + 1, &mgr->payload_mask);
2482 } else {
2483 clear_bit(j + 1, &mgr->payload_mask);
2484 }
2485 }
2486
2487 memset(&mgr->payloads[mgr->max_payloads - 1], 0,
2488 sizeof(struct drm_dp_payload));
2489 mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
2490 clear_bit(mgr->max_payloads, &mgr->payload_mask);
2491 }
2492 mutex_unlock(&mgr->payload_lock);
2493
2494 return 0;
2495 }
2496 EXPORT_SYMBOL(drm_dp_update_payload_part1);
2497
2498 /**
2499 * drm_dp_update_payload_part2() - Execute payload update part 2
2500 * @mgr: manager to use.
2501 *
2502 * This iterates over all proposed virtual channels, and tries to
2503 * allocate space in the link for them. For 0->slots transitions,
2504  * this step writes the remote VC payload commands. For slots->0
2505  * transitions, this just resets some internal state.
2506 */
2507 int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
2508 {
2509 struct drm_dp_mst_port *port;
2510 int i;
2511 int ret = 0;
2512 mutex_lock(&mgr->payload_lock);
2513 for (i = 0; i < mgr->max_payloads; i++) {
2514
2515 if (!mgr->proposed_vcpis[i])
2516 continue;
2517
2518 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
2519
2520 DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
2521 if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
2522 ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
2523 } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
2524 ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
2525 }
2526 if (ret) {
2527 mutex_unlock(&mgr->payload_lock);
2528 return ret;
2529 }
2530 }
2531 mutex_unlock(&mgr->payload_lock);
2532 return 0;
2533 }
2534 EXPORT_SYMBOL(drm_dp_update_payload_part2);
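
/*
 * Usage sketch (illustrative): the typical commit sequence in a driver is
 * part 1, then waiting for the sink to signal ACT handling, then part 2:
 *
 *	drm_dp_update_payload_part1(mgr);
 *	... enable the stream and trigger the ACT ...
 *	drm_dp_check_act_status(mgr);
 *	drm_dp_update_payload_part2(mgr);
 */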
2535
2536 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
2537 struct drm_dp_mst_port *port,
2538 int offset, int size, u8 *bytes)
2539 {
2540 int len;
2541 int ret = 0;
2542 struct drm_dp_sideband_msg_tx *txmsg;
2543 struct drm_dp_mst_branch *mstb;
2544
2545 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
2546 if (!mstb)
2547 return -EINVAL;
2548
2549 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2550 if (!txmsg) {
2551 ret = -ENOMEM;
2552 goto fail_put;
2553 }
2554
2555 len = build_dpcd_read(txmsg, port->port_num, offset, size);
2556 txmsg->dst = port->parent;
2557
2558 drm_dp_queue_down_tx(mgr, txmsg);
2559
2560 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2561 if (ret < 0)
2562 goto fail_free;
2563
2564 	/* DPCD read should never be NAKed */
2565 	if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2566 DRM_ERROR("mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",
2567 mstb, port->port_num, offset, size);
2568 ret = -EIO;
2569 goto fail_free;
2570 }
2571
2572 if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) {
2573 ret = -EPROTO;
2574 goto fail_free;
2575 }
2576
2577 ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes,
2578 size);
2579 memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret);
2580
2581 fail_free:
2582 kfree(txmsg);
2583 fail_put:
2584 drm_dp_mst_topology_put_mstb(mstb);
2585
2586 return ret;
2587 }
2588
2589 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
2590 struct drm_dp_mst_port *port,
2591 int offset, int size, u8 *bytes)
2592 {
2593 int len;
2594 int ret;
2595 struct drm_dp_sideband_msg_tx *txmsg;
2596 struct drm_dp_mst_branch *mstb;
2597
2598 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
2599 if (!mstb)
2600 return -EINVAL;
2601
2602 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2603 if (!txmsg) {
2604 ret = -ENOMEM;
2605 goto fail_put;
2606 }
2607
2608 len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
2609 txmsg->dst = mstb;
2610
2611 drm_dp_queue_down_tx(mgr, txmsg);
2612
2613 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2614 if (ret > 0) {
2615 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
2616 ret = -EIO;
2617 else
2618 ret = 0;
2619 }
2620 kfree(txmsg);
2621 fail_put:
2622 drm_dp_mst_topology_put_mstb(mstb);
2623 return ret;
2624 }
2625
2626 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
2627 {
2628 struct drm_dp_sideband_msg_reply_body reply;
2629
2630 reply.reply_type = DP_SIDEBAND_REPLY_ACK;
2631 reply.req_type = req_type;
2632 drm_dp_encode_sideband_reply(&reply, msg);
2633 return 0;
2634 }
2635
2636 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
2637 struct drm_dp_mst_branch *mstb,
2638 int req_type, int seqno, bool broadcast)
2639 {
2640 struct drm_dp_sideband_msg_tx *txmsg;
2641
2642 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2643 if (!txmsg)
2644 return -ENOMEM;
2645
2646 txmsg->dst = mstb;
2647 txmsg->seqno = seqno;
2648 drm_dp_encode_up_ack_reply(txmsg, req_type);
2649
2650 mutex_lock(&mgr->qlock);
2651
2652 process_single_up_tx_qlock(mgr, txmsg);
2653
2654 mutex_unlock(&mgr->qlock);
2655
2656 kfree(txmsg);
2657 return 0;
2658 }
2659
2660 static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
2661 int dp_link_count,
2662 int *out)
2663 {
2664 switch (dp_link_bw) {
2665 default:
2666 DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
2667 dp_link_bw, dp_link_count);
2668 return false;
2669
2670 case DP_LINK_BW_1_62:
2671 *out = 3 * dp_link_count;
2672 break;
2673 case DP_LINK_BW_2_7:
2674 *out = 5 * dp_link_count;
2675 break;
2676 case DP_LINK_BW_5_4:
2677 *out = 10 * dp_link_count;
2678 break;
2679 case DP_LINK_BW_8_1:
2680 *out = 15 * dp_link_count;
2681 break;
2682 }
2683 return true;
2684 }
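
/*
 * For example: an HBR2 (DP_LINK_BW_5_4) link trained at 4 lanes gives
 * *out = 10 * 4 = 40, i.e. a divisor of 40 PBN per time slot when
 * converting stream bandwidth into the 63 usable MTP slots.
 */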
2685
2686 /**
2687 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
2688 * @mgr: manager to set state for
2689 * @mst_state: true to enable MST on this connector - false to disable.
2690 *
2691 * This is called by the driver when it detects an MST capable device plugged
2692 * into a DP MST capable port, or when a DP MST capable device is unplugged.
2693 */
2694 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
2695 {
2696 int ret = 0;
2697 struct drm_dp_mst_branch *mstb = NULL;
2698
2699 mutex_lock(&mgr->lock);
2700 if (mst_state == mgr->mst_state)
2701 goto out_unlock;
2702
2703 mgr->mst_state = mst_state;
2704 /* set the device into MST mode */
2705 if (mst_state) {
2706 WARN_ON(mgr->mst_primary);
2707
2708 /* get dpcd info */
2709 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
2710 if (ret != DP_RECEIVER_CAP_SIZE) {
2711 DRM_DEBUG_KMS("failed to read DPCD\n");
2712 goto out_unlock;
2713 }
2714
2715 if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
2716 mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,
2717 &mgr->pbn_div)) {
2718 ret = -EINVAL;
2719 goto out_unlock;
2720 }
2721
2722 /* add initial branch device at LCT 1 */
2723 mstb = drm_dp_add_mst_branch_device(1, NULL);
2724 if (mstb == NULL) {
2725 ret = -ENOMEM;
2726 goto out_unlock;
2727 }
2728 mstb->mgr = mgr;
2729
2730 /* give this the main reference */
2731 mgr->mst_primary = mstb;
2732 drm_dp_mst_topology_get_mstb(mgr->mst_primary);
2733
2734 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
2735 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
2736 		if (ret < 0)
2737 			goto out_unlock;
2739
2740 {
2741 struct drm_dp_payload reset_pay;
2742 reset_pay.start_slot = 0;
2743 reset_pay.num_slots = 0x3f;
2744 drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
2745 }
2746
2747 queue_work(system_long_wq, &mgr->work);
2748
2749 ret = 0;
2750 } else {
2751 /* disable MST on the device */
2752 mstb = mgr->mst_primary;
2753 mgr->mst_primary = NULL;
2754 /* this can fail if the device is gone */
2755 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
2756 ret = 0;
2757 memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
2758 mgr->payload_mask = 0;
2759 set_bit(0, &mgr->payload_mask);
2760 mgr->vcpi_mask = 0;
2761 }
2762
2763 out_unlock:
2764 mutex_unlock(&mgr->lock);
2765 if (mstb)
2766 drm_dp_mst_topology_put_mstb(mstb);
2767 return ret;
2769 }
2770 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
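
/*
 * Usage sketch (illustrative only, not called anywhere): enabling MST after
 * a long HPD when the sink advertises branch capability in its DPCD.
 */
static int __maybe_unused drm_dp_mst_enable_example(struct drm_dp_mst_topology_mgr *mgr)
{
	u8 mstm_cap;

	if (drm_dp_dpcd_readb(mgr->aux, DP_MSTM_CAP, &mstm_cap) != 1)
		return -EIO;

	if (!(mstm_cap & DP_MST_CAP))
		return -ENODEV;

	return drm_dp_mst_topology_mgr_set_mst(mgr, true);
}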
2771
2772 /**
2773 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
2774 * @mgr: manager to suspend
2775 *
2776 * This function tells the MST device that we can't handle UP messages
2777 * anymore. This should stop it from sending any since we are suspended.
2778 */
2779 void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
2780 {
2781 mutex_lock(&mgr->lock);
2782 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
2783 DP_MST_EN | DP_UPSTREAM_IS_SRC);
2784 mutex_unlock(&mgr->lock);
2785 flush_work(&mgr->work);
2786 flush_work(&mgr->destroy_connector_work);
2787 }
2788 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
2789
2790 /**
2791 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
2792 * @mgr: manager to resume
2793 *
2794  * This will fetch the DPCD and check whether the device is still there;
2795  * if it is, it will rewrite the MSTM control bits and return.
2796  *
2797  * If the device has failed, this returns -1 and the driver should do
2798  * a full MST reprobe, in case we were undocked.
2799 */
2800 int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
2801 {
2802 int ret = 0;
2803
2804 mutex_lock(&mgr->lock);
2805
2806 if (mgr->mst_primary) {
2807 int sret;
2808 u8 guid[16];
2809
2810 sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
2811 if (sret != DP_RECEIVER_CAP_SIZE) {
2812 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
2813 ret = -1;
2814 goto out_unlock;
2815 }
2816
2817 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
2818 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
2819 if (ret < 0) {
2820 DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
2821 ret = -1;
2822 goto out_unlock;
2823 }
2824
2825 /* Some hubs forget their guids after they resume */
2826 sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
2827 if (sret != 16) {
2828 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
2829 ret = -1;
2830 goto out_unlock;
2831 }
2832 drm_dp_check_mstb_guid(mgr->mst_primary, guid);
2833
2834 ret = 0;
2835 } else
2836 ret = -1;
2837
2838 out_unlock:
2839 mutex_unlock(&mgr->lock);
2840 return ret;
2841 }
2842 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
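
/*
 * Usage sketch (illustrative, driver-side): if the resume above fails, the
 * topology is assumed gone and the driver should tear MST down and reprobe:
 *
 *	if (drm_dp_mst_topology_mgr_resume(mgr) < 0) {
 *		drm_dp_mst_topology_mgr_set_mst(mgr, false);
 *		... do a full detect/reprobe of the port ...
 *	}
 */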
2843
2844 static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
2845 {
2846 int len;
2847 u8 replyblock[32];
2848 int replylen, origlen, curreply;
2849 int ret;
2850 struct drm_dp_sideband_msg_rx *msg;
2851 int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
2852 msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
2853
2854 len = min(mgr->max_dpcd_transaction_bytes, 16);
2855 ret = drm_dp_dpcd_read(mgr->aux, basereg,
2856 replyblock, len);
2857 if (ret != len) {
2858 DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
2859 return false;
2860 }
2861 ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
2862 if (!ret) {
2863 DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
2864 return false;
2865 }
2866 replylen = msg->curchunk_len + msg->curchunk_hdrlen;
2867
2868 origlen = replylen;
2869 replylen -= len;
2870 curreply = len;
2871 while (replylen > 0) {
2872 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
2873 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
2874 replyblock, len);
2875 if (ret != len) {
2876 DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
2877 len, ret);
2878 return false;
2879 }
2880
2881 ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
2882 if (!ret) {
2883 DRM_DEBUG_KMS("failed to build sideband msg\n");
2884 return false;
2885 }
2886
2887 curreply += len;
2888 replylen -= len;
2889 }
2890 return true;
2891 }
2892
2893 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
2894 {
2895 int ret = 0;
2896
2897 if (!drm_dp_get_one_sb_msg(mgr, false)) {
2898 memset(&mgr->down_rep_recv, 0,
2899 sizeof(struct drm_dp_sideband_msg_rx));
2900 return 0;
2901 }
2902
2903 if (mgr->down_rep_recv.have_eomt) {
2904 struct drm_dp_sideband_msg_tx *txmsg;
2905 struct drm_dp_mst_branch *mstb;
2906 int slot = -1;
2907 mstb = drm_dp_get_mst_branch_device(mgr,
2908 mgr->down_rep_recv.initial_hdr.lct,
2909 mgr->down_rep_recv.initial_hdr.rad);
2910
2911 if (!mstb) {
2912 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
2913 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2914 return 0;
2915 }
2916
2917 /* find the message */
2918 slot = mgr->down_rep_recv.initial_hdr.seqno;
2919 mutex_lock(&mgr->qlock);
2920 txmsg = mstb->tx_slots[slot];
2921 		/* the slot itself is cleared below, once the reply has been parsed */
2922 mutex_unlock(&mgr->qlock);
2923
2924 if (!txmsg) {
2925 DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
2926 mstb,
2927 mgr->down_rep_recv.initial_hdr.seqno,
2928 mgr->down_rep_recv.initial_hdr.lct,
2929 mgr->down_rep_recv.initial_hdr.rad[0],
2930 mgr->down_rep_recv.msg[0]);
2931 drm_dp_mst_topology_put_mstb(mstb);
2932 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2933 return 0;
2934 }
2935
2936 drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
2937
2938 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
2939 DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
2940 txmsg->reply.req_type,
2941 drm_dp_mst_req_type_str(txmsg->reply.req_type),
2942 txmsg->reply.u.nak.reason,
2943 drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
2944 txmsg->reply.u.nak.nak_data);
2945
2946 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2947 drm_dp_mst_topology_put_mstb(mstb);
2948
2949 mutex_lock(&mgr->qlock);
2950 txmsg->state = DRM_DP_SIDEBAND_TX_RX;
2951 mstb->tx_slots[slot] = NULL;
2952 mutex_unlock(&mgr->qlock);
2953
2954 wake_up_all(&mgr->tx_waitq);
2955 }
2956 return ret;
2957 }
2958
2959 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
2960 {
2961 int ret = 0;
2962
2963 if (!drm_dp_get_one_sb_msg(mgr, true)) {
2964 memset(&mgr->up_req_recv, 0,
2965 sizeof(struct drm_dp_sideband_msg_rx));
2966 return 0;
2967 }
2968
2969 if (mgr->up_req_recv.have_eomt) {
2970 struct drm_dp_sideband_msg_req_body msg;
2971 struct drm_dp_mst_branch *mstb = NULL;
2972 bool seqno;
2973
2974 if (!mgr->up_req_recv.initial_hdr.broadcast) {
2975 mstb = drm_dp_get_mst_branch_device(mgr,
2976 mgr->up_req_recv.initial_hdr.lct,
2977 mgr->up_req_recv.initial_hdr.rad);
2978 if (!mstb) {
2979 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
2980 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2981 return 0;
2982 }
2983 }
2984
2985 seqno = mgr->up_req_recv.initial_hdr.seqno;
2986 drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
2987
2988 if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
2989 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
2990
2991 if (!mstb)
2992 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);
2993
2994 if (!mstb) {
2995 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
2996 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2997 return 0;
2998 }
2999
3000 drm_dp_update_port(mstb, &msg.u.conn_stat);
3001
3002 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
3003 drm_kms_helper_hotplug_event(mgr->dev);
3004
3005 } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
3006 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
3007 if (!mstb)
3008 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);
3009
3010 if (!mstb) {
3011 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
3012 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3013 return 0;
3014 }
3015
3016 DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
3017 }
3018
3019 if (mstb)
3020 drm_dp_mst_topology_put_mstb(mstb);
3021
3022 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3023 }
3024 return ret;
3025 }
3026
3027 /**
3028 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
3029 * @mgr: manager to notify irq for.
3030 * @esi: 4 bytes from SINK_COUNT_ESI
3031 * @handled: whether the hpd interrupt was consumed or not
3032 *
3033 * This should be called from the driver when it detects a short IRQ,
3034 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
3035 * topology manager will process the sideband messages received as a result
3036 * of this.
3037 */
3038 int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
3039 {
3040 int ret = 0;
3041 int sc;
3042 *handled = false;
3043 sc = esi[0] & 0x3f;
3044
3045 if (sc != mgr->sink_count) {
3046 mgr->sink_count = sc;
3047 *handled = true;
3048 }
3049
3050 if (esi[1] & DP_DOWN_REP_MSG_RDY) {
3051 ret = drm_dp_mst_handle_down_rep(mgr);
3052 *handled = true;
3053 }
3054
3055 if (esi[1] & DP_UP_REQ_MSG_RDY) {
3056 ret |= drm_dp_mst_handle_up_req(mgr);
3057 *handled = true;
3058 }
3059
3060 drm_dp_mst_kick_tx(mgr);
3061 return ret;
3062 }
3063 EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
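
/*
 * Usage sketch (illustrative only, not called anywhere): a short-pulse
 * handler reads the ESI vector, lets the topology manager process it, and
 * acks the serviced event bits back to the sink.
 */
static void __maybe_unused drm_dp_mst_hpd_irq_example(struct drm_dp_mst_topology_mgr *mgr)
{
	u8 esi[16];
	bool handled;

	if (drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi, 14) != 14)
		return;

	drm_dp_mst_hpd_irq(mgr, esi, &handled);

	if (handled)
		drm_dp_dpcd_writeb(mgr->aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0,
				   esi[1]);
}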
3064
3065 /**
3066 * drm_dp_mst_detect_port() - get connection status for an MST port
3067 * @connector: DRM connector for this port
3068 * @mgr: manager for this port
3069 * @port: unverified pointer to a port
3070 *
3071  * This returns the current connection state for a port. It validates that
3072  * the port pointer still exists, so the caller doesn't require a reference.
3073 */
3074 enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
3075 struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
3076 {
3077 enum drm_connector_status status = connector_status_disconnected;
3078
3079 /* we need to search for the port in the mgr in case it's gone */
3080 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3081 if (!port)
3082 return connector_status_disconnected;
3083
3084 if (!port->ddps)
3085 goto out;
3086
3087 switch (port->pdt) {
3088 case DP_PEER_DEVICE_NONE:
3089 case DP_PEER_DEVICE_MST_BRANCHING:
3090 break;
3091
3092 case DP_PEER_DEVICE_SST_SINK:
3093 status = connector_status_connected;
3094 /* for logical ports - cache the EDID */
3095 		if (port->port_num >= DP_MST_LOGICAL_PORT_0 && !port->cached_edid)
3096 			port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
3098 break;
3099 case DP_PEER_DEVICE_DP_LEGACY_CONV:
3100 if (port->ldps)
3101 status = connector_status_connected;
3102 break;
3103 }
3104 out:
3105 drm_dp_mst_topology_put_port(port);
3106 return status;
3107 }
3108 EXPORT_SYMBOL(drm_dp_mst_detect_port);
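
/*
 * Usage sketch (illustrative, driver-side): typically called from the MST
 * connector's .detect hook, where my_mst_connector() is hypothetical:
 *
 *	return drm_dp_mst_detect_port(connector,
 *				      my_mst_connector(connector)->mgr,
 *				      my_mst_connector(connector)->port);
 */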
3109
3110 /**
3111 * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not
3112 * @mgr: manager for this port
3113 * @port: unverified pointer to a port.
3114 *
3115 * This returns whether the port supports audio or not.
3116 */
3117 bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
3118 struct drm_dp_mst_port *port)
3119 {
3120 bool ret = false;
3121
3122 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3123 if (!port)
3124 return ret;
3125 ret = port->has_audio;
3126 drm_dp_mst_topology_put_port(port);
3127 return ret;
3128 }
3129 EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
3130
3131 /**
3132 * drm_dp_mst_get_edid() - get EDID for an MST port
3133 * @connector: toplevel connector to get EDID for
3134 * @mgr: manager for this port
3135 * @port: unverified pointer to a port.
3136 *
3137  * This returns an EDID for the port connected to a connector.
3138  * It validates that the pointer still exists so the caller doesn't require
3139  * a reference.
3140 */
3141 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
3142 {
3143 struct edid *edid = NULL;
3144
3145 /* we need to search for the port in the mgr in case it's gone */
3146 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3147 if (!port)
3148 return NULL;
3149
3150 if (port->cached_edid)
3151 edid = drm_edid_duplicate(port->cached_edid);
3152 	else
3153 		edid = drm_get_edid(connector, &port->aux.ddc);
3155 port->has_audio = drm_detect_monitor_audio(edid);
3156 drm_dp_mst_topology_put_port(port);
3157 return edid;
3158 }
3159 EXPORT_SYMBOL(drm_dp_mst_get_edid);
3160
3161 /**
3162 * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
3163 * @mgr: manager to use
3164 * @pbn: payload bandwidth to convert into slots.
3165 *
3166 * Calculate the number of VCPI slots that will be required for the given PBN
3167 * value. This function is deprecated, and should not be used in atomic
3168 * drivers.
3169 *
3170 * RETURNS:
3171 * The total slots required for this port, or error.
3172 */
3173 int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
3174 int pbn)
3175 {
3176 int num_slots;
3177
3178 num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
3179
3180 /* max. time slots - one slot for MTP header */
3181 if (num_slots > 63)
3182 return -ENOSPC;
3183 return num_slots;
3184 }
3185 EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
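
/*
 * For example: with mgr->pbn_div of 40 (HBR2 x4), a stream needing 960 PBN
 * maps to DIV_ROUND_UP(960, 40) = 24 time slots.
 */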
3186
3187 static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
3188 struct drm_dp_vcpi *vcpi, int pbn, int slots)
3189 {
3190 int ret;
3191
3192 /* max. time slots - one slot for MTP header */
3193 if (slots > 63)
3194 return -ENOSPC;
3195
3196 vcpi->pbn = pbn;
3197 vcpi->aligned_pbn = slots * mgr->pbn_div;
3198 vcpi->num_slots = slots;
3199
3200 ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
3201 if (ret < 0)
3202 return ret;
3203 return 0;
3204 }
3205
3206 /**
3207 * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
3208 * @state: global atomic state
3209 * @mgr: MST topology manager for the port
3210 * @port: port to find vcpi slots for
3211 * @pbn: bandwidth required for the mode in PBN
3212 *
3213 * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
3214 * may have had. Any atomic drivers which support MST must call this function
3215 * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
3216 * current VCPI allocation for the new state, but only when
3217 * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set
3218 * to ensure compatibility with userspace applications that still use the
3219 * legacy modesetting UAPI.
3220 *
3221 * Allocations set by this function are not checked against the bandwidth
3222 * restraints of @mgr until the driver calls drm_dp_mst_atomic_check().
3223 *
3224 * Additionally, it is OK to call this function multiple times on the same
3225 * @port as needed. It is not OK however, to call this function and
3226 * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase.
3227 *
3228 * See also:
3229 * drm_dp_atomic_release_vcpi_slots()
3230 * drm_dp_mst_atomic_check()
3231 *
3232 * Returns:
3233 * Total slots in the atomic state assigned for this port, or a negative error
3234 * code if the port no longer exists
3235 */
3236 int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
3237 struct drm_dp_mst_topology_mgr *mgr,
3238 struct drm_dp_mst_port *port, int pbn)
3239 {
3240 struct drm_dp_mst_topology_state *topology_state;
3241 struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
3242 	int prev_slots, req_slots;
3243
3244 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
3245 if (IS_ERR(topology_state))
3246 return PTR_ERR(topology_state);
3247
3248 /* Find the current allocation for this port, if any */
3249 list_for_each_entry(pos, &topology_state->vcpis, next) {
3250 if (pos->port == port) {
3251 vcpi = pos;
3252 prev_slots = vcpi->vcpi;
3253
3254 /*
3255 * This should never happen, unless the driver tries
3256 * releasing and allocating the same VCPI allocation,
3257 * which is an error
3258 */
3259 if (WARN_ON(!prev_slots)) {
3260 DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
3261 port);
3262 return -EINVAL;
3263 }
3264
3265 break;
3266 }
3267 }
3268 if (!vcpi)
3269 prev_slots = 0;
3270
3271 req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
3272
3273 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
3274 port->connector->base.id, port->connector->name,
3275 port, prev_slots, req_slots);
3276
3277 /* Add the new allocation to the state */
3278 if (!vcpi) {
3279 vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
3280 if (!vcpi)
3281 return -ENOMEM;
3282
3283 drm_dp_mst_get_port_malloc(port);
3284 vcpi->port = port;
3285 list_add(&vcpi->next, &topology_state->vcpis);
3286 }
3287 vcpi->vcpi = req_slots;
3288
3290 	return req_slots;
3291 }
3292 EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
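
/*
 * Usage sketch (illustrative, driver-side): an encoder atomic_check would
 * convert the adjusted mode to PBN and then allocate slots, e.g.:
 *
 *	pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
 *	slots = drm_dp_atomic_find_vcpi_slots(state, mgr, port, pbn);
 *	if (slots < 0)
 *		return slots;
 *
 * with drm_dp_atomic_release_vcpi_slots() called from the connector's
 * atomic_check instead once the CRTC is being removed.
 */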

/**
 * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
 * @state: global atomic state
 * @mgr: MST topology manager for the port
 * @port: The port to release the VCPI slots from
 *
 * Releases any VCPI slots that have been allocated to a port in the atomic
 * state. Any atomic drivers which support MST must call this function in
 * their &drm_connector_helper_funcs.atomic_check() callback when the
 * connector will no longer have VCPI allocated (e.g. because its CRTC was
 * removed) when it had VCPI allocated in the previous atomic state.
 *
 * It is OK to call this even if @port has been removed from the system.
 * Additionally, it is OK to call this function multiple times on the same
 * @port as needed. It is not OK, however, to call this function and
 * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check
 * phase.
 *
 * See also:
 * drm_dp_atomic_find_vcpi_slots()
 * drm_dp_mst_atomic_check()
 *
 * Returns:
 * 0 if all slots for this port were added back to
 * &drm_dp_mst_topology_state.avail_slots or negative error code
 */
int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
				     struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_topology_state *topology_state;
	struct drm_dp_vcpi_allocation *pos;
	bool found = false;

	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
	if (IS_ERR(topology_state))
		return PTR_ERR(topology_state);

	list_for_each_entry(pos, &topology_state->vcpis, next) {
		if (pos->port == port) {
			found = true;
			break;
		}
	}
	if (WARN_ON(!found)) {
		DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p\n",
			  port, &topology_state->base);
		return -EINVAL;
	}

	DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
	if (pos->vcpi) {
		drm_dp_mst_put_port_malloc(port);
		pos->vcpi = 0;
	}

	return 0;
}
EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
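
/*
 * The release side of the same hypothetical atomic_check logic sketched
 * above: comparing the old and new connector states is the usual way to
 * detect that the CRTC was removed, at which point the slots must be
 * released:
 *
 *	old_conn_state = drm_atomic_get_old_connector_state(state, connector);
 *	new_conn_state = drm_atomic_get_new_connector_state(state, connector);
 *
 *	if (old_conn_state->crtc && !new_conn_state->crtc) {
 *		ret = drm_dp_atomic_release_vcpi_slots(state, mgr, port);
 *		if (ret)
 *			return ret;
 *	}
 */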

/**
 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
 * @mgr: manager for this port
 * @port: port to allocate a virtual channel for.
 * @pbn: payload bandwidth number to request
 * @slots: returned number of slots for this PBN.
 *
 * Returns: true if the VCPI could be allocated (or was already allocated
 * with the same PBN), false otherwise.
 */
bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_mst_port *port, int pbn, int slots)
{
	int ret;

	if (slots < 0)
		return false;

	port = drm_dp_mst_topology_get_port_validated(mgr, port);
	if (!port)
		return false;

	if (port->vcpi.vcpi > 0) {
		DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
			      port->vcpi.vcpi, port->vcpi.pbn, pbn);
		if (pbn == port->vcpi.pbn) {
			drm_dp_mst_topology_put_port(port);
			return true;
		}
	}

	ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
	if (ret) {
		DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
			      DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
		/* Drop the topology reference taken above before bailing */
		drm_dp_mst_topology_put_port(port);
		return false;
	}
	DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
		      pbn, port->vcpi.num_slots);

	/* Keep port allocated until its payload has been removed */
	drm_dp_mst_get_port_malloc(port);
	drm_dp_mst_topology_put_port(port);
	return true;
}
EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);

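/**
 * drm_dp_mst_get_vcpi_slots() - Query the number of slots allocated to a port
 * @mgr: manager for this port
 * @port: unverified pointer to a port
 *
 * Returns: the number of time slots currently allocated to @port's VCPI, or
 * 0 if @port is no longer part of the topology.
 */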
int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_mst_port *port)
{
	int slots = 0;

	port = drm_dp_mst_topology_get_port_validated(mgr, port);
	if (!port)
		return slots;

	slots = port->vcpi.num_slots;
	drm_dp_mst_topology_put_port(port);
	return slots;
}
EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);

/**
 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This just resets the number of slots for the port's VCPI for later
 * programming.
 */
void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port)
{
	/*
	 * A port with VCPI will remain allocated until its VCPI is
	 * released, no verified ref needed
	 */

	port->vcpi.num_slots = 0;
}
EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);

/**
 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
 * @mgr: manager for this port
 * @port: port to deallocate vcpi for
 *
 * This can be called unconditionally, regardless of whether
 * drm_dp_mst_allocate_vcpi() succeeded or not.
 */
void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
				struct drm_dp_mst_port *port)
{
	if (!port->vcpi.vcpi)
		return;

	drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
	port->vcpi.num_slots = 0;
	port->vcpi.pbn = 0;
	port->vcpi.aligned_pbn = 0;
	port->vcpi.vcpi = 0;
	drm_dp_mst_put_port_malloc(port);
}
EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
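
/*
 * A rough sketch of the VCPI/payload lifecycle as a legacy (non-atomic)
 * driver might drive it around a modeset. The exact ordering relative to
 * enabling and disabling the stream is hardware specific; this is
 * illustrative only.
 *
 * Enable - reserve the channel, then program the payload table:
 *
 *	slots = drm_dp_find_vcpi_slots(mgr, pbn);
 *	if (!drm_dp_mst_allocate_vcpi(mgr, port, pbn, slots))
 *		return -EINVAL;
 *	drm_dp_update_payload_part1(mgr);
 *	(enable the transcoder/stream)
 *	drm_dp_check_act_status(mgr);
 *	drm_dp_update_payload_part2(mgr);
 *
 * Disable - drop the slots, reprogram, then free the VCPI:
 *
 *	drm_dp_mst_reset_vcpi_slots(mgr, port);
 *	drm_dp_update_payload_part1(mgr);
 *	(disable the transcoder/stream)
 *	drm_dp_check_act_status(mgr);
 *	drm_dp_update_payload_part2(mgr);
 *	drm_dp_mst_deallocate_vcpi(mgr, port);
 */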

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id, struct drm_dp_payload *payload)
{
	u8 payload_alloc[3], status;
	int ret;
	int retries = 0;

	drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
			   DP_PAYLOAD_TABLE_UPDATED);

	payload_alloc[0] = id;
	payload_alloc[1] = payload->start_slot;
	payload_alloc[2] = payload->num_slots;

	ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET,
				payload_alloc, 3);
	if (ret != 3) {
		DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
		goto fail;
	}

retry:
	ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
				&status);
	if (ret < 0) {
		DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
		goto fail;
	}

	if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
		retries++;
		if (retries < 20) {
			usleep_range(10000, 20000);
			goto retry;
		}
		DRM_DEBUG_KMS("status not set after read payload table status %d\n",
			      status);
		ret = -EINVAL;
		goto fail;
	}
	ret = 0;
fail:
	return ret;
}

/**
 * drm_dp_check_act_status() - Check ACT handled status.
 * @mgr: manager to use
 *
 * Check the payload status bits in the DPCD for ACT handled completion.
 *
 * Returns: 0 if the sink reported ACT handled, or a negative error code.
 */
int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
{
	u8 status;
	int ret;
	int count = 0;

	do {
		ret = drm_dp_dpcd_readb(mgr->aux,
					DP_PAYLOAD_TABLE_UPDATE_STATUS,
					&status);
		if (ret < 0) {
			DRM_DEBUG_KMS("failed to read payload table status %d\n",
				      ret);
			goto fail;
		}

		if (status & DP_PAYLOAD_ACT_HANDLED)
			break;
		count++;
		udelay(100);
	} while (count < 30);

	if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
		DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n",
			      status, count);
		ret = -EINVAL;
		goto fail;
	}
	return 0;
fail:
	return ret;
}
EXPORT_SYMBOL(drm_dp_check_act_status);

/**
 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
 * @clock: dot clock for the mode, in kHz
 * @bpp: bits per pixel for the mode.
 *
 * This uses the formula in the spec to calculate the PBN value for a mode.
 *
 * Returns: the calculated PBN value.
 */
int drm_dp_calc_pbn_mode(int clock, int bpp)
{
	u64 kbps;
	s64 peak_kbps;
	u32 numerator;
	u32 denominator;

	kbps = clock * bpp;

	/*
	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
	 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
	 * common multiplier to render an integer PBN for all link rate/lane
	 * counts combinations
	 * calculate
	 * peak_kbps *= (1006/1000)
	 * peak_kbps *= (64/54)
	 * peak_kbps *= 8    convert to bytes
	 */

	numerator = 64 * 1006;
	denominator = 54 * 8 * 1000 * 1000;

	kbps *= numerator;
	peak_kbps = drm_fixp_from_fraction(kbps, denominator);

	return drm_fixp2int_ceil(peak_kbps);
}
EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
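
/*
 * Worked example, matching the first self-test case below: a 154 MHz dot
 * clock at 30 bpp gives
 *
 *	kbps = 154000 * 30 = 4620000
 *	PBN  = ceil(4620000 * (64 * 1006) / (54 * 8 * 1000 * 1000))
 *	     = ceil(297454080000 / 432000000)
 *	     = ceil(688.55...) = 689
 *
 * i.e. the stream's bandwidth expressed in units of 54/64 MB/s, with the
 * 1.006 margin applied.
 */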

static int test_calc_pbn_mode(void)
{
	int ret;

	ret = drm_dp_calc_pbn_mode(154000, 30);
	if (ret != 689) {
		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
			  154000, 30, 689, ret);
		return -EINVAL;
	}
	ret = drm_dp_calc_pbn_mode(234000, 30);
	if (ret != 1047) {
		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
			  234000, 30, 1047, ret);
		return -EINVAL;
	}
	ret = drm_dp_calc_pbn_mode(297000, 24);
	if (ret != 1063) {
		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
			  297000, 24, 1063, ret);
		return -EINVAL;
	}
	return 0;
}

/* we want to kick the TX after we've ACKed the up/down IRQs. */
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
{
	queue_work(system_long_wq, &mgr->tx_work);
}

static void drm_dp_mst_dump_mstb(struct seq_file *m,
				 struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_port *port;
	int tabs = mstb->lct;
	char prefix[16];
	int i;

	/* LCT is at most 15, but clamp defensively before building the prefix */
	if (tabs > 15)
		tabs = 15;
	for (i = 0; i < tabs; i++)
		prefix[i] = '\t';
	prefix[i] = '\0';

	seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
	list_for_each_entry(port, &mstb->ports, next) {
		seq_printf(m, "%sport: %d: input: %d: pdt: %d, ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n",
			   prefix, port->port_num, port->input, port->pdt,
			   port->ddps, port->ldps, port->num_sdp_streams,
			   port->num_sdp_stream_sinks, port, port->connector);
		if (port->mstb)
			drm_dp_mst_dump_mstb(m, port->mstb);
	}
}

#define DP_PAYLOAD_TABLE_SIZE 64

static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf)
{
	int i;

	for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
		if (drm_dp_dpcd_read(mgr->aux,
				     DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
				     &buf[i], 16) != 16)
			return false;
	}
	return true;
}

static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
			       struct drm_dp_mst_port *port, char *name,
			       int namelen)
{
	struct edid *mst_edid;

	mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
	drm_edid_get_monitor_name(mst_edid, name, namelen);
	/* drm_dp_mst_get_edid() returns a copy that we must free */
	kfree(mst_edid);
}

/**
 * drm_dp_mst_dump_topology(): dump topology to seq file.
 * @m: seq_file to dump output to
 * @mgr: manager to dump current topology for.
 *
 * Helper to dump the MST topology to a seq_file for debugfs.
 */
void drm_dp_mst_dump_topology(struct seq_file *m,
			      struct drm_dp_mst_topology_mgr *mgr)
{
	int i;
	struct drm_dp_mst_port *port;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		drm_dp_mst_dump_mstb(m, mgr->mst_primary);
	mutex_unlock(&mgr->lock);

	/* dump VCPIs */
	mutex_lock(&mgr->payload_lock);
	seq_printf(m, "vcpi: %lx %lx %d\n", mgr->payload_mask, mgr->vcpi_mask,
		   mgr->max_payloads);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i]) {
			char name[14];

			port = container_of(mgr->proposed_vcpis[i],
					    struct drm_dp_mst_port, vcpi);
			fetch_monitor_name(mgr, port, name, sizeof(name));
			seq_printf(m, "vcpi %d: %d %d %d sink name: %s\n", i,
				   port->port_num, port->vcpi.vcpi,
				   port->vcpi.num_slots,
				   (*name != 0) ? name : "Unknown");
		} else {
			seq_printf(m, "vcpi %d:unused\n", i);
		}
	}
	for (i = 0; i < mgr->max_payloads; i++) {
		seq_printf(m, "payload %d: %d, %d, %d\n",
			   i,
			   mgr->payloads[i].payload_state,
			   mgr->payloads[i].start_slot,
			   mgr->payloads[i].num_slots);
	}
	mutex_unlock(&mgr->payload_lock);

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		u8 buf[DP_PAYLOAD_TABLE_SIZE];
		int ret;

		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf,
				       DP_RECEIVER_CAP_SIZE);
		seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
		ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
		seq_printf(m, "faux/mst: %*ph\n", 2, buf);
		ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
		seq_printf(m, "mst ctrl: %*ph\n", 1, buf);

		/* dump the standard OUI branch header */
		ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf,
				       DP_BRANCH_OUI_HEADER_SIZE);
		seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
		for (i = 0x3; i < 0x8 && buf[i]; i++)
			seq_printf(m, "%c", buf[i]);
		seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
			   buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
		if (dump_dp_payload_table(mgr, buf))
			seq_printf(m, "payload table: %*ph\n",
				   DP_PAYLOAD_TABLE_SIZE, buf);
	}
	mutex_unlock(&mgr->lock);
}
EXPORT_SYMBOL(drm_dp_mst_dump_topology);
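
/*
 * A sketch of how a driver's debugfs show callback might hook this up;
 * the node/private layout follows the drm_debugfs helpers and the
 * example_* names are hypothetical:
 *
 *	static int example_dp_mst_info(struct seq_file *m, void *data)
 *	{
 *		struct drm_info_node *node = m->private;
 *		struct example_device *edev = node->minor->dev->dev_private;
 *
 *		drm_dp_mst_dump_topology(m, &edev->mst_mgr);
 *		return 0;
 *	}
 */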

static void drm_dp_tx_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr =
		container_of(work, struct drm_dp_mst_topology_mgr, tx_work);

	mutex_lock(&mgr->qlock);
	if (!list_empty(&mgr->tx_msg_downq))
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
}

static void drm_dp_destroy_connector_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr =
		container_of(work, struct drm_dp_mst_topology_mgr,
			     destroy_connector_work);
	struct drm_dp_mst_port *port;
	bool send_hotplug = false;

	/*
	 * Not a regular list traverse as we have to drop the destroy
	 * connector lock before destroying the connector, to avoid AB->BA
	 * ordering between this lock and the config mutex.
	 */
	for (;;) {
		mutex_lock(&mgr->destroy_connector_lock);
		port = list_first_entry_or_null(&mgr->destroy_connector_list,
						struct drm_dp_mst_port, next);
		if (!port) {
			mutex_unlock(&mgr->destroy_connector_lock);
			break;
		}
		list_del(&port->next);
		mutex_unlock(&mgr->destroy_connector_lock);

		INIT_LIST_HEAD(&port->next);

		mgr->cbs->destroy_connector(mgr, port->connector);

		drm_dp_port_teardown_pdt(port, port->pdt);
		port->pdt = DP_PEER_DEVICE_NONE;

		drm_dp_mst_put_port_malloc(port);
		send_hotplug = true;
	}
	if (send_hotplug)
		drm_kms_helper_hotplug_event(mgr->dev);
}

static struct drm_private_state *
drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
{
	struct drm_dp_mst_topology_state *state, *old_state =
		to_dp_mst_topology_state(obj->state);
	struct drm_dp_vcpi_allocation *pos, *vcpi;

	state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	INIT_LIST_HEAD(&state->vcpis);

	list_for_each_entry(pos, &old_state->vcpis, next) {
		/* Prune leftover freed VCPI allocations */
		if (!pos->vcpi)
			continue;

		vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
		if (!vcpi)
			goto fail;

		drm_dp_mst_get_port_malloc(vcpi->port);
		list_add(&vcpi->next, &state->vcpis);
	}

	return &state->base;

fail:
	list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
		drm_dp_mst_put_port_malloc(pos->port);
		kfree(pos);
	}
	kfree(state);

	return NULL;
}

static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
				     struct drm_private_state *state)
{
	struct drm_dp_mst_topology_state *mst_state =
		to_dp_mst_topology_state(state);
	struct drm_dp_vcpi_allocation *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
		/* We only keep references to ports with non-zero VCPIs */
		if (pos->vcpi)
			drm_dp_mst_put_port_malloc(pos->port);
		kfree(pos);
	}

	kfree(mst_state);
}

static inline int
drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_topology_state *mst_state)
{
	struct drm_dp_vcpi_allocation *vcpi;
	int avail_slots = 63, payload_count = 0;

	list_for_each_entry(vcpi, &mst_state->vcpis, next) {
		/* Releasing VCPI is always OK, even if the port is gone */
		if (!vcpi->vcpi) {
			DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots\n",
					 vcpi->port);
			continue;
		}

		DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots\n",
				 vcpi->port, vcpi->vcpi);

		avail_slots -= vcpi->vcpi;
		if (avail_slots < 0) {
			DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
					 vcpi->port, mst_state,
					 avail_slots + vcpi->vcpi);
			return -ENOSPC;
		}

		if (++payload_count > mgr->max_payloads) {
			DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)\n",
					 mgr, mst_state, mgr->max_payloads);
			return -EINVAL;
		}
	}
	DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
			 mgr, mst_state, avail_slots,
			 63 - avail_slots);

	return 0;
}

/**
 * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
 * atomic update is valid
 * @state: The global atomic state to check
 *
 * Checks the given topology state for an atomic update to ensure that it's
 * valid. This includes checking whether there's enough bandwidth to support
 * the new VCPI allocations in the atomic update.
 *
 * Any atomic drivers supporting DP MST must make sure to call this after
 * checking the rest of their state in their
 * &drm_mode_config_funcs.atomic_check() callback.
 *
 * See also:
 * drm_dp_atomic_find_vcpi_slots()
 * drm_dp_atomic_release_vcpi_slots()
 *
 * Returns:
 * 0 if the new state is valid, negative error code otherwise.
 */
int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
{
	struct drm_dp_mst_topology_mgr *mgr;
	struct drm_dp_mst_topology_state *mst_state;
	int i, ret = 0;

	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
		ret = drm_dp_mst_atomic_check_topology_state(mgr, mst_state);
		if (ret)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_atomic_check);
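
/*
 * A minimal sketch of where this check slots into a driver's
 * &drm_mode_config_funcs.atomic_check implementation; running the core
 * helper check first is just the common pattern, not a requirement:
 *
 *	static int example_atomic_check(struct drm_device *dev,
 *					struct drm_atomic_state *state)
 *	{
 *		int ret;
 *
 *		ret = drm_atomic_helper_check(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		return drm_dp_mst_atomic_check(state);
 *	}
 */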

const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
	.atomic_duplicate_state = drm_dp_mst_duplicate_state,
	.atomic_destroy_state = drm_dp_mst_destroy_state,
};
EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);

/**
 * drm_atomic_get_mst_topology_state: get MST topology state
 *
 * @state: global atomic state
 * @mgr: MST topology manager, also the private object in this case
 *
 * This function wraps drm_atomic_get_private_obj_state() passing in the MST
 * atomic state vtable so that the private object state returned is that of a
 * MST topology object. Also, drm_atomic_get_private_obj_state() expects the
 * caller to take care of the locking, so warn if we don't hold the
 * connection_mutex.
 *
 * RETURNS:
 *
 * The MST topology state or error pointer.
 */
struct drm_dp_mst_topology_state *
drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
				  struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_device *dev = mgr->dev;

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
	return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state,
									 &mgr->base));
}
EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);

/**
 * drm_dp_mst_topology_mgr_init - initialise a topology manager
 * @mgr: manager struct to initialise
 * @dev: device providing this structure - for i2c addition.
 * @aux: DP helper aux channel to talk to this device
 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
 * @max_payloads: maximum number of payloads this GPU can source
 * @conn_base_id: the connector object ID the MST device is connected to.
 *
 * Returns 0 on success, or a negative error code on failure.
 */
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_device *dev, struct drm_dp_aux *aux,
				 int max_dpcd_transaction_bytes,
				 int max_payloads, int conn_base_id)
{
	struct drm_dp_mst_topology_state *mst_state;

	mutex_init(&mgr->lock);
	mutex_init(&mgr->qlock);
	mutex_init(&mgr->payload_lock);
	mutex_init(&mgr->destroy_connector_lock);
	INIT_LIST_HEAD(&mgr->tx_msg_downq);
	INIT_LIST_HEAD(&mgr->destroy_connector_list);
	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
	INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work);
	init_waitqueue_head(&mgr->tx_waitq);
	mgr->dev = dev;
	mgr->aux = aux;
	mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
	mgr->max_payloads = max_payloads;
	mgr->conn_base_id = conn_base_id;
	if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
	    max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
		return -EINVAL;
	mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload),
				GFP_KERNEL);
	if (!mgr->payloads)
		return -ENOMEM;
	mgr->proposed_vcpis = kcalloc(max_payloads,
				      sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
	if (!mgr->proposed_vcpis)
		return -ENOMEM;
	set_bit(0, &mgr->payload_mask);
	if (test_calc_pbn_mode() < 0)
		DRM_ERROR("MST PBN self-test failed\n");

	mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
	if (mst_state == NULL)
		return -ENOMEM;

	mst_state->mgr = mgr;
	INIT_LIST_HEAD(&mst_state->vcpis);

	drm_atomic_private_obj_init(dev, &mgr->base,
				    &mst_state->base,
				    &drm_dp_mst_topology_state_funcs);

	return 0;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
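
/*
 * A minimal sketch of an init call as it might appear in a driver's
 * connector setup path; the sizes shown (16-byte DPCD transactions, four
 * payload streams) are illustrative values, not recommendations:
 *
 *	ret = drm_dp_mst_topology_mgr_init(&example->mst_mgr, dev,
 *					   &example->dp_aux, 16, 4,
 *					   connector->base.id);
 *	if (ret)
 *		return ret;
 */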

/**
 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
 * @mgr: manager to destroy
 */
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
{
	drm_dp_mst_topology_mgr_set_mst(mgr, false);
	flush_work(&mgr->work);
	flush_work(&mgr->destroy_connector_work);
	mutex_lock(&mgr->payload_lock);
	kfree(mgr->payloads);
	mgr->payloads = NULL;
	kfree(mgr->proposed_vcpis);
	mgr->proposed_vcpis = NULL;
	mutex_unlock(&mgr->payload_lock);
	mgr->dev = NULL;
	mgr->aux = NULL;
	drm_atomic_private_obj_fini(&mgr->base);
	mgr->funcs = NULL;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);

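/*
 * DP_REMOTE_I2C_READ sideband messages can carry at most
 * DP_REMOTE_I2C_READ_MAX_TRANSACTIONS address/write transactions followed
 * by a single read, and each transfer's length field is one byte, so only
 * I2C transfers of that exact shape can be tunnelled over MST.
 */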
static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
{
	int i;

	if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
		return false;

	for (i = 0; i < num - 1; i++) {
		if (msgs[i].flags & I2C_M_RD ||
		    msgs[i].len > 0xff)
			return false;
	}

	return msgs[num - 1].flags & I2C_M_RD &&
		msgs[num - 1].len <= 0xff;
}

/* I2C device */
static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter,
			       struct i2c_msg *msgs, int num)
{
	struct drm_dp_aux *aux = adapter->algo_data;
	struct drm_dp_mst_port *port =
		container_of(aux, struct drm_dp_mst_port, aux);
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	unsigned int i;
	struct drm_dp_sideband_msg_req_body msg;
	struct drm_dp_sideband_msg_tx *txmsg = NULL;
	int ret;

	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
	if (!mstb)
		return -EREMOTEIO;

	if (!remote_i2c_read_ok(msgs, num)) {
		DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
		ret = -EIO;
		goto out;
	}

	memset(&msg, 0, sizeof(msg));
	msg.req_type = DP_REMOTE_I2C_READ;
	msg.u.i2c_read.num_transactions = num - 1;
	msg.u.i2c_read.port_number = port->port_num;
	for (i = 0; i < num - 1; i++) {
		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
		msg.u.i2c_read.transactions[i].no_stop_bit =
			!(msgs[i].flags & I2C_M_STOP);
	}
	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto out;
	}

	txmsg->dst = mstb;
	drm_dp_encode_sideband_req(&msg, txmsg);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
			ret = -EREMOTEIO;
			goto out;
		}
		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes !=
		    msgs[num - 1].len) {
			ret = -EIO;
			goto out;
		}
		memcpy(msgs[num - 1].buf,
		       txmsg->reply.u.remote_i2c_read_ack.bytes,
		       msgs[num - 1].len);
		ret = num;
	}
out:
	kfree(txmsg);
	drm_dp_mst_topology_put_mstb(mstb);
	return ret;
}

static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
	       I2C_FUNC_10BIT_ADDR;
}

static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
	.functionality = drm_dp_mst_i2c_functionality,
	.master_xfer = drm_dp_mst_i2c_xfer,
};

/**
 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
 * @aux: DisplayPort AUX channel
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
{
	aux->ddc.algo = &drm_dp_mst_i2c_algo;
	aux->ddc.algo_data = aux;
	aux->ddc.retries = 3;

	aux->ddc.class = I2C_CLASS_DDC;
	aux->ddc.owner = THIS_MODULE;
	aux->ddc.dev.parent = aux->dev;
	aux->ddc.dev.of_node = aux->dev->of_node;

	strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
		sizeof(aux->ddc.name));

	return i2c_add_adapter(&aux->ddc);
}

/**
 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
 * @aux: DisplayPort AUX channel
 */
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
{
	i2c_del_adapter(&aux->ddc);
}