• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *
4  *  Bluetooth support for Intel devices
5  *
6  *  Copyright (C) 2015  Intel Corporation
7  */
8 
9 #include <linux/module.h>
10 #include <linux/firmware.h>
11 #include <linux/regmap.h>
12 #include <asm/unaligned.h>
13 
14 #include <net/bluetooth/bluetooth.h>
15 #include <net/bluetooth/hci_core.h>
16 
17 #include "btintel.h"
18 
#define VERSION "0.1"

/* Default public address programmed into some Intel controllers
 * (00:03:19:9E:8B:00, bytes stored in little-endian order).
 */
#define BDADDR_INTEL		(&(bdaddr_t){{0x00, 0x8b, 0x9e, 0x19, 0x03, 0x00}})
/* Size of the RSA-signed firmware header that precedes the command buffer */
#define RSA_HEADER_LEN		644
/* Offset of the 32-bit CSS header version field within a CSS header */
#define CSS_HEADER_OFFSET	8
/* The ECDSA header (when present) immediately follows the RSA header */
#define ECDSA_OFFSET		644
#define ECDSA_HEADER_LEN	320
26 
btintel_check_bdaddr(struct hci_dev * hdev)27 int btintel_check_bdaddr(struct hci_dev *hdev)
28 {
29 	struct hci_rp_read_bd_addr *bda;
30 	struct sk_buff *skb;
31 
32 	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
33 			     HCI_INIT_TIMEOUT);
34 	if (IS_ERR(skb)) {
35 		int err = PTR_ERR(skb);
36 		bt_dev_err(hdev, "Reading Intel device address failed (%d)",
37 			   err);
38 		return err;
39 	}
40 
41 	if (skb->len != sizeof(*bda)) {
42 		bt_dev_err(hdev, "Intel device address length mismatch");
43 		kfree_skb(skb);
44 		return -EIO;
45 	}
46 
47 	bda = (struct hci_rp_read_bd_addr *)skb->data;
48 
49 	/* For some Intel based controllers, the default Bluetooth device
50 	 * address 00:03:19:9E:8B:00 can be found. These controllers are
51 	 * fully operational, but have the danger of duplicate addresses
52 	 * and that in turn can cause problems with Bluetooth operation.
53 	 */
54 	if (!bacmp(&bda->bdaddr, BDADDR_INTEL)) {
55 		bt_dev_err(hdev, "Found Intel default device address (%pMR)",
56 			   &bda->bdaddr);
57 		set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
58 	}
59 
60 	kfree_skb(skb);
61 
62 	return 0;
63 }
64 EXPORT_SYMBOL_GPL(btintel_check_bdaddr);
65 
btintel_enter_mfg(struct hci_dev * hdev)66 int btintel_enter_mfg(struct hci_dev *hdev)
67 {
68 	static const u8 param[] = { 0x01, 0x00 };
69 	struct sk_buff *skb;
70 
71 	skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_CMD_TIMEOUT);
72 	if (IS_ERR(skb)) {
73 		bt_dev_err(hdev, "Entering manufacturer mode failed (%ld)",
74 			   PTR_ERR(skb));
75 		return PTR_ERR(skb);
76 	}
77 	kfree_skb(skb);
78 
79 	return 0;
80 }
81 EXPORT_SYMBOL_GPL(btintel_enter_mfg);
82 
/* Leave Intel manufacturer mode, optionally resetting the controller with
 * or without the downloaded patches activated.
 */
int btintel_exit_mfg(struct hci_dev *hdev, bool reset, bool patched)
{
	u8 param[2] = { 0x00, 0x00 };
	struct sk_buff *skb;

	/* The 2nd command parameter specifies the manufacturing exit method:
	 * 0x00: Just disable the manufacturing mode (0x00).
	 * 0x01: Disable manufacturing mode and reset with patches deactivated.
	 * 0x02: Disable manufacturing mode and reset with patches activated.
	 */
	if (reset)
		param[1] = patched ? 0x02 : 0x01;

	skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_CMD_TIMEOUT);
	if (!IS_ERR(skb)) {
		kfree_skb(skb);
		return 0;
	}

	bt_dev_err(hdev, "Exiting manufacturer mode failed (%ld)",
		   PTR_ERR(skb));

	return PTR_ERR(skb);
}
EXPORT_SYMBOL_GPL(btintel_exit_mfg);
107 
btintel_set_bdaddr(struct hci_dev * hdev,const bdaddr_t * bdaddr)108 int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
109 {
110 	struct sk_buff *skb;
111 	int err;
112 
113 	skb = __hci_cmd_sync(hdev, 0xfc31, 6, bdaddr, HCI_INIT_TIMEOUT);
114 	if (IS_ERR(skb)) {
115 		err = PTR_ERR(skb);
116 		bt_dev_err(hdev, "Changing Intel device address failed (%d)",
117 			   err);
118 		return err;
119 	}
120 	kfree_skb(skb);
121 
122 	return 0;
123 }
124 EXPORT_SYMBOL_GPL(btintel_set_bdaddr);
125 
/* Enable or disable the Intel diagnostic mode (vendor opcode 0xfc43) and
 * update the vendor event mask to match.
 */
int btintel_set_diag(struct hci_dev *hdev, bool enable)
{
	struct sk_buff *skb;
	u8 param[3];
	int err;

	/* All three parameter bytes carry the same enable/disable value */
	param[0] = param[1] = param[2] = enable ? 0x03 : 0x00;

	skb = __hci_cmd_sync(hdev, 0xfc43, 3, param, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		/* A -ENODATA response is deliberately treated as success */
		if (err == -ENODATA)
			goto done;
		bt_dev_err(hdev, "Changing Intel diagnostic mode failed (%d)",
			   err);
		return err;
	}
	kfree_skb(skb);

done:
	btintel_set_event_mask(hdev, enable);
	return 0;
}
EXPORT_SYMBOL_GPL(btintel_set_diag);
158 
/* Same as btintel_set_diag() but wrapped in manufacturer mode enter/exit. */
int btintel_set_diag_mfg(struct hci_dev *hdev, bool enable)
{
	int mfg_err;
	int diag_ret;

	mfg_err = btintel_enter_mfg(hdev);
	if (mfg_err)
		return mfg_err;

	diag_ret = btintel_set_diag(hdev, enable);

	/* Always leave manufacturer mode; a failure there takes precedence */
	mfg_err = btintel_exit_mfg(hdev, false, false);
	if (mfg_err)
		return mfg_err;

	return diag_ret;
}
EXPORT_SYMBOL_GPL(btintel_set_diag_mfg);
176 
/* Handle a controller hardware error: log the code, reset the controller
 * and dump the Intel exception info record for diagnosis.
 */
void btintel_hw_error(struct hci_dev *hdev, u8 code)
{
	struct sk_buff *skb;
	u8 type = 0x00;

	bt_dev_err(hdev, "Hardware error 0x%2.2x", code);

	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Reset after hardware error failed (%ld)",
			   PTR_ERR(skb));
		return;
	}
	kfree_skb(skb);

	/* Vendor command 0xfc22 retrieves the exception information record */
	skb = __hci_cmd_sync(hdev, 0xfc22, 1, &type, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Retrieving Intel exception info failed (%ld)",
			   PTR_ERR(skb));
		return;
	}

	/* Expected layout: 1 status byte followed by 12 info bytes */
	if (skb->len != 13) {
		bt_dev_err(hdev, "Exception info size mismatch");
		kfree_skb(skb);
		return;
	}

	/* NOTE(review): the info bytes are printed with %s, which assumes
	 * the controller NUL-terminates them within the 12 bytes - confirm.
	 */
	bt_dev_err(hdev, "Exception info %s", (char *)(skb->data + 1));

	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(btintel_hw_error);
210 
btintel_version_info(struct hci_dev * hdev,struct intel_version * ver)211 void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver)
212 {
213 	const char *variant;
214 
215 	switch (ver->fw_variant) {
216 	case 0x06:
217 		variant = "Bootloader";
218 		break;
219 	case 0x23:
220 		variant = "Firmware";
221 		break;
222 	default:
223 		return;
224 	}
225 
226 	bt_dev_info(hdev, "%s revision %u.%u build %u week %u %u",
227 		    variant, ver->fw_revision >> 4, ver->fw_revision & 0x0f,
228 		    ver->fw_build_num, ver->fw_build_ww,
229 		    2000 + ver->fw_build_yy);
230 }
231 EXPORT_SYMBOL_GPL(btintel_version_info);
232 
/* Transfer one firmware fragment of the given type via the Intel
 * Secure_Send command (0xfc09), chunked into at most 252 payload bytes
 * per HCI command (the first command byte carries the fragment type).
 */
int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen,
			const void *param)
{
	u8 cmd_param[253];

	cmd_param[0] = fragment_type;

	while (plen > 0) {
		struct sk_buff *skb;
		u8 chunk_len = plen > 252 ? 252 : plen;

		memcpy(cmd_param + 1, param, chunk_len);

		skb = __hci_cmd_sync(hdev, 0xfc09, chunk_len + 1,
				     cmd_param, HCI_INIT_TIMEOUT);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		kfree_skb(skb);

		param += chunk_len;
		plen -= chunk_len;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(btintel_secure_send);
257 
/* Load a DDC (Device Dependent Configuration) file and write each entry
 * to the controller with the Intel_Write_DDC vendor command (0xfc8b).
 */
int btintel_load_ddc_config(struct hci_dev *hdev, const char *ddc_name)
{
	const struct firmware *fw;
	struct sk_buff *skb;
	const u8 *fw_ptr;
	int err;

	err = request_firmware_direct(&fw, ddc_name, &hdev->dev);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to load Intel DDC file %s (%d)",
			   ddc_name, err);
		return err;
	}

	bt_dev_info(hdev, "Found Intel DDC parameters: %s", ddc_name);

	fw_ptr = fw->data;

	/* DDC file contains one or more DDC structure which has
	 * Length (1 byte), DDC ID (2 bytes), and DDC value (Length - 2).
	 */
	while (fw->size > fw_ptr - fw->data) {
		/* Command payload is the whole entry: length byte + data */
		u8 cmd_plen = fw_ptr[0] + sizeof(u8);

		skb = __hci_cmd_sync(hdev, 0xfc8b, cmd_plen, fw_ptr,
				     HCI_INIT_TIMEOUT);
		if (IS_ERR(skb)) {
			bt_dev_err(hdev, "Failed to send Intel_Write_DDC (%ld)",
				   PTR_ERR(skb));
			release_firmware(fw);
			return PTR_ERR(skb);
		}

		fw_ptr += cmd_plen;
		kfree_skb(skb);
	}

	release_firmware(fw);

	bt_dev_info(hdev, "Applying Intel DDC parameters completed");

	return 0;
}
EXPORT_SYMBOL_GPL(btintel_load_ddc_config);
302 
/* Program the Intel vendor event mask (opcode 0xfc52); debug mode unmasks
 * additional vendor events in the second mask byte.
 */
int btintel_set_event_mask(struct hci_dev *hdev, bool debug)
{
	u8 mask[8] = { 0x87, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
	struct sk_buff *skb;

	if (debug)
		mask[1] |= 0x62;

	skb = __hci_cmd_sync(hdev, 0xfc52, 8, mask, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		int err = PTR_ERR(skb);

		bt_dev_err(hdev, "Setting Intel event mask failed (%d)", err);
		return err;
	}

	kfree_skb(skb);

	return 0;
}
EXPORT_SYMBOL_GPL(btintel_set_event_mask);
323 
/* Same as btintel_set_event_mask() but wrapped in mfg mode enter/exit. */
int btintel_set_event_mask_mfg(struct hci_dev *hdev, bool debug)
{
	int mfg_err;
	int mask_ret;

	mfg_err = btintel_enter_mfg(hdev);
	if (mfg_err)
		return mfg_err;

	mask_ret = btintel_set_event_mask(hdev, debug);

	/* Always leave manufacturer mode; a failure there takes precedence */
	mfg_err = btintel_exit_mfg(hdev, false, false);
	if (mfg_err)
		return mfg_err;

	return mask_ret;
}
EXPORT_SYMBOL_GPL(btintel_set_event_mask_mfg);
341 
btintel_read_version(struct hci_dev * hdev,struct intel_version * ver)342 int btintel_read_version(struct hci_dev *hdev, struct intel_version *ver)
343 {
344 	struct sk_buff *skb;
345 
346 	skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_CMD_TIMEOUT);
347 	if (IS_ERR(skb)) {
348 		bt_dev_err(hdev, "Reading Intel version information failed (%ld)",
349 			   PTR_ERR(skb));
350 		return PTR_ERR(skb);
351 	}
352 
353 	if (skb->len != sizeof(*ver)) {
354 		bt_dev_err(hdev, "Intel version event size mismatch");
355 		kfree_skb(skb);
356 		return -EILSEQ;
357 	}
358 
359 	memcpy(ver, skb->data, sizeof(*ver));
360 
361 	kfree_skb(skb);
362 
363 	return 0;
364 }
365 EXPORT_SYMBOL_GPL(btintel_read_version);
366 
btintel_version_info_tlv(struct hci_dev * hdev,struct intel_version_tlv * version)367 void btintel_version_info_tlv(struct hci_dev *hdev, struct intel_version_tlv *version)
368 {
369 	const char *variant;
370 
371 	switch (version->img_type) {
372 	case 0x01:
373 		variant = "Bootloader";
374 		bt_dev_info(hdev, "Device revision is %u", version->dev_rev_id);
375 		bt_dev_info(hdev, "Secure boot is %s",
376 			    version->secure_boot ? "enabled" : "disabled");
377 		bt_dev_info(hdev, "OTP lock is %s",
378 			    version->otp_lock ? "enabled" : "disabled");
379 		bt_dev_info(hdev, "API lock is %s",
380 			    version->api_lock ? "enabled" : "disabled");
381 		bt_dev_info(hdev, "Debug lock is %s",
382 			    version->debug_lock ? "enabled" : "disabled");
383 		bt_dev_info(hdev, "Minimum firmware build %u week %u %u",
384 			    version->min_fw_build_nn, version->min_fw_build_cw,
385 			    2000 + version->min_fw_build_yy);
386 		break;
387 	case 0x03:
388 		variant = "Firmware";
389 		break;
390 	default:
391 		bt_dev_err(hdev, "Unsupported image type(%02x)", version->img_type);
392 		goto done;
393 	}
394 
395 	bt_dev_info(hdev, "%s timestamp %u.%u buildtype %u build %u", variant,
396 		    2000 + (version->timestamp >> 8), version->timestamp & 0xff,
397 		    version->build_type, version->build_num);
398 
399 done:
400 	return;
401 }
402 EXPORT_SYMBOL_GPL(btintel_version_info_tlv);
403 
btintel_read_version_tlv(struct hci_dev * hdev,struct intel_version_tlv * version)404 int btintel_read_version_tlv(struct hci_dev *hdev, struct intel_version_tlv *version)
405 {
406 	struct sk_buff *skb;
407 	const u8 param[1] = { 0xFF };
408 
409 	if (!version)
410 		return -EINVAL;
411 
412 	skb = __hci_cmd_sync(hdev, 0xfc05, 1, param, HCI_CMD_TIMEOUT);
413 	if (IS_ERR(skb)) {
414 		bt_dev_err(hdev, "Reading Intel version information failed (%ld)",
415 			   PTR_ERR(skb));
416 		return PTR_ERR(skb);
417 	}
418 
419 	if (skb->data[0]) {
420 		bt_dev_err(hdev, "Intel Read Version command failed (%02x)",
421 			   skb->data[0]);
422 		kfree_skb(skb);
423 		return -EIO;
424 	}
425 
426 	/* Consume Command Complete Status field */
427 	skb_pull(skb, 1);
428 
429 	/* Event parameters contatin multiple TLVs. Read each of them
430 	 * and only keep the required data. Also, it use existing legacy
431 	 * version field like hw_platform, hw_variant, and fw_variant
432 	 * to keep the existing setup flow
433 	 */
434 	while (skb->len) {
435 		struct intel_tlv *tlv;
436 
437 		tlv = (struct intel_tlv *)skb->data;
438 		switch (tlv->type) {
439 		case INTEL_TLV_CNVI_TOP:
440 			version->cnvi_top = get_unaligned_le32(tlv->val);
441 			break;
442 		case INTEL_TLV_CNVR_TOP:
443 			version->cnvr_top = get_unaligned_le32(tlv->val);
444 			break;
445 		case INTEL_TLV_CNVI_BT:
446 			version->cnvi_bt = get_unaligned_le32(tlv->val);
447 			break;
448 		case INTEL_TLV_CNVR_BT:
449 			version->cnvr_bt = get_unaligned_le32(tlv->val);
450 			break;
451 		case INTEL_TLV_DEV_REV_ID:
452 			version->dev_rev_id = get_unaligned_le16(tlv->val);
453 			break;
454 		case INTEL_TLV_IMAGE_TYPE:
455 			version->img_type = tlv->val[0];
456 			break;
457 		case INTEL_TLV_TIME_STAMP:
458 			version->timestamp = get_unaligned_le16(tlv->val);
459 			break;
460 		case INTEL_TLV_BUILD_TYPE:
461 			version->build_type = tlv->val[0];
462 			break;
463 		case INTEL_TLV_BUILD_NUM:
464 			version->build_num = get_unaligned_le32(tlv->val);
465 			break;
466 		case INTEL_TLV_SECURE_BOOT:
467 			version->secure_boot = tlv->val[0];
468 			break;
469 		case INTEL_TLV_OTP_LOCK:
470 			version->otp_lock = tlv->val[0];
471 			break;
472 		case INTEL_TLV_API_LOCK:
473 			version->api_lock = tlv->val[0];
474 			break;
475 		case INTEL_TLV_DEBUG_LOCK:
476 			version->debug_lock = tlv->val[0];
477 			break;
478 		case INTEL_TLV_MIN_FW:
479 			version->min_fw_build_nn = tlv->val[0];
480 			version->min_fw_build_cw = tlv->val[1];
481 			version->min_fw_build_yy = tlv->val[2];
482 			break;
483 		case INTEL_TLV_LIMITED_CCE:
484 			version->limited_cce = tlv->val[0];
485 			break;
486 		case INTEL_TLV_SBE_TYPE:
487 			version->sbe_type = tlv->val[0];
488 			break;
489 		case INTEL_TLV_OTP_BDADDR:
490 			memcpy(&version->otp_bd_addr, tlv->val, tlv->len);
491 			break;
492 		default:
493 			/* Ignore rest of information */
494 			break;
495 		}
496 		/* consume the current tlv and move to next*/
497 		skb_pull(skb, tlv->len + sizeof(*tlv));
498 	}
499 
500 	kfree_skb(skb);
501 	return 0;
502 }
503 EXPORT_SYMBOL_GPL(btintel_read_version_tlv);
504 
/* ------- REGMAP IBT SUPPORT ------- */

/* Access width encoding used in the register read/write vendor commands */
#define IBT_REG_MODE_8BIT  0x00
#define IBT_REG_MODE_16BIT 0x01
#define IBT_REG_MODE_32BIT 0x02

/* Per-region context: the HCI device plus the vendor opcodes used for
 * register reads and writes in this region.
 */
struct regmap_ibt_context {
	struct hci_dev *hdev;
	__u16 op_write;
	__u16 op_read;
};

/* Register access command: address, access width, value length, followed
 * by the value bytes on writes.
 */
struct ibt_cp_reg_access {
	__le32  addr;
	__u8    mode;
	__u8    len;
	__u8    data[];
} __packed;

/* Register access reply: status, echoed address, value bytes on reads */
struct ibt_rp_reg_access {
	__u8    status;
	__le32  addr;
	__u8    data[];
} __packed;
529 
/* regmap .read callback: issue the region's read opcode with an
 * (addr, mode, len) command and copy the returned value into val.
 */
static int regmap_ibt_read(void *context, const void *addr, size_t reg_size,
			   void *val, size_t val_size)
{
	struct regmap_ibt_context *ctx = context;
	struct ibt_cp_reg_access cp;
	struct ibt_rp_reg_access *rp;
	struct sk_buff *skb;
	int err = 0;

	/* Only 32-bit register addresses are supported */
	if (reg_size != sizeof(__le32))
		return -EINVAL;

	/* Map the requested value width onto the wire access mode */
	switch (val_size) {
	case 1:
		cp.mode = IBT_REG_MODE_8BIT;
		break;
	case 2:
		cp.mode = IBT_REG_MODE_16BIT;
		break;
	case 4:
		cp.mode = IBT_REG_MODE_32BIT;
		break;
	default:
		return -EINVAL;
	}

	/* regmap provides a little-endian formatted addr */
	cp.addr = *(__le32 *)addr;
	cp.len = val_size;

	bt_dev_dbg(ctx->hdev, "Register (0x%x) read", le32_to_cpu(cp.addr));

	skb = hci_cmd_sync(ctx->hdev, ctx->op_read, sizeof(cp), &cp,
			   HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(ctx->hdev, "regmap: Register (0x%x) read error (%d)",
			   le32_to_cpu(cp.addr), err);
		return err;
	}

	/* Reply must be exactly the status+addr header plus the value */
	if (skb->len != sizeof(*rp) + val_size) {
		bt_dev_err(ctx->hdev, "regmap: Register (0x%x) read error, bad len",
			   le32_to_cpu(cp.addr));
		err = -EINVAL;
		goto done;
	}

	rp = (struct ibt_rp_reg_access *)skb->data;

	/* The controller echoes the address back; a mismatch means the
	 * reply does not belong to this request.
	 */
	if (rp->addr != cp.addr) {
		bt_dev_err(ctx->hdev, "regmap: Register (0x%x) read error, bad addr",
			   le32_to_cpu(rp->addr));
		err = -EINVAL;
		goto done;
	}

	memcpy(val, rp->data, val_size);

done:
	kfree_skb(skb);
	return err;
}
593 
/* regmap .gather_write callback: send address and value bytes together in
 * a single vendor write command.
 */
static int regmap_ibt_gather_write(void *context,
				   const void *addr, size_t reg_size,
				   const void *val, size_t val_size)
{
	struct regmap_ibt_context *ctx = context;
	struct ibt_cp_reg_access *cp;
	struct sk_buff *skb;
	int plen = sizeof(*cp) + val_size;
	u8 mode;
	int err = 0;

	/* Only 32-bit register addresses are supported */
	if (reg_size != sizeof(__le32))
		return -EINVAL;

	/* Map the requested value width onto the wire access mode */
	switch (val_size) {
	case 1:
		mode = IBT_REG_MODE_8BIT;
		break;
	case 2:
		mode = IBT_REG_MODE_16BIT;
		break;
	case 4:
		mode = IBT_REG_MODE_32BIT;
		break;
	default:
		return -EINVAL;
	}

	/* The command buffer is variable length: header plus value bytes */
	cp = kmalloc(plen, GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	/* regmap provides a little-endian formatted addr/value */
	cp->addr = *(__le32 *)addr;
	cp->mode = mode;
	cp->len = val_size;
	memcpy(&cp->data, val, val_size);

	bt_dev_dbg(ctx->hdev, "Register (0x%x) write", le32_to_cpu(cp->addr));

	skb = hci_cmd_sync(ctx->hdev, ctx->op_write, plen, cp, HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(ctx->hdev, "regmap: Register (0x%x) write error (%d)",
			   le32_to_cpu(cp->addr), err);
		goto done;
	}
	kfree_skb(skb);

done:
	kfree(cp);
	return err;
}
647 
/* regmap .write callback: the buffer holds the 32-bit register address
 * followed by the value bytes; split it and forward to gather_write.
 */
static int regmap_ibt_write(void *context, const void *data, size_t count)
{
	/* data contains register+value, since we only support 32bit addr,
	 * minimum data size is 4 bytes.
	 */
	if (WARN_ONCE(count < 4, "Invalid register access"))
		return -EINVAL;

	return regmap_ibt_gather_write(context, data, 4, data + 4, count - 4);
}
658 
/* regmap .free_context callback: releases the context allocated in
 * btintel_regmap_init().
 */
static void regmap_ibt_free_context(void *context)
{
	kfree(context);
}
663 
/* regmap bus implemented on top of HCI vendor commands */
static struct regmap_bus regmap_ibt = {
	.read = regmap_ibt_read,
	.write = regmap_ibt_write,
	.gather_write = regmap_ibt_gather_write,
	.free_context = regmap_ibt_free_context,
	.reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
	.val_format_endian_default = REGMAP_ENDIAN_LITTLE,
};
672 
/* Config is the same for all register regions: 32-bit addresses and
 * 32-bit values.
 */
static const struct regmap_config regmap_ibt_cfg = {
	.name      = "btintel_regmap",
	.reg_bits  = 32,
	.val_bits  = 32,
};
679 
/* Create a regmap exposing a controller register region through the given
 * vendor read/write opcodes. The returned regmap owns the allocated
 * context; it is released via the bus .free_context callback.
 */
struct regmap *btintel_regmap_init(struct hci_dev *hdev, u16 opcode_read,
				   u16 opcode_write)
{
	struct regmap_ibt_context *ctx;

	bt_dev_info(hdev, "regmap: Init R%x-W%x region", opcode_read,
		    opcode_write);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->op_read = opcode_read;
	ctx->op_write = opcode_write;
	ctx->hdev = hdev;

	return regmap_init(&hdev->dev, &regmap_ibt, ctx, &regmap_ibt_cfg);
}
EXPORT_SYMBOL_GPL(btintel_regmap_init);
699 
/* Send HCI_Intel_Reset (0xfc01) with a soft reset, patches enabled, no
 * DDC reload, booting from the specified boot address.
 */
int btintel_send_intel_reset(struct hci_dev *hdev, u32 boot_param)
{
	/* { reset_type, patch_enable, ddc_reload, boot_option, boot_param } */
	struct intel_reset params = { 0x00, 0x01, 0x00, 0x01, 0x00000000 };
	struct sk_buff *skb;

	params.boot_param = cpu_to_le32(boot_param);

	skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(params), &params,
			     HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Failed to send Intel Reset command");
		return PTR_ERR(skb);
	}

	kfree_skb(skb);

	return 0;
}
EXPORT_SYMBOL_GPL(btintel_send_intel_reset);
719 
/* Read the Intel boot parameters (vendor opcode 0xfc0d) into *params and
 * log the interesting fields. Returns 0 on success, -EILSEQ on a
 * malformed event, or a negative errno mapped from the command status.
 */
int btintel_read_boot_params(struct hci_dev *hdev,
			     struct intel_boot_params *params)
{
	struct sk_buff *skb;

	skb = __hci_cmd_sync(hdev, 0xfc0d, 0, NULL, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Reading Intel boot parameters failed (%ld)",
			   PTR_ERR(skb));
		return PTR_ERR(skb);
	}

	if (skb->len != sizeof(*params)) {
		bt_dev_err(hdev, "Intel boot parameters size mismatch");
		kfree_skb(skb);
		return -EILSEQ;
	}

	memcpy(params, skb->data, sizeof(*params));

	kfree_skb(skb);

	/* The copy happens before the status check, so callers still see
	 * the raw data; a non-zero status is converted into a -errno.
	 */
	if (params->status) {
		bt_dev_err(hdev, "Intel boot parameters command failed (%02x)",
			   params->status);
		return -bt_to_errno(params->status);
	}

	bt_dev_info(hdev, "Device revision is %u",
		    le16_to_cpu(params->dev_revid));

	bt_dev_info(hdev, "Secure boot is %s",
		    params->secure_boot ? "enabled" : "disabled");

	bt_dev_info(hdev, "OTP lock is %s",
		    params->otp_lock ? "enabled" : "disabled");

	bt_dev_info(hdev, "API lock is %s",
		    params->api_lock ? "enabled" : "disabled");

	bt_dev_info(hdev, "Debug lock is %s",
		    params->debug_lock ? "enabled" : "disabled");

	bt_dev_info(hdev, "Minimum firmware build %u week %u %u",
		    params->min_fw_build_nn, params->min_fw_build_cw,
		    2000 + params->min_fw_build_yy);

	return 0;
}
EXPORT_SYMBOL_GPL(btintel_read_boot_params);
770 
btintel_sfi_rsa_header_secure_send(struct hci_dev * hdev,const struct firmware * fw)771 static int btintel_sfi_rsa_header_secure_send(struct hci_dev *hdev,
772 					      const struct firmware *fw)
773 {
774 	int err;
775 
776 	/* Start the firmware download transaction with the Init fragment
777 	 * represented by the 128 bytes of CSS header.
778 	 */
779 	err = btintel_secure_send(hdev, 0x00, 128, fw->data);
780 	if (err < 0) {
781 		bt_dev_err(hdev, "Failed to send firmware header (%d)", err);
782 		goto done;
783 	}
784 
785 	/* Send the 256 bytes of public key information from the firmware
786 	 * as the PKey fragment.
787 	 */
788 	err = btintel_secure_send(hdev, 0x03, 256, fw->data + 128);
789 	if (err < 0) {
790 		bt_dev_err(hdev, "Failed to send firmware pkey (%d)", err);
791 		goto done;
792 	}
793 
794 	/* Send the 256 bytes of signature information from the firmware
795 	 * as the Sign fragment.
796 	 */
797 	err = btintel_secure_send(hdev, 0x02, 256, fw->data + 388);
798 	if (err < 0) {
799 		bt_dev_err(hdev, "Failed to send firmware signature (%d)", err);
800 		goto done;
801 	}
802 
803 done:
804 	return err;
805 }
806 
btintel_sfi_ecdsa_header_secure_send(struct hci_dev * hdev,const struct firmware * fw)807 static int btintel_sfi_ecdsa_header_secure_send(struct hci_dev *hdev,
808 						const struct firmware *fw)
809 {
810 	int err;
811 
812 	/* Start the firmware download transaction with the Init fragment
813 	 * represented by the 128 bytes of CSS header.
814 	 */
815 	err = btintel_secure_send(hdev, 0x00, 128, fw->data + 644);
816 	if (err < 0) {
817 		bt_dev_err(hdev, "Failed to send firmware header (%d)", err);
818 		return err;
819 	}
820 
821 	/* Send the 96 bytes of public key information from the firmware
822 	 * as the PKey fragment.
823 	 */
824 	err = btintel_secure_send(hdev, 0x03, 96, fw->data + 644 + 128);
825 	if (err < 0) {
826 		bt_dev_err(hdev, "Failed to send firmware pkey (%d)", err);
827 		return err;
828 	}
829 
830 	/* Send the 96 bytes of signature information from the firmware
831 	 * as the Sign fragment
832 	 */
833 	err = btintel_secure_send(hdev, 0x02, 96, fw->data + 644 + 224);
834 	if (err < 0) {
835 		bt_dev_err(hdev, "Failed to send firmware signature (%d)",
836 			   err);
837 		return err;
838 	}
839 	return 0;
840 }
841 
/* Stream the firmware command buffer (starting at offset) to the
 * controller as Data fragments via Secure_Send, capturing the boot
 * parameter embedded in the file. Returns 0 on success; stays -EINVAL
 * if no 4-byte-aligned fragment was ever sent.
 */
static int btintel_download_firmware_payload(struct hci_dev *hdev,
					     const struct firmware *fw,
					     u32 *boot_param, size_t offset)
{
	int err;
	const u8 *fw_ptr;
	u32 frag_len;

	fw_ptr = fw->data + offset;
	frag_len = 0;
	err = -EINVAL;

	while (fw_ptr - fw->data < fw->size) {
		struct hci_command_hdr *cmd = (void *)(fw_ptr + frag_len);

		/* Each SKU has a different reset parameter to use in the
		 * HCI_Intel_Reset command and it is embedded in the firmware
		 * data. So, instead of using static value per SKU, check
		 * the firmware data and save it for later use.
		 */
		if (le16_to_cpu(cmd->opcode) == 0xfc0e) {
			/* The boot parameter is the first 32-bit value
			 * and rest of 3 octets are reserved.
			 */
			*boot_param = get_unaligned_le32(fw_ptr + sizeof(*cmd));

			bt_dev_dbg(hdev, "boot_param=0x%x", *boot_param);
		}

		frag_len += sizeof(*cmd) + cmd->plen;

		/* The parameter length of the secure send command requires
		 * a 4 byte alignment. It happens so that the firmware file
		 * contains proper Intel_NOP commands to align the fragments
		 * as needed.
		 *
		 * Send set of commands with 4 byte alignment from the
		 * firmware data buffer as a single Data fragment.
		 */
		if (!(frag_len % 4)) {
			err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr);
			if (err < 0) {
				bt_dev_err(hdev,
					   "Failed to send firmware data (%d)",
					   err);
				goto done;
			}

			fw_ptr += frag_len;
			frag_len = 0;
		}
	}

done:
	return err;
}
898 
/* Download an RSA-signed firmware file: header fragments first, then the
 * command buffer payload that follows the RSA header.
 */
int btintel_download_firmware(struct hci_dev *hdev,
			      const struct firmware *fw,
			      u32 *boot_param)
{
	int err;

	err = btintel_sfi_rsa_header_secure_send(hdev, fw);
	if (!err)
		err = btintel_download_firmware_payload(hdev, fw, boot_param,
							RSA_HEADER_LEN);

	return err;
}
EXPORT_SYMBOL_GPL(btintel_download_firmware);
913 
/* Download firmware for newer controllers, selecting the RSA or ECDSA
 * header path based on hardware variant and secure boot engine type.
 * The duplicated payload-download calls of the original are merged; the
 * original behavior of silently returning 0 for hardware variants
 * 0x15/0x16 and for unknown SBE types is preserved.
 */
int btintel_download_firmware_newgen(struct hci_dev *hdev,
				     const struct firmware *fw, u32 *boot_param,
				     u8 hw_variant, u8 sbe_type)
{
	int err;
	u32 css_header_ver;

	/* iBT hardware variants 0x0b, 0x0c, 0x11, 0x12, 0x13, 0x14 support
	 * only RSA secure boot engine. Hence, the corresponding sfi file will
	 * have RSA header of 644 bytes followed by Command Buffer.
	 *
	 * iBT hardware variants 0x17, 0x18 onwards support both RSA and ECDSA
	 * secure boot engine. As a result, the corresponding sfi file will
	 * have RSA header of 644, ECDSA header of 320 bytes followed by
	 * Command Buffer.
	 *
	 * CSS Header byte positions 0x08 to 0x0B represent the CSS Header
	 * version: RSA(0x00010000) , ECDSA (0x00020000)
	 */
	css_header_ver = get_unaligned_le32(fw->data + CSS_HEADER_OFFSET);
	if (css_header_ver != 0x00010000) {
		bt_dev_err(hdev, "Invalid CSS Header version");
		return -EINVAL;
	}

	if (hw_variant <= 0x14) {
		if (sbe_type != 0x00) {
			bt_dev_err(hdev, "Invalid SBE type for hardware variant (%d)",
				   hw_variant);
			return -EINVAL;
		}

		err = btintel_sfi_rsa_header_secure_send(hdev, fw);
		if (err)
			return err;

		return btintel_download_firmware_payload(hdev, fw, boot_param,
							 RSA_HEADER_LEN);
	}

	/* Variants 0x15/0x16: nothing to do here (original behavior) */
	if (hw_variant < 0x17)
		return 0;

	/* Check if CSS header for ECDSA follows the RSA header */
	if (fw->data[ECDSA_OFFSET] != 0x06)
		return -EINVAL;

	/* Check if the CSS Header version is ECDSA(0x00020000) */
	css_header_ver = get_unaligned_le32(fw->data + ECDSA_OFFSET +
					    CSS_HEADER_OFFSET);
	if (css_header_ver != 0x00020000) {
		bt_dev_err(hdev, "Invalid CSS Header version");
		return -EINVAL;
	}

	/* Both engines share the same payload offset; only the header
	 * fragment transfer differs by secure boot engine type.
	 */
	if (sbe_type == 0x00)
		err = btintel_sfi_rsa_header_secure_send(hdev, fw);
	else if (sbe_type == 0x01)
		err = btintel_sfi_ecdsa_header_secure_send(hdev, fw);
	else
		return 0;	/* unknown SBE type: original silent accept */

	if (err)
		return err;

	return btintel_download_firmware_payload(hdev, fw, boot_param,
						 RSA_HEADER_LEN + ECDSA_HEADER_LEN);
}
EXPORT_SYMBOL_GPL(btintel_download_firmware_newgen);
990 
/* Force the controller back into bootloader mode via a hard Intel Reset,
 * typically after a failed firmware download.
 */
void btintel_reset_to_bootloader(struct hci_dev *hdev)
{
	struct intel_reset params;
	struct sk_buff *skb;

	/* Send Intel Reset command. This will result in
	 * re-enumeration of BT controller.
	 *
	 * Intel Reset parameter description:
	 * reset_type :   0x00 (Soft reset),
	 *		  0x01 (Hard reset)
	 * patch_enable : 0x00 (Do not enable),
	 *		  0x01 (Enable)
	 * ddc_reload :   0x00 (Do not reload),
	 *		  0x01 (Reload)
	 * boot_option:   0x00 (Current image),
	 *                0x01 (Specified boot address)
	 * boot_param:    Boot address
	 *
	 */
	params.reset_type = 0x01;
	params.patch_enable = 0x01;
	params.ddc_reload = 0x01;
	params.boot_option = 0x00;
	params.boot_param = cpu_to_le32(0x00000000);

	skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(params),
			     &params, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "FW download error recovery failed (%ld)",
			   PTR_ERR(skb));
		return;
	}
	bt_dev_info(hdev, "Intel reset sent to retry FW download");
	kfree_skb(skb);

	/* Current Intel BT controllers(ThP/JfP) hold the USB reset
	 * lines for 2ms when it receives Intel Reset in bootloader mode.
	 * Whereas, the upcoming Intel BT controllers will hold USB reset
	 * for 150ms. To keep the delay generic, 150ms is chosen here.
	 */
	msleep(150);
}
EXPORT_SYMBOL_GPL(btintel_reset_to_bootloader);
1035 
btintel_read_debug_features(struct hci_dev * hdev,struct intel_debug_features * features)1036 int btintel_read_debug_features(struct hci_dev *hdev,
1037 				struct intel_debug_features *features)
1038 {
1039 	struct sk_buff *skb;
1040 	u8 page_no = 1;
1041 
1042 	/* Intel controller supports two pages, each page is of 128-bit
1043 	 * feature bit mask. And each bit defines specific feature support
1044 	 */
1045 	skb = __hci_cmd_sync(hdev, 0xfca6, sizeof(page_no), &page_no,
1046 			     HCI_INIT_TIMEOUT);
1047 	if (IS_ERR(skb)) {
1048 		bt_dev_err(hdev, "Reading supported features failed (%ld)",
1049 			   PTR_ERR(skb));
1050 		return PTR_ERR(skb);
1051 	}
1052 
1053 	if (skb->len != (sizeof(features->page1) + 3)) {
1054 		bt_dev_err(hdev, "Supported features event size mismatch");
1055 		kfree_skb(skb);
1056 		return -EILSEQ;
1057 	}
1058 
1059 	memcpy(features->page1, skb->data + 3, sizeof(features->page1));
1060 
1061 	/* Read the supported features page2 if required in future.
1062 	 */
1063 	kfree_skb(skb);
1064 	return 0;
1065 }
1066 EXPORT_SYMBOL_GPL(btintel_read_debug_features);
1067 
/* Enable the telemetry exception format via a DDC write (opcode 0xfc8b)
 * when the feature bits read earlier advertise support for it.
 */
int btintel_set_debug_features(struct hci_dev *hdev,
			       const struct intel_debug_features *features)
{
	/* Pre-built DDC entry enabling the telemetry event mask */
	u8 mask[11] = { 0x0a, 0x92, 0x02, 0x07, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00 };
	struct sk_buff *skb;

	if (!features)
		return -EINVAL;

	/* Support is advertised in the low six bits of the first page */
	if (!(features->page1[0] & 0x3f)) {
		bt_dev_info(hdev, "Telemetry exception format not supported");
		return 0;
	}

	skb = __hci_cmd_sync(hdev, 0xfc8b, 11, mask, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Setting Intel telemetry ddc write event mask failed (%ld)",
			   PTR_ERR(skb));
		return PTR_ERR(skb);
	}

	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(btintel_set_debug_features);
1094 
/* Module metadata; the listed firmware files are requested at runtime */
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth support for Intel devices ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("intel/ibt-11-5.sfi");
MODULE_FIRMWARE("intel/ibt-11-5.ddc");
MODULE_FIRMWARE("intel/ibt-12-16.sfi");
MODULE_FIRMWARE("intel/ibt-12-16.ddc");
1103