// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Bluetooth support for Qualcomm Atheros chips
 *
 *  Copyright (c) 2015 The Linux Foundation. All rights reserved.
 */
#include <linux/module.h>
#include <linux/firmware.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "btqca.h"

#define VERSION "0.1"

int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version,
			 enum qca_btsoc_type soc_type)
{
	struct sk_buff *skb;
	struct edl_event_hdr *edl;
	struct qca_btsoc_version *ver;
	char cmd;
	int err = 0;
	u8 event_type = HCI_EV_VENDOR;
	u8 rlen = sizeof(*edl) + sizeof(*ver);
	u8 rtype = EDL_APP_VER_RES_EVT;

	bt_dev_dbg(hdev, "QCA Version Request");

	/* Unlike other SoCs, which send the version command response as
	 * the payload of a VSE event, WCN3991 sends it as the payload
	 * of a command complete event.
	 */
	if (soc_type >= QCA_WCN3991) {
		event_type = 0;
		rlen += 1;
		rtype = EDL_PATCH_VER_REQ_CMD;
	}

	cmd = EDL_PATCH_VER_REQ_CMD;
	skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN,
				&cmd, event_type, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(hdev, "Reading QCA version information failed (%d)",
			   err);
		return err;
	}

	if (skb->len != rlen) {
		bt_dev_err(hdev, "QCA Version size mismatch len %d", skb->len);
		err = -EILSEQ;
		goto out;
	}

	edl = (struct edl_event_hdr *)(skb->data);
	if (!edl) {
		bt_dev_err(hdev, "QCA TLV with no header");
		err = -EILSEQ;
		goto out;
	}

	if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
	    edl->rtype != rtype) {
		bt_dev_err(hdev, "QCA Wrong packet received %d %d", edl->cresp,
			   edl->rtype);
		err = -EIO;
		goto out;
	}

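	/* WCN3991 and later carry one extra leading byte in the payload;
	 * strip it so the version structure that follows lines up.
	 */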
	if (soc_type >= QCA_WCN3991)
		memmove(&edl->data, &edl->data[1], sizeof(*ver));

	ver = (struct qca_btsoc_version *)(edl->data);

	bt_dev_info(hdev, "QCA Product ID   :0x%08x",
		    le32_to_cpu(ver->product_id));
	bt_dev_info(hdev, "QCA SOC Version  :0x%08x",
		    le32_to_cpu(ver->soc_id));
	bt_dev_info(hdev, "QCA ROM Version  :0x%08x",
		    le16_to_cpu(ver->rom_ver));
	bt_dev_info(hdev, "QCA Patch Version:0x%08x",
		    le16_to_cpu(ver->patch_ver));

	/* The QCA chipset version is derived from the SoC and ROM
	 * versions: the upper two bytes come from the SoC ID and the
	 * lower two bytes from the ROM version.
	 */
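	/* For example (illustrative values): soc_id 0x00000044 and
	 * rom_ver 0x0302 combine into soc_version 0x00440302.
	 */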
	*soc_version = (le32_to_cpu(ver->soc_id) << 16) |
		       (le16_to_cpu(ver->rom_ver) & 0x0000ffff);
	if (*soc_version == 0)
		err = -EILSEQ;

out:
	kfree_skb(skb);
	if (err)
		bt_dev_err(hdev, "QCA Failed to get version (%d)", err);

	return err;
}
EXPORT_SYMBOL_GPL(qca_read_soc_version);

static int qca_send_reset(struct hci_dev *hdev)
{
	struct sk_buff *skb;
	int err;

	bt_dev_dbg(hdev, "QCA HCI_RESET");

	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(hdev, "QCA Reset failed (%d)", err);
		return err;
	}

	kfree_skb(skb);

	return 0;
}

int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
{
	struct sk_buff *skb;
	int err;

	bt_dev_dbg(hdev, "QCA pre shutdown cmd");

	skb = __hci_cmd_sync_ev(hdev, QCA_PRE_SHUTDOWN_CMD, 0,
				NULL, HCI_EV_CMD_COMPLETE, HCI_INIT_TIMEOUT);

	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(hdev, "QCA preshutdown_cmd failed (%d)", err);
		return err;
	}

	kfree_skb(skb);

	return 0;
}
EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd);

static void qca_tlv_check_data(struct qca_fw_config *config,
		u8 *fw_data, enum qca_btsoc_type soc_type)
{
	const u8 *data;
	u32 type_len;
	u16 tag_id, tag_len;
	int idx, length;
	struct tlv_type_hdr *tlv;
	struct tlv_type_patch *tlv_patch;
	struct tlv_type_nvm *tlv_nvm;
	uint8_t nvm_baud_rate = config->user_baud_rate;

	tlv = (struct tlv_type_hdr *)fw_data;

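	/* The 32-bit TLV header packs the TLV type in the low byte and
	 * a 24-bit body length in the upper three bytes.
	 */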
	type_len = le32_to_cpu(tlv->type_len);
	length = (type_len >> 8) & 0x00ffffff;

	BT_DBG("TLV Type\t\t : 0x%x", type_len & 0x000000ff);
	BT_DBG("Length\t\t : %d bytes", length);

	config->dnld_mode = QCA_SKIP_EVT_NONE;
	config->dnld_type = QCA_SKIP_EVT_NONE;

	switch (config->type) {
	case TLV_TYPE_PATCH:
		tlv_patch = (struct tlv_type_patch *)tlv->data;

		/* For Rome version 1.1 to 3.1, all segment commands
		 * are acked by a vendor specific event (VSE).
		 * For Rome >= 3.2, the download mode field indicates
		 * if VSE is skipped by the controller.
		 * In case VSE is skipped, only the last segment is acked.
		 */
		config->dnld_mode = tlv_patch->download_mode;
		config->dnld_type = config->dnld_mode;

		BT_DBG("Total Length           : %d bytes",
		       le32_to_cpu(tlv_patch->total_size));
		BT_DBG("Patch Data Length      : %d bytes",
		       le32_to_cpu(tlv_patch->data_length));
		BT_DBG("Signing Format Version : 0x%x",
		       tlv_patch->format_version);
		BT_DBG("Signature Algorithm    : 0x%x",
		       tlv_patch->signature);
		BT_DBG("Download mode          : 0x%x",
		       tlv_patch->download_mode);
		BT_DBG("Reserved               : 0x%x",
		       tlv_patch->reserved1);
		BT_DBG("Product ID             : 0x%04x",
		       le16_to_cpu(tlv_patch->product_id));
		BT_DBG("Rom Build Version      : 0x%04x",
		       le16_to_cpu(tlv_patch->rom_build));
		BT_DBG("Patch Version          : 0x%04x",
		       le16_to_cpu(tlv_patch->patch_version));
		BT_DBG("Reserved               : 0x%x",
		       le16_to_cpu(tlv_patch->reserved2));
		BT_DBG("Patch Entry Address    : 0x%x",
		       le32_to_cpu(tlv_patch->entry));
		break;

	case TLV_TYPE_NVM:
		idx = 0;
		data = tlv->data;
		while (idx < length) {
			tlv_nvm = (struct tlv_type_nvm *)(data + idx);

			tag_id = le16_to_cpu(tlv_nvm->tag_id);
			tag_len = le16_to_cpu(tlv_nvm->tag_len);

			/* Update NVM tags as needed */
			switch (tag_id) {
			case EDL_TAG_ID_HCI:
				/* HCI transport layer parameters:
				 * enable software in-band sleep on the
				 * controller side.
				 */
				tlv_nvm->data[0] |= 0x80;

				/* UART Baud Rate */
				if (soc_type >= QCA_WCN3991)
					tlv_nvm->data[1] = nvm_baud_rate;
				else
					tlv_nvm->data[2] = nvm_baud_rate;

				break;

			case EDL_TAG_ID_DEEP_SLEEP:
				/* Sleep enable mask: enable the deep
				 * sleep feature on the controller.
				 */
				tlv_nvm->data[0] |= 0x01;

				break;
			}

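			/* Advance past the tag header (id and length),
			 * the eight bytes that follow it, and the tag
			 * payload itself.
			 */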
			idx += (sizeof(u16) + sizeof(u16) + 8 + tag_len);
		}
		break;

	default:
		BT_ERR("Unknown TLV type %d", config->type);
		break;
	}
}

static int qca_tlv_send_segment(struct hci_dev *hdev, int seg_size,
				const u8 *data, enum qca_tlv_dnld_mode mode,
				enum qca_btsoc_type soc_type)
{
	struct sk_buff *skb;
	struct edl_event_hdr *edl;
	struct tlv_seg_resp *tlv_resp;
	u8 cmd[MAX_SIZE_PER_TLV_SEGMENT + 2];
	int err = 0;
	u8 event_type = HCI_EV_VENDOR;
	u8 rlen = (sizeof(*edl) + sizeof(*tlv_resp));
	u8 rtype = EDL_TVL_DNLD_RES_EVT;

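	/* Build the TLV download request: command opcode, segment
	 * length, then the segment payload itself.
	 */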
	cmd[0] = EDL_PATCH_TLV_REQ_CMD;
	cmd[1] = seg_size;
	memcpy(cmd + 2, data, seg_size);

	if (mode == QCA_SKIP_EVT_VSE_CC || mode == QCA_SKIP_EVT_VSE)
		return __hci_cmd_send(hdev, EDL_PATCH_CMD_OPCODE, seg_size + 2,
				      cmd);

	/* Unlike other SoCs, which send the response as the payload of
	 * a VSE event, WCN3991 sends it as the payload of a command
	 * complete event.
	 */
	if (soc_type >= QCA_WCN3991) {
		event_type = 0;
		rlen = sizeof(*edl);
		rtype = EDL_PATCH_TLV_REQ_CMD;
	}

	skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, seg_size + 2, cmd,
				event_type, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(hdev, "QCA Failed to send TLV segment (%d)", err);
		return err;
	}

	if (skb->len != rlen) {
		bt_dev_err(hdev, "QCA TLV response size mismatch");
		err = -EILSEQ;
		goto out;
	}

	edl = (struct edl_event_hdr *)(skb->data);
	if (!edl) {
		bt_dev_err(hdev, "TLV with no header");
		err = -EILSEQ;
		goto out;
	}

	if (edl->cresp != EDL_CMD_REQ_RES_EVT || edl->rtype != rtype) {
		bt_dev_err(hdev, "QCA TLV with error stat 0x%x rtype 0x%x",
			   edl->cresp, edl->rtype);
		err = -EIO;
	}

	if (soc_type >= QCA_WCN3991)
		goto out;

	tlv_resp = (struct tlv_seg_resp *)(edl->data);
	if (tlv_resp->result) {
		bt_dev_err(hdev, "QCA TLV with error stat 0x%x rtype 0x%x (0x%x)",
			   edl->cresp, edl->rtype, tlv_resp->result);
	}

out:
	kfree_skb(skb);

	return err;
}

static int qca_inject_cmd_complete_event(struct hci_dev *hdev)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_cmd_complete *evt;
	struct sk_buff *skb;

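	/* Craft a minimal HCI Command Complete event (event header,
	 * ncmd/opcode parameters and a single status byte) and feed it
	 * to the core as if it came from the controller.
	 */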
	skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = skb_put(skb, sizeof(*hdr));
	hdr->evt = HCI_EV_CMD_COMPLETE;
	hdr->plen = sizeof(*evt) + 1;

	evt = skb_put(skb, sizeof(*evt));
	evt->ncmd = 1;
	evt->opcode = cpu_to_le16(QCA_HCI_CC_OPCODE);

	skb_put_u8(skb, QCA_HCI_CC_SUCCESS);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;

	return hci_recv_frame(hdev, skb);
}

static int qca_download_firmware(struct hci_dev *hdev,
				 struct qca_fw_config *config,
				 enum qca_btsoc_type soc_type)
{
	const struct firmware *fw;
	u8 *data;
	const u8 *segment;
	int ret, size, remain, i = 0;

	bt_dev_info(hdev, "QCA Downloading %s", config->fwname);

	ret = request_firmware(&fw, config->fwname, &hdev->dev);
	if (ret) {
		bt_dev_err(hdev, "QCA Failed to request file: %s (%d)",
			   config->fwname, ret);
		return ret;
	}

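	/* Copy the image into a writable buffer: qca_tlv_check_data()
	 * below patches NVM tags in place before download.
	 */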
	size = fw->size;
	data = vmalloc(fw->size);
	if (!data) {
		bt_dev_err(hdev, "QCA Failed to allocate memory for file: %s",
			   config->fwname);
		release_firmware(fw);
		return -ENOMEM;
	}

	memcpy(data, fw->data, size);
	release_firmware(fw);

	qca_tlv_check_data(config, data, soc_type);

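	/* Stream the firmware to the controller in chunks of at most
	 * MAX_SIZE_PER_TLV_SEGMENT bytes.
	 */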
	segment = data;
	remain = size;
	while (remain > 0) {
		int segsize = min(MAX_SIZE_PER_TLV_SEGMENT, remain);

		bt_dev_dbg(hdev, "Send segment %d, size %d", i++, segsize);

		remain -= segsize;
		/* The last segment is always acked regardless of download mode */
		if (!remain || segsize < MAX_SIZE_PER_TLV_SEGMENT)
			config->dnld_mode = QCA_SKIP_EVT_NONE;

		ret = qca_tlv_send_segment(hdev, segsize, segment,
					   config->dnld_mode, soc_type);
		if (ret)
			goto out;

		segment += segsize;
	}

	/* The latest Qualcomm chipsets do not send a command complete
	 * event for every firmware packet sent; they only respond with
	 * a vendor specific event for the last packet. This optimization
	 * in the chip shortens BT initialization time. Inject a command
	 * complete event here to avoid a command timeout error message.
	 */
	if (config->dnld_type == QCA_SKIP_EVT_VSE_CC ||
	    config->dnld_type == QCA_SKIP_EVT_VSE)
		ret = qca_inject_cmd_complete_event(hdev);

out:
	vfree(data);

	return ret;
}

static int qca_disable_soc_logging(struct hci_dev *hdev)
{
	struct sk_buff *skb;
	u8 cmd[2];
	int err;

	cmd[0] = QCA_DISABLE_LOGGING_SUB_OP;
	cmd[1] = 0x00;
	skb = __hci_cmd_sync_ev(hdev, QCA_DISABLE_LOGGING, sizeof(cmd), cmd,
				HCI_EV_CMD_COMPLETE, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(hdev, "QCA Failed to disable soc logging(%d)", err);
		return err;
	}

	kfree_skb(skb);

	return 0;
}

int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
{
	struct sk_buff *skb;
	u8 cmd[9];
	int err;

	cmd[0] = EDL_NVM_ACCESS_SET_REQ_CMD;
	cmd[1] = 0x02;			/* TAG ID */
	cmd[2] = sizeof(bdaddr_t);	/* size */
	memcpy(cmd + 3, bdaddr, sizeof(bdaddr_t));
	skb = __hci_cmd_sync_ev(hdev, EDL_NVM_ACCESS_OPCODE, sizeof(cmd), cmd,
				HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(hdev, "QCA Change address command failed (%d)", err);
		return err;
	}

	kfree_skb(skb);

	return 0;
}
EXPORT_SYMBOL_GPL(qca_set_bdaddr_rome);

int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
		   enum qca_btsoc_type soc_type, u32 soc_ver,
		   const char *firmware_name)
{
	struct qca_fw_config config;
	int err;
	u8 rom_ver = 0;

	bt_dev_dbg(hdev, "QCA setup on UART");

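	/* Setup sequence: download the rampatch, then the NVM
	 * configuration, optionally disable SoC logging, and finish
	 * with an HCI reset.
	 */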
	config.user_baud_rate = baudrate;

	/* Download rampatch file */
	config.type = TLV_TYPE_PATCH;
	if (qca_is_wcn399x(soc_type)) {
		/* Firmware files to download are selected by the ROM version,
		 * which is derived from the lower nibbles of the last two
		 * bytes of soc_ver.
		 */
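		/* For example, a soc_ver whose low 16 bits are 0x0302
		 * (illustrative value) yields rom_ver 0x32.
		 */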
		rom_ver = ((soc_ver & 0x00000f00) >> 0x04) |
			    (soc_ver & 0x0000000f);
		snprintf(config.fwname, sizeof(config.fwname),
			 "qca/crbtfw%02x.tlv", rom_ver);
	} else if (soc_type == QCA_QCA6390) {
		rom_ver = ((soc_ver & 0x00000f00) >> 0x04) |
			    (soc_ver & 0x0000000f);
		snprintf(config.fwname, sizeof(config.fwname),
			 "qca/htbtfw%02x.tlv", rom_ver);
	} else {
		snprintf(config.fwname, sizeof(config.fwname),
			 "qca/rampatch_%08x.bin", soc_ver);
	}

	err = qca_download_firmware(hdev, &config, soc_type);
	if (err < 0) {
		bt_dev_err(hdev, "QCA Failed to download patch (%d)", err);
		return err;
	}

	/* Give the controller some time to get ready to receive the NVM */
	msleep(10);

	/* Download NVM configuration */
	config.type = TLV_TYPE_NVM;
	if (firmware_name)
		snprintf(config.fwname, sizeof(config.fwname),
			 "qca/%s", firmware_name);
	else if (qca_is_wcn399x(soc_type))
		snprintf(config.fwname, sizeof(config.fwname),
			 "qca/crnv%02x.bin", rom_ver);
	else if (soc_type == QCA_QCA6390)
		snprintf(config.fwname, sizeof(config.fwname),
			 "qca/htnv%02x.bin", rom_ver);
	else
		snprintf(config.fwname, sizeof(config.fwname),
			 "qca/nvm_%08x.bin", soc_ver);

	err = qca_download_firmware(hdev, &config, soc_type);
	if (err < 0) {
		bt_dev_err(hdev, "QCA Failed to download NVM (%d)", err);
		return err;
	}

	if (soc_type >= QCA_WCN3991) {
		err = qca_disable_soc_logging(hdev);
		if (err < 0)
			return err;
	}

	/* Perform HCI reset */
	err = qca_send_reset(hdev);
	if (err < 0) {
		bt_dev_err(hdev, "QCA Failed to run HCI_RESET (%d)", err);
		return err;
	}

	bt_dev_info(hdev, "QCA setup on UART is completed");

	return 0;
}
EXPORT_SYMBOL_GPL(qca_uart_setup);

int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
{
	struct sk_buff *skb;
	int err;

	skb = __hci_cmd_sync_ev(hdev, EDL_WRITE_BD_ADDR_OPCODE, 6, bdaddr,
				HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(hdev, "QCA Change address cmd failed (%d)", err);
		return err;
	}

	kfree_skb(skb);

	return 0;
}
EXPORT_SYMBOL_GPL(qca_set_bdaddr);


MODULE_AUTHOR("Ben Young Tae Kim <ytkim@qca.qualcomm.com>");
MODULE_DESCRIPTION("Bluetooth support for Qualcomm Atheros family ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");