/*
 * Copyright 2012-2020, 2023 NXP
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <errno.h>
#include <pthread.h>
#include <semaphore.h>
#include <time.h>
#include <log/log.h>

#include <algorithm>
#include <cstring>
#include <map>
#include <memory>
#include <mutex>
#include <unordered_set>
#include <vector>

#include <phNxpLog.h>
#include <phNxpUciHal.h>
#include <phNxpUciHal_utils.h>

using namespace std;
map<uint16_t, vector<uint16_t>> input_map;
map<uint16_t, vector<uint16_t>> conf_map;

/****************** Semaphore and mutex helper functions **********************/
/* Semaphore and mutex monitor */
struct phNxpUciHal_Monitor {
 public:
  static std::unique_ptr<phNxpUciHal_Monitor> Create() {
    auto monitor = std::make_unique<phNxpUciHal_Monitor>();
    // pthread_mutex_init() returns 0 on success and an error number on failure.
    if (pthread_mutex_init(&monitor->reentrance_mutex_, NULL) != 0) {
      return nullptr;
    }
    if (pthread_mutex_init(&monitor->concurrency_mutex_, NULL) != 0) {
      pthread_mutex_destroy(&monitor->reentrance_mutex_);
      return nullptr;
    }
    return monitor;
  }

  virtual ~phNxpUciHal_Monitor() {
    pthread_mutex_destroy(&concurrency_mutex_);
    ReentranceUnlock();
    pthread_mutex_destroy(&reentrance_mutex_);
    for (auto p : sems_) {
      NXPLOG_UCIHAL_E("Unreleased semaphore %p", p);
      p->status = UWBSTATUS_FAILED;
      sem_post(&p->sem);
    }
    sems_.clear();
  }

  void AddSem(phNxpUciHal_Sem_t* pCallbackData) {
    std::lock_guard<std::mutex> lock(lock_);
    auto it = sems_.find(pCallbackData);
    if (it == sems_.end()) {
      sems_.insert(pCallbackData);
    } else {
      NXPLOG_UCIHAL_E("phNxpUciHal_init_cb_data: duplicated semaphore %p",
                      pCallbackData);
    }
  }

  void RemoveSem(phNxpUciHal_Sem_t* pCallbackData) {
    std::lock_guard<std::mutex> lock(lock_);
    auto it = sems_.find(pCallbackData);
    if (it == sems_.end()) {
      NXPLOG_UCIHAL_E("phNxpUciHal_cleanup_cb_data: orphan semaphore %p",
                      pCallbackData);
    } else {
      sems_.erase(it);
    }
  }

  void Reentrancelock() {
    pthread_mutex_lock(&reentrance_mutex_);
  }

  void ReentranceUnlock() {
    pthread_mutex_unlock(&reentrance_mutex_);
  }

  void Concurrencylock() {
    pthread_mutex_lock(&concurrency_mutex_);
  }

  void ConcurrencyUnlock() {
    pthread_mutex_unlock(&concurrency_mutex_);
  }

 private:
  std::unordered_set<phNxpUciHal_Sem_t*> sems_;
  std::mutex lock_;
  // Mutex protecting native library against reentrance
  pthread_mutex_t reentrance_mutex_;
  // Mutex protecting native library against concurrency
  pthread_mutex_t concurrency_mutex_;
};

static std::unique_ptr<phNxpUciHal_Monitor> nxpucihal_monitor;

/*******************************************************************************
**
** Function         phNxpUciHal_init_monitor
**
** Description      Initialize the semaphore monitor
**
** Returns          true if the monitor was created, otherwise false
**
*******************************************************************************/
bool phNxpUciHal_init_monitor(void) {
  NXPLOG_UCIHAL_D("Entering phNxpUciHal_init_monitor");

  nxpucihal_monitor = phNxpUciHal_Monitor::Create();

  if (nxpucihal_monitor == nullptr) {
    NXPLOG_UCIHAL_E("nxpucihal_monitor creation failed");
    return false;
  }
  return true;
}

/*******************************************************************************
**
** Function         phNxpUciHal_cleanup_monitor
**
** Description      Clean up semaphore monitor
**
** Returns          None
**
*******************************************************************************/
void phNxpUciHal_cleanup_monitor(void) {
  nxpucihal_monitor = nullptr;
}

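/*
 * Usage sketch (illustrative, not part of this file): the monitor is expected
 * to be created once when the HAL is opened and torn down when it is closed.
 * The caller names below are assumptions.
 *
 *   tHAL_UWB_STATUS hal_open_example(void) {
 *     if (!phNxpUciHal_init_monitor()) {
 *       return UWBSTATUS_FAILED;  // no locking or semaphore tracking available
 *     }
 *     // ... open the transport, start the reader thread, etc. ...
 *     return UWBSTATUS_SUCCESS;
 *   }
 *
 *   void hal_close_example(void) {
 *     // Destroys both mutexes and releases any semaphore still being waited on.
 *     phNxpUciHal_cleanup_monitor();
 *   }
 */
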
/* Initialize the callback data */
tHAL_UWB_STATUS phNxpUciHal_init_cb_data(phNxpUciHal_Sem_t* pCallbackData) {
  /* Create semaphore */
  if (sem_init(&pCallbackData->sem, 0, 0) == -1) {
    NXPLOG_UCIHAL_E("Semaphore creation failed");
    return UWBSTATUS_FAILED;
  }

  /* Set default status value */
  pCallbackData->status = UWBSTATUS_FAILED;

  /* Add to active semaphore list */
  if (nxpucihal_monitor != nullptr) {
    nxpucihal_monitor->AddSem(pCallbackData);
  }

  return UWBSTATUS_SUCCESS;
}

/*******************************************************************************
**
** Function         phNxpUciHal_cleanup_cb_data
**
** Description      Clean up callback data
**
** Returns          None
**
*******************************************************************************/
void phNxpUciHal_cleanup_cb_data(phNxpUciHal_Sem_t* pCallbackData) {
  /* Destroy semaphore */
  if (sem_destroy(&pCallbackData->sem)) {
    NXPLOG_UCIHAL_E("phNxpUciHal_cleanup_cb_data: Failed to destroy semaphore");
  }
  if (nxpucihal_monitor != nullptr) {
    nxpucihal_monitor->RemoveSem(pCallbackData);
  }
}

void REENTRANCE_LOCK() {
  if (nxpucihal_monitor != nullptr) {
    nxpucihal_monitor->Reentrancelock();
  }
}
void REENTRANCE_UNLOCK() {
  if (nxpucihal_monitor != nullptr) {
    nxpucihal_monitor->ReentranceUnlock();
  }
}
void CONCURRENCY_LOCK() {
  if (nxpucihal_monitor != nullptr) {
    nxpucihal_monitor->Concurrencylock();
  }
}
void CONCURRENCY_UNLOCK() {
  if (nxpucihal_monitor != nullptr) {
    nxpucihal_monitor->ConcurrencyUnlock();
  }
}
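
/*
 * Usage sketch (illustrative): CONCURRENCY_LOCK()/CONCURRENCY_UNLOCK() bracket
 * a HAL API entry point so only one command sequence runs at a time, while
 * REENTRANCE_LOCK()/REENTRANCE_UNLOCK() guard the shared write path itself.
 * The function below and the write helper it calls are assumed examples, not
 * APIs defined in this file.
 *
 *   tHAL_UWB_STATUS example_send_cmd(uint16_t len, const uint8_t* p_cmd) {
 *     CONCURRENCY_LOCK();
 *     REENTRANCE_LOCK();
 *     tHAL_UWB_STATUS status = example_write_unlocked(len, p_cmd);  // hypothetical
 *     REENTRANCE_UNLOCK();
 *     CONCURRENCY_UNLOCK();
 *     return status;
 *   }
 */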

int phNxpUciHal_sem_timed_wait_msec(phNxpUciHal_Sem_t* pCallbackData, long msec)
{
  int ret;
  struct timespec absTimeout;
  if (clock_gettime(CLOCK_MONOTONIC, &absTimeout) == -1) {
    NXPLOG_UCIHAL_E("clock_gettime failed");
    return -1;
  }

  if (msec > 1000L) {
    absTimeout.tv_sec += msec / 1000L;
    msec = msec % 1000L;
  }
  absTimeout.tv_nsec += msec * 1000000L;
  // Normalize: tv_nsec must stay below one second.
  if (absTimeout.tv_nsec >= 1000000000L) {
    absTimeout.tv_nsec -= 1000000000L;
    absTimeout.tv_sec += 1;
  }

  while ((ret = sem_timedwait_monotonic_np(&pCallbackData->sem, &absTimeout)) == -1 &&
         errno == EINTR) {
    continue;
  }
  if (ret == -1 && errno == ETIMEDOUT) {
    pCallbackData->status = UWBSTATUS_RESPONSE_TIMEOUT;
    NXPLOG_UCIHAL_E("wait semaphore timed out");
    return -1;
  }
  return 0;
}
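
/*
 * Usage sketch (illustrative, assumed caller code): a command/response wait
 * typically pairs phNxpUciHal_init_cb_data(), a write, a bounded wait, and
 * phNxpUciHal_cleanup_cb_data(). The response callback (not shown) sets
 * cb_data.status and posts cb_data.sem.
 *
 *   phNxpUciHal_Sem_t cb_data;
 *   if (phNxpUciHal_init_cb_data(&cb_data) != UWBSTATUS_SUCCESS) {
 *     return UWBSTATUS_FAILED;
 *   }
 *   // ... send the UCI command here ...
 *   if (phNxpUciHal_sem_timed_wait_msec(&cb_data, 500) == -1) {
 *     // timed out (cb_data.status == UWBSTATUS_RESPONSE_TIMEOUT) or wait failed
 *   }
 *   tHAL_UWB_STATUS status = cb_data.status;
 *   phNxpUciHal_cleanup_cb_data(&cb_data);
 *   return status;
 */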

/* END Semaphore and mutex helper functions */

/**************************** Other functions *********************************/

/*******************************************************************************
**
** Function         phNxpUciHal_print_packet
**
** Description      Print packet
**
** Returns          None
**
*******************************************************************************/
void phNxpUciHal_print_packet(enum phNxpUciHal_Pkt_Type what,
                              const uint8_t* p_data, uint16_t len) {
  /* Nothing to do unless UCI packet logging is enabled */
  if (gLog_level.ucix_log_level < NXPLOG_LOG_DEBUG_LOGLEVEL) {
    return;
  }

  uint32_t i;
  char print_buffer[len * 3 + 1];

  memset(print_buffer, 0, sizeof(print_buffer));
  for (i = 0; i < len; i++) {
    snprintf(&print_buffer[i * 2], 3, "%02X", p_data[i]);
  }
  switch (what) {
    case NXP_TML_UCI_CMD_AP_2_UWBS:
      NXPLOG_UCIX_D("len = %3d > %s", len, print_buffer);
      break;
    case NXP_TML_UCI_RSP_NTF_UWBS_2_AP:
      NXPLOG_UCIR_D("len = %3d < %s", len, print_buffer);
      break;
    case NXP_TML_FW_DNLD_CMD_AP_2_UWBS:
      // TODO: Should be NXPLOG_FWDNLD_D
      NXPLOG_UCIX_D("len = %3d > (FW)%s", len, print_buffer);
      break;
    case NXP_TML_FW_DNLD_RSP_UWBS_2_AP:
      // TODO: Should be NXPLOG_FWDNLD_D
      NXPLOG_UCIR_D("len = %3d < (FW)%s", len, print_buffer);
      break;
  }

  phNxpUciHalProp_print_log(what, p_data, len);
}
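
/*
 * Example output (illustrative bytes): for a 4-byte command 0x20 0x04 0x00 0x00
 * sent AP -> UWBS with UCI logging enabled, the line logged above is:
 *
 *   len =   4 > 20040000
 */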

/*******************************************************************************
**
** Function         phNxpUciHal_emergency_recovery
**
** Description      Emergency recovery in case of no other way out
**
** Returns          None
**
*******************************************************************************/

void phNxpUciHal_emergency_recovery(void) {
  NXPLOG_UCIHAL_E("%s: abort()", __func__);
  abort();
}

/*******************************************************************************
**
** Function         phNxpUciHal_byteArrayToDouble
**
** Description      Convert byte array to double
**
** Returns          double
**
*******************************************************************************/
double phNxpUciHal_byteArrayToDouble(const uint8_t* p_data) {
  double d;
  const int size_d = sizeof(d);
  uint8_t ptr[size_d], ptr_1[size_d];
  memcpy(ptr, p_data, size_d);
  /* Reverse the byte order before reinterpreting the bits as a double */
  for (int i = 0; i < size_d; i++) {
    ptr_1[i] = ptr[size_d - 1 - i];
  }
  memcpy(&d, ptr_1, sizeof(d));
  return d;
}
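
/*
 * Worked example (illustrative): on a little-endian host the byte reversal in
 * phNxpUciHal_byteArrayToDouble() interprets the input as a big-endian IEEE-754
 * double. The bit pattern 0x3FF0000000000000 encodes 1.0:
 *
 *   const uint8_t raw[8] = {0x3F, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
 *   double v = phNxpUciHal_byteArrayToDouble(raw);  // v == 1.0 on little-endian
 */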

std::map<uint16_t, std::vector<uint8_t>>
decodeTlvBytes(const std::vector<uint8_t> &ext_ids, const uint8_t *tlv_bytes, size_t tlv_len)
{
  std::map<uint16_t, std::vector<uint8_t>> ret;

  size_t i = 0;
  while ((i + 1) < tlv_len) {
    uint16_t tag;
    uint8_t len;

    uint8_t byte0 = tlv_bytes[i++];
    uint8_t byte1 = tlv_bytes[i++];
    if (std::find(ext_ids.begin(), ext_ids.end(), byte0) != ext_ids.end()) {
      if (i >= tlv_len) {
        NXPLOG_UCIHAL_E("Failed to decode TLV bytes (offset=%zu).", i);
        break;
      }
      tag = (byte0 << 8) | byte1; // 2-byte tag, big endian
      len = tlv_bytes[i++];
    } else {
      tag = byte0;
      len = byte1;
    }
    if ((i + len) > tlv_len) {
      NXPLOG_UCIHAL_E("Failed to decode TLV bytes (offset=%zu).", i);
      break;
    }
    ret[tag] = std::vector(&tlv_bytes[i], &tlv_bytes[i + len]);
    i += len;
  }

  return ret;
}
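
/*
 * Worked example (illustrative): with ext_ids = {0xE3} (hypothetical extended
 * tag prefix), the stream
 *
 *   0x01 0x02 0xAA 0xBB   0xE3 0x01 0x01 0xCC
 *
 * decodes to { 0x0001 -> {0xAA, 0xBB}, 0xE301 -> {0xCC} }: the first TLV uses a
 * one-byte tag and a two-byte value, the second uses the two-byte tag 0xE301
 * followed by its own length byte.
 */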

std::vector<uint8_t> encodeTlvBytes(const std::map<uint16_t, std::vector<uint8_t>> &tlvs)
{
  std::vector<uint8_t> bytes;

  for (auto const & [tag, val] : tlvs) {
    // Tag
    if (tag > 0xff) {
      bytes.push_back(tag >> 8);
    }
    bytes.push_back(tag & 0xff);

    // Length
    bytes.push_back(val.size());

    // Value
    bytes.insert(bytes.end(), val.begin(), val.end());
  }

  return bytes;
}
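
/*
 * Worked example (illustrative): encodeTlvBytes() is the inverse of
 * decodeTlvBytes() for the map shown above:
 *
 *   std::map<uint16_t, std::vector<uint8_t>> tlvs = {
 *     {0x0001, {0xAA, 0xBB}},
 *     {0xE301, {0xCC}},
 *   };
 *   std::vector<uint8_t> out = encodeTlvBytes(tlvs);
 *   // out == {0x01, 0x02, 0xAA, 0xBB, 0xE3, 0x01, 0x01, 0xCC}
 */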