/*
 * Copyright (c) 2022-2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * @addtogroup DriverHdi
 * @{
 *
 * @brief Provides APIs for a system ability to obtain hardware device interface (HDI) services,
 * load or unload a device, and listen for service status. It also provides capabilities for the
 * hdi-gen tool to automatically generate code from interface description language (IDL) files.
 *
 * The generated HDF and IDL code allows the system ability to access HDI driver services.
 *
 * @since 1.0
 */

/**
 * @file hdi_smq.h
 *
 * @brief Provides APIs for the shared memory queue (SMQ).
 * The SMQ is a common mechanism for inter-process communication. The SMQ must comply with the IDL syntax.
 * You only need to define the SMQ struct in IDL for the service module.
 * The HDI module provides common operations for reading and writing the SMQ.
 *
 * @since 1.0
 */

#ifndef HDI_SHARED_MEM_QUEUE_INF_H
#define HDI_SHARED_MEM_QUEUE_INF_H

#include <ashmem.h>
#include <atomic>
#include <cerrno>
#include <datetime_ex.h>
#include <hdf_base.h>
#include <hdf_log.h>
#include <base/hdi_smq_meta.h>
#include <base/hdi_smq_syncer.h>
#include <memory>
#include <securec.h>
#include <cstdint>
#include <cstring>
#include <sys/mman.h>

#ifndef PAGE_SIZE
#define PAGE_SIZE 4096
#endif

#ifndef HDF_LOG_TAG
#define HDF_LOG_TAG smq
#endif

namespace OHOS {
namespace HDI {
namespace Base {
/**
 * @brief Defines the <b>SharedMemQueue</b> class.
 *
 * The SMQ is a message queue used for simplex communication between processes.
 * It allows data write from one end and read at the other end, in either blocking or non-blocking mode.
 */
template <typename T>
class SharedMemQueue {
public:
    /**
     * @brief A constructor used to create a <b>SharedMemQueue</b> object.
     *
     * @param elementCount Indicates the queue size, that is, the maximum number of elements allowed in the queue.
     * @param type Indicates whether the SMQ is synchronous (<b>SYNCED_SMQ</b>) or asynchronous (<b>UNSYNC_SMQ</b>).
     */
    SharedMemQueue(uint32_t elementCount, SmqType type);

    /**
     * @brief A constructor used to create a <b>SharedMemQueue</b> object from existing queue metadata.
     */
    explicit SharedMemQueue(const SharedMemQueueMeta<T> &meta);
    ~SharedMemQueue();

    /**
     * @brief Writes an array of elements to the SMQ in blocking mode.
     *
     * When the SMQ is full, this API is blocked until the queue is writeable.
     *
     * @param data Indicates the pointer to the array of elements to write.
     * @param count Indicates the number of elements to write.
     * @return Returns <b>0</b> if the operation is successful; returns a non-zero value otherwise.
     */
    int Write(const T *data, size_t count);

    /**
     * @brief Reads an array of elements from the SMQ in blocking mode.
     *
     * When the SMQ is empty, this API is blocked until the queue is readable.
     *
     * @param data Indicates the pointer to the buffer for storing the elements read.
     * @param count Indicates the number of elements to read.
     * @return Returns <b>0</b> if the operation is successful; returns a non-zero value otherwise.
     */
    int Read(T *data, size_t count);

    /**
     * @brief Writes a single element to the SMQ in blocking mode.
     *
     * When the SMQ is full, this API is blocked until the queue is writeable.
     *
     * @param data Indicates the pointer to the single element to write.
     * @return Returns <b>0</b> if the operation is successful; returns a non-zero value otherwise.
     */
    int Write(const T *data);

    /**
     * @brief Reads a single element from the SMQ in blocking mode.
     *
     * When the SMQ is empty, this API is blocked until the queue is readable.
     *
     * @param data Indicates the pointer to the buffer for storing the single element read.
     * @return Returns <b>0</b> if the operation is successful; returns a non-zero value otherwise.
     */
    int Read(T *data);

    /**
     * @brief Writes a fixed number of elements to the SMQ in blocking mode.
     *
     * When the SMQ is full, this API is blocked until the queue is writeable or the write operation times out.
     *
     * @param data Indicates the pointer to the array of elements to write.
     * @param count Indicates the number of elements to write.
     * @param waitTimeNanoSec Indicates the write operation timeout period, in nanoseconds.
     * @return Returns <b>0</b> if the operation is successful; returns a non-zero value otherwise.
     */
    int Write(const T *data, size_t count, int64_t waitTimeNanoSec);

    /**
     * @brief Reads a fixed number of elements from the SMQ in blocking mode.
     *
     * When the number of elements in the SMQ is less than the number of elements to read,
     * this API is blocked until the queue is readable or the read operation times out.
     *
     * @param data Indicates the pointer to the buffer for storing the elements read.
     * @param count Indicates the number of elements to read.
     * @param waitTimeNanoSec Indicates the read operation timeout period, in nanoseconds.
     * @return Returns <b>0</b> if the operation is successful; returns a non-zero value otherwise.
     */
    int Read(T *data, size_t count, int64_t waitTimeNanoSec);

    /**
     * @brief Writes a single element to the SMQ in non-blocking mode.
     *
     * When the SMQ is full, the write overflows and the oldest unread element is overwritten.
     *
     * @param data Indicates the pointer to the single element to write.
     * @return Returns <b>0</b> if the operation is successful; returns a non-zero value otherwise.
     */
    int WriteNonBlocking(const T *data);

    /**
     * @brief Reads a single element from the SMQ in non-blocking mode.
     *
     * @param data Indicates the pointer to the buffer for storing the element read.
     * @return Returns <b>0</b> if the operation is successful; returns a non-zero value otherwise.
     */
    int ReadNonBlocking(T *data);

    /**
     * @brief Writes a fixed number of elements to the SMQ in non-blocking mode.
     *
     * When the SMQ is full, the write overflows and the oldest unread elements are overwritten.
     *
     * @param data Indicates the pointer to the elements to write.
     * @param count Indicates the number of elements to write.
     * @return Returns <b>0</b> if the operation is successful; returns a non-zero value otherwise.
     */
    int WriteNonBlocking(const T *data, size_t count);

    /**
     * @brief Reads a fixed number of elements from the SMQ in non-blocking mode.
     *
     * If the SMQ does not contain enough elements to read, a failure is returned immediately.
     *
     * @param data Indicates the pointer to the buffer for storing the data read.
     * The number of elements the buffer can hold must be no less than the number of elements to read.
     * @param count Indicates the number of elements to read.
     * @return Returns <b>0</b> if the operation is successful; returns a non-zero value otherwise.
     */
    int ReadNonBlocking(T *data, size_t count);

    /**
     * @brief Obtains the number of elements that can be written to the SMQ.
     *
     * @return Returns the number of elements that can be written to the SMQ.
     */
    size_t GetAvalidWriteSize();

    /**
     * @brief Obtains the number of elements that can be read from the SMQ.
     *
     * @return Returns the number of elements that can be read from the SMQ.
     */
    size_t GetAvalidReadSize();

    /**
     * @brief Obtains the size of the SMQ, in bytes.
     *
     * @return Returns the number of bytes occupied by the SMQ.
     */
    size_t GetSize();

    /**
     * @brief Obtains the metadata object.
     *
     * @return Returns the metadata object obtained.
     */
    std::shared_ptr<SharedMemQueueMeta<T>> GetMeta();

    /**
     * @brief Checks whether the SMQ object is valid.
     *
     * @return Returns <b>true</b> if the object is valid; returns <b>false</b> otherwise.
     */
    bool IsGood();

    /**
     * @brief Obtains the current timestamp, in nanoseconds.
     *
     * @return Returns the timestamp obtained.
     */
    static inline int64_t GetNanoTime()
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (ts.tv_sec * SEC_TO_NANOSEC + ts.tv_nsec);
    }

private:
    void Init(bool resetWriteOffset);
    uintptr_t MapMemZone(uint32_t zoneType);
    void UnMapMemZone(void *addr, uint32_t zoneType);
    size_t Align(size_t num, size_t alignSize);

    int32_t status = HDF_FAILURE;
    uint8_t *queueBuffer_ = nullptr;
    std::atomic<uint64_t> *readOffset_ = nullptr;
    std::atomic<uint64_t> *writeOffset_ = nullptr;
    std::atomic<uint32_t> *syncerPtr_ = nullptr;
    std::unique_ptr<SharedMemQueueSyncer> syncer_ = nullptr;
    std::shared_ptr<SharedMemQueueMeta<T>> meta_ = nullptr;
};
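
/*
 * Illustrative usage sketch (not part of the API). It assumes the element type uint32_t; in real deployments the
 * queue metadata is transferred to the peer process through the service's own HDI interface, and both ends are
 * shown here in one place only to demonstrate the call sequence.
 *
 *     // Writer side: create a synchronized queue that holds 128 elements.
 *     auto writer = std::make_shared<SharedMemQueue<uint32_t>>(128, SmqType::SYNCED_SMQ);
 *     if (writer == nullptr || !writer->IsGood()) {
 *         // handle creation failure
 *     }
 *     uint32_t value = 42;
 *     writer->Write(&value);                 // blocks until the element is queued
 *
 *     // Reader side: reconstruct the queue from the shared metadata.
 *     SharedMemQueue<uint32_t> reader(*writer->GetMeta());
 *     uint32_t out = 0;
 *     reader.Read(&out);                     // blocks until an element is available
 */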

template <typename T>
SharedMemQueue<T>::SharedMemQueue(uint32_t elementCount, SmqType type)
{
    if (elementCount == 0 || elementCount > UINT16_MAX) {
        HDF_LOGE("invalid elementCount for smq:%{public}u", elementCount);
        return;
    }

    meta_ = std::make_shared<SharedMemQueueMeta<T>>(elementCount, type);
    HDF_LOGI("create SharedMemQueue, count=%{public}u, size=%{public}zu", elementCount, meta_->GetSize());
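    // Back the queue with an anonymous shared memory (ashmem) region, rounded up to a whole number of pages.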
    int ashmemFd = AshmemCreate("hdi_smq", Align(meta_->GetSize(), PAGE_SIZE));
    if (ashmemFd < 0) {
        HDF_LOGE("failed to create ashmem");
        return;
    }
    meta_->SetFd(ashmemFd);
    Init(true);
}

template <typename T>
SharedMemQueue<T>::SharedMemQueue(const SharedMemQueueMeta<T> &meta)
{
    meta_ = std::make_shared<SharedMemQueueMeta<T>>(meta);
    Init(false);
}

template <typename T>
SharedMemQueue<T>::~SharedMemQueue()
{
    if (meta_ != nullptr && meta_->GetType() == SYNCED_SMQ && readOffset_ != nullptr) {
        UnMapMemZone(readOffset_, SharedMemQueueMeta<T>::MemZoneType::MEMZONE_RPTR);
    } else {
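        // For an unsynced SMQ the read offset is heap-allocated in Init(), so delete it instead of unmapping.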
        delete readOffset_;
        readOffset_ = nullptr;
    }

    if (writeOffset_ != nullptr) {
        UnMapMemZone(writeOffset_, SharedMemQueueMeta<T>::MEMZONE_WPTR);
    }

    if (syncerPtr_ != nullptr) {
        UnMapMemZone(syncerPtr_, SharedMemQueueMeta<T>::MEMZONE_SYNCER);
    }

    if (queueBuffer_ != nullptr) {
        UnMapMemZone(queueBuffer_, SharedMemQueueMeta<T>::MEMZONE_DATA);
    }
}

template <typename T>
void SharedMemQueue<T>::Init(bool resetWriteOffset)
{
    if (meta_ == nullptr) {
        HDF_LOGE("invalid smq meta for init");
        return;
    }

    if (meta_->GetType() == SYNCED_SMQ) {
        readOffset_ = reinterpret_cast<std::atomic<uint64_t> *>(MapMemZone(SharedMemQueueMeta<T>::MEMZONE_RPTR));
    } else {
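        // An unsynced SMQ keeps its read offset in process-local memory rather than mapping it from shared memory.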
        readOffset_ = new std::atomic<uint64_t>;
    }

    if (readOffset_ == nullptr) {
        HDF_LOGE("failed to map read offset");
        return;
    }

    writeOffset_ = reinterpret_cast<std::atomic<uint64_t> *>(MapMemZone(SharedMemQueueMeta<T>::MEMZONE_WPTR));
    if (writeOffset_ == nullptr) {
        HDF_LOGE("failed to map write offset");
        return;
    }

    syncerPtr_ = reinterpret_cast<std::atomic<uint32_t> *>(MapMemZone(SharedMemQueueMeta<T>::MEMZONE_SYNCER));
    if (syncerPtr_ == nullptr) {
        HDF_LOGE("failed to map sync ptr");
        return;
    }

    queueBuffer_ = reinterpret_cast<uint8_t *>(MapMemZone(SharedMemQueueMeta<T>::MEMZONE_DATA));
    if (queueBuffer_ == nullptr) {
        HDF_LOGE("failed to map queue buffer");
        return;
    }

    syncer_ = std::make_unique<SharedMemQueueSyncer>(syncerPtr_);

    if (resetWriteOffset) {
        writeOffset_->store(0, std::memory_order_release);
    }
    readOffset_->store(0, std::memory_order_release);
    HDF_LOGI("smq init succ");
    status = HDF_SUCCESS;
}

template <typename T>
uintptr_t SharedMemQueue<T>::MapMemZone(uint32_t zoneType)
{
    auto memzone = meta_->GetMemZone(zoneType);
    if (memzone == nullptr) {
        HDF_LOGE("invalid smq mem zone type %{public}u", zoneType);
        return reinterpret_cast<uintptr_t>(nullptr);
    }

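    // mmap() requires a page-aligned file offset, so round the zone offset down to a page boundary and map enough
    // extra bytes to cover the zone; the in-page remainder is added back to the returned address below.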
    int offset = (static_cast<int>(memzone->offset) / PAGE_SIZE) * PAGE_SIZE;
    int length = static_cast<int>(memzone->offset) - offset + static_cast<int>(memzone->size);

    void *ptr = mmap(0, length, PROT_READ | PROT_WRITE, MAP_SHARED, meta_->GetFd(), offset);
    if (ptr == MAP_FAILED) {
        HDF_LOGE(
            "failed to map memzone %{public}u, size %{public}u, offset %{public}u, fd %{public}d, errno=%{public}d",
            zoneType, length, offset, meta_->GetFd(), errno);
        return reinterpret_cast<uintptr_t>(nullptr);
    }
    return (reinterpret_cast<uintptr_t>(ptr) + (static_cast<int>(memzone->offset) - offset));
}

template <typename T>
void SharedMemQueue<T>::UnMapMemZone(void *addr, uint32_t zoneType)
{
    auto memzone = meta_->GetMemZone(zoneType);
    if (memzone == nullptr) {
        return;
    }
    int offset = (static_cast<int>(memzone->offset) / PAGE_SIZE) * PAGE_SIZE;
    int length = static_cast<int>(memzone->offset) - offset + static_cast<int>(memzone->size);
    uint8_t *ptr = reinterpret_cast<uint8_t *>(addr) - (static_cast<int>(memzone->offset) - offset);
    if (ptr == nullptr) {
        return;
    }
    munmap(ptr, length);
}

template <typename T>
bool SharedMemQueue<T>::IsGood()
{
    return status == HDF_SUCCESS;
}

template <typename T>
size_t SharedMemQueue<T>::Align(size_t num, size_t alignSize)
{
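    // Round num up to the nearest multiple of alignSize; alignSize is expected to be a power of two (such as PAGE_SIZE).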
    return (num + alignSize - 1) & ~(alignSize - 1);
}

template <typename T>
int SharedMemQueue<T>::Write(const T *data, size_t count)
{
    return Write(data, count, 0);
}

template <typename T>
int SharedMemQueue<T>::Read(T *data, size_t count)
{
    return Read(data, count, 0);
}

template <typename T>
int SharedMemQueue<T>::Write(const T *data)
{
    return Write(data, 1, 0);
}

template <typename T>
int SharedMemQueue<T>::Read(T *data)
{
    return Read(data, 1, 0);
}

template <typename T>
int SharedMemQueue<T>::Write(const T *data, size_t count, int64_t waitTimeNanoSec)
{
    if (meta_->GetType() != SmqType::SYNCED_SMQ) {
        HDF_LOGE("unsynced smq does not support blocking write");
        return HDF_ERR_NOT_SUPPORT;
    }

    if (WriteNonBlocking(data, count) == 0) {
        return syncer_->Wake(SharedMemQueueSyncer::SYNC_WORD_READ);
    }

    int ret = 0;
    auto startTime = GetNanoTime();
    int64_t currentTime = startTime;
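    // A zero waitTimeNanoSec waits until the queue becomes writable; otherwise the remaining timeout is recomputed
    // on each pass and a final non-blocking write is attempted once it expires.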
    while (true) {
        if (waitTimeNanoSec != 0) {
            currentTime = GetNanoTime();
            waitTimeNanoSec -= (currentTime - startTime);
            startTime = currentTime;
            if (waitTimeNanoSec <= 0) {
                ret = WriteNonBlocking(data, count);
                break;
            }
        }
        ret = syncer_->Wait(SharedMemQueueSyncer::SYNC_WORD_WRITE, waitTimeNanoSec);
        if (ret != 0 && ret != -ETIMEDOUT) {
            break;
        }

        ret = WriteNonBlocking(data, count);
        if (ret == 0) {
            break;
        }
        HDF_LOGE("failed to write %{public}zu, retry", count);
    }

    if (ret == 0) {
        ret = syncer_->Wake(SharedMemQueueSyncer::SYNC_WORD_READ);
    } else {
        HDF_LOGE("failed to write %{public}zu, ret=%{public}d", count, ret);
    }

    return ret;
}

template <typename T>
int SharedMemQueue<T>::Read(T *data, size_t count, int64_t waitTimeNanoSec)
{
    if (meta_->GetType() != SmqType::SYNCED_SMQ) {
        HDF_LOGE("unsynced smq does not support blocking read");
        return HDF_ERR_NOT_SUPPORT;
    }

    if (ReadNonBlocking(data, count) == 0) {
        return syncer_->Wake(SharedMemQueueSyncer::SYNC_WORD_WRITE);
    }

    int ret = -ENODATA;
    auto startTime = GetNanoTime();
    int64_t currentTime;
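    // Same wait strategy as the blocking Write(): a zero waitTimeNanoSec waits until data is available, a non-zero
    // value is treated as the remaining timeout budget.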
    while (true) {
        if (waitTimeNanoSec != 0) {
            currentTime = GetNanoTime();
            waitTimeNanoSec -= (currentTime - startTime);
            startTime = currentTime;
            if (waitTimeNanoSec <= 0) {
                ret = ReadNonBlocking(data, count);
                break;
            }
        }
        ret = syncer_->Wait(SharedMemQueueSyncer::SYNC_WORD_READ, waitTimeNanoSec);
        if (ret != 0 && ret != -ETIMEDOUT) {
            break;
        }

        ret = ReadNonBlocking(data, count);
        if (ret == 0) {
            break;
        }
    }
    if (ret == 0) {
        ret = syncer_->Wake(SharedMemQueueSyncer::SYNC_WORD_WRITE);
    } else {
        HDF_LOGE("failed to read %{public}zu, ret=%{public}d", count, ret);
    }

    return ret;
}

template <typename T>
int SharedMemQueue<T>::WriteNonBlocking(const T *data)
{
    return WriteNonBlocking(data, 1);
}

template <typename T>
int SharedMemQueue<T>::ReadNonBlocking(T *data)
{
    return ReadNonBlocking(data, 1);
}

template <typename T>
int SharedMemQueue<T>::WriteNonBlocking(const T *data, size_t count)
{
    auto avalidWrite = GetAvalidWriteSize();
    if (count >= avalidWrite && meta_->GetType() == SmqType::SYNCED_SMQ) {
        // synced smq can not overflow write
        return -E2BIG;
    }

    auto wOffset = writeOffset_->load(std::memory_order_acquire);
    auto rOffset = readOffset_->load(std::memory_order_acquire);
    uint64_t newWriteOffset;
    auto qCount = meta_->GetElementCount();
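    // If the data fits between the write offset and the end of the buffer, copy it in one pass;
    // otherwise split the copy around the ring-buffer wrap point.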
    if (wOffset + count <= qCount) {
        if (memcpy_s(queueBuffer_ + (wOffset * sizeof(T)), (qCount - wOffset) * sizeof(T),
            data, count * sizeof(T)) != EOK) {
            return HDF_FAILURE;
        }
        newWriteOffset = (wOffset + count) % qCount;
    } else {
        size_t firstPartSize = qCount - wOffset;
        size_t secPartSize = count - firstPartSize;
        if (memcpy_s(queueBuffer_ + (wOffset * sizeof(T)), (qCount - wOffset) * sizeof(T),
            data, firstPartSize * sizeof(T)) != EOK) {
            return HDF_FAILURE;
        }
        if (memcpy_s(queueBuffer_, qCount * sizeof(T), data + firstPartSize, secPartSize * sizeof(T)) != EOK) {
            return HDF_FAILURE;
        }
        newWriteOffset = secPartSize;
    }

    writeOffset_->store(newWriteOffset, std::memory_order_release);
    if (wOffset < rOffset && newWriteOffset >= rOffset) {
        HDF_LOGW("warning: smq ring buffer overflow");
    }
    return 0;
}

template <typename T>
int SharedMemQueue<T>::ReadNonBlocking(T *data, size_t count)
{
    if (count == 0) {
        return -EINVAL;
    }

    if (count > GetAvalidReadSize()) {
        return -ENODATA;
    }

    auto qCount = meta_->GetElementCount();
    auto rOffset = readOffset_->load(std::memory_order_acquire);
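    // Elements contiguous up to the end of the buffer are copied in one pass; otherwise the read is split
    // around the ring-buffer wrap point.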
    if (rOffset + count <= qCount) {
        if (memcpy_s(data, count * sizeof(T), queueBuffer_ + (rOffset * sizeof(T)), count * sizeof(T)) != EOK) {
            return HDF_FAILURE;
        }
        readOffset_->store((rOffset + count) % qCount, std::memory_order_release);
        return 0;
    }

    size_t firstPartSize = qCount - rOffset;
    size_t secPartSize = count - firstPartSize;

    if (memcpy_s(data, count * sizeof(T), queueBuffer_ + (rOffset * sizeof(T)), firstPartSize * sizeof(T)) != EOK) {
        return HDF_FAILURE;
    }
    if (memcpy_s(data + firstPartSize, (count - firstPartSize) * sizeof(T),
        queueBuffer_, secPartSize * sizeof(T)) != EOK) {
        return HDF_FAILURE;
    }
    readOffset_->store(secPartSize, std::memory_order_release);

    return 0;
}

template <typename T>
size_t SharedMemQueue<T>::GetAvalidWriteSize()
{
    return meta_->GetElementCount() - GetAvalidReadSize();
}

template <typename T>
size_t SharedMemQueue<T>::GetAvalidReadSize()
{
    auto wOffset = writeOffset_->load(std::memory_order_acquire);
    auto rOffset = readOffset_->load(std::memory_order_acquire);
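    // The readable element count is the ring-buffer distance from the read offset to the write offset,
    // accounting for wrap-around.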
    auto size = wOffset >= rOffset ? (wOffset - rOffset) : (wOffset + meta_->GetElementCount() - rOffset);
    return size;
}

template <typename T>
size_t SharedMemQueue<T>::GetSize()
{
    return meta_->GetSize();
}

template <typename T>
std::shared_ptr<SharedMemQueueMeta<T>> SharedMemQueue<T>::GetMeta()
{
    return meta_;
}
} // namespace Base
} // namespace HDI
} // namespace OHOS

#ifdef HDF_LOG_TAG
#undef HDF_LOG_TAG
#endif
#endif /* HDI_SHARED_MEM_QUEUE_INF_H */