/*
 * Copyright (c) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "buffer_queue.h"
#include <algorithm>
#include <fstream>
#include <sstream>
#include <sys/time.h>
#include <cinttypes>
#include <unistd.h>
#include <scoped_bytrace.h>

#include "buffer_utils.h"
#include "buffer_log.h"
#include "buffer_manager.h"
#include "hitrace_meter.h"
#include "sandbox_utils.h"
#include "surface_buffer_impl.h"
#include "sync_fence.h"
#include "sync_fence_tracker.h"

namespace OHOS {
namespace {
constexpr uint32_t UNIQUE_ID_OFFSET = 32;
constexpr uint32_t BUFFER_MEMSIZE_RATE = 1024;
constexpr uint32_t BUFFER_MEMSIZE_FORMAT = 2;
}

static const std::map<BufferState, std::string> BufferStateStrs = {
    {BUFFER_STATE_RELEASED,                    "0 <released>"},
    {BUFFER_STATE_REQUESTED,                   "1 <requested>"},
    {BUFFER_STATE_FLUSHED,                     "2 <flushed>"},
    {BUFFER_STATE_ACQUIRED,                    "3 <acquired>"},
};

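// Builds a process-unique queue ID: the real PID in the upper 32 bits
// (UNIQUE_ID_OFFSET) combined with a monotonically increasing per-process counter.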
static uint64_t GetUniqueIdImpl()
{
    static std::atomic<uint32_t> counter { 0 };
    static uint64_t id = static_cast<uint64_t>(GetRealPid()) << UNIQUE_ID_OFFSET;
    return id | counter++;
}

static bool IsLocalRender()
{
    return GetRealPid() == gettid();
}

BufferQueue::BufferQueue(const std::string &name, bool isShared)
    : name_(name), uniqueId_(GetUniqueIdImpl()), isShared_(isShared), isLocalRender_(IsLocalRender())
{
    BLOGNI("ctor, Queue id: %{public}" PRIu64 " isShared: %{public}d", uniqueId_, isShared);
    bufferManager_ = BufferManager::GetInstance();
    if (isShared_ == true) {
        queueSize_ = 1;
    }
}

BufferQueue::~BufferQueue()
{
    BLOGNI("dtor, Queue id: %{public}" PRIu64, uniqueId_);
    for (auto &[id, _] : bufferQueueCache_) {
        if (onBufferDeleteForRSMainThread_ != nullptr) {
            onBufferDeleteForRSMainThread_(id);
        }
        if (onBufferDeleteForRSHardwareThread_ != nullptr) {
            onBufferDeleteForRSHardwareThread_(id);
        }
    }
}

GSError BufferQueue::Init()
{
    return GSERROR_OK;
}

uint32_t BufferQueue::GetUsedSize()
{
    uint32_t used_size = bufferQueueCache_.size();
    return used_size;
}

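// Takes a reusable buffer from the free list, preferring one whose cached config
// matches the request; in shared mode the single cached buffer is always returned.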
GSError BufferQueue::PopFromFreeList(sptr<SurfaceBuffer> &buffer,
    const BufferRequestConfig &config)
{
    if (isShared_ == true && GetUsedSize() > 0) {
        buffer = bufferQueueCache_.begin()->second.buffer;
        return GSERROR_OK;
    }

    for (auto it = freeList_.begin(); it != freeList_.end(); it++) {
        if (bufferQueueCache_[*it].config == config) {
            buffer = bufferQueueCache_[*it].buffer;
            freeList_.erase(it);
            return GSERROR_OK;
        }
    }

    if (freeList_.empty()) {
        buffer = nullptr;
        return GSERROR_NO_BUFFER;
    }

    buffer = bufferQueueCache_[freeList_.front()].buffer;
    freeList_.pop_front();
    return GSERROR_OK;
}

GSError BufferQueue::PopFromDirtyList(sptr<SurfaceBuffer> &buffer)
{
    if (isShared_ == true && GetUsedSize() > 0) {
        buffer = bufferQueueCache_.begin()->second.buffer;
        return GSERROR_OK;
    }

    if (!dirtyList_.empty()) {
        buffer = bufferQueueCache_[dirtyList_.front()].buffer;
        dirtyList_.pop_front();
        return GSERROR_OK;
    } else {
        buffer = nullptr;
        return GSERROR_NO_BUFFER;
    }
}

GSError BufferQueue::CheckRequestConfig(const BufferRequestConfig &config)
{
    uint32_t align = config.strideAlignment;
    bool isValidStrideAlignment = true;
    isValidStrideAlignment = isValidStrideAlignment && (SURFACE_MIN_STRIDE_ALIGNMENT <= align);
    isValidStrideAlignment = isValidStrideAlignment && (SURFACE_MAX_STRIDE_ALIGNMENT >= align);
    if (!isValidStrideAlignment) {
        BLOGN_INVALID("config.strideAlignment [%{public}d, %{public}d], now is %{public}d",
                      SURFACE_MIN_STRIDE_ALIGNMENT, SURFACE_MAX_STRIDE_ALIGNMENT, align);
        return GSERROR_INVALID_ARGUMENTS;
    }

    if (align & (align - 1)) {
        BLOGN_INVALID("config.strideAlignment is not power of 2 like 4, 8, 16, 32; now is %{public}d", align);
        return GSERROR_INVALID_ARGUMENTS;
    }

    if (config.colorGamut <= GraphicColorGamut::GRAPHIC_COLOR_GAMUT_INVALID ||
        config.colorGamut > GraphicColorGamut::GRAPHIC_COLOR_GAMUT_DISPLAY_BT2020 + 1) {
        BLOGN_INVALID("config.colorGamut [0, %{public}d], now is %{public}d",
            static_cast<uint32_t>(GraphicColorGamut::GRAPHIC_COLOR_GAMUT_DISPLAY_BT2020),
            static_cast<uint32_t>(config.colorGamut));
        return GSERROR_INVALID_ARGUMENTS;
    }

    if (config.transform < GraphicTransformType::GRAPHIC_ROTATE_NONE ||
        config.transform >= GraphicTransformType::GRAPHIC_ROTATE_BUTT) {
        BLOGN_INVALID("config.transform [0, %{public}d), now is %{public}d",
            GraphicTransformType::GRAPHIC_ROTATE_BUTT, config.transform);
        return GSERROR_INVALID_ARGUMENTS;
    }
    return GSERROR_OK;
}

GSError BufferQueue::CheckFlushConfig(const BufferFlushConfigWithDamages &config)
{
    for (decltype(config.damages.size()) i = 0; i < config.damages.size(); i++) {
        if (config.damages[i].w < 0 || config.damages[i].h < 0) {
            BLOGN_INVALID("config.damages width and height should >= 0, "
                "now damages[%{public}zu].w is %{public}d, .h is %{public}d, ",
                i, config.damages[i].w, config.damages[i].h);
            return GSERROR_INVALID_ARGUMENTS;
        }
    }
    return GSERROR_OK;
}

bool BufferQueue::QueryIfBufferAvailable()
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    bool ret = !freeList_.empty() || (GetUsedSize() < GetQueueSize());
    return ret;
}

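// Producer entry point: fails fast when no consumer listener is registered,
// validates the request config, then reuses a free buffer if possible; otherwise
// waits up to config.timeout for one to be released, or allocates a new buffer
// while the queue is below its size limit.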
GSError BufferQueue::RequestBuffer(const BufferRequestConfig &config, sptr<BufferExtraData> &bedata,
    struct IBufferProducer::RequestBufferReturnValue &retval)
{
    ScopedBytrace func(__func__);
    if (!GetStatus()) {
        BLOGN_FAILURE_RET(GSERROR_NO_CONSUMER);
    }
    {
        std::lock_guard<std::mutex> lockGuard(listenerMutex_);
        if (listener_ == nullptr && listenerClazz_ == nullptr) {
            BLOGN_FAILURE_RET(GSERROR_NO_CONSUMER);
        }
    }

    // check param
    GSError ret = CheckRequestConfig(config);
    if (ret != GSERROR_OK) {
        BLOGN_FAILURE_API(CheckRequestConfig, ret);
        return ret;
    }

    std::unique_lock<std::mutex> lock(mutex_);
    // dequeue from free list
    sptr<SurfaceBuffer>& buffer = retval.buffer;
    ret = PopFromFreeList(buffer, config);
    if (ret == GSERROR_OK) {
        return ReuseBuffer(config, bedata, retval);
    }

    // check queue size
    if (GetUsedSize() >= GetQueueSize()) {
        waitReqCon_.wait_for(lock, std::chrono::milliseconds(config.timeout),
            [this]() { return !freeList_.empty() || (GetUsedSize() < GetQueueSize()) || !GetStatus(); });
        if (!GetStatus()) {
            BLOGN_FAILURE_RET(GSERROR_NO_CONSUMER);
        }
        // try dequeue from free list again
        ret = PopFromFreeList(buffer, config);
        if (ret == GSERROR_OK) {
            return ReuseBuffer(config, bedata, retval);
        } else if (GetUsedSize() >= GetQueueSize()) {
            BLOGND("all buffer are using, Queue id: %{public}" PRIu64, uniqueId_);
            return GSERROR_NO_BUFFER;
        }
    }

    ret = AllocBuffer(buffer, config);
    if (ret == GSERROR_OK) {
        retval.sequence = buffer->GetSeqNum();
        bedata = buffer->GetExtraData();
        retval.fence = SyncFence::INVALID_FENCE;
        BLOGND("Success alloc Buffer[%{public}d %{public}d] id: %{public}d id: %{public}" PRIu64, config.width,
            config.height, retval.sequence, uniqueId_);
    } else {
        BLOGNE("Fail to alloc or map Buffer[%{public}d %{public}d] ret: %{public}d, id: %{public}" PRIu64,
            config.width, config.height, ret, uniqueId_);
    }

    return ret;
}

GSError BufferQueue::SetProducerCacheCleanFlag(bool flag)
{
    std::unique_lock<std::mutex> lock(mutex_);
    return SetProducerCacheCleanFlagLocked(flag);
}

GSError BufferQueue::SetProducerCacheCleanFlagLocked(bool flag)
{
    producerCacheClean_ = flag;
    producerCacheList_.clear();
    return GSERROR_OK;
}

bool BufferQueue::CheckProducerCacheList()
{
    for (auto &[id, _] : bufferQueueCache_) {
        if (std::find(producerCacheList_.begin(), producerCacheList_.end(), id) == producerCacheList_.end()) {
            return false;
        }
    }
    return true;
}

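// Reuses a cached buffer for a new request: reallocates when the requested config
// differs from the cached one, marks the slot BUFFER_STATE_REQUESTED, and hands
// back the release fence. retval.buffer is cleared in the plain-reuse path, where
// the producer side is expected to still hold this sequence in its local cache.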
GSError BufferQueue::ReuseBuffer(const BufferRequestConfig &config, sptr<BufferExtraData> &bedata,
    struct IBufferProducer::RequestBufferReturnValue &retval)
{
    ScopedBytrace func(__func__);
    if (retval.buffer == nullptr) {
        BLOGN_FAILURE_RET(GSERROR_INVALID_ARGUMENTS);
    }
    retval.sequence = retval.buffer->GetSeqNum();
    if (bufferQueueCache_.find(retval.sequence) == bufferQueueCache_.end()) {
        BLOGN_FAILURE_RET(GSERROR_INVALID_ARGUMENTS);
    }
    bool needRealloc = (config != bufferQueueCache_[retval.sequence].config);
    // config, realloc
    if (needRealloc) {
        if (isShared_) {
            BLOGN_FAILURE_RET(GSERROR_INVALID_ARGUMENTS);
        }
        DeleteBufferInCache(retval.sequence);

        sptr<SurfaceBuffer> buffer = nullptr;
        auto sret = AllocBuffer(buffer, config);
        if (sret != GSERROR_OK) {
            BLOGN_FAILURE("realloc failed");
            return sret;
        }

        retval.buffer = buffer;
        retval.sequence = buffer->GetSeqNum();
        bufferQueueCache_[retval.sequence].config = config;
    }

    bufferQueueCache_[retval.sequence].state = BUFFER_STATE_REQUESTED;
    retval.fence = bufferQueueCache_[retval.sequence].fence;
    bedata = retval.buffer->GetExtraData();

    auto &dbs = retval.deletingBuffers;
    dbs.insert(dbs.end(), deletingList_.begin(), deletingList_.end());
    deletingList_.clear();

    if (needRealloc || isShared_ || producerCacheClean_) {
        BLOGND("RequestBuffer Succ realloc Buffer[%{public}d %{public}d] with new config "\
            "qid: %{public}d id: %{public}" PRIu64, config.width, config.height, retval.sequence, uniqueId_);
        if (producerCacheClean_) {
            producerCacheList_.push_back(retval.sequence);
            if (CheckProducerCacheList()) {
                SetProducerCacheCleanFlagLocked(false);
            }
        }
    } else {
        BLOGND("RequestBuffer Succ Buffer[%{public}d %{public}d] in seq id: %{public}d "\
            "qid: %{public}" PRIu64 " releaseFence: %{public}d",
            config.width, config.height, retval.sequence, uniqueId_, retval.fence->Get());
        retval.buffer = nullptr;
    }

    ScopedBytrace bufferName(name_ + ":" + std::to_string(retval.sequence));
    if (isLocalRender_) {
        static SyncFenceTracker releaseFenceThread("Release Fence");
        releaseFenceThread.TrackFence(retval.fence);
    }
    return GSERROR_OK;
}

GSError BufferQueue::CancelBuffer(uint32_t sequence, const sptr<BufferExtraData> &bedata)
{
    ScopedBytrace func(__func__);
    if (isShared_) {
        BLOGN_FAILURE_RET(GSERROR_INVALID_OPERATING);
    }
    std::lock_guard<std::mutex> lockGuard(mutex_);

    if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
        BLOGN_FAILURE_ID(sequence, "not found in cache");
        return GSERROR_NO_ENTRY;
    }

    if (bufferQueueCache_[sequence].state != BUFFER_STATE_REQUESTED) {
        BLOGN_FAILURE_ID(sequence, "state is not BUFFER_STATE_REQUESTED");
        return GSERROR_INVALID_OPERATING;
    }
    bufferQueueCache_[sequence].state = BUFFER_STATE_RELEASED;
    freeList_.push_back(sequence);
    bufferQueueCache_[sequence].buffer->SetExtraData(bedata);

    waitReqCon_.notify_all();
    BLOGND("Success Buffer id: %{public}d Queue id: %{public}" PRIu64, sequence, uniqueId_);

    return GSERROR_OK;
}

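// Producer queues a filled buffer: validates the flush config and buffer state,
// cancels the buffer when no consumer listener is registered, then performs the
// flush and notifies the consumer through OnBufferAvailable().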
GSError BufferQueue::FlushBuffer(uint32_t sequence, const sptr<BufferExtraData> &bedata,
    const sptr<SyncFence>& fence, const BufferFlushConfigWithDamages &config)
{
    ScopedBytrace func(__func__);
    if (!GetStatus()) {
        BLOGN_FAILURE_RET(GSERROR_NO_CONSUMER);
    }
    // check param
    auto sret = CheckFlushConfig(config);
    if (sret != GSERROR_OK) {
        BLOGN_FAILURE_API(CheckFlushConfig, sret);
        return sret;
    }

    {
        std::lock_guard<std::mutex> lockGuard(mutex_);
        if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
            BLOGN_FAILURE_ID(sequence, "not found in cache");
            return GSERROR_NO_ENTRY;
        }

        if (isShared_ == false) {
            auto &state = bufferQueueCache_[sequence].state;
            if (state != BUFFER_STATE_REQUESTED && state != BUFFER_STATE_ATTACHED) {
                BLOGN_FAILURE_ID(sequence, "invalid state %{public}d", state);
                return GSERROR_NO_ENTRY;
            }
        }
    }

    {
        std::lock_guard<std::mutex> lockGuard(listenerMutex_);
        if (listener_ == nullptr && listenerClazz_ == nullptr) {
            CancelBuffer(sequence, bedata);
            return GSERROR_NO_CONSUMER;
        }
    }

    sret = DoFlushBuffer(sequence, bedata, fence, config);
    if (sret != GSERROR_OK) {
        return sret;
    }
    CountTrace(HITRACE_TAG_GRAPHIC_AGP, name_, static_cast<int32_t>(dirtyList_.size()));
    if (sret == GSERROR_OK) {
        std::lock_guard<std::mutex> lockGuard(listenerMutex_);
        if (listener_ != nullptr) {
            listener_->OnBufferAvailable();
        } else if (listenerClazz_ != nullptr) {
            listenerClazz_->OnBufferAvailable();
        }
    }
    BLOGND("Success Buffer seq id: %{public}d Queue id: %{public}" PRIu64 " AcquireFence:%{public}d",
        sequence, uniqueId_, fence->Get());
    return sret;
}

void BufferQueue::DumpToFile(uint32_t sequence)
{
    if (access("/data/bq_dump", F_OK) == -1) {
        return;
    }

    sptr<SurfaceBuffer>& buffer = bufferQueueCache_[sequence].buffer;
    if (buffer == nullptr) {
        return;
    }

    ScopedBytrace func(__func__);
    struct timeval now;
    gettimeofday(&now, nullptr);
    constexpr int secToUsec = 1000 * 1000;
    int64_t nowVal = (int64_t)now.tv_sec * secToUsec + (int64_t)now.tv_usec;

    std::stringstream ss;
    ss << "/data/bq_" << GetRealPid() << "_" << name_ << "_" << nowVal << "_" << buffer->GetWidth()
        << "x" << buffer->GetHeight() << ".raw";
    std::ofstream rawDataFile(ss.str(), std::ofstream::binary);
    if (!rawDataFile.good()) {
        BLOGE("open failed: (%{public}d)%{public}s", errno, strerror(errno));
        return;
    }
    rawDataFile.write(static_cast<const char *>(buffer->GetVirAddr()), buffer->GetSize());
    rawDataFile.close();
}

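// Performs the actual flush under the queue mutex: moves the slot to
// BUFFER_STATE_FLUSHED, appends it to the dirty list, records fence, damages and
// timestamp, and flushes the CPU cache when the buffer has CPU-write usage.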
GSError BufferQueue::DoFlushBuffer(uint32_t sequence, const sptr<BufferExtraData> &bedata,
    const sptr<SyncFence>& fence, const BufferFlushConfigWithDamages &config)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
        BLOGN_FAILURE_ID(sequence, "not found in cache");
        return GSERROR_NO_ENTRY;
    }
    if (bufferQueueCache_[sequence].isDeleting) {
        DeleteBufferInCache(sequence);
        BLOGN_SUCCESS_ID(sequence, "delete");
        return GSERROR_OK;
    }

    bufferQueueCache_[sequence].state = BUFFER_STATE_FLUSHED;
    dirtyList_.push_back(sequence);
    bufferQueueCache_[sequence].buffer->SetExtraData(bedata);
    bufferQueueCache_[sequence].fence = fence;
    bufferQueueCache_[sequence].damages = config.damages;

    uint64_t usage = static_cast<uint32_t>(bufferQueueCache_[sequence].config.usage);
    if (usage & BUFFER_USAGE_CPU_WRITE) {
        // api flush
        auto sret = bufferQueueCache_[sequence].buffer->FlushCache();
        if (sret != GSERROR_OK) {
            BLOGN_FAILURE_ID_API(sequence, FlushCache, sret);
            return sret;
        }
    }

    bufferQueueCache_[sequence].timestamp = config.timestamp;

    if (isLocalRender_) {
        static SyncFenceTracker acquireFenceThread("Acquire Fence");
        acquireFenceThread.TrackFence(fence);
    }
    // if you need dump SurfaceBuffer to file, you should call DumpToFile(sequence) here
    return GSERROR_OK;
}

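// Consumer side: pops the oldest flushed buffer from the dirty list, marks it
// BUFFER_STATE_ACQUIRED and returns its acquire fence, timestamp and damage rects.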
GSError BufferQueue::AcquireBuffer(sptr<SurfaceBuffer> &buffer,
    sptr<SyncFence> &fence, int64_t &timestamp, std::vector<Rect> &damages)
{
    ScopedBytrace func(__func__);
    // dequeue from dirty list
    std::lock_guard<std::mutex> lockGuard(mutex_);
    GSError ret = PopFromDirtyList(buffer);
    if (ret == GSERROR_OK) {
        uint32_t sequence = buffer->GetSeqNum();
        if (isShared_ == false && bufferQueueCache_[sequence].state != BUFFER_STATE_FLUSHED) {
            BLOGNW("Warning [%{public}d], Reason: state is not BUFFER_STATE_FLUSHED", sequence);
        }
        bufferQueueCache_[sequence].state = BUFFER_STATE_ACQUIRED;

        fence = bufferQueueCache_[sequence].fence;
        timestamp = bufferQueueCache_[sequence].timestamp;
        damages = bufferQueueCache_[sequence].damages;

        BLOGND("Success Buffer seq id: %{public}d Queue id: %{public}" PRIu64 " AcquireFence:%{public}d",
            sequence, uniqueId_, fence->Get());
    } else if (ret == GSERROR_NO_BUFFER) {
        BLOGN_FAILURE("there is no dirty buffer");
    }

    CountTrace(HITRACE_TAG_GRAPHIC_AGP, name_, static_cast<int32_t>(dirtyList_.size()));
    return ret;
}

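// Consumer returns a buffer: the slot goes back to BUFFER_STATE_RELEASED with the
// release fence attached, is pushed to the free list (or deleted if it was marked
// for deletion), blocked producers are woken, and release listeners are notified.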
GSError BufferQueue::ReleaseBuffer(sptr<SurfaceBuffer> &buffer, const sptr<SyncFence>& fence)
{
    if (buffer == nullptr) {
        BLOGE("invalid parameter: buffer is null, please check");
        return GSERROR_INVALID_ARGUMENTS;
    }

    uint32_t sequence = buffer->GetSeqNum();
    ScopedBytrace bufferName(std::string(__func__) + "," + name_ + ":" + std::to_string(sequence));
    {
        std::lock_guard<std::mutex> lockGuard(mutex_);
        if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
            BLOGN_FAILURE_ID(sequence, "not find in cache, Queue id: %{public}" PRIu64, uniqueId_);
            return GSERROR_NO_ENTRY;
        }

        if (isShared_ == false) {
            const auto &state = bufferQueueCache_[sequence].state;
            if (state != BUFFER_STATE_ACQUIRED && state != BUFFER_STATE_ATTACHED) {
                BLOGN_FAILURE_ID(sequence, "invalid state");
                return GSERROR_NO_ENTRY;
            }
        }

        bufferQueueCache_[sequence].state = BUFFER_STATE_RELEASED;
        bufferQueueCache_[sequence].fence = fence;

        if (bufferQueueCache_[sequence].isDeleting) {
            DeleteBufferInCache(sequence);
            BLOGND("Succ delete Buffer seq id: %{public}d Queue id: %{public}" PRIu64 " in cache", sequence, uniqueId_);
        } else {
            freeList_.push_back(sequence);
            BLOGND("Succ push Buffer seq id: %{public}d Qid: %{public}" PRIu64 " to free list,"
                " releaseFence: %{public}d", sequence, uniqueId_, fence->Get());
        }
        waitReqCon_.notify_all();
    }

    if (onBufferRelease != nullptr) {
        ScopedBytrace func("OnBufferRelease");
        sptr<SurfaceBuffer> buf = buffer;
        auto sret = onBufferRelease(buf);
        if (sret == GSERROR_OK) {   // need to check why directly return?
            return sret;
        }
    }

    sptr<IProducerListener> listener;
    {
        std::lock_guard<std::mutex> lockGuard(producerListenerMutex_);
        listener = producerListener_;
    }

    if (listener != nullptr) {
        ScopedBytrace func("onBufferReleasedForProducer");
        if (listener->OnBufferReleased() != GSERROR_OK) {
            BLOGN_FAILURE_ID(sequence, "OnBufferReleased failed, Queue id: %{public}" PRIu64 "", uniqueId_);
        }
    }

    return GSERROR_OK;
}

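// Allocates a new SurfaceBufferImpl for the given config, maps it into the
// process, and registers it in bufferQueueCache_ in BUFFER_STATE_REQUESTED state.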
GSError BufferQueue::AllocBuffer(sptr<SurfaceBuffer> &buffer,
    const BufferRequestConfig &config)
{
    ScopedBytrace func(__func__);
    sptr<SurfaceBuffer> bufferImpl = new SurfaceBufferImpl();
    uint32_t sequence = bufferImpl->GetSeqNum();

    GSError ret = bufferImpl->Alloc(config);
    if (ret != GSERROR_OK) {
        BLOGN_FAILURE_ID_API(sequence, Alloc, ret);
        return ret;
    }

    BufferElement ele = {
        .buffer = bufferImpl,
        .state = BUFFER_STATE_REQUESTED,
        .isDeleting = false,
        .config = config,
        .fence = SyncFence::INVALID_FENCE,
    };

    ret = bufferImpl->Map();
    if (ret == GSERROR_OK) {
        BLOGN_SUCCESS_ID(sequence, "Map");
        bufferQueueCache_[sequence] = ele;
        buffer = bufferImpl;
    } else {
        BLOGN_FAILURE_ID(sequence, "Map failed");
    }
    return ret;
}

void BufferQueue::DeleteBufferInCache(uint32_t sequence)
{
    auto it = bufferQueueCache_.find(sequence);
    if (it != bufferQueueCache_.end()) {
        if (onBufferDeleteForRSMainThread_ != nullptr) {
            onBufferDeleteForRSMainThread_(sequence);
        }
        if (onBufferDeleteForRSHardwareThread_ != nullptr) {
            onBufferDeleteForRSHardwareThread_(sequence);
        }
        bufferQueueCache_.erase(it);
        deletingList_.push_back(sequence);
    }
}

uint32_t BufferQueue::GetQueueSize()
{
    return queueSize_;
}

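// Shrinks the cache by `count` buffers: idle buffers on the free and dirty lists
// are deleted immediately; buffers still in use are only marked isDeleting and
// are removed later when they are flushed or released.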
void BufferQueue::DeleteBuffers(int32_t count)
{
    ScopedBytrace func(__func__);
    if (count <= 0) {
        return;
    }

    std::lock_guard<std::mutex> lockGuard(mutex_);
    while (!freeList_.empty()) {
        DeleteBufferInCache(freeList_.front());
        freeList_.pop_front();
        count--;
        if (count <= 0) {
            return;
        }
    }

    while (!dirtyList_.empty()) {
        DeleteBufferInCache(dirtyList_.front());
        dirtyList_.pop_front();
        count--;
        if (count <= 0) {
            return;
        }
    }

    for (auto&& ele : bufferQueueCache_) {
        ele.second.isDeleting = true;
        // we don't have to do anything
        count--;
        if (count <= 0) {
            break;
        }
    }
}

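// Inserts an externally created buffer into the cache in BUFFER_STATE_ATTACHED
// state; when the queue is already full, idle buffers are evicted first, and the
// attach fails with GSERROR_OUT_OF_RANGE if not enough of them can be reclaimed.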
GSError BufferQueue::AttachBuffer(sptr<SurfaceBuffer> &buffer)
{
    ScopedBytrace func(__func__);
    if (isShared_) {
        BLOGN_FAILURE_RET(GSERROR_INVALID_OPERATING);
    }

    if (buffer == nullptr) {
        BLOGN_FAILURE_RET(GSERROR_INVALID_ARGUMENTS);
    }

    std::lock_guard<std::mutex> lockGuard(mutex_);
    BufferElement ele = {
        .buffer = buffer,
        .state = BUFFER_STATE_ATTACHED,
        .config = {
            .width = buffer->GetWidth(),
            .height = buffer->GetHeight(),
            .strideAlignment = 0x8,
            .format = buffer->GetFormat(),
            .usage = buffer->GetUsage(),
            .timeout = 0,
        },
        .damages = { { .w = ele.config.width, .h = ele.config.height, } },
    };

    uint32_t sequence = buffer->GetSeqNum();
    int32_t usedSize = static_cast<int32_t>(GetUsedSize());
    int32_t queueSize = static_cast<int32_t>(GetQueueSize());
    if (usedSize >= queueSize) {
        int32_t freeSize = static_cast<int32_t>(dirtyList_.size() + freeList_.size());
        if (freeSize >= usedSize - queueSize + 1) {
            DeleteBuffers(usedSize - queueSize + 1);
            bufferQueueCache_[sequence] = ele;
            BLOGN_SUCCESS_ID(sequence, "release");
            return GSERROR_OK;
        } else {
            BLOGN_FAILURE_RET(GSERROR_OUT_OF_RANGE);
        }
    } else {
        bufferQueueCache_[sequence] = ele;
        BLOGN_SUCCESS_ID(sequence, "no release");
        return GSERROR_OK;
    }
}

GSError BufferQueue::DetachBuffer(sptr<SurfaceBuffer> &buffer)
{
    ScopedBytrace func(__func__);
    if (isShared_) {
        BLOGN_FAILURE_RET(GSERROR_INVALID_OPERATING);
    }

    if (buffer == nullptr) {
        BLOGN_FAILURE_RET(GSERROR_INVALID_ARGUMENTS);
    }

    std::lock_guard<std::mutex> lockGuard(mutex_);
    uint32_t sequence = buffer->GetSeqNum();
    if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
        BLOGN_FAILURE_ID(sequence, "not find in cache");
        return GSERROR_NO_ENTRY;
    }

    if (bufferQueueCache_[sequence].state == BUFFER_STATE_REQUESTED) {
        BLOGN_SUCCESS_ID(sequence, "requested");
    } else if (bufferQueueCache_[sequence].state == BUFFER_STATE_ACQUIRED) {
        BLOGN_SUCCESS_ID(sequence, "acquired");
    } else {
        BLOGN_FAILURE_ID_RET(sequence, GSERROR_NO_ENTRY);
    }
    if (onBufferDeleteForRSMainThread_ != nullptr) {
        onBufferDeleteForRSMainThread_(sequence);
    }
    if (onBufferDeleteForRSHardwareThread_ != nullptr) {
        onBufferDeleteForRSHardwareThread_(sequence);
    }
    bufferQueueCache_.erase(sequence);
    return GSERROR_OK;
}

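// Validates the requested size (1..SURFACE_MAX_QUEUE_SIZE, exactly 1 for shared
// queues), deletes surplus buffers when shrinking, and wakes blocked producers
// when the queue grows.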
GSError BufferQueue::SetQueueSize(uint32_t queueSize)
{
    if (isShared_ == true && queueSize != 1) {
        BLOGN_INVALID("shared queue, size must be 1");
        return GSERROR_INVALID_ARGUMENTS;
    }

    if (queueSize <= 0) {
        BLOGN_INVALID("queue size (%{public}d) <= 0", queueSize);
        return GSERROR_INVALID_ARGUMENTS;
    }

    if (queueSize > SURFACE_MAX_QUEUE_SIZE) {
        BLOGN_INVALID("invalid queueSize[%{public}d] > SURFACE_MAX_QUEUE_SIZE[%{public}d]",
            queueSize, SURFACE_MAX_QUEUE_SIZE);
        return GSERROR_INVALID_ARGUMENTS;
    }

    DeleteBuffers(queueSize_ - queueSize);

    // if increase the queue size, try to wakeup the blocked thread
    if (queueSize > queueSize_) {
        queueSize_ = queueSize;
        waitReqCon_.notify_all();
    } else {
        queueSize_ = queueSize;
    }

    BLOGN_SUCCESS("queue size: %{public}d, Queue id: %{public}" PRIu64, queueSize_, uniqueId_);
    return GSERROR_OK;
}

GSError BufferQueue::GetName(std::string &name)
{
    name = name_;
    return GSERROR_OK;
}

GSError BufferQueue::RegisterConsumerListener(sptr<IBufferConsumerListener> &listener)
{
    std::lock_guard<std::mutex> lockGuard(listenerMutex_);
    listener_ = listener;
    return GSERROR_OK;
}

GSError BufferQueue::RegisterConsumerListener(IBufferConsumerListenerClazz *listener)
{
    std::lock_guard<std::mutex> lockGuard(listenerMutex_);
    listenerClazz_ = listener;
    return GSERROR_OK;
}

GSError BufferQueue::UnregisterConsumerListener()
{
    std::lock_guard<std::mutex> lockGuard(listenerMutex_);
    listener_ = nullptr;
    listenerClazz_ = nullptr;
    return GSERROR_OK;
}

GSError BufferQueue::RegisterReleaseListener(OnReleaseFunc func)
{
    onBufferRelease = func;
    return GSERROR_OK;
}

GSError BufferQueue::RegisterProducerReleaseListener(sptr<IProducerListener> listener)
{
    std::lock_guard<std::mutex> lockGuard(producerListenerMutex_);
    producerListener_ = listener;
    return GSERROR_OK;
}

GSError BufferQueue::RegisterDeleteBufferListener(OnDeleteBufferFunc func, bool isForUniRedraw)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    if (isForUniRedraw) {
        if (onBufferDeleteForRSHardwareThread_ != nullptr) {
            return GSERROR_OK;
        }
        onBufferDeleteForRSHardwareThread_ = func;
    } else {
        if (onBufferDeleteForRSMainThread_ != nullptr) {
            return GSERROR_OK;
        }
        onBufferDeleteForRSMainThread_ = func;
    }
    return GSERROR_OK;
}

GSError BufferQueue::SetDefaultWidthAndHeight(int32_t width, int32_t height)
{
    if (width <= 0) {
        BLOGN_INVALID("defaultWidth is greater than 0, now is %{public}d", width);
        return GSERROR_INVALID_ARGUMENTS;
    }

    if (height <= 0) {
        BLOGN_INVALID("defaultHeight is greater than 0, now is %{public}d", height);
        return GSERROR_INVALID_ARGUMENTS;
    }

    defaultWidth = width;
    defaultHeight = height;
    return GSERROR_OK;
}

int32_t BufferQueue::GetDefaultWidth()
{
    return defaultWidth;
}

int32_t BufferQueue::GetDefaultHeight()
{
    return defaultHeight;
}

GSError BufferQueue::SetDefaultUsage(uint32_t usage)
{
    defaultUsage = usage;
    return GSERROR_OK;
}

uint32_t BufferQueue::GetDefaultUsage()
{
    return defaultUsage;
}

void BufferQueue::ClearLocked()
{
    for (auto &[id, _] : bufferQueueCache_) {
        if (onBufferDeleteForRSMainThread_ != nullptr) {
            onBufferDeleteForRSMainThread_(id);
        }
        if (onBufferDeleteForRSHardwareThread_ != nullptr) {
            onBufferDeleteForRSHardwareThread_(id);
        }
    }
    bufferQueueCache_.clear();
    freeList_.clear();
    dirtyList_.clear();
    deletingList_.clear();
}

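// Notifies the consumer that the surface goes to the background, then drops the
// whole buffer cache and wakes any producer blocked in RequestBuffer.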
GSError BufferQueue::GoBackground()
{
    BLOGND("GoBackground, Queue id: %{public}" PRIu64, uniqueId_);
    {
        std::lock_guard<std::mutex> lockGuard(listenerMutex_);
        if (listener_ != nullptr) {
            ScopedBytrace bufferIPCSend("OnGoBackground");
            listener_->OnGoBackground();
        } else if (listenerClazz_ != nullptr) {
            ScopedBytrace bufferIPCSend("OnGoBackground");
            listenerClazz_->OnGoBackground();
        }
    }
    std::lock_guard<std::mutex> lockGuard(mutex_);
    ClearLocked();
    waitReqCon_.notify_all();
    SetProducerCacheCleanFlagLocked(false);
    return GSERROR_OK;
}

GSError BufferQueue::CleanCache()
{
    {
        std::lock_guard<std::mutex> lockGuard(listenerMutex_);
        if (listener_ != nullptr) {
            ScopedBytrace bufferIPCSend("OnCleanCache");
            listener_->OnCleanCache();
        } else if (listenerClazz_ != nullptr) {
            ScopedBytrace bufferIPCSend("OnCleanCache");
            listenerClazz_->OnCleanCache();
        }
    }
    std::lock_guard<std::mutex> lockGuard(mutex_);
    ClearLocked();
    waitReqCon_.notify_all();
    return GSERROR_OK;
}

GSError BufferQueue::OnConsumerDied()
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    ClearLocked();
    waitReqCon_.notify_all();
    return GSERROR_OK;
}

uint64_t BufferQueue::GetUniqueId() const
{
    return uniqueId_;
}

GSError BufferQueue::SetTransform(GraphicTransformType transform)
{
    transform_ = transform;
    return GSERROR_OK;
}

GraphicTransformType BufferQueue::GetTransform() const
{
    return transform_;
}

GSError BufferQueue::IsSupportedAlloc(const std::vector<BufferVerifyAllocInfo> &infos,
                                      std::vector<bool> &supporteds) const
{
    GSError ret = bufferManager_->IsSupportedAlloc(infos, supporteds);
    if (ret != GSERROR_OK) {
        BLOGN_FAILURE_API(IsSupportedAlloc, ret);
    }
    return ret;
}

GSError BufferQueue::SetScalingMode(uint32_t sequence, ScalingMode scalingMode)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
        BLOGN_FAILURE_ID(sequence, "not find in cache");
        return GSERROR_NO_ENTRY;
    }
    bufferQueueCache_[sequence].scalingMode = scalingMode;
    return GSERROR_OK;
}

GSError BufferQueue::GetScalingMode(uint32_t sequence, ScalingMode &scalingMode)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
        BLOGN_FAILURE_ID(sequence, "not find in cache");
        return GSERROR_NO_ENTRY;
    }
    scalingMode = bufferQueueCache_.at(sequence).scalingMode;
    return GSERROR_OK;
}

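// Per-buffer HDR metadata: SetMetaData/SetMetaDataSet store the values on the
// cached slot and record which HDRMetaDataType is in effect, so the consumer can
// query and read them back for the same sequence number.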
GSError BufferQueue::SetMetaData(uint32_t sequence, const std::vector<GraphicHDRMetaData> &metaData)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    if (metaData.size() == 0) {
        BLOGN_INVALID("metaData size is 0");
        return GSERROR_INVALID_ARGUMENTS;
    }
    if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
        BLOGN_FAILURE_ID(sequence, "not find in cache");
        return GSERROR_NO_ENTRY;
    }
    bufferQueueCache_[sequence].metaData.clear();
    bufferQueueCache_[sequence].metaData = metaData;
    bufferQueueCache_[sequence].hdrMetaDataType = HDRMetaDataType::HDR_META_DATA;
    return GSERROR_OK;
}

GSError BufferQueue::SetMetaDataSet(uint32_t sequence, GraphicHDRMetadataKey key,
                                    const std::vector<uint8_t> &metaData)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    if (key < GraphicHDRMetadataKey::GRAPHIC_MATAKEY_RED_PRIMARY_X ||
        key > GraphicHDRMetadataKey::GRAPHIC_MATAKEY_HDR_VIVID) {
        BLOGN_INVALID("key [%{public}d, %{public}d), now is %{public}d",
                      GraphicHDRMetadataKey::GRAPHIC_MATAKEY_RED_PRIMARY_X,
                      GraphicHDRMetadataKey::GRAPHIC_MATAKEY_HDR_VIVID, key);
        return GSERROR_INVALID_ARGUMENTS;
    }
    if (metaData.size() == 0) {
        BLOGN_INVALID("metaData size is 0");
        return GSERROR_INVALID_ARGUMENTS;
    }
    if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
        BLOGN_FAILURE_ID(sequence, "not find in cache");
        return GSERROR_NO_ENTRY;
    }
    bufferQueueCache_[sequence].metaDataSet.clear();
    bufferQueueCache_[sequence].key = key;
    bufferQueueCache_[sequence].metaDataSet = metaData;
    bufferQueueCache_[sequence].hdrMetaDataType = HDRMetaDataType::HDR_META_DATA_SET;
    return GSERROR_OK;
}

GSError BufferQueue::QueryMetaDataType(uint32_t sequence, HDRMetaDataType &type)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
        BLOGN_FAILURE_ID(sequence, "not find in cache");
        return GSERROR_NO_ENTRY;
    }
    type = bufferQueueCache_.at(sequence).hdrMetaDataType;
    return GSERROR_OK;
}

GSError BufferQueue::GetMetaData(uint32_t sequence, std::vector<GraphicHDRMetaData> &metaData)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
        BLOGN_FAILURE_ID(sequence, "not find in cache");
        return GSERROR_NO_ENTRY;
    }
    metaData.clear();
    metaData = bufferQueueCache_.at(sequence).metaData;
    return GSERROR_OK;
}

GSError BufferQueue::GetMetaDataSet(uint32_t sequence, GraphicHDRMetadataKey &key,
                                    std::vector<uint8_t> &metaData)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
        BLOGN_FAILURE_ID(sequence, "not find in cache");
        return GSERROR_NO_ENTRY;
    }
    metaData.clear();
    key = bufferQueueCache_.at(sequence).key;
    metaData = bufferQueueCache_.at(sequence).metaDataSet;
    return GSERROR_OK;
}

GSError BufferQueue::SetTunnelHandle(const sptr<SurfaceTunnelHandle> &handle)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    bool tunnelHandleChange = false;
    if (tunnelHandle_ == nullptr) {
        if (handle == nullptr) {
            BLOGN_INVALID("tunnel handle is nullptr");
            return GSERROR_INVALID_ARGUMENTS;
        }
        tunnelHandleChange = true;
    } else {
        tunnelHandleChange = tunnelHandle_->Different(handle);
    }
    if (!tunnelHandleChange) {
        BLOGNW("same tunnel handle, please check");
        return GSERROR_NO_ENTRY;
    }
    tunnelHandle_ = handle;
    {
        std::lock_guard<std::mutex> lockGuard(listenerMutex_);
        if (listener_ != nullptr) {
            ScopedBytrace bufferIPCSend("OnTunnelHandleChange");
            listener_->OnTunnelHandleChange();
        } else if (listenerClazz_ != nullptr) {
            ScopedBytrace bufferIPCSend("OnTunnelHandleChange");
            listenerClazz_->OnTunnelHandleChange();
        } else {
            return GSERROR_NO_CONSUMER;
        }
    }
    return GSERROR_OK;
}

sptr<SurfaceTunnelHandle> BufferQueue::GetTunnelHandle()
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    return tunnelHandle_;
}

GSError BufferQueue::SetPresentTimestamp(uint32_t sequence, const GraphicPresentTimestamp &timestamp)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
        BLOGN_FAILURE_ID(sequence, "not find in cache");
        return GSERROR_NO_ENTRY;
    }
    bufferQueueCache_[sequence].presentTimestamp = timestamp;
    return GSERROR_OK;
}

GSError BufferQueue::GetPresentTimestamp(uint32_t sequence, GraphicPresentTimestampType type, int64_t &time)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
        BLOGN_FAILURE_ID(sequence, "not find in cache");
        return GSERROR_NO_ENTRY;
    }
    if (type != bufferQueueCache_.at(sequence).presentTimestamp.type) {
        BLOGN_FAILURE_ID(sequence, "PresentTimestampType [%{public}d] is not supported, the supported type "\
        "is [%{public}d]", type, bufferQueueCache_.at(sequence).presentTimestamp.type);
        return GSERROR_NO_ENTRY;
    }
    switch (type) {
        case GraphicPresentTimestampType::GRAPHIC_DISPLAY_PTS_DELAY: {
            time = bufferQueueCache_.at(sequence).presentTimestamp.time;
            return GSERROR_OK;
        }
        case GraphicPresentTimestampType::GRAPHIC_DISPLAY_PTS_TIMESTAMP: {
            time = bufferQueueCache_.at(sequence).presentTimestamp.time - bufferQueueCache_.at(sequence).timestamp;
            return GSERROR_OK;
        }
        default: {
            BLOGN_FAILURE_ID(sequence, "unsupported type!");
            return GSERROR_TYPE_ERROR;
        }
    }
}

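// Appends one line per cached buffer (state, timestamp, damage rects, config and
// approximate memory size in KiB) to the dump string; called by Dump() below.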
void BufferQueue::DumpCache(std::string &result)
{
    for (auto it = bufferQueueCache_.begin(); it != bufferQueueCache_.end(); it++) {
        BufferElement element = it->second;
        if (BufferStateStrs.find(element.state) != BufferStateStrs.end()) {
            result += "        sequence = " + std::to_string(it->first) +
                ", state = " + BufferStateStrs.at(element.state) +
                ", timestamp = " + std::to_string(element.timestamp);
        }
        for (decltype(element.damages.size()) i = 0; i < element.damages.size(); i++) {
            result += ", damagesRect = [" + std::to_string(i) + "] = [" +
            std::to_string(element.damages[i].x) + ", " +
            std::to_string(element.damages[i].y) + ", " +
            std::to_string(element.damages[i].w) + ", " +
            std::to_string(element.damages[i].h) + "],";
        }
        result += " config = [" + std::to_string(element.config.width) + "x" +
            std::to_string(element.config.height) + ", " +
            std::to_string(element.config.strideAlignment) + ", " +
            std::to_string(element.config.format) + ", " +
            std::to_string(element.config.usage) + ", " +
            std::to_string(element.config.timeout) + "],";

        double bufferMemSize = 0;
        if (element.buffer != nullptr) {
            result += " bufferWidth = " + std::to_string(element.buffer->GetWidth()) +
                    ", bufferHeight = " + std::to_string(element.buffer->GetHeight());
            bufferMemSize = static_cast<double>(element.buffer->GetSize()) / BUFFER_MEMSIZE_RATE;
        }
        std::ostringstream ss;
        ss.precision(BUFFER_MEMSIZE_FORMAT);
        ss.setf(std::ios::fixed);
        ss << bufferMemSize;
        std::string str = ss.str();
        result += ", bufferMemSize = " + str + "(KiB).\n";
    }
}

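// Appends overall queue statistics (default size, FIFO depth, list lengths and
// total buffer memory) followed by the per-buffer cache dump. A static counter
// accumulates memory across surfaces; when the incoming result string ends with a
// "dumpend" marker, the marker is replaced with the accumulated total instead.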
void BufferQueue::Dump(std::string &result)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    std::ostringstream ss;
    ss.precision(BUFFER_MEMSIZE_FORMAT);
    ss.setf(std::ios::fixed);
    static double allSurfacesMemSize = 0;
    uint32_t totalBufferListSize = 0;
    double memSizeInKB = 0;

    for (auto it = bufferQueueCache_.begin(); it != bufferQueueCache_.end(); it++) {
        BufferElement element = it->second;
        if (element.buffer != nullptr) {
            totalBufferListSize += element.buffer->GetSize();
        }
    }
    memSizeInKB = static_cast<double>(totalBufferListSize) / BUFFER_MEMSIZE_RATE;

    allSurfacesMemSize += memSizeInKB;
    uint32_t resultLen = result.size();
    std::string dumpEndFlag = "dumpend";
    std::string dumpEndIn(result, resultLen - dumpEndFlag.size(), resultLen - 1);
    if (dumpEndIn == dumpEndFlag) {
        ss << allSurfacesMemSize;
        std::string dumpEndStr = ss.str();
        result.erase(resultLen - dumpEndFlag.size(), resultLen - 1);
        result += dumpEndStr + " KiB.\n";
        allSurfacesMemSize = 0;
        return;
    }

    ss.str("");
    ss << memSizeInKB;
    std::string str = ss.str();
    result.append("    BufferQueue:\n");
    result += "      default-size = [" + std::to_string(defaultWidth) + "x" + std::to_string(defaultHeight) + "]" +
        ", FIFO = " + std::to_string(queueSize_) +
        ", name = " + name_ +
        ", uniqueId = " + std::to_string(uniqueId_) +
        ", usedBufferListLen = " + std::to_string(GetUsedSize()) +
        ", freeBufferListLen = " + std::to_string(freeList_.size()) +
        ", dirtyBufferListLen = " + std::to_string(dirtyList_.size()) +
        ", totalBuffersMemSize = " + str + "(KiB).\n";

    result.append("      bufferQueueCache:\n");
    DumpCache(result);
}

bool BufferQueue::GetStatus() const
{
    return isValidStatus_;
}

void BufferQueue::SetStatus(bool status)
{
    isValidStatus_ = status;
    waitReqCon_.notify_all();
}
}; // namespace OHOS