/*
 * Copyright (c) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "buffer_queue.h"
#include <algorithm>
#include <fstream>
#include <sstream>
#include <sys/time.h>
#include <cinttypes>
#include <unistd.h>
#include <scoped_bytrace.h>

#include "buffer_utils.h"
#include "buffer_log.h"
#include "buffer_manager.h"
#include "hitrace_meter.h"
#include "surface_buffer_impl.h"
#include "sync_fence.h"
#include "sandbox_utils.h"

namespace OHOS {
namespace {
constexpr uint32_t UNIQUE_ID_OFFSET = 32;
constexpr uint32_t BUFFER_MEMSIZE_RATE = 1024;
constexpr uint32_t BUFFER_MEMSIZE_FORMAT = 2;
}

static const std::map<BufferState, std::string> BufferStateStrs = {
    {BUFFER_STATE_RELEASED,                    "0 <released>"},
    {BUFFER_STATE_REQUESTED,                   "1 <requested>"},
    {BUFFER_STATE_FLUSHED,                     "2 <flushed>"},
    {BUFFER_STATE_ACQUIRED,                    "3 <acquired>"},
};

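// Builds a process-wide unique queue id: the real pid in the upper 32 bits (UNIQUE_ID_OFFSET)
// combined with a monotonically increasing per-process counter in the lower 32 bits.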
static uint64_t GetUniqueIdImpl()
{
    static std::atomic<uint32_t> counter { 0 };
    static uint64_t id = static_cast<uint64_t>(GetRealPid()) << UNIQUE_ID_OFFSET;
    return id | counter++;
}

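// A shared queue (isShared_) is clamped to a single buffer, which producer and consumer reuse.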
BufferQueue::BufferQueue(const std::string &name, bool isShared)
    : name_(name), uniqueId_(GetUniqueIdImpl()), isShared_(isShared)
{
    BLOGNI("ctor, Queue id: %{public}" PRIu64 " isShared: %{public}d", uniqueId_, isShared);
    bufferManager_ = BufferManager::GetInstance();
    if (isShared_ == true) {
        queueSize_ = 1;
    }
}

BufferQueue::~BufferQueue()
{
    BLOGNI("dtor, Queue id: %{public}" PRIu64, uniqueId_);
    for (auto &[id, _] : bufferQueueCache_) {
        if (onBufferDelete_ != nullptr) {
            onBufferDelete_(id);
        }
    }
}

GSError BufferQueue::Init()
{
    return GSERROR_OK;
}

uint32_t BufferQueue::GetUsedSize()
{
    uint32_t used_size = bufferQueueCache_.size();
    return used_size;
}

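// Reuse a released buffer: prefer one whose cached config matches the request, otherwise fall
// back to the oldest free buffer; in shared mode the single cached buffer is always returned.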
GSError BufferQueue::PopFromFreeList(sptr<SurfaceBuffer> &buffer,
    const BufferRequestConfig &config)
{
    if (isShared_ == true && GetUsedSize() > 0) {
        buffer = bufferQueueCache_.begin()->second.buffer;
        return GSERROR_OK;
    }

    for (auto it = freeList_.begin(); it != freeList_.end(); it++) {
        if (bufferQueueCache_[*it].config == config) {
            buffer = bufferQueueCache_[*it].buffer;
            freeList_.erase(it);
            return GSERROR_OK;
        }
    }

    if (freeList_.empty()) {
        buffer = nullptr;
        return GSERROR_NO_BUFFER;
    }

    buffer = bufferQueueCache_[freeList_.front()].buffer;
    freeList_.pop_front();
    return GSERROR_OK;
}

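// Hand out the oldest flushed (dirty) buffer, if any.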
GSError BufferQueue::PopFromDirtyList(sptr<SurfaceBuffer> &buffer)
{
    if (isShared_ == true && GetUsedSize() > 0) {
        buffer = bufferQueueCache_.begin()->second.buffer;
        return GSERROR_OK;
    }

    if (!dirtyList_.empty()) {
        buffer = bufferQueueCache_[dirtyList_.front()].buffer;
        dirtyList_.pop_front();
        return GSERROR_OK;
    } else {
        buffer = nullptr;
        return GSERROR_NO_BUFFER;
    }
}

GSError BufferQueue::CheckRequestConfig(const BufferRequestConfig &config)
{
    uint32_t align = config.strideAlignment;
    bool isValidStrideAlignment = true;
    isValidStrideAlignment = isValidStrideAlignment && (SURFACE_MIN_STRIDE_ALIGNMENT <= align);
    isValidStrideAlignment = isValidStrideAlignment && (SURFACE_MAX_STRIDE_ALIGNMENT >= align);
    if (!isValidStrideAlignment) {
        BLOGN_INVALID("config.strideAlignment [%{public}d, %{public}d], now is %{public}d",
                      SURFACE_MIN_STRIDE_ALIGNMENT, SURFACE_MAX_STRIDE_ALIGNMENT, align);
        return GSERROR_INVALID_ARGUMENTS;
    }

    if (align & (align - 1)) {
        BLOGN_INVALID("config.strideAlignment is not power of 2 like 4, 8, 16, 32; now is %{public}d", align);
        return GSERROR_INVALID_ARGUMENTS;
    }

    if (config.colorGamut <= GraphicColorGamut::GRAPHIC_COLOR_GAMUT_INVALID ||
        config.colorGamut > GraphicColorGamut::GRAPHIC_COLOR_GAMUT_DISPLAY_BT2020 + 1) {
        BLOGN_INVALID("config.colorGamut [0, %{public}d], now is %{public}d",
            static_cast<uint32_t>(GraphicColorGamut::GRAPHIC_COLOR_GAMUT_DISPLAY_BT2020),
            static_cast<uint32_t>(config.colorGamut));
        return GSERROR_INVALID_ARGUMENTS;
    }

    if (config.transform < GraphicTransformType::GRAPHIC_ROTATE_NONE ||
        config.transform >= GraphicTransformType::GRAPHIC_ROTATE_BUTT) {
        BLOGN_INVALID("config.transform [0, %{public}d), now is %{public}d",
            GraphicTransformType::GRAPHIC_ROTATE_BUTT, config.transform);
        return GSERROR_INVALID_ARGUMENTS;
    }
    return GSERROR_OK;
}

GSError BufferQueue::CheckFlushConfig(const BufferFlushConfig &config)
{
    if (config.damage.w < 0) {
        BLOGN_INVALID("config.damage.w >= 0, now is %{public}d", config.damage.w);
        return GSERROR_INVALID_ARGUMENTS;
    }
    if (config.damage.h < 0) {
        BLOGN_INVALID("config.damage.h >= 0, now is %{public}d", config.damage.h);
        return GSERROR_INVALID_ARGUMENTS;
    }
    return GSERROR_OK;
}

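// Producer entry point: validate the config, try to reuse a free buffer, wait up to
// config.timeout milliseconds for one to be released when the queue is full, and allocate a
// new buffer only if there is still spare capacity.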
GSError BufferQueue::RequestBuffer(const BufferRequestConfig &config, sptr<BufferExtraData> &bedata,
    struct IBufferProducer::RequestBufferReturnValue &retval)
{
    ScopedBytrace func(__func__);
    if (!GetStatus()) {
        BLOGN_FAILURE_RET(GSERROR_NO_CONSUMER);
    }
    {
        std::lock_guard<std::mutex> lockGuard(listenerMutex_);
        if (listener_ == nullptr && listenerClazz_ == nullptr) {
            BLOGN_FAILURE_RET(GSERROR_NO_CONSUMER);
        }
    }

    // check param
    GSError ret = CheckRequestConfig(config);
    if (ret != GSERROR_OK) {
        BLOGN_FAILURE_API(CheckRequestConfig, ret);
        return ret;
    }

    std::unique_lock<std::mutex> lock(mutex_);
    // dequeue from free list
    sptr<SurfaceBuffer>& buffer = retval.buffer;
    ret = PopFromFreeList(buffer, config);
    if (ret == GSERROR_OK) {
        return ReuseBuffer(config, bedata, retval);
    }

    // check queue size
    if (GetUsedSize() >= GetQueueSize()) {
        waitReqCon_.wait_for(lock, std::chrono::milliseconds(config.timeout),
            [this]() { return !freeList_.empty() || (GetUsedSize() < GetQueueSize()) || !GetStatus(); });
        if (!GetStatus()) {
            BLOGN_FAILURE_RET(GSERROR_NO_CONSUMER);
        }
        // try dequeue from free list again
        ret = PopFromFreeList(buffer, config);
        if (ret == GSERROR_OK) {
            return ReuseBuffer(config, bedata, retval);
        } else if (GetUsedSize() >= GetQueueSize()) {
            BLOGND("all buffer are using, Queue id: %{public}" PRIu64, uniqueId_);
            return GSERROR_NO_BUFFER;
        }
    }

    ret = AllocBuffer(buffer, config);
    if (ret == GSERROR_OK) {
        retval.sequence = buffer->GetSeqNum();
        bedata = buffer->GetExtraData();
        retval.fence = SyncFence::INVALID_FENCE;
        BLOGND("Success alloc Buffer[%{public}d %{public}d] id: %{public}d id: %{public}" PRIu64, config.width,
            config.height, retval.sequence, uniqueId_);
    } else {
        BLOGNE("Fail to alloc or map Buffer[%{public}d %{public}d] ret: %{public}d, id: %{public}" PRIu64,
            config.width, config.height, ret, uniqueId_);
    }

    return ret;
}

GSError BufferQueue::SetProducerCacheCleanFlagLocked(bool flag)
{
    producerCacheClean_ = flag;
    producerCacheList_.clear();
    return GSERROR_OK;
}

bool BufferQueue::CheckProducerCacheList()
{
    for (auto &[id, _] : bufferQueueCache_) {
        if (std::find(producerCacheList_.begin(), producerCacheList_.end(), id) == producerCacheList_.end()) {
            return false;
        }
    }
    return true;
}

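// Recycle a cached buffer for the producer. If the requested config differs from the cached one
// the buffer is reallocated; otherwise retval.buffer is cleared before returning, since the
// producer side already holds this sequence in its own cache and does not need it re-sent.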
GSError BufferQueue::ReuseBuffer(const BufferRequestConfig &config, sptr<BufferExtraData> &bedata,
    struct IBufferProducer::RequestBufferReturnValue &retval)
{
    ScopedBytrace func(__func__);
    if (retval.buffer == nullptr) {
        BLOGN_FAILURE_RET(GSERROR_INVALID_ARGUMENTS);
    }
    retval.sequence = retval.buffer->GetSeqNum();
    if (bufferQueueCache_.find(retval.sequence) == bufferQueueCache_.end()) {
        BLOGN_FAILURE_RET(GSERROR_INVALID_ARGUMENTS);
    }
    bool needRealloc = (config != bufferQueueCache_[retval.sequence].config);
    // config, realloc
    if (needRealloc) {
        if (isShared_) {
            BLOGN_FAILURE_RET(GSERROR_INVALID_ARGUMENTS);
        }
        DeleteBufferInCache(retval.sequence);

        sptr<SurfaceBuffer> buffer = nullptr;
        auto sret = AllocBuffer(buffer, config);
        if (sret != GSERROR_OK) {
            BLOGN_FAILURE("realloc failed");
            return sret;
        }

        retval.buffer = buffer;
        retval.sequence = buffer->GetSeqNum();
        bufferQueueCache_[retval.sequence].config = config;
    }

    bufferQueueCache_[retval.sequence].state = BUFFER_STATE_REQUESTED;
    retval.fence = bufferQueueCache_[retval.sequence].fence;
    bedata = retval.buffer->GetExtraData();

    auto &dbs = retval.deletingBuffers;
    dbs.insert(dbs.end(), deletingList_.begin(), deletingList_.end());
    deletingList_.clear();

    if (needRealloc || isShared_ || producerCacheClean_) {
        BLOGND("RequestBuffer Succ realloc Buffer[%{public}d %{public}d] with new config "\
            "qid: %{public}d id: %{public}" PRIu64, config.width, config.height, retval.sequence, uniqueId_);
        if (producerCacheClean_) {
            producerCacheList_.push_back(retval.sequence);
            if (CheckProducerCacheList()) {
                SetProducerCacheCleanFlagLocked(false);
            }
        }
    } else {
        BLOGND("RequestBuffer Succ Buffer[%{public}d %{public}d] in seq id: %{public}d "\
            "qid: %{public}" PRIu64 " releaseFence: %{public}d",
            config.width, config.height, retval.sequence, uniqueId_, retval.fence->Get());
        retval.buffer = nullptr;
    }

    ScopedBytrace bufferName(name_ + ":" + std::to_string(retval.sequence));
    return GSERROR_OK;
}

GSError BufferQueue::CancelBuffer(uint32_t sequence, const sptr<BufferExtraData> &bedata)
{
    ScopedBytrace func(__func__);
    if (isShared_) {
        BLOGN_FAILURE_RET(GSERROR_INVALID_OPERATING);
    }
    std::lock_guard<std::mutex> lockGuard(mutex_);

    if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
        BLOGN_FAILURE_ID(sequence, "not found in cache");
        return GSERROR_NO_ENTRY;
    }

    if (bufferQueueCache_[sequence].state != BUFFER_STATE_REQUESTED) {
        BLOGN_FAILURE_ID(sequence, "state is not BUFFER_STATE_REQUESTED");
        return GSERROR_INVALID_OPERATING;
    }
    bufferQueueCache_[sequence].state = BUFFER_STATE_RELEASED;
    freeList_.push_back(sequence);
    bufferQueueCache_[sequence].buffer->SetExtraData(bedata);

    waitReqCon_.notify_all();
    BLOGND("Success Buffer id: %{public}d Queue id: %{public}" PRIu64, sequence, uniqueId_);

    return GSERROR_OK;
}

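// Producer queues a filled buffer: validate the state, move it to the dirty list via
// DoFlushBuffer, then notify the registered consumer listener that a buffer is available.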
GSError BufferQueue::FlushBuffer(uint32_t sequence, const sptr<BufferExtraData> &bedata,
    const sptr<SyncFence>& fence, const BufferFlushConfig &config)
{
    ScopedBytrace func(__func__);
    if (!GetStatus()) {
        BLOGN_FAILURE_RET(GSERROR_NO_CONSUMER);
    }
    // check param
    auto sret = CheckFlushConfig(config);
    if (sret != GSERROR_OK) {
        BLOGN_FAILURE_API(CheckFlushConfig, sret);
        return sret;
    }

    {
        std::lock_guard<std::mutex> lockGuard(mutex_);
        if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
            BLOGN_FAILURE_ID(sequence, "not found in cache");
            return GSERROR_NO_ENTRY;
        }

        if (isShared_ == false) {
            auto &state = bufferQueueCache_[sequence].state;
            if (state != BUFFER_STATE_REQUESTED && state != BUFFER_STATE_ATTACHED) {
                BLOGN_FAILURE_ID(sequence, "invalid state %{public}d", state);
                return GSERROR_NO_ENTRY;
            }
        }
    }

    {
        std::lock_guard<std::mutex> lockGuard(listenerMutex_);
        if (listener_ == nullptr && listenerClazz_ == nullptr) {
            CancelBuffer(sequence, bedata);
            return GSERROR_NO_CONSUMER;
        }
    }

    ScopedBytrace bufferIPCSend("BufferIPCSend");
    sret = DoFlushBuffer(sequence, bedata, fence, config);
    if (sret != GSERROR_OK) {
        return sret;
    }
    CountTrace(HITRACE_TAG_GRAPHIC_AGP, name_, static_cast<int32_t>(dirtyList_.size()));
    if (sret == GSERROR_OK) {
        std::lock_guard<std::mutex> lockGuard(listenerMutex_);
        if (listener_ != nullptr) {
            ScopedBytrace bufferIPCSend("OnBufferAvailable");
            listener_->OnBufferAvailable();
        } else if (listenerClazz_ != nullptr) {
            ScopedBytrace bufferIPCSend("OnBufferAvailable");
            listenerClazz_->OnBufferAvailable();
        }
    }
    BLOGND("Success Buffer seq id: %{public}d Queue id: %{public}" PRIu64 " AcquireFence:%{public}d",
        sequence, uniqueId_, fence->Get());
    return sret;
}

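// Debug helper: when the trigger file /data/bq_dump exists, write the raw buffer contents to
// /data/bq_<pid>_<name>_<timestamp>.raw.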
void BufferQueue::DumpToFile(uint32_t sequence)
{
    if (access("/data/bq_dump", F_OK) == -1) {
        return;
    }

    ScopedBytrace func(__func__);
    struct timeval now;
    gettimeofday(&now, nullptr);
    constexpr int secToUsec = 1000 * 1000;
    int64_t nowVal = (int64_t)now.tv_sec * secToUsec + (int64_t)now.tv_usec;

    std::stringstream ss;
    ss << "/data/bq_" << GetRealPid() << "_" << name_ << "_" << nowVal << ".raw";

    sptr<SurfaceBuffer>& buffer = bufferQueueCache_[sequence].buffer;
    std::ofstream rawDataFile(ss.str(), std::ofstream::binary);
    if (!rawDataFile.good()) {
        BLOGE("open failed: (%{public}d)%{public}s", errno, strerror(errno));
        return;
    }
    rawDataFile.write(static_cast<const char *>(buffer->GetVirAddr()), buffer->GetSize());
    rawDataFile.close();
}

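// Core of FlushBuffer: mark the buffer FLUSHED, append it to the dirty list, store fence and
// damage, flush the CPU cache for CPU-written buffers, and stamp the queue time when the
// producer did not supply a timestamp.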
GSError BufferQueue::DoFlushBuffer(uint32_t sequence, const sptr<BufferExtraData> &bedata,
    const sptr<SyncFence>& fence, const BufferFlushConfig &config)
{
    ScopedBytrace func(__func__);
    ScopedBytrace bufferName(name_ + ":" + std::to_string(sequence));
    std::lock_guard<std::mutex> lockGuard(mutex_);
    if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
        BLOGN_FAILURE_ID(sequence, "not found in cache");
        return GSERROR_NO_ENTRY;
    }
    if (bufferQueueCache_[sequence].isDeleting) {
        DeleteBufferInCache(sequence);
        BLOGN_SUCCESS_ID(sequence, "delete");
        return GSERROR_OK;
    }

    bufferQueueCache_[sequence].state = BUFFER_STATE_FLUSHED;
    dirtyList_.push_back(sequence);
    bufferQueueCache_[sequence].buffer->SetExtraData(bedata);
    bufferQueueCache_[sequence].fence = fence;
    bufferQueueCache_[sequence].damage = config.damage;

    uint64_t usage = static_cast<uint32_t>(bufferQueueCache_[sequence].config.usage);
    if (usage & BUFFER_USAGE_CPU_WRITE) {
        // api flush
        auto sret = bufferQueueCache_[sequence].buffer->FlushCache();
        if (sret != GSERROR_OK) {
            BLOGN_FAILURE_ID_API(sequence, FlushCache, sret);
            return sret;
        }
    }

    if (config.timestamp == 0) {
        struct timeval tv = {};
        gettimeofday(&tv, nullptr);
        constexpr int32_t secToUsec = 1000000;
        bufferQueueCache_[sequence].timestamp = (int64_t)tv.tv_usec + (int64_t)tv.tv_sec * secToUsec;
    } else {
        bufferQueueCache_[sequence].timestamp = config.timestamp;
    }

    DumpToFile(sequence);
    return GSERROR_OK;
}

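// Consumer takes ownership of the oldest flushed buffer together with its acquire fence,
// timestamp, and damage rect.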
GSError BufferQueue::AcquireBuffer(sptr<SurfaceBuffer> &buffer,
    sptr<SyncFence> &fence, int64_t &timestamp, Rect &damage)
{
    ScopedBytrace func(__func__);
    // dequeue from dirty list
    std::lock_guard<std::mutex> lockGuard(mutex_);
    GSError ret = PopFromDirtyList(buffer);
    if (ret == GSERROR_OK) {
        uint32_t sequence = buffer->GetSeqNum();
        if (isShared_ == false && bufferQueueCache_[sequence].state != BUFFER_STATE_FLUSHED) {
            BLOGNW("Warning [%{public}d], Reason: state is not BUFFER_STATE_FLUSHED", sequence);
        }
        bufferQueueCache_[sequence].state = BUFFER_STATE_ACQUIRED;

        fence = bufferQueueCache_[sequence].fence;
        timestamp = bufferQueueCache_[sequence].timestamp;
        damage = bufferQueueCache_[sequence].damage;

        ScopedBytrace bufferName(name_ + ":" + std::to_string(sequence));
        BLOGND("Success Buffer seq id: %{public}d Queue id: %{public}" PRIu64 " AcquireFence:%{public}d",
            sequence, uniqueId_, fence->Get());
    } else if (ret == GSERROR_NO_BUFFER) {
        BLOGN_FAILURE("there is no dirty buffer");
    }

    CountTrace(HITRACE_TAG_GRAPHIC_AGP, name_, static_cast<int32_t>(dirtyList_.size()));
    return ret;
}

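// Consumer returns a buffer. If a producer-side release callback consumes it the call returns
// early; otherwise the buffer goes back to the free list (or is deleted if it was marked for
// deletion) and any producer waiting in RequestBuffer is woken up.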
GSError BufferQueue::ReleaseBuffer(sptr<SurfaceBuffer> &buffer, const sptr<SyncFence>& fence)
{
    if (buffer == nullptr) {
        BLOGE("invalid parameter: buffer is null, please check");
        return GSERROR_INVALID_ARGUMENTS;
    }

    uint32_t sequence = buffer->GetSeqNum();
    ScopedBytrace bufferName(std::string(__func__) + "," + name_ + ":" + std::to_string(sequence));
    {
        std::lock_guard<std::mutex> lockGuard(mutex_);
        if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
            BLOGN_FAILURE_ID(sequence, "not find in cache, Queue id: %{public}" PRIu64, uniqueId_);
            return GSERROR_NO_ENTRY;
        }

        if (isShared_ == false) {
            const auto &state = bufferQueueCache_[sequence].state;
            if (state != BUFFER_STATE_ACQUIRED && state != BUFFER_STATE_ATTACHED) {
                BLOGN_FAILURE_ID(sequence, "invalid state");
                return GSERROR_NO_ENTRY;
            }
        }
    }

    if (onBufferRelease != nullptr) {
        ScopedBytrace func("OnBufferRelease");
        sptr<SurfaceBuffer> buf = buffer;
        auto sret = onBufferRelease(buf);
        if (sret == GSERROR_OK) {   // need to check why directly return?
            return sret;
        }
    }

    std::lock_guard<std::mutex> lockGuard(mutex_);
    if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
        BLOGN_FAILURE_ID(sequence, "not find in cache, Queue id: %{public}" PRIu64 "", uniqueId_);
        return GSERROR_NO_ENTRY;
    }
    bufferQueueCache_[sequence].state = BUFFER_STATE_RELEASED;
    bufferQueueCache_[sequence].fence = fence;

    if (bufferQueueCache_[sequence].isDeleting) {
        DeleteBufferInCache(sequence);
        BLOGND("Succ delete Buffer seq id: %{public}d Queue id: %{public}" PRIu64 " in cache", sequence, uniqueId_);
    } else {
        freeList_.push_back(sequence);
        BLOGND("Succ push Buffer seq id: %{public}d Qid: %{public}" PRIu64 " to free list, releaseFence: %{public}d",
            sequence, uniqueId_, fence->Get());
    }
    waitReqCon_.notify_all();
    return GSERROR_OK;
}

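// Allocate and map a new SurfaceBufferImpl, then insert it into the cache in REQUESTED state.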
GSError BufferQueue::AllocBuffer(sptr<SurfaceBuffer> &buffer,
    const BufferRequestConfig &config)
{
    ScopedBytrace func(__func__);
    sptr<SurfaceBuffer> bufferImpl = new SurfaceBufferImpl();
    uint32_t sequence = bufferImpl->GetSeqNum();

    GSError ret = bufferImpl->Alloc(config);
    if (ret != GSERROR_OK) {
        BLOGN_FAILURE_ID_API(sequence, Alloc, ret);
        return ret;
    }

    BufferElement ele = {
        .buffer = bufferImpl,
        .state = BUFFER_STATE_REQUESTED,
        .isDeleting = false,
        .config = config,
        .fence = SyncFence::INVALID_FENCE,
    };

    ret = bufferImpl->Map();
    if (ret == GSERROR_OK) {
        BLOGN_SUCCESS_ID(sequence, "Map");
        bufferQueueCache_[sequence] = ele;
        buffer = bufferImpl;
    } else {
        BLOGN_FAILURE_ID(sequence, "Map failed");
    }
    return ret;
}

void BufferQueue::DeleteBufferInCache(uint32_t sequence)
{
    auto it = bufferQueueCache_.find(sequence);
    if (it != bufferQueueCache_.end()) {
        if (onBufferDelete_ != nullptr) {
            onBufferDelete_(sequence);
        }
        bufferQueueCache_.erase(it);
        deletingList_.push_back(sequence);
    }
}

uint32_t BufferQueue::GetQueueSize()
{
    return queueSize_;
}

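// Shrink the queue by count buffers: delete free buffers first, then dirty ones, and finally
// mark in-use buffers as isDeleting so they are dropped when they are flushed or released.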
void BufferQueue::DeleteBuffers(int32_t count)
{
    ScopedBytrace func(__func__);
    if (count <= 0) {
        return;
    }

    std::lock_guard<std::mutex> lockGuard(mutex_);
    while (!freeList_.empty()) {
        DeleteBufferInCache(freeList_.front());
        freeList_.pop_front();
        count--;
        if (count <= 0) {
            return;
        }
    }

    while (!dirtyList_.empty()) {
        DeleteBufferInCache(dirtyList_.front());
        dirtyList_.pop_front();
        count--;
        if (count <= 0) {
            return;
        }
    }

    for (auto&& ele : bufferQueueCache_) {
        ele.second.isDeleting = true;
        // we don't have to do anything
        count--;
        if (count <= 0) {
            break;
        }
    }
}

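// Insert an externally produced buffer in ATTACHED state, evicting enough free or dirty buffers
// to stay within the queue size; fails with GSERROR_OUT_OF_RANGE if eviction cannot make room.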
GSError BufferQueue::AttachBuffer(sptr<SurfaceBuffer> &buffer)
{
    ScopedBytrace func(__func__);
    if (isShared_) {
        BLOGN_FAILURE_RET(GSERROR_INVALID_OPERATING);
    }

    if (buffer == nullptr) {
        BLOGN_FAILURE_RET(GSERROR_INVALID_ARGUMENTS);
    }

    std::lock_guard<std::mutex> lockGuard(mutex_);
    BufferElement ele = {
        .buffer = buffer,
        .state = BUFFER_STATE_ATTACHED,
        .config = {
            .width = buffer->GetWidth(),
            .height = buffer->GetHeight(),
            .strideAlignment = 0x8,
            .format = buffer->GetFormat(),
            .usage = buffer->GetUsage(),
            .timeout = 0,
        },
        .damage = {
            .w = ele.config.width,
            .h = ele.config.height,
        }
    };

    uint32_t sequence = buffer->GetSeqNum();
    int32_t usedSize = static_cast<int32_t>(GetUsedSize());
    int32_t queueSize = static_cast<int32_t>(GetQueueSize());
    if (usedSize >= queueSize) {
        int32_t freeSize = static_cast<int32_t>(dirtyList_.size() + freeList_.size());
        if (freeSize >= usedSize - queueSize + 1) {
            DeleteBuffers(usedSize - queueSize + 1);
            bufferQueueCache_[sequence] = ele;
            BLOGN_SUCCESS_ID(sequence, "release");
            return GSERROR_OK;
        } else {
            BLOGN_FAILURE_RET(GSERROR_OUT_OF_RANGE);
        }
    } else {
        bufferQueueCache_[sequence] = ele;
        BLOGN_SUCCESS_ID(sequence, "no release");
        return GSERROR_OK;
    }
}

GSError BufferQueue::DetachBuffer(sptr<SurfaceBuffer> &buffer)
{
    ScopedBytrace func(__func__);
    if (isShared_) {
        BLOGN_FAILURE_RET(GSERROR_INVALID_OPERATING);
    }

    if (buffer == nullptr) {
        BLOGN_FAILURE_RET(GSERROR_INVALID_ARGUMENTS);
    }

    std::lock_guard<std::mutex> lockGuard(mutex_);
    uint32_t sequence = buffer->GetSeqNum();
    if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
        BLOGN_FAILURE_ID(sequence, "not find in cache");
        return GSERROR_NO_ENTRY;
    }

    if (bufferQueueCache_[sequence].state == BUFFER_STATE_REQUESTED) {
        BLOGN_SUCCESS_ID(sequence, "requested");
    } else if (bufferQueueCache_[sequence].state == BUFFER_STATE_ACQUIRED) {
        BLOGN_SUCCESS_ID(sequence, "acquired");
    } else {
        BLOGN_FAILURE_ID_RET(sequence, GSERROR_NO_ENTRY);
    }
    if (onBufferDelete_ != nullptr) {
        onBufferDelete_(sequence);
    }
    bufferQueueCache_.erase(sequence);
    return GSERROR_OK;
}

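// Resize the queue within [1, SURFACE_MAX_QUEUE_SIZE]; shrinking deletes surplus buffers and
// growing wakes up producers blocked in RequestBuffer.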
GSError BufferQueue::SetQueueSize(uint32_t queueSize)
{
    if (isShared_ == true && queueSize != 1) {
        BLOGN_INVALID("shared queue, size must be 1");
        return GSERROR_INVALID_ARGUMENTS;
    }

    if (queueSize <= 0) {
        BLOGN_INVALID("queue size (%{public}d) <= 0", queueSize);
        return GSERROR_INVALID_ARGUMENTS;
    }

    if (queueSize > SURFACE_MAX_QUEUE_SIZE) {
        BLOGN_INVALID("invalid queueSize[%{public}d] > SURFACE_MAX_QUEUE_SIZE[%{public}d]",
            queueSize, SURFACE_MAX_QUEUE_SIZE);
        return GSERROR_INVALID_ARGUMENTS;
    }

    DeleteBuffers(queueSize_ - queueSize);

    // if increase the queue size, try to wakeup the blocked thread
    if (queueSize > queueSize_) {
        queueSize_ = queueSize;
        waitReqCon_.notify_all();
    } else {
        queueSize_ = queueSize;
    }

    BLOGN_SUCCESS("queue size: %{public}d, Queue id: %{public}" PRIu64, queueSize_, uniqueId_);
    return GSERROR_OK;
}

GSError BufferQueue::GetName(std::string &name)
{
    name = name_;
    return GSERROR_OK;
}

GSError BufferQueue::RegisterConsumerListener(sptr<IBufferConsumerListener> &listener)
{
    std::lock_guard<std::mutex> lockGuard(listenerMutex_);
    listener_ = listener;
    return GSERROR_OK;
}

GSError BufferQueue::RegisterConsumerListener(IBufferConsumerListenerClazz *listener)
{
    std::lock_guard<std::mutex> lockGuard(listenerMutex_);
    listenerClazz_ = listener;
    return GSERROR_OK;
}

GSError BufferQueue::UnregisterConsumerListener()
{
    std::lock_guard<std::mutex> lockGuard(listenerMutex_);
    listener_ = nullptr;
    listenerClazz_ = nullptr;
    return GSERROR_OK;
}

GSError BufferQueue::RegisterReleaseListener(OnReleaseFunc func)
{
    onBufferRelease = func;
    return GSERROR_OK;
}

GSError BufferQueue::RegisterDeleteBufferListener(OnDeleteBufferFunc func)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    if (onBufferDelete_ != nullptr) {
        return GSERROR_OK;
    }
    onBufferDelete_ = func;
    return GSERROR_OK;
}

GSError BufferQueue::SetDefaultWidthAndHeight(int32_t width, int32_t height)
{
    if (width <= 0) {
        BLOGN_INVALID("defaultWidth is greater than 0, now is %{public}d", width);
        return GSERROR_INVALID_ARGUMENTS;
    }

    if (height <= 0) {
        BLOGN_INVALID("defaultHeight is greater than 0, now is %{public}d", height);
        return GSERROR_INVALID_ARGUMENTS;
    }

    defaultWidth = width;
    defaultHeight = height;
    return GSERROR_OK;
}

int32_t BufferQueue::GetDefaultWidth()
{
    return defaultWidth;
}

int32_t BufferQueue::GetDefaultHeight()
{
    return defaultHeight;
}

GSError BufferQueue::SetDefaultUsage(uint32_t usage)
{
    defaultUsage = usage;
    return GSERROR_OK;
}

uint32_t BufferQueue::GetDefaultUsage()
{
    return defaultUsage;
}

void BufferQueue::ClearLocked()
{
    for (auto &[id, _] : bufferQueueCache_) {
        if (onBufferDelete_ != nullptr) {
            onBufferDelete_(id);
        }
    }
    bufferQueueCache_.clear();
    freeList_.clear();
    dirtyList_.clear();
    deletingList_.clear();
}

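// Notify the consumer that the surface is going to the background, then drop every cached
// buffer and wake any producer blocked in RequestBuffer.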
GSError BufferQueue::GoBackground()
{
    BLOGND("GoBackground, Queue id: %{public}" PRIu64, uniqueId_);
    {
        std::lock_guard<std::mutex> lockGuard(listenerMutex_);
        if (listener_ != nullptr) {
            ScopedBytrace bufferIPCSend("OnGoBackground");
            listener_->OnGoBackground();
        } else if (listenerClazz_ != nullptr) {
            ScopedBytrace bufferIPCSend("OnGoBackground");
            listenerClazz_->OnGoBackground();
        }
    }
    std::lock_guard<std::mutex> lockGuard(mutex_);
    ClearLocked();
    waitReqCon_.notify_all();
    SetProducerCacheCleanFlagLocked(false);
    return GSERROR_OK;
}

GSError BufferQueue::CleanCache()
{
    {
        std::lock_guard<std::mutex> lockGuard(listenerMutex_);
        if (listener_ != nullptr) {
            ScopedBytrace bufferIPCSend("OnCleanCache");
            listener_->OnCleanCache();
        } else if (listenerClazz_ != nullptr) {
            ScopedBytrace bufferIPCSend("OnCleanCache");
            listenerClazz_->OnCleanCache();
        }
    }
    std::lock_guard<std::mutex> lockGuard(mutex_);
    ClearLocked();
    waitReqCon_.notify_all();
    return GSERROR_OK;
}

GSError BufferQueue::OnConsumerDied()
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    ClearLocked();
    waitReqCon_.notify_all();
    return GSERROR_OK;
}

uint64_t BufferQueue::GetUniqueId() const
{
    return uniqueId_;
}

GSError BufferQueue::SetTransform(GraphicTransformType transform)
{
    transform_ = transform;
    return GSERROR_OK;
}

GraphicTransformType BufferQueue::GetTransform() const
{
    return transform_;
}

GSError BufferQueue::IsSupportedAlloc(const std::vector<BufferVerifyAllocInfo> &infos,
                                      std::vector<bool> &supporteds) const
{
    GSError ret = bufferManager_->IsSupportedAlloc(infos, supporteds);
    if (ret != GSERROR_OK) {
        BLOGN_FAILURE_API(IsSupportedAlloc, ret);
    }
    return ret;
}

GSError BufferQueue::SetScalingMode(uint32_t sequence, ScalingMode scalingMode)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
        BLOGN_FAILURE_ID(sequence, "not find in cache");
        return GSERROR_NO_ENTRY;
    }
    bufferQueueCache_[sequence].scalingMode = scalingMode;
    return GSERROR_OK;
}

GSError BufferQueue::GetScalingMode(uint32_t sequence, ScalingMode &scalingMode)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
        BLOGN_FAILURE_ID(sequence, "not find in cache");
        return GSERROR_NO_ENTRY;
    }
    scalingMode = bufferQueueCache_.at(sequence).scalingMode;
    return GSERROR_OK;
}

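// HDR metadata can be attached per buffer either as a GraphicHDRMetaData vector (SetMetaData)
// or as a key-tagged byte array (SetMetaDataSet); hdrMetaDataType records which form was stored.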
GSError BufferQueue::SetMetaData(uint32_t sequence, const std::vector<GraphicHDRMetaData> &metaData)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    if (metaData.size() == 0) {
        BLOGN_INVALID("metaData size is 0");
        return GSERROR_INVALID_ARGUMENTS;
    }
    if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
        BLOGN_FAILURE_ID(sequence, "not find in cache");
        return GSERROR_NO_ENTRY;
    }
    bufferQueueCache_[sequence].metaData.clear();
    bufferQueueCache_[sequence].metaData = metaData;
    bufferQueueCache_[sequence].hdrMetaDataType = HDRMetaDataType::HDR_META_DATA;
    return GSERROR_OK;
}

GSError BufferQueue::SetMetaDataSet(uint32_t sequence, GraphicHDRMetadataKey key,
                                    const std::vector<uint8_t> &metaData)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    if (key < GraphicHDRMetadataKey::GRAPHIC_MATAKEY_RED_PRIMARY_X ||
        key > GraphicHDRMetadataKey::GRAPHIC_MATAKEY_HDR_VIVID) {
        BLOGN_INVALID("key [%{public}d, %{public}d), now is %{public}d",
                      GraphicHDRMetadataKey::GRAPHIC_MATAKEY_RED_PRIMARY_X,
                      GraphicHDRMetadataKey::GRAPHIC_MATAKEY_HDR_VIVID, key);
        return GSERROR_INVALID_ARGUMENTS;
    }
    if (metaData.size() == 0) {
        BLOGN_INVALID("metaData size is 0");
        return GSERROR_INVALID_ARGUMENTS;
    }
    if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
        BLOGN_FAILURE_ID(sequence, "not find in cache");
        return GSERROR_NO_ENTRY;
    }
    bufferQueueCache_[sequence].metaDataSet.clear();
    bufferQueueCache_[sequence].key = key;
    bufferQueueCache_[sequence].metaDataSet = metaData;
    bufferQueueCache_[sequence].hdrMetaDataType = HDRMetaDataType::HDR_META_DATA_SET;
    return GSERROR_OK;
}

GSError BufferQueue::QueryMetaDataType(uint32_t sequence, HDRMetaDataType &type)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
        BLOGN_FAILURE_ID(sequence, "not find in cache");
        return GSERROR_NO_ENTRY;
    }
    type = bufferQueueCache_.at(sequence).hdrMetaDataType;
    return GSERROR_OK;
}

GSError BufferQueue::GetMetaData(uint32_t sequence, std::vector<GraphicHDRMetaData> &metaData)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
        BLOGN_FAILURE_ID(sequence, "not find in cache");
        return GSERROR_NO_ENTRY;
    }
    metaData.clear();
    metaData = bufferQueueCache_.at(sequence).metaData;
    return GSERROR_OK;
}

GSError BufferQueue::GetMetaDataSet(uint32_t sequence, GraphicHDRMetadataKey &key,
                                    std::vector<uint8_t> &metaData)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
        BLOGN_FAILURE_ID(sequence, "not find in cache");
        return GSERROR_NO_ENTRY;
    }
    metaData.clear();
    key = bufferQueueCache_.at(sequence).key;
    metaData = bufferQueueCache_.at(sequence).metaDataSet;
    return GSERROR_OK;
}

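// Store a new tunnel handle only when it differs from the current one, and notify the consumer
// of the change.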
GSError BufferQueue::SetTunnelHandle(const sptr<SurfaceTunnelHandle> &handle)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    bool tunnelHandleChange = false;
    if (tunnelHandle_ == nullptr) {
        if (handle == nullptr) {
            BLOGN_INVALID("tunnel handle is nullptr");
            return GSERROR_INVALID_ARGUMENTS;
        }
        tunnelHandleChange = true;
    } else {
        tunnelHandleChange = tunnelHandle_->Different(handle);
    }
    if (!tunnelHandleChange) {
        BLOGNW("same tunnel handle, please check");
        return GSERROR_NO_ENTRY;
    }
    tunnelHandle_ = handle;
    {
        std::lock_guard<std::mutex> lockGuard(listenerMutex_);
        if (listener_ != nullptr) {
            ScopedBytrace bufferIPCSend("OnTunnelHandleChange");
            listener_->OnTunnelHandleChange();
        } else if (listenerClazz_ != nullptr) {
            ScopedBytrace bufferIPCSend("OnTunnelHandleChange");
            listenerClazz_->OnTunnelHandleChange();
        } else {
            return GSERROR_NO_CONSUMER;
        }
    }
    return GSERROR_OK;
}

sptr<SurfaceTunnelHandle> BufferQueue::GetTunnelHandle()
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    return tunnelHandle_;
}

GSError BufferQueue::SetPresentTimestamp(uint32_t sequence, const GraphicPresentTimestamp &timestamp)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
        BLOGN_FAILURE_ID(sequence, "not find in cache");
        return GSERROR_NO_ENTRY;
    }
    bufferQueueCache_[sequence].presentTimestamp = timestamp;
    return GSERROR_OK;
}

GSError BufferQueue::GetPresentTimestamp(uint32_t sequence, GraphicPresentTimestampType type, int64_t &time)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
        BLOGN_FAILURE_ID(sequence, "not find in cache");
        return GSERROR_NO_ENTRY;
    }
    if (type != bufferQueueCache_.at(sequence).presentTimestamp.type) {
        BLOGN_FAILURE_ID(sequence, "PresentTimestampType [%{public}d] is not supported, the supported type "\
        "is [%{public}d]", type, bufferQueueCache_.at(sequence).presentTimestamp.type);
        return GSERROR_NO_ENTRY;
    }
    switch (type) {
        case GraphicPresentTimestampType::GRAPHIC_DISPLAY_PTS_DELAY: {
            time = bufferQueueCache_.at(sequence).presentTimestamp.time;
            return GSERROR_OK;
        }
        case GraphicPresentTimestampType::GRAPHIC_DISPLAY_PTS_TIMESTAMP: {
            time = bufferQueueCache_.at(sequence).presentTimestamp.time - bufferQueueCache_.at(sequence).timestamp;
            return GSERROR_OK;
        }
        default: {
            BLOGN_FAILURE_ID(sequence, "unsupported type!");
            return GSERROR_TYPE_ERROR;
        }
    }
}

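// Append one line per cached buffer (sequence, state, timestamp, damage, config, memory size)
// to the dump string.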
void BufferQueue::DumpCache(std::string &result)
{
    for (auto it = bufferQueueCache_.begin(); it != bufferQueueCache_.end(); it++) {
        BufferElement element = it->second;
        if (BufferStateStrs.find(element.state) != BufferStateStrs.end()) {
            result += "        sequence = " + std::to_string(it->first) +
                ", state = " + BufferStateStrs.at(element.state) +
                ", timestamp = " + std::to_string(element.timestamp);
        }
        result += ", damageRect = [" + std::to_string(element.damage.x) + ", " +
            std::to_string(element.damage.y) + ", " +
            std::to_string(element.damage.w) + ", " +
            std::to_string(element.damage.h) + "],";
        result += " config = [" + std::to_string(element.config.width) + "x" +
            std::to_string(element.config.height) + ", " +
            std::to_string(element.config.strideAlignment) + ", " +
            std::to_string(element.config.format) + ", " +
            std::to_string(element.config.usage) + ", " +
            std::to_string(element.config.timeout) + "],";

        double bufferMemSize = 0;
        if (element.buffer != nullptr) {
            result += " bufferWidth = " + std::to_string(element.buffer->GetWidth()) +
                    ", bufferHeight = " + std::to_string(element.buffer->GetHeight());
            bufferMemSize = static_cast<double>(element.buffer->GetSize()) / BUFFER_MEMSIZE_RATE;
        }
        std::ostringstream ss;
        ss.precision(BUFFER_MEMSIZE_FORMAT);
        ss.setf(std::ios::fixed);
        ss << bufferMemSize;
        std::string str = ss.str();
        result += ", bufferMemSize = " + str + "(KiB).\n";
    }
}

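// Dump queue statistics. allSurfacesMemSize is a static accumulator shared by all queues in the
// process: each Dump call adds its own memory size, and a result string ending with the
// "dumpend" marker makes this call replace the marker with the accumulated total and reset it.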
void BufferQueue::Dump(std::string &result)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    std::ostringstream ss;
    ss.precision(BUFFER_MEMSIZE_FORMAT);
    ss.setf(std::ios::fixed);
    static double allSurfacesMemSize = 0;
    uint32_t totalBufferListSize = 0;
    double memSizeInKB = 0;

    for (auto it = bufferQueueCache_.begin(); it != bufferQueueCache_.end(); it++) {
        BufferElement element = it->second;
        if (element.buffer != nullptr) {
            totalBufferListSize += element.buffer->GetSize();
        }
    }
    memSizeInKB = static_cast<double>(totalBufferListSize) / BUFFER_MEMSIZE_RATE;

    allSurfacesMemSize += memSizeInKB;
    uint32_t resultLen = result.size();
    std::string dumpEndFlag = "dumpend";
    std::string dumpEndIn(result, resultLen - dumpEndFlag.size(), resultLen - 1);
    if (dumpEndIn == dumpEndFlag) {
        ss << allSurfacesMemSize;
        std::string dumpEndStr = ss.str();
        result.erase(resultLen - dumpEndFlag.size(), resultLen - 1);
        result += dumpEndStr + " KiB.\n";
        allSurfacesMemSize = 0;
        return;
    }

    ss.str("");
    ss << memSizeInKB;
    std::string str = ss.str();
    result.append("    BufferQueue:\n");
    result += "      default-size = [" + std::to_string(defaultWidth) + "x" + std::to_string(defaultHeight) + "]" +
        ", FIFO = " + std::to_string(queueSize_) +
        ", name = " + name_ +
        ", uniqueId = " + std::to_string(uniqueId_) +
        ", usedBufferListLen = " + std::to_string(GetUsedSize()) +
        ", freeBufferListLen = " + std::to_string(freeList_.size()) +
        ", dirtyBufferListLen = " + std::to_string(dirtyList_.size()) +
        ", totalBuffersMemSize = " + str + "(KiB).\n";

    result.append("      bufferQueueCache:\n");
    DumpCache(result);
}

bool BufferQueue::GetStatus() const
{
    return isValidStatus_;
}

void BufferQueue::SetStatus(bool status)
{
    isValidStatus_ = status;
    waitReqCon_.notify_all();
}
}; // namespace OHOS