• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2021 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #include "buffer_queue.h"
17 
18 #include <algorithm>
19 #include <fstream>
20 #include <sstream>
21 #include <sys/time.h>
22 #include <cinttypes>
23 #include <unistd.h>
24 
25 #include <display_type.h>
26 #include <scoped_bytrace.h>
27 
28 #include "buffer_log.h"
29 #include "buffer_manager.h"
30 
31 namespace OHOS {
32 namespace {
33 constexpr uint32_t UNIQUE_ID_OFFSET = 32;
34 }
35 
// Human-readable names for BufferState values; used by DumpCache() when
// rendering the per-buffer state into a dump string.
static const std::map<BufferState, std::string> BufferStateStrs = {
    {BUFFER_STATE_RELEASED,                    "0 <released>"},
    {BUFFER_STATE_REQUESTED,                   "1 <requested>"},
    {BUFFER_STATE_FLUSHED,                     "2 <flushed>"},
    {BUFFER_STATE_ACQUIRED,                    "3 <acquired>"},
};
42 
GetUniqueIdImpl()43 static uint64_t GetUniqueIdImpl()
44 {
45     static std::atomic<uint32_t> counter { 0 };
46     static uint64_t id = static_cast<uint64_t>(::getpid()) << UNIQUE_ID_OFFSET;
47     return id | counter++;
48 }
49 
BufferQueue(const std::string & name,bool isShared)50 BufferQueue::BufferQueue(const std::string &name, bool isShared)
51     : name_(name), uniqueId_(GetUniqueIdImpl()), isShared_(isShared)
52 {
53     BLOGNI("ctor, Queue id: %{public}" PRIu64 "", uniqueId_);
54     bufferManager_ = BufferManager::GetInstance();
55     if (isShared_ == true) {
56         queueSize_ = 1;
57     }
58 }
59 
~BufferQueue()60 BufferQueue::~BufferQueue()
61 {
62     BLOGNI("dtor, Queue id: %{public}" PRIu64 "", uniqueId_);
63     std::lock_guard<std::mutex> lockGuard(mutex_);
64     for (auto it = bufferQueueCache_.begin(); it != bufferQueueCache_.end(); it++) {
65         FreeBuffer(it->second.buffer);
66         if (it->second.fence > 0) {
67             close(it->second.fence);
68         }
69     }
70 }
71 
// No deferred initialization is required; present for interface completeness.
GSError BufferQueue::Init()
{
    return GSERROR_OK;
}
76 
GetUsedSize()77 uint32_t BufferQueue::GetUsedSize()
78 {
79     uint32_t used_size = bufferQueueCache_.size();
80     return used_size;
81 }
82 
PopFromFreeList(sptr<SurfaceBufferImpl> & buffer,const BufferRequestConfig & config)83 GSError BufferQueue::PopFromFreeList(sptr<SurfaceBufferImpl> &buffer,
84     const BufferRequestConfig &config)
85 {
86     if (isShared_ == true && GetUsedSize() > 0) {
87         buffer = bufferQueueCache_.begin()->second.buffer;
88         return GSERROR_OK;
89     }
90 
91     for (auto it = freeList_.begin(); it != freeList_.end(); it++) {
92         if (bufferQueueCache_[*it].config == config) {
93             buffer = bufferQueueCache_[*it].buffer;
94             freeList_.erase(it);
95             return GSERROR_OK;
96         }
97     }
98 
99     if (freeList_.empty()) {
100         buffer = nullptr;
101         return GSERROR_NO_BUFFER;
102     }
103 
104     buffer = bufferQueueCache_[freeList_.front()].buffer;
105     freeList_.pop_front();
106     return GSERROR_OK;
107 }
108 
PopFromDirtyList(sptr<SurfaceBufferImpl> & buffer)109 GSError BufferQueue::PopFromDirtyList(sptr<SurfaceBufferImpl> &buffer)
110 {
111     if (isShared_ == true && GetUsedSize() > 0) {
112         buffer = bufferQueueCache_.begin()->second.buffer;
113         return GSERROR_OK;
114     }
115 
116     if (!dirtyList_.empty()) {
117         buffer = bufferQueueCache_[dirtyList_.front()].buffer;
118         dirtyList_.pop_front();
119         return GSERROR_OK;
120     } else {
121         buffer = nullptr;
122         return GSERROR_NO_BUFFER;
123     }
124 }
125 
// Validates a producer's request: positive dimensions, a stride alignment in
// [SURFACE_MIN_STRIDE_ALIGNMENT, SURFACE_MAX_STRIDE_ALIGNMENT] that is a power
// of two, and format/color-gamut values within range. Returns
// GSERROR_INVALID_ARGUMENTS on the first violation, GSERROR_OK otherwise.
GSError BufferQueue::CheckRequestConfig(const BufferRequestConfig &config)
{
    if (config.width <= 0) {
        BLOGN_INVALID("config.width is greater than 0, now is %{public}d", config.width);
        return GSERROR_INVALID_ARGUMENTS;
    }

    if (config.height <= 0) {
        BLOGN_INVALID("config.height is greater than 0, now is %{public}d", config.height);
        return GSERROR_INVALID_ARGUMENTS;
    }

    uint32_t align = config.strideAlignment;
    bool isValidStrideAlignment = true;
    isValidStrideAlignment = isValidStrideAlignment && (SURFACE_MIN_STRIDE_ALIGNMENT <= align);
    isValidStrideAlignment = isValidStrideAlignment && (SURFACE_MAX_STRIDE_ALIGNMENT >= align);
    if (!isValidStrideAlignment) {
        BLOGN_INVALID("config.strideAlignment [%{public}d, %{public}d], now is %{public}d",
                      SURFACE_MIN_STRIDE_ALIGNMENT, SURFACE_MAX_STRIDE_ALIGNMENT, align);
        return GSERROR_INVALID_ARGUMENTS;
    }

    // Power-of-two check: a power of two has no bits in common with (itself - 1).
    if (align & (align - 1)) {
        BLOGN_INVALID("config.strideAlignment is not power of 2 like 4, 8, 16, 32; now is %{public}d", align);
        return GSERROR_INVALID_ARGUMENTS;
    }

    // NOTE(review): the log claims [0, PIXEL_FMT_BUTT], but "BUTT" enumerators
    // are conventionally exclusive upper bounds — confirm whether `>=` was intended.
    if (config.format < 0 || config.format > PIXEL_FMT_BUTT) {
        BLOGN_INVALID("config.format [0, %{public}d], now is %{public}d", PIXEL_FMT_BUTT, config.format);
        return GSERROR_INVALID_ARGUMENTS;
    }

    // NOTE(review): the upper bound admits COLOR_GAMUT_DISPLAY_BT2020 + 1 while
    // the log reports BT2020 as the maximum — confirm the "+ 1" is intentional.
    if (config.colorGamut <= SurfaceColorGamut::COLOR_GAMUT_INVALID ||
        config.colorGamut > SurfaceColorGamut::COLOR_GAMUT_DISPLAY_BT2020 + 1) {
        BLOGN_INVALID("config.colorGamut [0, %{public}d], now is %{public}d",
            static_cast<uint32_t>(SurfaceColorGamut::COLOR_GAMUT_DISPLAY_BT2020),
            static_cast<uint32_t>(config.colorGamut));
        return GSERROR_INVALID_ARGUMENTS;
    }

    return GSERROR_OK;
}
168 
CheckFlushConfig(const BufferFlushConfig & config)169 GSError BufferQueue::CheckFlushConfig(const BufferFlushConfig &config)
170 {
171     if (config.damage.w < 0) {
172         BLOGN_INVALID("config.damage.w >= 0, now is %{public}d", config.damage.w);
173         return GSERROR_INVALID_ARGUMENTS;
174     }
175     if (config.damage.h < 0) {
176         BLOGN_INVALID("config.damage.h >= 0, now is %{public}d", config.damage.h);
177         return GSERROR_INVALID_ARGUMENTS;
178     }
179     return GSERROR_OK;
180 }
181 
// Hands a buffer to the producer. Order of attempts: reuse a free buffer;
// if the queue is full, wait up to config.timeout ms for a release and retry;
// otherwise allocate a new buffer. On success retval carries the sequence
// number, the buffer (may be set to nullptr by ReuseBuffer on a cache hit),
// the release fence, and sequences deleted since the last request.
// Fails with GSERROR_NO_CONSUMER when no listener is registered.
GSError BufferQueue::RequestBuffer(const BufferRequestConfig &config, BufferExtraData &bedata,
    struct IBufferProducer::RequestBufferReturnValue &retval)
{
    ScopedBytrace func(__func__);
    if (listener_ == nullptr && listenerClazz_ == nullptr) {
        BLOGN_FAILURE_RET(GSERROR_NO_CONSUMER);
    }

    // check param
    GSError ret = CheckRequestConfig(config);
    if (ret != GSERROR_OK) {
        BLOGN_FAILURE_API(CheckRequestConfig, ret);
        return ret;
    }

    std::unique_lock<std::mutex> lock(mutex_);
    // dequeue from free list
    sptr<SurfaceBufferImpl> bufferImpl = SurfaceBufferImpl::FromBase(retval.buffer);
    ret = PopFromFreeList(bufferImpl, config);
    if (ret == GSERROR_OK) {
        retval.buffer = bufferImpl;
        return ReuseBuffer(config, bedata, retval);
    }

    // check queue size
    if (GetUsedSize() >= GetQueueSize()) {
        // Queue full: block (bounded by config.timeout) until a buffer is
        // released or a slot opens, then retry the free list once.
        waitReqCon_.wait_for(lock, std::chrono::milliseconds(config.timeout),
            [this]() { return !freeList_.empty() || (GetUsedSize() < GetQueueSize()); });
        // try dequeue from free list again
        ret = PopFromFreeList(bufferImpl, config);
        if (ret == GSERROR_OK) {
            retval.buffer = bufferImpl;
            return ReuseBuffer(config, bedata, retval);
        } else if (GetUsedSize() >= GetQueueSize()) {
            BLOGN_FAILURE("all buffer are using, Queue id: %{public}" PRIu64 "", uniqueId_);
            return GSERROR_NO_BUFFER;
        }
    }

    // Room in the queue but nothing reusable: allocate and map a new buffer.
    ret = AllocBuffer(bufferImpl, config);
    if (ret == GSERROR_OK) {
        retval.sequence = bufferImpl->GetSeqNum();

        bufferImpl->GetExtraData(bedata);
        retval.buffer = bufferImpl;
        retval.fence = -1;  // a brand-new buffer has no release fence yet
        BLOGD("Success alloc Buffer id: %{public}d Queue id: %{public}" PRIu64 "", retval.sequence, uniqueId_);
    } else {
        BLOGE("Fail to alloc or map buffer ret: %{public}d, Queue id: %{public}" PRIu64 "", ret, uniqueId_);
    }

    return ret;
}
235 
// Prepares a cached buffer for reuse by the producer. If the requested config
// differs from the cached one, the buffer is reallocated (forbidden in shared
// mode). Also drains deletingList_ into retval.deletingBuffers. Called from
// RequestBuffer with mutex_ already held.
GSError BufferQueue::ReuseBuffer(const BufferRequestConfig &config, BufferExtraData &bedata,
    struct IBufferProducer::RequestBufferReturnValue &retval)
{
    ScopedBytrace func(__func__);
    sptr<SurfaceBufferImpl> bufferImpl = SurfaceBufferImpl::FromBase(retval.buffer);
    retval.sequence = bufferImpl->GetSeqNum();
    bool needRealloc = (config != bufferQueueCache_[retval.sequence].config);
    // config, realloc
    if (needRealloc) {
        if (isShared_) {
            BLOGN_FAILURE_RET(GSERROR_INVALID_ARGUMENTS);
        }
        DeleteBufferInCache(retval.sequence);

        auto sret = AllocBuffer(bufferImpl, config);
        if (sret != GSERROR_OK) {
            BLOGN_FAILURE("realloc failed");
            return sret;
        }

        // AllocBuffer produced a new buffer with a new sequence number.
        retval.buffer = bufferImpl;
        retval.sequence = bufferImpl->GetSeqNum();
        bufferQueueCache_[retval.sequence].config = config;
    }

    bufferQueueCache_[retval.sequence].state = BUFFER_STATE_REQUESTED;
    retval.fence = bufferQueueCache_[retval.sequence].fence;

    // Prevent releasefence from being repeatedly closed after cancel.
    bufferQueueCache_[retval.sequence].fence = -1;
    bufferImpl->GetExtraData(bedata);

    // Tell the producer which sequences were deleted so it can drop any
    // remote references it still holds.
    auto &dbs = retval.deletingBuffers;
    dbs.insert(dbs.end(), deletingList_.begin(), deletingList_.end());
    deletingList_.clear();

    if (needRealloc) {
        BLOGD("RequestBuffer Success realloc Buffer with new config id: %{public}d Queue id: %{public}" PRIu64 "",
            retval.sequence, uniqueId_);
    } else {
        BLOGD("RequestBuffer Success Buffer in cache id: %{public}d Queue id: %{public}" PRIu64 "",
            retval.sequence, uniqueId_);
        // Cache hit with unchanged config: clearing the buffer pointer
        // presumably signals the remote side to keep using its own cached
        // copy — confirm against the IPC layer.
        retval.buffer = nullptr;
    }

    return GSERROR_OK;
}
283 
CancelBuffer(int32_t sequence,const BufferExtraData & bedata)284 GSError BufferQueue::CancelBuffer(int32_t sequence, const BufferExtraData &bedata)
285 {
286     ScopedBytrace func(__func__);
287     if (isShared_) {
288         BLOGN_FAILURE_RET(GSERROR_INVALID_OPERATING);
289     }
290     std::lock_guard<std::mutex> lockGuard(mutex_);
291 
292     if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
293         BLOGN_FAILURE_ID(sequence, "not found in cache");
294         return GSERROR_NO_ENTRY;
295     }
296 
297     if (bufferQueueCache_[sequence].state != BUFFER_STATE_REQUESTED) {
298         BLOGN_FAILURE_ID(sequence, "state is not BUFFER_STATE_REQUESTED");
299         return GSERROR_INVALID_OPERATING;
300     }
301     bufferQueueCache_[sequence].state = BUFFER_STATE_RELEASED;
302     freeList_.push_back(sequence);
303     bufferQueueCache_[sequence].buffer->SetExtraData(bedata);
304 
305     waitReqCon_.notify_all();
306     BLOGD("Success Buffer id: %{public}d Queue id: %{public}" PRIu64 "", sequence, uniqueId_);
307 
308     return GSERROR_OK;
309 }
310 
FlushBuffer(int32_t sequence,const BufferExtraData & bedata,int32_t fence,const BufferFlushConfig & config)311 GSError BufferQueue::FlushBuffer(int32_t sequence, const BufferExtraData &bedata,
312     int32_t fence, const BufferFlushConfig &config)
313 {
314     ScopedBytrace func(__func__);
315     // check param
316     auto sret = CheckFlushConfig(config);
317     if (sret != GSERROR_OK) {
318         BLOGN_FAILURE_API(CheckFlushConfig, sret);
319         return sret;
320     }
321 
322     {
323         std::lock_guard<std::mutex> lockGuard(mutex_);
324         if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
325             BLOGN_FAILURE_ID(sequence, "not found in cache");
326             return GSERROR_NO_ENTRY;
327         }
328 
329         if (isShared_ == false) {
330             auto &state = bufferQueueCache_[sequence].state;
331             if (state != BUFFER_STATE_REQUESTED && state != BUFFER_STATE_ATTACHED) {
332                 BLOGN_FAILURE_ID(sequence, "invalid state %{public}d", state);
333                 return GSERROR_NO_ENTRY;
334             }
335         }
336     }
337 
338     if (listener_ == nullptr && listenerClazz_ == nullptr) {
339         CancelBuffer(sequence, bedata);
340         return GSERROR_NO_CONSUMER;
341     }
342 
343     ScopedBytrace bufferIPCSend("BufferIPCSend");
344     sret = DoFlushBuffer(sequence, bedata, fence, config);
345     if (sret != GSERROR_OK) {
346         return sret;
347     }
348     BLOGD("Success Buffer id: %{public}d Queue id: %{public}" PRIu64 "", sequence, uniqueId_);
349 
350     if (sret == GSERROR_OK) {
351         BLOGN_SUCCESS_ID(sequence, "OnBufferAvailable Start");
352         if (listener_ != nullptr) {
353             ScopedBytrace bufferIPCSend("OnBufferAvailable");
354             listener_->OnBufferAvailable();
355         } else if (listenerClazz_ != nullptr) {
356             ScopedBytrace bufferIPCSend("OnBufferAvailable");
357             listenerClazz_->OnBufferAvailable();
358         }
359         BLOGN_SUCCESS_ID(sequence, "OnBufferAvailable End");
360     }
361     return sret;
362 }
363 
DumpToFile(int32_t sequence)364 void BufferQueue::DumpToFile(int32_t sequence)
365 {
366     if (access("/data/bq_dump", F_OK) == -1) {
367         return;
368     }
369 
370     ScopedBytrace func(__func__);
371     struct timeval now;
372     gettimeofday(&now, nullptr);
373     constexpr int secToUsec = 1000 * 1000;
374     int64_t nowVal = (int64_t)now.tv_sec * secToUsec + (int64_t)now.tv_usec;
375 
376     std::stringstream ss;
377     ss << "/data/bq_" << getpid() << "_" << name_ << "_" << nowVal << ".raw";
378 
379     sptr<SurfaceBufferImpl> &buffer = bufferQueueCache_[sequence].buffer;
380     std::ofstream rawDataFile(ss.str(), std::ofstream::binary);
381     if (!rawDataFile.good()) {
382         BLOGE("open failed: (%{public}d)%{public}s", errno, strerror(errno));
383         return;
384     }
385     rawDataFile.write(static_cast<const char *>(buffer->GetVirAddr()), buffer->GetSize());
386     rawDataFile.close();
387 }
388 
// Performs the actual flush under the queue lock: moves the buffer to the
// dirty list in FLUSHED state, records extra data, fence and damage, flushes
// the CPU cache for CPU-written buffers, and stamps the buffer with either
// the caller's timestamp or the current time. If the buffer was marked for
// deletion it is deleted instead.
GSError BufferQueue::DoFlushBuffer(int32_t sequence, const BufferExtraData &bedata,
    int32_t fence, const BufferFlushConfig &config)
{
    ScopedBytrace func(__func__);
    std::lock_guard<std::mutex> lockGuard(mutex_);
    // NOTE(review): on this path the incoming `fence` fd is never stored or
    // closed — possible fd leak; confirm fence ownership with callers.
    if (bufferQueueCache_[sequence].isDeleting) {
        DeleteBufferInCache(sequence);
        BLOGN_SUCCESS_ID(sequence, "delete");
        return GSERROR_OK;
    }

    bufferQueueCache_[sequence].state = BUFFER_STATE_FLUSHED;
    dirtyList_.push_back(sequence);
    bufferQueueCache_[sequence].buffer->SetExtraData(bedata);
    bufferQueueCache_[sequence].fence = fence;
    bufferQueueCache_[sequence].damage = config.damage;

    uint32_t usage = static_cast<uint32_t>(bufferQueueCache_[sequence].config.usage);
    if (usage & HBM_USE_CPU_WRITE) {
        // api flush
        auto sret = bufferManager_->FlushCache(bufferQueueCache_[sequence].buffer);
        if (sret != GSERROR_OK) {
            BLOGN_FAILURE_ID_API(sequence, FlushCache, sret);
            return sret;
        }
    }

    // A zero timestamp means "stamp with the current time in microseconds".
    if (config.timestamp == 0) {
        struct timeval tv = {};
        gettimeofday(&tv, nullptr);
        constexpr int32_t secToUsec = 1000000;
        bufferQueueCache_[sequence].timestamp = (int64_t)tv.tv_usec + (int64_t)tv.tv_sec * secToUsec;
    } else {
        bufferQueueCache_[sequence].timestamp = config.timestamp;
    }

    DumpToFile(sequence);
    return GSERROR_OK;
}
428 
AcquireBuffer(sptr<SurfaceBufferImpl> & buffer,int32_t & fence,int64_t & timestamp,Rect & damage)429 GSError BufferQueue::AcquireBuffer(sptr<SurfaceBufferImpl> &buffer,
430     int32_t &fence, int64_t &timestamp, Rect &damage)
431 {
432     ScopedBytrace func(__func__);
433     // dequeue from dirty list
434     std::lock_guard<std::mutex> lockGuard(mutex_);
435     GSError ret = PopFromDirtyList(buffer);
436     if (ret == GSERROR_OK) {
437         int32_t sequence = buffer->GetSeqNum();
438         if (isShared_ == false && bufferQueueCache_[sequence].state != BUFFER_STATE_FLUSHED) {
439             BLOGNW("Warning [%{public}d], Reason: state is not BUFFER_STATE_FLUSHED", sequence);
440         }
441         bufferQueueCache_[sequence].state = BUFFER_STATE_ACQUIRED;
442 
443         fence = bufferQueueCache_[sequence].fence;
444         bufferQueueCache_[sequence].fence = -1;
445         timestamp = bufferQueueCache_[sequence].timestamp;
446         damage = bufferQueueCache_[sequence].damage;
447 
448     BLOGD("Success Buffer id: %{public}d Queue id: %{public}" PRIu64 "", sequence, uniqueId_);
449     } else if (ret == GSERROR_NO_BUFFER) {
450         BLOGN_FAILURE("there is no dirty buffer");
451     }
452 
453     return ret;
454 }
455 
// Consumer side: returns an acquired buffer to the queue with its release
// fence. If an onBufferRelease callback is registered and succeeds, the
// function returns early WITHOUT moving the buffer to the free list (the
// callback apparently takes over the buffer). Otherwise the buffer is either
// deleted (if flagged) or pushed to the free list, and waiting producers are
// woken.
GSError BufferQueue::ReleaseBuffer(sptr<SurfaceBufferImpl> &buffer, int32_t fence)
{
    ScopedBytrace func(__func__);
    int32_t sequence = buffer->GetSeqNum();
    {
        std::lock_guard<std::mutex> lockGuard(mutex_);
        if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
            BLOGN_FAILURE_ID(sequence, "not find in cache");
            return GSERROR_NO_ENTRY;
        }

        if (isShared_ == false) {
            auto &state = bufferQueueCache_[sequence].state;
            if (state != BUFFER_STATE_ACQUIRED && state != BUFFER_STATE_ATTACHED) {
                BLOGN_FAILURE_ID(sequence, "invalid state");
                return GSERROR_NO_ENTRY;
            }
        }
    }

    // Callback runs without holding mutex_ (it may call back into the queue).
    if (onBufferRelease != nullptr) {
        ScopedBytrace func("OnBufferRelease");
        sptr<SurfaceBuffer> buf = buffer;
        BLOGNI("onBufferRelease start");
        auto sret = onBufferRelease(buf);
        BLOGNI("onBufferRelease end return %{public}s", GSErrorStr(sret).c_str());

        // NOTE(review): on success the buffer is NOT returned to the free
        // list and the fence fd is dropped unclosed — confirm this handoff
        // is intended (original author left the same question below).
        if (sret == GSERROR_OK) {   // need to check why directly return?
            return sret;
        }
    }

    std::lock_guard<std::mutex> lockGuard(mutex_);
    bufferQueueCache_[sequence].state = BUFFER_STATE_RELEASED;
    bufferQueueCache_[sequence].fence = fence;

    if (bufferQueueCache_[sequence].isDeleting) {
        DeleteBufferInCache(sequence);
        BLOGD("Success delete Buffer id: %{public}d Queue id: %{public}" PRIu64 " in cache", sequence, uniqueId_);
    } else {
        freeList_.push_back(sequence);
        BLOGD("Success push Buffer id: %{public}d Queue id: %{public}" PRIu64 " to free list", sequence, uniqueId_);
    }
    waitReqCon_.notify_all();
    return GSERROR_OK;
}
502 
AllocBuffer(sptr<SurfaceBufferImpl> & buffer,const BufferRequestConfig & config)503 GSError BufferQueue::AllocBuffer(sptr<SurfaceBufferImpl> &buffer,
504     const BufferRequestConfig &config)
505 {
506     ScopedBytrace func(__func__);
507     buffer = new SurfaceBufferImpl();
508     int32_t sequence = buffer->GetSeqNum();
509 
510     GSError ret = bufferManager_->Alloc(config, buffer);
511     if (ret != GSERROR_OK) {
512         BLOGN_FAILURE_ID_API(sequence, Alloc, ret);
513         return ret;
514     }
515 
516     BufferElement ele = {
517         .buffer = buffer,
518         .state = BUFFER_STATE_REQUESTED,
519         .isDeleting = false,
520         .config = config,
521         .fence = -1
522     };
523 
524     ret = bufferManager_->Map(buffer);
525     if (ret == GSERROR_OK) {
526         BLOGN_SUCCESS_ID(sequence, "Map");
527         bufferQueueCache_[sequence] = ele;
528         return GSERROR_OK;
529     }
530 
531     GSError freeRet = bufferManager_->Free(buffer);
532     if (freeRet != GSERROR_OK) {
533         BLOGN_FAILURE_ID(sequence, "Map failed, Free failed");
534     } else {
535         BLOGN_FAILURE_ID(sequence, "Map failed, Free success");
536     }
537 
538     return ret;
539 }
540 
// Releases the resources backing a buffer: drops its EGL data, unmaps it and
// frees the underlying memory. Unmap/Free results are ignored; always OK.
GSError BufferQueue::FreeBuffer(sptr<SurfaceBufferImpl> &buffer)
{
    BLOGND("Free [%{public}d]", buffer->GetSeqNum());
    buffer->SetEglData(nullptr);
    bufferManager_->Unmap(buffer);
    bufferManager_->Free(buffer);
    return GSERROR_OK;
}
549 
DeleteBufferInCache(int32_t sequence)550 void BufferQueue::DeleteBufferInCache(int32_t sequence)
551 {
552     auto it = bufferQueueCache_.find(sequence);
553     if (it != bufferQueueCache_.end()) {
554         FreeBuffer(it->second.buffer);
555         if (it->second.fence > 0) {
556             close(it->second.fence);
557         }
558         bufferQueueCache_.erase(it);
559         deletingList_.push_back(sequence);
560     }
561 }
562 
// Maximum number of buffers this queue may hold (1 in shared mode).
uint32_t BufferQueue::GetQueueSize()
{
    return queueSize_;
}
567 
// Deletes up to `count` buffers, preferring free ones, then dirty ones; any
// remainder is marked isDeleting so it is reclaimed when flushed/released
// (acquired buffers are freed immediately). Non-positive counts are ignored.
// NOTE(review): locks mutex_ directly, yet AttachBuffer calls this while
// already holding mutex_ — std::mutex is non-recursive; verify this path.
void BufferQueue::DeleteBuffers(int32_t count)
{
    ScopedBytrace func(__func__);
    if (count <= 0) {
        return;
    }

    std::lock_guard<std::mutex> lockGuard(mutex_);
    while (!freeList_.empty()) {
        DeleteBufferInCache(freeList_.front());
        freeList_.pop_front();
        count--;
        if (count <= 0) {
            return;
        }
    }

    while (!dirtyList_.empty()) {
        DeleteBufferInCache(dirtyList_.front());
        dirtyList_.pop_front();
        count--;
        if (count <= 0) {
            return;
        }
    }

    // Remaining deletions are deferred: flag entries for deletion; buffers
    // currently held by the consumer (ACQUIRED) have their memory freed now.
    for (auto&& ele : bufferQueueCache_) {
        ele.second.isDeleting = true;
        if (ele.second.state == BUFFER_STATE_ACQUIRED) {
            FreeBuffer(ele.second.buffer);
        }

        count--;
        if (count <= 0) {
            break;
        }
    }
}
606 
AttachBuffer(sptr<SurfaceBufferImpl> & buffer)607 GSError BufferQueue::AttachBuffer(sptr<SurfaceBufferImpl> &buffer)
608 {
609     ScopedBytrace func(__func__);
610     if (isShared_) {
611         BLOGN_FAILURE_RET(GSERROR_INVALID_OPERATING);
612     }
613 
614     if (buffer == nullptr) {
615         BLOGN_FAILURE_RET(GSERROR_INVALID_ARGUMENTS);
616     }
617 
618     std::lock_guard<std::mutex> lockGuard(mutex_);
619     BufferElement ele = {
620         .buffer = buffer,
621         .state = BUFFER_STATE_ATTACHED,
622         .config = {
623             .width = buffer->GetWidth(),
624             .height = buffer->GetHeight(),
625             .strideAlignment = 0x8,
626             .format = buffer->GetFormat(),
627             .usage = buffer->GetUsage(),
628             .timeout = 0,
629         },
630         .damage = {
631             .w = ele.config.width,
632             .h = ele.config.height,
633         }
634     };
635 
636     int32_t sequence = buffer->GetSeqNum();
637     int32_t usedSize = static_cast<int32_t>(GetUsedSize());
638     int32_t queueSize = static_cast<int32_t>(GetQueueSize());
639     if (usedSize >= queueSize) {
640         int32_t freeSize = static_cast<int32_t>(dirtyList_.size() + freeList_.size());
641         if (freeSize >= usedSize - queueSize + 1) {
642             DeleteBuffers(usedSize - queueSize + 1);
643             bufferQueueCache_[sequence] = ele;
644             BLOGN_SUCCESS_ID(sequence, "release");
645             return GSERROR_OK;
646         } else {
647             BLOGN_FAILURE_RET(GSERROR_OUT_OF_RANGE);
648         }
649     } else {
650         bufferQueueCache_[sequence] = ele;
651         BLOGN_SUCCESS_ID(sequence, "no release");
652         return GSERROR_OK;
653     }
654 }
655 
DetachBuffer(sptr<SurfaceBufferImpl> & buffer)656 GSError BufferQueue::DetachBuffer(sptr<SurfaceBufferImpl> &buffer)
657 {
658     ScopedBytrace func(__func__);
659     if (isShared_) {
660         BLOGN_FAILURE_RET(GSERROR_INVALID_OPERATING);
661     }
662 
663     if (buffer == nullptr) {
664         BLOGN_FAILURE_RET(GSERROR_INVALID_ARGUMENTS);
665     }
666 
667     std::lock_guard<std::mutex> lockGuard(mutex_);
668     int32_t sequence = buffer->GetSeqNum();
669     if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
670         BLOGN_FAILURE_ID(sequence, "not find in cache");
671         return GSERROR_NO_ENTRY;
672     }
673 
674     if (bufferQueueCache_[sequence].state == BUFFER_STATE_REQUESTED) {
675         BLOGN_SUCCESS_ID(sequence, "requested");
676     } else if (bufferQueueCache_[sequence].state == BUFFER_STATE_ACQUIRED) {
677         BLOGN_SUCCESS_ID(sequence, "acquired");
678     } else {
679         BLOGN_FAILURE_ID_RET(sequence, GSERROR_NO_ENTRY);
680     }
681 
682     bufferQueueCache_.erase(sequence);
683     return GSERROR_OK;
684 }
685 
SetQueueSize(uint32_t queueSize)686 GSError BufferQueue::SetQueueSize(uint32_t queueSize)
687 {
688     if (isShared_ == true && queueSize != 1) {
689         BLOGN_INVALID("shared queue, size must be 1");
690         return GSERROR_INVALID_ARGUMENTS;
691     }
692 
693     if (queueSize <= 0) {
694         BLOGN_INVALID("queue size (%{public}d) <= 0", queueSize);
695         return GSERROR_INVALID_ARGUMENTS;
696     }
697 
698     if (queueSize > SURFACE_MAX_QUEUE_SIZE) {
699         BLOGN_INVALID("queue size (%{public}d) > %{public}d", queueSize, SURFACE_MAX_QUEUE_SIZE);
700         return GSERROR_INVALID_ARGUMENTS;
701     }
702 
703     DeleteBuffers(queueSize_ - queueSize);
704     queueSize_ = queueSize;
705 
706     BLOGN_SUCCESS("queue size: %{public}d, Queue id: %{public}" PRIu64 "", queueSize, uniqueId_);
707     return GSERROR_OK;
708 }
709 
// Returns the queue's name via the out-parameter; always succeeds.
GSError BufferQueue::GetName(std::string &name)
{
    name = name_;
    return GSERROR_OK;
}
715 
// Registers the sptr-based consumer listener notified on FlushBuffer.
GSError BufferQueue::RegisterConsumerListener(sptr<IBufferConsumerListener> &listener)
{
    listener_ = listener;
    return GSERROR_OK;
}
721 
// Registers the raw-pointer consumer listener variant; used as a fallback
// when no sptr listener is set (see FlushBuffer).
GSError BufferQueue::RegisterConsumerListener(IBufferConsumerListenerClazz *listener)
{
    listenerClazz_ = listener;
    return GSERROR_OK;
}
727 
// Clears both consumer listener variants; subsequent flushes will fail with
// GSERROR_NO_CONSUMER.
GSError BufferQueue::UnregisterConsumerListener()
{
    listener_ = nullptr;
    listenerClazz_ = nullptr;
    return GSERROR_OK;
}
734 
// Registers the producer-side callback invoked from ReleaseBuffer.
GSError BufferQueue::RegisterReleaseListener(OnReleaseFunc func)
{
    onBufferRelease = func;
    return GSERROR_OK;
}
740 
// Sets the fallback dimensions reported by GetDefaultWidth/GetDefaultHeight.
// Both values must be strictly positive.
GSError BufferQueue::SetDefaultWidthAndHeight(int32_t width, int32_t height)
{
    if (width <= 0) {
        BLOGN_INVALID("defaultWidth is greater than 0, now is %{public}d", width);
        return GSERROR_INVALID_ARGUMENTS;
    }

    if (height <= 0) {
        BLOGN_INVALID("defaultHeight is greater than 0, now is %{public}d", height);
        return GSERROR_INVALID_ARGUMENTS;
    }

    defaultWidth = width;
    defaultHeight = height;
    return GSERROR_OK;
}
757 
// Fallback buffer width set via SetDefaultWidthAndHeight.
int32_t BufferQueue::GetDefaultWidth()
{
    return defaultWidth;
}
762 
// Fallback buffer height set via SetDefaultWidthAndHeight.
int32_t BufferQueue::GetDefaultHeight()
{
    return defaultHeight;
}
767 
// Sets the default usage flags reported by GetDefaultUsage; unvalidated.
GSError BufferQueue::SetDefaultUsage(uint32_t usage)
{
    defaultUsage = usage;
    return GSERROR_OK;
}
773 
// Default usage flags set via SetDefaultUsage.
uint32_t BufferQueue::GetDefaultUsage()
{
    return defaultUsage;
}
778 
CleanCache()779 GSError BufferQueue::CleanCache()
780 {
781     std::lock_guard<std::mutex> lockGuard(mutex_);
782     auto it = bufferQueueCache_.begin();
783     while (it != bufferQueueCache_.end()) {
784         if (it->second.fence > 0) {
785             close(it->second.fence);
786         }
787         bufferQueueCache_.erase(it++);
788     }
789     freeList_.clear();
790     dirtyList_.clear();
791     deletingList_.clear();
792     waitReqCon_.notify_all();
793     return GSERROR_OK;
794 }
795 
// Process-unique queue id assigned at construction (see GetUniqueIdImpl).
uint64_t BufferQueue::GetUniqueId() const
{
    return uniqueId_;
}
800 
DumpCache(std::string & result)801 void BufferQueue::DumpCache(std::string &result)
802 {
803     for (auto it = bufferQueueCache_.begin(); it != bufferQueueCache_.end(); it++) {
804         BufferElement element = it->second;
805         result += "        sequence = " + std::to_string(it->first) +
806             ", state = " + BufferStateStrs.at(element.state) +
807             ", timestamp = " + std::to_string(element.timestamp);
808         result += ", damageRect = [" + std::to_string(element.damage.x) + ", " +
809             std::to_string(element.damage.y) + ", " +
810             std::to_string(element.damage.w) + ", " +
811             std::to_string(element.damage.h) + "],";
812         result += " config = [" + std::to_string(element.config.width) + "x" +
813             std::to_string(element.config.height) + ", " +
814             std::to_string(element.config.strideAlignment) + ", " +
815             std::to_string(element.config.format) +", " +
816             std::to_string(element.config.usage) + ", " +
817             std::to_string(element.config.timeout) + "].\n";
818     }
819 }
820 
Dump(std::string & result)821 void BufferQueue::Dump(std::string &result)
822 {
823     std::lock_guard<std::mutex> lockGuard(mutex_);
824     result.append("    BufferQueue:\n");
825     result += "      default-size = [" + std::to_string(defaultWidth) + "x" + std::to_string(defaultHeight) + "]" +
826         ", FIFO = " + std::to_string(queueSize_) +
827         ", name = " + name_ +
828         ", uniqueId = " + std::to_string(uniqueId_) + ".\n";
829 
830     result.append("      bufferQueueCache:\n");
831     DumpCache(result);
832 }
833 }; // namespace OHOS
834