1 /*
2 * Copyright (c) 2024-2024 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include <mutex>
17 #include <list>
18 #include <algorithm>
19 #include <cassert>
20 #include <limits>
21 #include <securec.h>
22 #include "media_cached_buffer.h"
23 #include "common/log.h"
24 #include "avcodec_log.h"
25 #include "avcodec_errors.h"
26
27 namespace {
28 constexpr OHOS::HiviewDFX::HiLogLabel LABEL = { LOG_CORE, LOG_DOMAIN_STREAM_SOURCE, "HiStreamer" };
29 }
30
31 namespace OHOS {
32 namespace Media {
33
34 constexpr size_t CACHE_FRAGMENT_MAX_NUM_DEFAULT = 300; // Maximum number of fragment nodes
constexpr size_t CACHE_FRAGMENT_MAX_NUM_LARGE = 10; // Maximum number of fragment nodes when the offset span is large
36 constexpr size_t CACHE_FRAGMENT_MIN_NUM_DEFAULT = 3; // Minimum number of fragment nodes
constexpr double NEW_FRAGMENT_INIT_CHUNK_NUM = 128.0; // Chunks reserved for a fragment created by seek; 128 chunks = 2MB
38 constexpr double NEW_FRAGMENT_NIT_DEFAULT_DENOMINATOR = 0.25;
39 constexpr double CACHE_RELEASE_FACTOR_DEFAULT = 10;
40 constexpr double TO_PERCENT = 100;
41 constexpr int64_t MAX_TOTAL_READ_SIZE = 2000000;
42 constexpr int64_t UP_LIMIT_MAX_TOTAL_READ_SIZE = 3000000;
43 constexpr int64_t ACCESS_OFFSET_MAX_LENGTH = 2 * 1024;
44
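// Returns true if mid lies in the closed interval [start, end].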
inline constexpr bool BoundedIntervalComp(int64_t mid, uint64_t start, int64_t end)
46 {
47 return (static_cast<int64_t>(start) <= mid && mid <= end);
48 }
49
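// Returns true if mid lies in the left-closed, right-open interval [start, end).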
inline constexpr bool LeftBoundedRightOpenComp(int64_t mid, uint64_t start, int64_t end)
51 {
52 return (static_cast<int64_t>(start) <= mid && mid < end);
53 }
54
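// Advances the source pointer, the offset and the written-size counter by step bytes.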
inline void IncreaseStep(uint8_t*& src, uint64_t& offset, size_t& writeSize, size_t step)
56 {
57 src += step;
58 offset += static_cast<uint64_t>(step);
59 writeSize += step;
60 }
61
inline void InitChunkInfo(CacheChunk& chunkInfo, uint64_t offset)
63 {
64 chunkInfo.offset = offset;
65 chunkInfo.dataLength = 0;
66 }
67
CacheMediaChunkBufferImpl::CacheMediaChunkBufferImpl()
69 : totalBuffSize_(0), totalReadSize_(0), chunkMaxNum_(0), chunkSize_(0), bufferAddr_(nullptr),
70 fragmentMaxNum_(CACHE_FRAGMENT_MAX_NUM_DEFAULT),
71 lruCache_(CACHE_FRAGMENT_MAX_NUM_DEFAULT) {}
72
CacheMediaChunkBufferImpl::~CacheMediaChunkBufferImpl()
74 {
75 std::lock_guard lock(mutex_);
76 freeChunks_.clear();
77 fragmentCacheBuffer_.clear();
78 readPos_ = fragmentCacheBuffer_.end();
79 writePos_ = fragmentCacheBuffer_.end();
80 chunkMaxNum_ = 0;
81 totalReadSize_ = 0;
82 if (bufferAddr_ != nullptr) {
83 free(bufferAddr_);
84 bufferAddr_ = nullptr;
85 }
86 }
87
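// Allocates one contiguous buffer, slices it into chunks of chunkSize bytes (each preceded by a CacheChunk
// header) and puts every chunk on the free list. Fails on invalid sizes or repeated initialization.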
bool CacheMediaChunkBufferImpl::Init(uint64_t totalBuffSize, uint32_t chunkSize)
89 {
90 if (isLargeOffsetSpan_) {
91 lruCache_.ReCacheSize(CACHE_FRAGMENT_MAX_NUM_LARGE);
92 } else {
93 lruCache_.ReCacheSize(CACHE_FRAGMENT_MAX_NUM_DEFAULT);
94 }
95
96 if (totalBuffSize == 0 || chunkSize == 0 || totalBuffSize < chunkSize) {
97 return false;
98 }
99
100 double newFragmentInitChunkNum = NEW_FRAGMENT_INIT_CHUNK_NUM;
101 uint64_t diff = (totalBuffSize + chunkSize) > 1 ? (totalBuffSize + chunkSize) - 1 : 0;
102 int64_t chunkNum = static_cast<int64_t>(diff / chunkSize) + 1;
103 if ((chunkNum - static_cast<int64_t>(newFragmentInitChunkNum)) < 0) {
104 return false;
105 }
106 if (newFragmentInitChunkNum > static_cast<double>(chunkNum) * NEW_FRAGMENT_NIT_DEFAULT_DENOMINATOR) {
107 newFragmentInitChunkNum = std::max(1.0, static_cast<double>(chunkNum) * NEW_FRAGMENT_NIT_DEFAULT_DENOMINATOR);
108 }
109 std::lock_guard lock(mutex_);
110 if (bufferAddr_ != nullptr) {
111 return false;
112 }
113
114 readPos_ = fragmentCacheBuffer_.end();
115 writePos_ = fragmentCacheBuffer_.end();
116 size_t sizePerChunk = sizeof(CacheChunk) + chunkSize;
117 FALSE_RETURN_V_MSG_E(static_cast<int64_t>(sizePerChunk) * chunkNum > 0, false,
118 "Invalid sizePerChunk and chunkNum.");
119 bufferAddr_ = static_cast<uint8_t*>(malloc(sizePerChunk * chunkNum));
120 if (bufferAddr_ == nullptr) {
121 return false;
122 }
123
124 uint8_t* temp = bufferAddr_;
125 for (auto i = 0; i < chunkNum; ++i) {
126 auto chunkInfo = reinterpret_cast<CacheChunk*>(temp);
127 chunkInfo->offset = 0;
128 chunkInfo->dataLength = 0;
129 chunkInfo->chunkSize = static_cast<uint32_t>(chunkSize);
130 freeChunks_.push_back(chunkInfo);
131 temp += sizePerChunk;
132 }
133 chunkMaxNum_ = chunkNum >= 1 ? static_cast<uint32_t>(chunkNum) - 1 : 0; // -1
134 totalBuffSize_ = totalBuffSize;
135 chunkSize_ = chunkSize;
136 initReadSizeFactor_ = newFragmentInitChunkNum / (chunkMaxNum_ - newFragmentInitChunkNum);
137 return true;
138 }
139
// Update the fragment's access position after a read that ended at offsetChunk.
void CacheMediaChunkBufferImpl::UpdateAccessPos(FragmentIterator& fragmentPos, ChunkIterator& chunkPos,
    uint64_t offsetChunk)
143 {
144 if (chunkPos == fragmentPos->chunks.end()) {
145 auto preChunkPos = std::prev(chunkPos);
146 if (((*preChunkPos)->offset + (*preChunkPos)->chunkSize) == offsetChunk) {
147 fragmentPos->accessPos = chunkPos;
148 } else {
149 fragmentPos->accessPos = preChunkPos;
150 }
151 } else if ((*chunkPos)->offset == offsetChunk) {
152 fragmentPos->accessPos = chunkPos;
153 } else {
154 fragmentPos->accessPos = std::prev(chunkPos);
155 }
156 }
157
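// Copies up to readSize bytes starting at offset into ptr, crossing chunk and fragment boundaries as needed.
// Returns the number of bytes actually copied; stops early when the requested range is not cached.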
size_t CacheMediaChunkBufferImpl::Read(void* ptr, uint64_t offset, size_t readSize)
159 {
160 std::lock_guard lock(mutex_);
161 size_t hasReadSize = 0;
162 uint8_t* dst = static_cast<uint8_t*>(ptr);
163 uint64_t hasReadOffset = offset;
164 size_t oneReadSize = ReadInner(dst, hasReadOffset, readSize);
165 hasReadSize = oneReadSize;
166 while (hasReadSize < readSize && oneReadSize != 0) {
167 dst += oneReadSize;
168 hasReadOffset += static_cast<uint64_t>(oneReadSize);
169 oneReadSize = ReadInner(dst, hasReadOffset, readSize - hasReadSize);
170 hasReadSize += oneReadSize;
171 }
172 return hasReadSize;
173 }
174
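// Reads only from the fragment that contains offset; updates its access position, read statistics and
// LRU order. Returns 0 when the offset is not cached.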
size_t CacheMediaChunkBufferImpl::ReadInner(void* ptr, uint64_t offset, size_t readSize)
176 {
177 auto fragmentPos = GetOffsetFragmentCache(readPos_, offset, LeftBoundedRightOpenComp);
178 if (readSize == 0 || fragmentPos == fragmentCacheBuffer_.end()) {
179 return 0;
180 }
181 auto chunkPos = fragmentPos->accessPos;
182 if (chunkPos == fragmentPos->chunks.end() ||
183 offset < (*chunkPos)->offset ||
184 offset > (*chunkPos)->offset + (*chunkPos)->dataLength) {
185 chunkPos = GetOffsetChunkCache(fragmentPos->chunks, offset, LeftBoundedRightOpenComp);
186 }
187
188 uint8_t* dst = static_cast<uint8_t*>(ptr);
189 uint64_t offsetChunk = offset;
190 if (chunkPos != fragmentPos->chunks.end()) {
191 uint64_t readOffset = offset > fragmentPos->offsetBegin ? offset - fragmentPos->offsetBegin : 0;
192 uint64_t temp = readOffset > static_cast<uint64_t>(fragmentPos->accessLength) ?
193 readOffset - static_cast<uint64_t>(fragmentPos->accessLength) : 0;
194 if (temp >= ACCESS_OFFSET_MAX_LENGTH) {
195 chunkPos = SplitFragmentCacheBuffer(fragmentPos, offset, chunkPos);
196 }
197 size_t hasReadSize = 0;
198 while (hasReadSize < readSize && chunkPos != fragmentPos->chunks.end()) {
199 auto chunkInfo = *chunkPos;
200 uint64_t diff = offsetChunk > chunkInfo->offset ? offsetChunk - chunkInfo->offset : 0;
201 if (offsetChunk < chunkInfo->offset || diff > chunkInfo->dataLength) {
202 DumpAndCheckInner();
203 return 0;
204 }
205 uint64_t readDiff = chunkInfo->dataLength > diff ? chunkInfo->dataLength - diff : 0;
206 auto readOne = std::min(static_cast<size_t>(readDiff), readSize - hasReadSize);
207 errno_t res = memcpy_s(dst + hasReadSize, readOne, (*chunkPos)->data + diff, readOne);
208 FALSE_RETURN_V_MSG_E(res == EOK, 0, "memcpy_s data err");
209 hasReadSize += readOne;
210 offsetChunk += static_cast<uint64_t>(readOne);
211 chunkPos++;
212 }
213 UpdateAccessPos(fragmentPos, chunkPos, offsetChunk);
214 uint64_t lengthDiff = offsetChunk > fragmentPos->offsetBegin ? offsetChunk - fragmentPos->offsetBegin : 0;
215 fragmentPos->accessLength = static_cast<int64_t>(lengthDiff);
216 fragmentPos->readTime = Clock::now();
217 fragmentPos->totalReadSize += hasReadSize;
218 totalReadSize_ += hasReadSize;
219 readPos_ = fragmentPos;
220 lruCache_.Refer(fragmentPos->offsetBegin, fragmentPos);
221 return hasReadSize;
222 }
223 return 0;
224 }
225
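// Overwrites data that already exists in the fragment when the write offset falls before the fragment's end.
// outWriteSize reports how many bytes were consumed by the in-place overwrite.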
bool CacheMediaChunkBufferImpl::WriteInPlace(FragmentIterator& fragmentPos, uint8_t* ptr, uint64_t inOffset,
    size_t inWriteSize, size_t& outWriteSize)
228 {
229 uint64_t offset = inOffset;
230 size_t writeSize = inWriteSize;
231 uint8_t* src = ptr;
232 auto& chunkList = fragmentPos->chunks;
233 outWriteSize = 0;
234 ChunkIterator chunkPos = std::upper_bound(chunkList.begin(), chunkList.end(), offset,
235 [](auto inputOffset, const CacheChunk* chunk) {
236 return (inputOffset <= chunk->offset + chunk->dataLength);
237 });
238 if (chunkPos == chunkList.end()) {
239 DumpInner(0);
240 return false;
241 }
242 size_t writeSizeTmp = 0;
243 auto chunkInfoTmp = *chunkPos;
244 uint64_t accessLengthTmp = inOffset > writePos_->offsetBegin ? inOffset - writePos_->offsetBegin : 0;
245 if (chunkInfoTmp->offset <= offset &&
246 offset < chunkInfoTmp->offset + static_cast<uint64_t>(chunkInfoTmp->dataLength)) {
247 size_t diff = static_cast<size_t>(offset > chunkInfoTmp->offset ? offset - chunkInfoTmp->offset : 0);
248 size_t copyLen = static_cast<size_t>(chunkInfoTmp->dataLength - diff);
249 copyLen = std::min(copyLen, writeSize);
250 errno_t res = memcpy_s(chunkInfoTmp->data + diff, copyLen, src, copyLen);
251 FALSE_RETURN_V_MSG_E(res == EOK, false, "memcpy_s data err");
252 IncreaseStep(src, offset, writeSizeTmp, copyLen);
253 if (writePos_->accessLength > static_cast<int64_t>(accessLengthTmp)) {
254 writePos_->accessPos = chunkPos;
255 writePos_->accessLength = static_cast<int64_t>(accessLengthTmp);
256 }
257 } else if (writePos_->accessLength > static_cast<int64_t>(accessLengthTmp)) {
258 writePos_->accessPos = std::next(chunkPos);
259 writePos_->accessLength = static_cast<int64_t>(accessLengthTmp);
260 }
261 ++chunkPos;
262 while (writeSizeTmp < writeSize && chunkPos != chunkList.end()) {
263 chunkInfoTmp = *chunkPos;
264 auto copyLen = std::min(chunkInfoTmp->dataLength, (uint32_t)(writeSize - writeSizeTmp));
265 errno_t res = memcpy_s(chunkInfoTmp->data, copyLen, src, copyLen);
266 FALSE_RETURN_V_MSG_E(res == EOK, false, "memcpy_s data err");
267 IncreaseStep(src, offset, writeSizeTmp, copyLen);
268 ++chunkPos;
269 }
270 outWriteSize = writeSizeTmp;
271 return true;
272 }
273
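// Prepares the write at [offset, offset + writeSize): following fragments that the write fully covers are
// released, and a partially overlapped fragment is trimmed so that it starts right after the write.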
bool CacheMediaChunkBufferImpl::WriteMergerPre(uint64_t offset, size_t writeSize, FragmentIterator& nextFragmentPos)
275 {
276 nextFragmentPos = std::next(writePos_);
277 bool isLoop = true;
278 while (isLoop) {
279 if (nextFragmentPos == fragmentCacheBuffer_.end() ||
280 offset + static_cast<uint64_t>(writeSize) < nextFragmentPos->offsetBegin) {
281 nextFragmentPos = fragmentCacheBuffer_.end();
282 isLoop = false;
283 break;
284 }
285 if (offset + static_cast<uint64_t>(writeSize) <
286 nextFragmentPos->offsetBegin + static_cast<uint64_t>(nextFragmentPos->dataLength)) {
287 auto endPos = GetOffsetChunkCache(nextFragmentPos->chunks,
288 offset + static_cast<uint64_t>(writeSize), LeftBoundedRightOpenComp);
289 freeChunks_.splice(freeChunks_.end(), nextFragmentPos->chunks, nextFragmentPos->chunks.begin(), endPos);
290 if (endPos == nextFragmentPos->chunks.end()) {
291 nextFragmentPos = EraseFragmentCache(nextFragmentPos);
292 DumpInner(0);
293 return false;
294 }
295 auto &chunkInfo = *endPos;
296 uint64_t newOffset = offset + static_cast<uint64_t>(writeSize);
297 uint64_t dataLength = static_cast<uint64_t>(chunkInfo->dataLength);
298 uint64_t moveLen = (chunkInfo->offset + dataLength) > newOffset ?
299 (chunkInfo->offset + dataLength) - newOffset : 0;
300 auto mergeDataLen = chunkInfo->dataLength > moveLen ? chunkInfo->dataLength - moveLen : 0;
301 errno_t res = memmove_s(chunkInfo->data, moveLen, chunkInfo->data + mergeDataLen, moveLen);
302 FALSE_RETURN_V_MSG_E(res == EOK, false, "memmove_s data err");
303 chunkInfo->offset = newOffset;
304 chunkInfo->dataLength = static_cast<uint32_t>(moveLen);
305 uint64_t lostLength = newOffset > nextFragmentPos->offsetBegin ?
306 newOffset - nextFragmentPos->offsetBegin : 0;
307 nextFragmentPos->dataLength -= static_cast<int64_t>(lostLength);
308 lruCache_.Update(nextFragmentPos->offsetBegin, newOffset, nextFragmentPos);
309 nextFragmentPos->offsetBegin = newOffset;
310 nextFragmentPos->accessLength = 0;
311 nextFragmentPos->accessPos = nextFragmentPos->chunks.end();
312 isLoop = false;
313 break;
314 } else {
315 freeChunks_.splice(freeChunks_.end(), nextFragmentPos->chunks);
316 writePos_->totalReadSize += nextFragmentPos->totalReadSize;
nextFragmentPos->totalReadSize = 0; // already credited to writePos_; avoid double subtraction on erase
318 nextFragmentPos = EraseFragmentCache(nextFragmentPos);
319 }
320 }
321 return true;
322 }
323
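// Appends the trimmed next fragment to writePos_ when the two become contiguous after the write.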
void CacheMediaChunkBufferImpl::WriteMergerPost(FragmentIterator& nextFragmentPos)
325 {
326 if (nextFragmentPos == fragmentCacheBuffer_.end() || writePos_->chunks.empty() ||
327 nextFragmentPos->chunks.empty()) {
328 return;
329 }
330 auto preChunkInfo = writePos_->chunks.back();
331 auto nextChunkInfo = nextFragmentPos->chunks.front();
332 if (preChunkInfo->offset + preChunkInfo->dataLength != nextChunkInfo->offset) {
333 DumpAndCheckInner();
334 return;
335 }
336 writePos_->dataLength += nextFragmentPos->dataLength;
337 writePos_->totalReadSize += nextFragmentPos->totalReadSize;
nextFragmentPos->totalReadSize = 0; // already credited to writePos_; avoid double subtraction on erase
339 writePos_->chunks.splice(writePos_->chunks.end(), nextFragmentPos->chunks);
340 EraseFragmentCache(nextFragmentPos);
341 }
342
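// Writes inWriteSize bytes at inOffset: reuses or extends an existing fragment when the offset overlaps or
// is adjacent, otherwise starts a new fragment; merges with following fragments afterwards.
// Returns the number of bytes accepted into the cache.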
size_t CacheMediaChunkBufferImpl::Write(void* ptr, uint64_t inOffset, size_t inWriteSize)
344 {
345 std::lock_guard lock(mutex_);
346 uint64_t offset = inOffset;
347 size_t writeSize = inWriteSize;
348 uint8_t* src = static_cast<uint8_t*>(ptr);
349 size_t dupWriteSize = 0;
350
351 auto fragmentPos = GetOffsetFragmentCache(writePos_, offset, BoundedIntervalComp);
352 ChunkIterator chunkPos;
353 if (fragmentPos != fragmentCacheBuffer_.end()) {
354 auto& chunkList = fragmentPos->chunks;
355 writePos_ = fragmentPos;
356 if ((fragmentPos->offsetBegin + static_cast<uint64_t>(fragmentPos->dataLength)) != offset) {
357 auto ret = WriteInPlace(fragmentPos, src, offset, writeSize, dupWriteSize);
358 if (!ret || dupWriteSize >= writeSize) {
359 return writeSize;
360 }
361 src += dupWriteSize;
362 offset += dupWriteSize;
363 writeSize -= dupWriteSize;
364 }
365 chunkPos = std::prev(chunkList.end());
366 } else {
367 if (freeChunks_.empty()) {
368 MEDIA_LOG_D("no free chunk.");
369 return dupWriteSize;
370 }
MEDIA_LOG_D("fragment not found.");
372 chunkPos = AddFragmentCacheBuffer(offset);
373 }
374 FragmentIterator nextFragmentPos = fragmentCacheBuffer_.end();
375 auto success = WriteMergerPre(offset, writeSize, nextFragmentPos);
376 if (!success) {
377 return dupWriteSize;
378 }
379 auto writeSizeTmp = WriteChunk(*writePos_, chunkPos, src, offset, writeSize);
380 if (writeSize != writeSizeTmp) {
381 nextFragmentPos = fragmentCacheBuffer_.end();
382 }
383 WriteMergerPost(nextFragmentPos);
384 return writeSizeTmp + dupWriteSize;
385 }
386
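// Moves the read position to offset. Returns true only when the offset is already cached (seek hit), and
// credits the target fragment with a fresh read-size quota.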
bool CacheMediaChunkBufferImpl::Seek(uint64_t offset)
388 {
389 std::lock_guard lock(mutex_);
390 auto readPos = GetOffsetFragmentCache(readPos_, offset, BoundedIntervalComp);
391 if (readPos != fragmentCacheBuffer_.end()) {
392 readPos_ = readPos;
393 bool isSeekHit = false;
394 auto chunkPos = GetOffsetChunkCache(readPos->chunks, offset, LeftBoundedRightOpenComp);
395 if (chunkPos != readPos->chunks.end()) {
396 auto readOffset = offset > readPos->offsetBegin ? offset - readPos->offsetBegin : 0;
397 uint64_t diff = readOffset > static_cast<uint64_t>(readPos->accessLength) ?
398 readOffset - static_cast<uint64_t>(readPos->accessLength) : 0;
399 if (diff >= ACCESS_OFFSET_MAX_LENGTH) {
400 chunkPos = SplitFragmentCacheBuffer(readPos, offset, chunkPos);
401 }
402
403 if (chunkPos == readPos->chunks.end()) {
404 return false;
405 }
406 lruCache_.Refer(readPos->offsetBegin, readPos);
407 (*readPos).accessPos = chunkPos;
408 auto tmpLength = offset > (*readPos).offsetBegin ? offset - (*readPos).offsetBegin : 0;
409 (*readPos).accessLength = static_cast<int64_t>(tmpLength);
410 readPos->readTime = Clock::now();
411 isSeekHit = true;
412 }
413 ResetReadSizeAlloc();
414 uint64_t newReadSizeInit = static_cast<uint64_t>(1 + initReadSizeFactor_ * static_cast<double>(totalReadSize_));
415 readPos->totalReadSize += newReadSizeInit;
416 totalReadSize_ += newReadSizeInit;
417 return isSeekHit;
418 }
419 return false;
420 }
421
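// Returns how many contiguous cached bytes are available starting at offset (spanning adjacent fragments).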
size_t CacheMediaChunkBufferImpl::GetBufferSize(uint64_t offset)
423 {
424 std::lock_guard lock(mutex_);
425 auto readPos = GetOffsetFragmentCache(readPos_, offset, LeftBoundedRightOpenComp);
426 size_t bufferSize = 0;
427 while (readPos != fragmentCacheBuffer_.end()) {
428 uint64_t nextOffsetBegin = readPos->offsetBegin + static_cast<uint64_t>(readPos->dataLength);
429 bufferSize = static_cast<size_t>(nextOffsetBegin > offset ? nextOffsetBegin - offset : 0);
430 readPos++;
431 if (readPos == fragmentCacheBuffer_.end() || nextOffsetBegin != readPos->offsetBegin) {
432 break;
433 }
434 }
435 return bufferSize;
436 }
437
void CacheMediaChunkBufferImpl::HandleFragmentPos(FragmentIterator& fragmentIter)
439 {
440 uint64_t nextOffsetBegin = fragmentIter->offsetBegin + static_cast<uint64_t>(fragmentIter->dataLength);
441 ++fragmentIter;
442 while (fragmentIter != fragmentCacheBuffer_.end()) {
443 if (nextOffsetBegin != fragmentIter->offsetBegin) {
444 break;
445 }
446 nextOffsetBegin = fragmentIter->offsetBegin + static_cast<uint64_t>(fragmentIter->dataLength);
447 ++fragmentIter;
448 }
449 }
450
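// Returns the start of the next cached range after offset (skipping ranges contiguous with the one that
// contains offset), or 0 when there is none.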
uint64_t CacheMediaChunkBufferImpl::GetNextBufferOffset(uint64_t offset)
452 {
453 std::lock_guard lock(mutex_);
454 auto fragmentIter = std::upper_bound(fragmentCacheBuffer_.begin(), fragmentCacheBuffer_.end(), offset,
455 [](auto inputOffset, const FragmentCacheBuffer& fragment) {
456 return (inputOffset < fragment.offsetBegin + fragment.dataLength);
457 });
458 if (fragmentIter != fragmentCacheBuffer_.end()) {
459 if (LeftBoundedRightOpenComp(offset, fragmentIter->offsetBegin,
460 fragmentIter->offsetBegin + fragmentIter->dataLength)) {
461 HandleFragmentPos(fragmentIter);
462 }
463 }
464 if (fragmentIter != fragmentCacheBuffer_.end()) {
465 return fragmentIter->offsetBegin;
466 }
467 return 0;
468 }
469
FragmentIterator CacheMediaChunkBufferImpl::EraseFragmentCache(const FragmentIterator& iter)
471 {
472 if (iter == readPos_) {
473 readPos_ = fragmentCacheBuffer_.end();
474 }
475 if (iter == writePos_) {
476 writePos_ = fragmentCacheBuffer_.end();
477 }
478 totalReadSize_ -= iter->totalReadSize;
479 lruCache_.Delete(iter->offsetBegin);
480 return fragmentCacheBuffer_.erase(iter);
481 }
482
inline size_t WriteOneChunkData(CacheChunk& chunkInfo, uint8_t* src, uint64_t offset, size_t writeSize)
484 {
485 uint64_t copyBegin = offset > chunkInfo.offset ? offset - chunkInfo.offset : 0;
if (copyBegin > chunkInfo.chunkSize) { // copyBegin is unsigned, no need to check for negative values
487 return 0;
488 }
489 size_t writePerOne = static_cast<size_t>(chunkInfo.chunkSize - static_cast<size_t>(copyBegin));
490 writePerOne = std::min(writePerOne, writeSize);
491 errno_t res = memcpy_s(chunkInfo.data + copyBegin, writePerOne, src, writePerOne);
492 FALSE_RETURN_V_MSG_E(res == EOK, 0, "memcpy_s data err");
493 chunkInfo.dataLength = static_cast<uint32_t>(static_cast<size_t>(copyBegin) + writePerOne);
494 return writePerOne;
495 }
496
inline CacheChunk* PopFreeCacheChunk(CacheChunkList& freeChunks, uint64_t offset)
498 {
499 if (freeChunks.empty()) {
500 return nullptr;
501 }
502 auto tmp = freeChunks.front();
503 freeChunks.pop_front();
504 InitChunkInfo(*tmp, offset);
505 return tmp;
506 }
507
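// Appends data to the fragment starting at chunkPos, pulling additional chunks from the free list (or by
// eviction) until writeSize bytes are stored or no chunk can be obtained. Returns the bytes written.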
size_t CacheMediaChunkBufferImpl::WriteChunk(FragmentCacheBuffer& fragmentCacheBuffer, ChunkIterator& chunkPos,
    void* ptr, uint64_t offset, size_t writeSize)
510 {
511 if (chunkPos == fragmentCacheBuffer.chunks.end()) {
MEDIA_LOG_D("input invalid.");
513 return 0;
514 }
515 size_t writedTmp = 0;
516 auto chunkInfo = *chunkPos;
517 uint8_t* src = static_cast<uint8_t*>(ptr);
518 if (chunkInfo->chunkSize > chunkInfo->dataLength) {
519 writedTmp += WriteOneChunkData(*chunkInfo, src, offset, writeSize);
520 fragmentCacheBuffer.dataLength += static_cast<int64_t>(writedTmp);
521 }
522 while (writedTmp < writeSize) {
523 auto chunkOffset = offset + static_cast<uint64_t>(writedTmp);
524 auto freeChunk = GetFreeCacheChunk(chunkOffset);
525 if (freeChunk == nullptr) {
526 return writedTmp;
527 }
528 auto writePerOne = WriteOneChunkData(*freeChunk, src + writedTmp, chunkOffset, writeSize - writedTmp);
529 fragmentCacheBuffer.chunks.push_back(freeChunk);
530 writedTmp += writePerOne;
531 fragmentCacheBuffer.dataLength += static_cast<int64_t>(writePerOne);
532
533 if (fragmentCacheBuffer.accessPos == fragmentCacheBuffer.chunks.end()) {
534 fragmentCacheBuffer.accessPos = std::prev(fragmentCacheBuffer.chunks.end());
535 }
536 }
537 return writedTmp;
538 }
539
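// Detaches the first chunk of the fragment, advances its begin offset and updates the LRU key;
// returns the detached chunk, or nullptr if the fragment is empty.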
CacheChunk* CacheMediaChunkBufferImpl::UpdateFragmentCacheForDelHead(FragmentIterator& fragmentIter)
541 {
542 FragmentCacheBuffer& fragment = *fragmentIter;
543 if (fragment.chunks.empty()) {
544 return nullptr;
545 }
546 auto cacheChunk = fragment.chunks.front();
547 fragment.chunks.pop_front();
548
549 auto oldOffsetBegin = fragment.offsetBegin;
550 int64_t dataLength = static_cast<int64_t>(cacheChunk->dataLength);
551 fragment.offsetBegin += static_cast<uint64_t>(dataLength);
552 fragment.dataLength -= dataLength;
553 if (fragment.accessLength > dataLength) {
554 fragment.accessLength -= dataLength;
555 } else {
556 fragment.accessLength = 0;
557 }
558 lruCache_.Update(oldOffsetBegin, fragmentIter->offsetBegin, fragmentIter);
559 return cacheChunk;
560 }
561
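// Detaches the last chunk of the fragment and shrinks its data/access lengths accordingly;
// returns the detached chunk, or nullptr if the fragment is empty.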
CacheChunk* UpdateFragmentCacheForDelTail(FragmentCacheBuffer& fragment)
563 {
564 if (fragment.chunks.empty()) {
565 return nullptr;
566 }
567 if (fragment.accessPos == std::prev(fragment.chunks.end())) {
568 fragment.accessPos = fragment.chunks.end();
569 }
570
571 auto cacheChunk = fragment.chunks.back();
572 fragment.chunks.pop_back();
573
574 auto dataLength = cacheChunk->dataLength;
575 if (fragment.accessLength > fragment.dataLength - static_cast<int64_t>(dataLength)) {
576 fragment.accessLength = fragment.dataLength - static_cast<int64_t>(dataLength);
577 }
578 fragment.dataLength -= static_cast<int64_t>(dataLength);
579 return cacheChunk;
580 }
581
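// Evicts the least recently used fragment (skipping the fragment currently being written) and recycles its chunks.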
bool CacheMediaChunkBufferImpl::CheckThresholdFragmentCacheBuffer(FragmentIterator& currWritePos)
583 {
584 int64_t offset = -1;
585 FragmentIterator fragmentIterator = fragmentCacheBuffer_.end();
586 auto ret = lruCache_.GetLruNode(offset, fragmentIterator);
587 if (!ret) {
588 return false;
589 }
590 if (fragmentIterator == fragmentCacheBuffer_.end()) {
591 return false;
592 }
593 if (currWritePos == fragmentIterator) {
594 lruCache_.Refer(offset, currWritePos);
595 ret = lruCache_.GetLruNode(offset, fragmentIterator);
596 if (!ret) {
597 return false;
598 }
599 }
600 freeChunks_.splice(freeChunks_.end(), fragmentIterator->chunks);
601 EraseFragmentCache(fragmentIterator);
602 return true;
603 }
604
/***
 * Overall strategy:
 * Compute the maximum number of fragments allowed; if it exceeds FRAGMENT_MAX_NUM(4), evict the least
 * recently read fragment (never the fragment currently being written).
 * A newly allocated fragment is given a fixed number of chunks, computed by formula, so that it can keep
 * downloading.
 * Maximum chunks allowed per fragment:
 *   (bytes read from this fragment (fragmentReadSize) / total bytes read (totalReadSize)) * total chunk count.
 * Compute the maximum chunk count allowed for this fragment:
 *   If exceeded, delete its already-read chunks; if there are no already-read chunks and it is still over
 *   the limit, refuse the write and return failure (the fragment cannot take any more data).
 *   If not exceeded, take a chunk from the free list. If the free list is empty:
 *     Loop over the other fragments and compute each one's maximum allowed chunk count:
 *       if exceeded, delete its already-read chunks.
 *     If that is still not enough:
 *       Loop over the other fragments and compute each one's maximum allowed chunk count:
 *         if exceeded, delete its unread chunks at the tail.
 * If nothing is available after all that, return failure.
 *
 * Note: consider whether to try the free list first at the very beginning and continue only if it is empty.
 */
void CacheMediaChunkBufferImpl::DeleteHasReadFragmentCacheBuffer(FragmentIterator& fragmentIter, size_t allowChunkNum)
624 {
625 auto& fragmentCacheChunks = *fragmentIter;
626 while (fragmentCacheChunks.chunks.size() >= allowChunkNum &&
627 fragmentCacheChunks.accessLength > static_cast<int64_t>(static_cast<double>(fragmentCacheChunks.dataLength) *
628 CACHE_RELEASE_FACTOR_DEFAULT / TO_PERCENT)) {
629 if (fragmentCacheChunks.accessPos != fragmentCacheChunks.chunks.begin()) {
630 auto tmp = UpdateFragmentCacheForDelHead(fragmentIter);
631 if (tmp != nullptr) {
632 freeChunks_.push_back(tmp);
633 }
634 } else {
635 MEDIA_LOG_D("judge has read finish.");
636 break;
637 }
638 }
639 }
640
void CacheMediaChunkBufferImpl::DeleteUnreadFragmentCacheBuffer(FragmentIterator& fragmentIter, size_t allowChunkNum)
642 {
643 auto& fragmentCacheChunks = *fragmentIter;
644 while (fragmentCacheChunks.chunks.size() > allowChunkNum) {
645 if (!fragmentCacheChunks.chunks.empty()) {
646 auto tmp = UpdateFragmentCacheForDelTail(fragmentCacheChunks);
647 if (tmp != nullptr) {
648 freeChunks_.push_back(tmp);
649 }
650 } else {
651 break;
652 }
653 }
654 }
655
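// Obtains a free chunk for the given write offset. Tries the free list first, then reclaims already-read
// chunks, then evicts whole LRU fragments, and finally drops unread tail chunks of other fragments.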
CacheChunk* CacheMediaChunkBufferImpl::GetFreeCacheChunk(uint64_t offset, bool checkAllowFailContinue)
657 {
658 if (writePos_ == fragmentCacheBuffer_.end()) {
659 return nullptr;
660 }
661 if (!freeChunks_.empty()) {
662 return PopFreeCacheChunk(freeChunks_, offset);
663 }
664 auto currWritePos = GetOffsetFragmentCache(writePos_, offset, BoundedIntervalComp);
665 size_t allowChunkNum = 0;
666 if (currWritePos != fragmentCacheBuffer_.end()) {
667 allowChunkNum = CalcAllowMaxChunkNum(currWritePos->totalReadSize, currWritePos->offsetBegin);
668 DeleteHasReadFragmentCacheBuffer(currWritePos, allowChunkNum);
669 if (currWritePos->chunks.size() >= allowChunkNum && !checkAllowFailContinue) {
670 return nullptr;
671 }
672 }
673 if (!freeChunks_.empty()) {
674 return PopFreeCacheChunk(freeChunks_, offset);
675 }
676 for (auto iter = fragmentCacheBuffer_.begin(); iter != fragmentCacheBuffer_.end(); ++iter) {
677 if (iter != currWritePos) {
678 allowChunkNum = CalcAllowMaxChunkNum(iter->totalReadSize, iter->offsetBegin);
679 DeleteHasReadFragmentCacheBuffer(iter, allowChunkNum);
680 }
681 }
682 if (!freeChunks_.empty()) {
683 return PopFreeCacheChunk(freeChunks_, offset);
684 }
685 while (fragmentCacheBuffer_.size() > CACHE_FRAGMENT_MIN_NUM_DEFAULT) {
686 auto result = CheckThresholdFragmentCacheBuffer(currWritePos);
687 if (!freeChunks_.empty()) {
688 return PopFreeCacheChunk(freeChunks_, offset);
689 }
690 if (!result) {
691 break;
692 }
693 }
694 for (auto iter = fragmentCacheBuffer_.begin(); iter != fragmentCacheBuffer_.end(); ++iter) {
695 if (iter != currWritePos) {
696 allowChunkNum = CalcAllowMaxChunkNum(iter->totalReadSize, iter->offsetBegin);
697 DeleteUnreadFragmentCacheBuffer(iter, allowChunkNum);
698 }
699 }
700 if (!freeChunks_.empty()) {
701 return PopFreeCacheChunk(freeChunks_, offset);
702 }
703 return nullptr;
704 }
705
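// HLS variant: only already-read chunks of the current and other fragments are reclaimed here;
// whole fragments and unread data are never dropped.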
CacheChunk* CacheMediaChunkBufferHlsImpl::GetFreeCacheChunk(uint64_t offset, bool checkAllowFailContinue)
707 {
708 if (writePos_ == fragmentCacheBuffer_.end()) {
709 return nullptr;
710 }
711 if (!freeChunks_.empty()) {
712 return PopFreeCacheChunk(freeChunks_, offset);
713 }
714 auto currWritePos = GetOffsetFragmentCache(writePos_, offset, BoundedIntervalComp);
715 size_t allowChunkNum = 0;
716 if (currWritePos != fragmentCacheBuffer_.end()) {
717 allowChunkNum = CalcAllowMaxChunkNum(currWritePos->totalReadSize, currWritePos->offsetBegin);
718 DeleteHasReadFragmentCacheBuffer(currWritePos, allowChunkNum);
719 if (currWritePos->chunks.size() >= allowChunkNum && !checkAllowFailContinue) {
720 MEDIA_LOG_D("allowChunkNum limit.");
721 return nullptr;
722 }
723 } else {
724 MEDIA_LOG_D("curr write is new fragment.");
725 }
726 MEDIA_LOG_D("clear other fragment has read chunk.");
727 for (auto iter = fragmentCacheBuffer_.begin(); iter != fragmentCacheBuffer_.end(); ++iter) {
728 if (iter != currWritePos) {
729 allowChunkNum = CalcAllowMaxChunkNum(iter->totalReadSize, iter->offsetBegin);
730 DeleteHasReadFragmentCacheBuffer(iter, allowChunkNum);
731 }
732 }
733 if (!freeChunks_.empty()) {
734 return PopFreeCacheChunk(freeChunks_, offset);
735 }
736 return nullptr;
737 }
738
FragmentIterator CacheMediaChunkBufferImpl::GetFragmentIterator(FragmentIterator& currFragmentIter,
    uint64_t offset, ChunkIterator chunkPos, CacheChunk* splitHead, CacheChunk*& chunkInfo)
741 {
742 auto newFragmentPos = fragmentCacheBuffer_.emplace(std::next(currFragmentIter), offset);
743 if (splitHead == nullptr) {
744 newFragmentPos->chunks.splice(newFragmentPos->chunks.end(), currFragmentIter->chunks, chunkPos,
745 currFragmentIter->chunks.end());
746 } else {
747 splitHead->dataLength = 0;
748 newFragmentPos->chunks.splice(newFragmentPos->chunks.end(), currFragmentIter->chunks, std::next(chunkPos),
749 currFragmentIter->chunks.end());
750 newFragmentPos->chunks.push_front(splitHead);
751 splitHead->offset = offset;
752 uint64_t diff = offset > chunkInfo->offset ? offset - chunkInfo->offset : 0;
753 if (chunkInfo->dataLength >= diff) {
754 splitHead->dataLength = chunkInfo->dataLength - static_cast<uint32_t>(diff);
755 chunkInfo->dataLength = static_cast<uint32_t>(diff);
756 memcpy_s(splitHead->data, splitHead->dataLength, chunkInfo->data + diff, splitHead->dataLength);
757 }
758 }
759 newFragmentPos->offsetBegin = offset;
760 uint64_t diff = offset > currFragmentIter->offsetBegin ? offset - currFragmentIter->offsetBegin : 0;
761 newFragmentPos->dataLength = currFragmentIter->dataLength > static_cast<int64_t>(diff) ?
762 currFragmentIter->dataLength - static_cast<int64_t>(diff) : 0;
763 newFragmentPos->accessLength = 0;
764 uint64_t newReadSizeInit = static_cast<uint64_t>(1 + initReadSizeFactor_ * static_cast<double>(totalReadSize_));
765 newReadSizeInit = std::max(newReadSizeInit, currFragmentIter->totalReadSize);
766
767 newFragmentPos->totalReadSize = newReadSizeInit;
768 totalReadSize_ += newReadSizeInit;
769 newFragmentPos->readTime = Clock::now();
770 newFragmentPos->accessPos = newFragmentPos->chunks.begin();
771 newFragmentPos->isSplit = currFragmentIter->isSplit;
772 currFragmentIter->isSplit = true;
773 currFragmentIter->dataLength = static_cast<int64_t>(offset > currFragmentIter->offsetBegin ?
774 offset - currFragmentIter->offsetBegin : 0);
775 return newFragmentPos;
776 }
777
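// Splits the fragment that contains offset into two fragments at offset (used when a read or seek lands
// well beyond the current access position); returns the chunk position to continue from.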
ChunkIterator CacheMediaChunkBufferImpl::SplitFragmentCacheBuffer(FragmentIterator& currFragmentIter,
    uint64_t offset, ChunkIterator chunkPos)
780 {
781 ResetReadSizeAlloc();
782 auto& chunkInfo = *chunkPos;
783 CacheChunk* splitHead = nullptr;
784 if (offset != chunkInfo->offset) {
785 splitHead = freeChunks_.empty() ? GetFreeCacheChunk(offset, true) : PopFreeCacheChunk(freeChunks_, offset);
786 if (splitHead == nullptr) {
787 return chunkPos;
788 }
789 }
790 auto newFragmentPos = GetFragmentIterator(currFragmentIter, offset, chunkPos, splitHead, chunkInfo);
791 currFragmentIter = newFragmentPos;
792 if (fragmentCacheBuffer_.size() > CACHE_FRAGMENT_MAX_NUM_DEFAULT) {
793 CheckThresholdFragmentCacheBuffer(currFragmentIter);
794 }
795 lruCache_.Refer(newFragmentPos->offsetBegin, newFragmentPos);
796 return newFragmentPos->accessPos;
797 }
798
ChunkIterator CacheMediaChunkBufferHlsImpl::SplitFragmentCacheBuffer(FragmentIterator& currFragmentIter,
    uint64_t offset, ChunkIterator chunkPos)
801 {
802 ResetReadSizeAlloc();
803 auto& chunkInfo = *chunkPos;
804 CacheChunk* splitHead = nullptr;
805 if (offset != chunkInfo->offset) {
806 splitHead = freeChunks_.empty() ? GetFreeCacheChunk(offset, true) : PopFreeCacheChunk(freeChunks_, offset);
807 if (splitHead == nullptr) {
808 return chunkPos;
809 }
810 }
811 auto newFragmentPos = fragmentCacheBuffer_.emplace(std::next(currFragmentIter), offset);
812 if (splitHead == nullptr) {
813 newFragmentPos->chunks.splice(newFragmentPos->chunks.end(), currFragmentIter->chunks, chunkPos,
814 currFragmentIter->chunks.end());
815 } else {
816 newFragmentPos->chunks.splice(newFragmentPos->chunks.end(), currFragmentIter->chunks, std::next(chunkPos),
817 currFragmentIter->chunks.end());
818 newFragmentPos->chunks.push_front(splitHead);
819 splitHead->offset = offset;
820 uint64_t diff = offset > chunkInfo->offset ? offset - chunkInfo->offset : 0;
821 if (chunkInfo->dataLength >= diff) {
822 splitHead->dataLength = chunkInfo->dataLength > static_cast<uint32_t>(diff) ?
823 chunkInfo->dataLength - static_cast<uint32_t>(diff) : 0;
824 chunkInfo->dataLength = static_cast<uint32_t>(diff);
825 memcpy_s(splitHead->data, splitHead->dataLength, chunkInfo->data + diff, splitHead->dataLength);
826 } else {
splitHead->dataLength = 0; // Should not happen; ASan would flag it.
828 }
829 }
830 newFragmentPos->offsetBegin = offset;
831 uint64_t diff = offset > currFragmentIter->offsetBegin ? offset - currFragmentIter->offsetBegin : 0;
832 newFragmentPos->dataLength = currFragmentIter->dataLength > static_cast<int64_t>(diff) ?
833 currFragmentIter->dataLength - static_cast<int64_t>(diff) : 0;
834 newFragmentPos->accessLength = 0;
835 uint64_t newReadSizeInit = static_cast<uint64_t>(1 + initReadSizeFactor_ * static_cast<double>(totalReadSize_));
836 if (currFragmentIter->totalReadSize > newReadSizeInit) {
837 newReadSizeInit = currFragmentIter->totalReadSize;
838 }
839 newFragmentPos->totalReadSize = newReadSizeInit;
840 totalReadSize_ += newReadSizeInit;
841 newFragmentPos->readTime = Clock::now();
842 newFragmentPos->accessPos = newFragmentPos->chunks.begin();
currFragmentIter->dataLength = static_cast<int64_t>(diff); // keep only the data before the split point
844 currFragmentIter = newFragmentPos;
845
846 lruCache_.Refer(newFragmentPos->offsetBegin, newFragmentPos);
847 return newFragmentPos->accessPos;
848 }
849
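// Creates a new fragment for offset (evicting the LRU fragment when the fragment count limit is reached),
// gives it an initial read-size quota and its first chunk; returns the chunk iterator to write into.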
ChunkIterator CacheMediaChunkBufferImpl::AddFragmentCacheBuffer(uint64_t offset)
851 {
852 size_t fragmentThreshold = CACHE_FRAGMENT_MAX_NUM_DEFAULT;
853 if (isLargeOffsetSpan_) {
854 fragmentThreshold = CACHE_FRAGMENT_MAX_NUM_LARGE;
855 }
856 if (fragmentCacheBuffer_.size() >= fragmentThreshold) {
857 auto fragmentIterTmp = fragmentCacheBuffer_.end();
858 CheckThresholdFragmentCacheBuffer(fragmentIterTmp);
859 }
860 ResetReadSizeAlloc();
861 auto fragmentInsertPos = std::upper_bound(fragmentCacheBuffer_.begin(), fragmentCacheBuffer_.end(), offset,
862 [](auto mediaOffset, const FragmentCacheBuffer& fragment) {
863 if (mediaOffset <= fragment.offsetBegin + fragment.dataLength) {
864 return true;
865 }
866 return false;
867 });
868 auto newFragmentPos = fragmentCacheBuffer_.emplace(fragmentInsertPos, offset);
869 uint64_t newReadSizeInit = static_cast<uint64_t>(1 + initReadSizeFactor_ * static_cast<double>(totalReadSize_));
870 totalReadSize_ += newReadSizeInit;
871 newFragmentPos->totalReadSize = newReadSizeInit;
872 writePos_ = newFragmentPos;
873 writePos_->accessPos = writePos_->chunks.end();
874 lruCache_.Refer(newFragmentPos->offsetBegin, newFragmentPos);
875 auto freeChunk = GetFreeCacheChunk(offset);
876 if (freeChunk == nullptr) {
877 MEDIA_LOG_D("get free cache chunk fail.");
878 return writePos_->chunks.end();
879 }
880 writePos_->accessPos = newFragmentPos->chunks.emplace(newFragmentPos->chunks.end(), freeChunk);
881 return writePos_->accessPos;
882 }
883
ChunkIterator CacheMediaChunkBufferHlsImpl::AddFragmentCacheBuffer(uint64_t offset)
885 {
886 ResetReadSizeAlloc();
887 auto fragmentInsertPos = std::upper_bound(fragmentCacheBuffer_.begin(), fragmentCacheBuffer_.end(), offset,
888 [](auto mediaOffset, const FragmentCacheBuffer& fragment) {
889 if (mediaOffset <= fragment.offsetBegin + fragment.dataLength) {
890 return true;
891 }
892 return false;
893 });
894 auto newFragmentPos = fragmentCacheBuffer_.emplace(fragmentInsertPos, offset);
895 uint64_t newReadSizeInit = static_cast<uint64_t>(1 + initReadSizeFactor_ * static_cast<double>(totalReadSize_));
896 totalReadSize_ += newReadSizeInit;
897 newFragmentPos->totalReadSize = newReadSizeInit;
898 writePos_ = newFragmentPos;
899 writePos_->accessPos = writePos_->chunks.end();
900 lruCache_.Refer(newFragmentPos->offsetBegin, newFragmentPos);
901 auto freeChunk = GetFreeCacheChunk(offset);
902 if (freeChunk == nullptr) {
903 MEDIA_LOG_D("get free cache chunk fail.");
904 return writePos_->chunks.end();
905 }
906 writePos_->accessPos = newFragmentPos->chunks.emplace(newFragmentPos->chunks.end(), freeChunk);
907 return writePos_->accessPos;
908 }
909
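// Rescales the per-fragment read-size counters once the accumulated total exceeds the upper limit,
// keeping the statistics bounded.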
void CacheMediaChunkBufferImpl::ResetReadSizeAlloc()
911 {
912 size_t chunkNum = chunkMaxNum_ + 1 >= freeChunks_.size() ?
913 chunkMaxNum_ + 1 - freeChunks_.size() : 0;
914 if (totalReadSize_ > static_cast<size_t>(UP_LIMIT_MAX_TOTAL_READ_SIZE) && chunkNum > 0) {
915 size_t preChunkSize = static_cast<size_t>(MAX_TOTAL_READ_SIZE - 1) / chunkNum;
916 for (auto iter = fragmentCacheBuffer_.begin(); iter != fragmentCacheBuffer_.end(); ++iter) {
917 iter->totalReadSize = preChunkSize * iter->chunks.size();
918 }
919 totalReadSize_ = preChunkSize * chunkNum;
920 }
921 }
922
void CacheMediaChunkBufferImpl::Dump(uint64_t param)
924 {
925 std::lock_guard lock(mutex_);
926 DumpInner(param);
927 }
928
void CacheMediaChunkBufferImpl::DumpInner(uint64_t param)
930 {
931 (void)param;
932 MEDIA_LOG_D("cacheBuff total buffer size : " PUBLIC_LOG_U64, totalBuffSize_);
933 MEDIA_LOG_D("cacheBuff total chunk size : " PUBLIC_LOG_U32, chunkSize_);
934 MEDIA_LOG_D("cacheBuff total chunk num : " PUBLIC_LOG_U32, chunkMaxNum_);
935 MEDIA_LOG_D("cacheBuff total read size : " PUBLIC_LOG_U64, totalReadSize_);
936 MEDIA_LOG_D("cacheBuff read size factor : " PUBLIC_LOG_F, initReadSizeFactor_);
MEDIA_LOG_D("cacheBuff free chunk num : " PUBLIC_LOG_ZU, freeChunks_.size());
MEDIA_LOG_D("cacheBuff fragment num : " PUBLIC_LOG_ZU, fragmentCacheBuffer_.size());
939 for (auto const & fragment : fragmentCacheBuffer_) {
940 MEDIA_LOG_D("cacheBuff - fragment offset : " PUBLIC_LOG_U64, fragment.offsetBegin);
941 MEDIA_LOG_D("cacheBuff fragment length : " PUBLIC_LOG_D64, fragment.dataLength);
942 MEDIA_LOG_D("cacheBuff chunk num : " PUBLIC_LOG_ZU, fragment.chunks.size());
MEDIA_LOG_D("cacheBuff access length : " PUBLIC_LOG_D64, fragment.accessLength);
944 MEDIA_LOG_D("cacheBuff read size : " PUBLIC_LOG_U64, fragment.totalReadSize);
945 if (fragment.accessPos != fragment.chunks.end()) {
946 auto &chunkInfo = *fragment.accessPos;
947 MEDIA_LOG_D("cacheBuff access offset: " PUBLIC_LOG_D64 ", len: " PUBLIC_LOG_U32,
948 chunkInfo->offset, chunkInfo->dataLength);
949 } else {
950 MEDIA_LOG_D("cacheBuff access ended");
951 }
952 if (!fragment.chunks.empty()) {
953 auto &chunkInfo = fragment.chunks.back();
954 MEDIA_LOG_D("cacheBuff last chunk offset: " PUBLIC_LOG_D64 ", len: " PUBLIC_LOG_U32,
955 chunkInfo->offset, chunkInfo->dataLength);
956 }
957 MEDIA_LOG_D("cacheBuff ");
958 }
959 }
960
bool CacheMediaChunkBufferImpl::Check()
962 {
963 std::lock_guard lock(mutex_);
964 return CheckInner();
965 }
966
void CacheMediaChunkBufferImpl::Clear()
968 {
969 std::lock_guard lock(mutex_);
970 auto iter = fragmentCacheBuffer_.begin();
971 while (iter != fragmentCacheBuffer_.end()) {
972 freeChunks_.splice(freeChunks_.end(), iter->chunks);
973 iter = EraseFragmentCache(iter);
974 }
975 lruCache_.Reset();
976 totalReadSize_ = 0;
977 }
978
uint64_t CacheMediaChunkBufferImpl::GetFreeSize()
980 {
981 std::lock_guard lock(mutex_);
982 uint64_t totalFreeSize = totalBuffSize_;
983 for (auto iter = fragmentCacheBuffer_.begin(); iter != fragmentCacheBuffer_.end(); iter++) {
984 uint64_t fragmentDataLen = static_cast<uint64_t>(iter->dataLength);
985 totalFreeSize = totalFreeSize > fragmentDataLen ? totalFreeSize - fragmentDataLen : 0;
986 }
987 return totalFreeSize;
988 }
989
// Release the chunks that lie entirely before the offset within the fragment that contains it.
bool CacheMediaChunkBufferImpl::ClearChunksOfFragment(uint64_t offset)
992 {
993 std::lock_guard lock(mutex_);
994 bool res = false;
995 auto fragmentPos = GetOffsetFragmentCache(readPos_, offset, LeftBoundedRightOpenComp);
996 if (fragmentPos == fragmentCacheBuffer_.end()) {
997 return false;
998 }
999 auto& fragment = *fragmentPos;
1000 uint32_t chunkSize = fragment.chunks.size();
1001 for (uint32_t i = 0; i < chunkSize; ++i) {
1002 auto chunkIter = fragment.chunks.front();
1003 if (chunkIter->offset + chunkIter->dataLength >= offset) {
1004 break;
1005 }
1006
1007 auto chunkPos = fragmentPos->accessPos;
1008 if (chunkIter->offset >= (*chunkPos)->offset) { // Update accessPos of fragment
1009 chunkPos = GetOffsetChunkCache(fragmentPos->chunks, chunkIter->offset + chunkIter->dataLength,
1010 LeftBoundedRightOpenComp);
1011 (*fragmentPos).accessPos = chunkPos;
1012 }
1013
1014 MEDIA_LOG_D("ClearChunksOfFragment clear chunk, offsetBegin: " PUBLIC_LOG_U64 " offsetEnd " PUBLIC_LOG_U64,
1015 chunkIter->offset, chunkIter->offset + chunkIter->dataLength);
1016 auto tmp = UpdateFragmentCacheForDelHead(fragmentPos);
1017 if (tmp != nullptr) {
1018 res = true;
1019 freeChunks_.push_back(tmp);
1020 }
1021 }
1022 return res;
1023 }
1024
// Release every fragment that ends at or before the offset.
bool CacheMediaChunkBufferImpl::ClearFragmentBeforeOffset(uint64_t offset)
1027 {
1028 std::lock_guard lock(mutex_);
1029 bool res = false;
1030 for (auto iter = fragmentCacheBuffer_.begin(); iter != fragmentCacheBuffer_.end();) {
1031 if (iter->offsetBegin >= offset) {
1032 break;
1033 }
1034 if (iter->offsetBegin + static_cast<uint64_t>(iter->dataLength) <= offset) {
1035 MEDIA_LOG_D("ClearFragmentBeforeOffset clear fragment, offsetBegin: " PUBLIC_LOG_U64 " offsetEnd "
1036 PUBLIC_LOG_U64, iter->offsetBegin, iter->offsetBegin + iter->dataLength);
1037 freeChunks_.splice(freeChunks_.end(), iter->chunks);
1038 iter = EraseFragmentCache(iter);
1039 res = true;
1040 continue;
1041 }
1042 iter++;
1043 }
1044 return res;
1045 }
1046
// Release the already-read chunks of fragments that lie between minOffset and maxOffset.
bool CacheMediaChunkBufferImpl::ClearMiddleReadFragment(uint64_t minOffset, uint64_t maxOffset)
1049 {
1050 std::lock_guard lock(mutex_);
1051 bool res = false;
1052 for (auto iter = fragmentCacheBuffer_.begin(); iter != fragmentCacheBuffer_.end(); iter++) {
1053 if (iter->offsetBegin + static_cast<uint64_t>(iter->dataLength) < minOffset) {
1054 continue;
1055 }
1056 if (iter->offsetBegin > maxOffset) {
1057 break;
1058 }
1059 if (iter->accessLength <= chunkSize_) {
1060 continue;
1061 }
1062 MEDIA_LOG_D("ClearMiddleReadFragment, minOffset: " PUBLIC_LOG_U64 " maxOffset: "
1063 PUBLIC_LOG_U64 " offsetBegin: " PUBLIC_LOG_U64 " dataLength: " PUBLIC_LOG_D64 " accessLength "
1064 PUBLIC_LOG_D64, minOffset, maxOffset, iter->offsetBegin, iter->dataLength, iter->accessLength);
1065 auto& fragment = *iter;
1066 uint32_t chunksSize = fragment.chunks.size();
1067 for (uint32_t i = 0; i < chunksSize; ++i) {
1068 auto chunkIter = fragment.chunks.front();
1069 if (chunkIter->dataLength >= iter->accessLength ||
1070 (chunkIter->offset + chunkIter->dataLength >= maxOffset &&
1071 chunkIter->offset <= minOffset)) {
1072 break;
1073 }
1074 auto tmp = UpdateFragmentCacheForDelHead(iter);
if (tmp != nullptr) {
res = true;
freeChunks_.push_back(tmp);
}
1078 }
1079 }
1080 return res;
1081 }
1082
bool CacheMediaChunkBufferImpl::IsReadSplit(uint64_t offset)
1084 {
1085 std::lock_guard lock(mutex_);
1086 auto readPos = GetOffsetFragmentCache(readPos_, offset, LeftBoundedRightOpenComp);
1087 if (readPos != fragmentCacheBuffer_.end()) {
1088 return readPos->isSplit;
1089 }
1090 return false;
1091 }
1092
void CacheMediaChunkBufferImpl::SetIsLargeOffsetSpan(bool isLargeOffsetSpan)
1094 {
1095 isLargeOffsetSpan_ = isLargeOffsetSpan;
1096 }
1097
bool CacheMediaChunkBufferImpl::DumpAndCheckInner()
1099 {
1100 DumpInner(0);
1101 return CheckInner();
1102 }
1103
void CacheMediaChunkBufferImpl::CheckFragment(const FragmentCacheBuffer& fragment, bool& checkSuccess)
1105 {
1106 if (fragment.accessPos != fragment.chunks.end()) {
1107 auto& accessChunk = *fragment.accessPos;
1108 auto accessLength = accessChunk->offset > fragment.offsetBegin ?
1109 accessChunk->offset - fragment.offsetBegin : 0;
1110 if (fragment.accessLength < accessLength ||
1111 fragment.accessLength >
1112 (static_cast<int64_t>(accessLength) + static_cast<int64_t>(accessChunk->dataLength))) {
1113 checkSuccess = false;
1114 }
1115 }
1116 }
1117
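// Debug consistency check: chunk offsets must be contiguous inside each fragment, and per-fragment lengths,
// the chunk count and the read-size totals must all add up.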
bool CacheMediaChunkBufferImpl::CheckInner()
1119 {
1120 uint64_t chunkNum = 0;
1121 uint64_t totalReadSize = 0;
1122 bool checkSuccess = true;
1123 chunkNum = freeChunks_.size();
1124 for (auto const& fragment : fragmentCacheBuffer_) {
1125 int64_t dataLength = 0;
1126 chunkNum += fragment.chunks.size();
1127 totalReadSize += fragment.totalReadSize;
1128
1129 auto prev = fragment.chunks.begin();
1130 auto next = fragment.chunks.end();
1131 if (!fragment.chunks.empty()) {
1132 dataLength += static_cast<int64_t>((*prev)->dataLength);
1133 next = std::next(prev);
1134 if ((*prev)->offset != fragment.offsetBegin) {
1135 checkSuccess = false;
1136 }
1137 }
1138 while (next != fragment.chunks.end()) {
1139 auto &chunkPrev = *prev;
1140 auto &chunkNext = *next;
1141 dataLength += static_cast<int64_t>(chunkNext->dataLength);
1142 if (chunkPrev->offset + chunkPrev->dataLength != chunkNext->offset) {
1143 checkSuccess = false;
1144 }
1145 ++next;
1146 ++prev;
1147 }
1148 if (dataLength != fragment.dataLength) {
1149 checkSuccess = false;
1150 }
1151 CheckFragment(fragment, checkSuccess);
1152 }
1153 if (chunkNum != chunkMaxNum_ + 1) {
1154 checkSuccess = false;
1155 }
1156
1157 if (totalReadSize != totalReadSize_) {
1158 checkSuccess = false;
1159 }
1160 return checkSuccess;
1161 }
1162
1163
CacheMediaChunkBuffer::CacheMediaChunkBuffer()
1165 {
1166 MEDIA_LOG_D("enter");
1167 impl_ = std::make_unique<CacheMediaChunkBufferImpl>();
}
1169
CacheMediaChunkBuffer::~CacheMediaChunkBuffer()
1171 {
1172 MEDIA_LOG_D("exit");
1173 }
1174
bool CacheMediaChunkBuffer::Init(uint64_t totalBuffSize, uint32_t chunkSize)
1176 {
1177 return impl_->Init(totalBuffSize, chunkSize);
1178 }
1179
size_t CacheMediaChunkBuffer::Read(void* ptr, uint64_t offset, size_t readSize)
1181 {
1182 return impl_->Read(ptr, offset, readSize);
1183 }
1184
size_t CacheMediaChunkBuffer::Write(void* ptr, uint64_t offset, size_t writeSize)
1186 {
1187 return impl_->Write(ptr, offset, writeSize);
1188 }
1189
bool CacheMediaChunkBuffer::Seek(uint64_t offset)
1191 {
1192 return impl_->Seek(offset);
1193 }
1194
size_t CacheMediaChunkBuffer::GetBufferSize(uint64_t offset)
1196 {
1197 return impl_->GetBufferSize(offset);
1198 }
1199
uint64_t CacheMediaChunkBuffer::GetNextBufferOffset(uint64_t offset)
1201 {
1202 return impl_->GetNextBufferOffset(offset);
1203 }
1204
void CacheMediaChunkBuffer::Clear()
1206 {
1207 return impl_->Clear();
1208 }
1209
uint64_t CacheMediaChunkBuffer::GetFreeSize()
1211 {
1212 return impl_->GetFreeSize();
1213 }
1214
bool CacheMediaChunkBuffer::ClearFragmentBeforeOffset(uint64_t offset)
1216 {
1217 return impl_->ClearFragmentBeforeOffset(offset);
1218 }
1219
bool CacheMediaChunkBuffer::ClearChunksOfFragment(uint64_t offset)
1221 {
1222 return impl_->ClearChunksOfFragment(offset);
1223 }
1224
bool CacheMediaChunkBuffer::ClearMiddleReadFragment(uint64_t minOffset, uint64_t maxOffset)
1226 {
1227 return impl_->ClearMiddleReadFragment(minOffset, maxOffset);
1228 }
1229
bool CacheMediaChunkBuffer::IsReadSplit(uint64_t offset)
1231 {
1232 return impl_->IsReadSplit(offset);
1233 }
1234
void CacheMediaChunkBuffer::SetIsLargeOffsetSpan(bool isLargeOffsetSpan)
1236 {
1237 return impl_->SetIsLargeOffsetSpan(isLargeOffsetSpan);
1238 }
1239
void CacheMediaChunkBuffer::SetReadBlocking(bool isReadBlockingAllowed)
1241 {
1242 (void)isReadBlockingAllowed;
1243 }
1244
void CacheMediaChunkBuffer::Dump(uint64_t param)
1246 {
1247 return impl_->Dump(param);
1248 }
1249
bool CacheMediaChunkBuffer::Check()
1251 {
1252 return impl_->Check();
1253 }
} // namespace Media
} // namespace OHOS