/*
 * Copyright (c) 2025 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "common_components/common/page_cache.h"

namespace common {
PageCache PageCache::instance_;

std::mutex& PageCache::GetPageMutex() { return pageMtx_; }

// Get a Span of k pages.
Span* PageCache::NewSpan(size_t k)
{
    CHECK_CC(k > 0 && k < MAX_NPAGES);
    // 1. Direct addressing by k: if the k-th SpanList bucket is non-empty, hand out a Span from it.
    if (!pageCacheSpans_[k].Empty()) {
        Span* kSpan = pageCacheSpans_[k].PopFront();

        // Record every page of the Span in the page-id -> Span map.
        for (size_t i = 0; i < kSpan->pageNum; ++i) {
            idSpanMap_[kSpan->pageId + i] = kSpan;
        }

        return kSpan;
    }
    // 2. Search larger buckets and split a k-page Span off the first non-empty one.
    for (size_t n = k + 1; n < MAX_NPAGES; ++n) {
        if (!pageCacheSpans_[n].Empty()) {
            Span* kSpan = new Span();
            Span* nSpan = pageCacheSpans_[n].PopFront();

            // The first k pages go to the new Span; the remainder stays in nSpan.
            kSpan->pageNum = k;
            kSpan->pageId = nSpan->pageId;

            nSpan->pageNum -= k;
            nSpan->pageId += k;
            pageCacheSpans_[nSpan->pageNum].PushFront(nSpan);

            for (size_t i = 0; i < kSpan->pageNum; ++i) {
                idSpanMap_[kSpan->pageId + i] = kSpan;
            }

            return kSpan;
        }
    }
    // 3. No suitable Span is available; allocate a maximum-size Span (MAX_NPAGES - 1 pages)
    //    from the system and retry.
    Span* bigSpan = new Span;

    void* ptr = SystemAlloc(MAX_NPAGES - 1);

    bigSpan->pageNum = MAX_NPAGES - 1;
    bigSpan->pageId = reinterpret_cast<pageID>(ptr) >> PAGE_SHIFT;

    pageCacheSpans_[MAX_NPAGES - 1].PushFront(bigSpan);

    return NewSpan(k);
}

// Map an object address back to the Span that owns its page.
Span* PageCache::MapObjectToSpan(void* obj)
{
    CHECK_CC(obj != nullptr);
    ScopedPageCacheMutex mtx;
    pageID id = (reinterpret_cast<pageID>(obj) >> PAGE_SHIFT);
    auto ret = idSpanMap_.find(id);
    if (ret != idSpanMap_.end()) {
        return ret->second;
    } else {
        LOGF_CHECK(false) << "MapObjectToSpan false";
        return nullptr;
    }
}

// Return a Span to the page cache, coalescing it with adjacent free Spans.
void PageCache::ReleaseSpanToPageCache(Span* span)
{
    ScopedPageCacheMutex mtx;
    // Merge forward: absorb the free Span that ends right before this one.
    while (1) {
        pageID prevId = span->pageId - 1;
        auto ret = idSpanMap_.find(prevId);
        if (ret == idSpanMap_.end()) {
            break;
        }

        Span* prevSpan = ret->second;
        if (prevSpan->isUse) {
            break;
        }

        // Stop if the merged Span would exceed the largest bucket.
        if (prevSpan->pageNum + span->pageNum > MAX_NPAGES - 1) {
            break;
        }

        span->pageNum += prevSpan->pageNum;
        span->pageId = prevSpan->pageId;

        pageCacheSpans_[prevSpan->pageNum].Erase(prevSpan);
        delete prevSpan;
    }

    // Merge backward: absorb the free Span that starts right after this one.
    while (1) {
        pageID nextId = span->pageId + span->pageNum;
        auto ret = idSpanMap_.find(nextId);
        if (ret == idSpanMap_.end()) {
            break;
        }

        Span* nextSpan = ret->second;
        if (nextSpan->isUse) {
            break;
        }

        if (span->pageNum + nextSpan->pageNum > MAX_NPAGES - 1) {
            break;
        }

        span->pageNum += nextSpan->pageNum;

        pageCacheSpans_[nextSpan->pageNum].Erase(nextSpan);
        delete nextSpan;
    }

    // Hang the merged Span into its bucket, mark it free,
    // and update the page-id -> Span mapping.
    pageCacheSpans_[span->pageNum].PushFront(span);
    span->isUse = false;
    for (size_t i = 0; i < span->pageNum; ++i) {
        idSpanMap_[span->pageId + i] = span;
    }
}
} // namespace common