1 /*
2  * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
3  * Licensed under the Mulan PSL v2.
4  * You can use this software according to the terms and conditions of the Mulan PSL v2.
5  * You may obtain a copy of Mulan PSL v2 at:
6  *     http://license.coscl.org.cn/MulanPSL2
7  * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
8  * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
9  * PURPOSE.
10  * See the Mulan PSL v2 for more details.
11  */
12 #include <mm/mm.h>
13 #include <common/kprint.h>
14 #include <common/macro.h>
15 #include <mm/slab.h>
16 #include <mm/buddy.h>
17 
/* The following two will be filled by parse_mem_map. */
/* Each entry is one continuous physical range: [i][0] = start paddr, [i][1] = end paddr. */
paddr_t physmem_map[N_PHYS_MEM_POOLS][2];
/* Number of valid entries in physmem_map. */
int physmem_map_num;

/* One buddy-allocator pool per continuous physical memory range. */
struct phys_mem_pool global_mem[N_PHYS_MEM_POOLS];
23 
24 /*
25  * The layout of each physmem:
26  * | metadata (npages * sizeof(struct page)) | start_vaddr ... (npages *
27  * PAGE_SIZE) |
28  */
init_buddy_for_one_physmem_map(int physmem_map_idx)29 static void init_buddy_for_one_physmem_map(int physmem_map_idx)
30 {
31     paddr_t free_mem_start = 0;
32     paddr_t free_mem_end = 0;
33     struct page *page_meta_start = NULL;
34     unsigned long npages = 0;
35     unsigned long npages1 = 0;
36     paddr_t free_page_start = 0;
37 
38     free_mem_start = physmem_map[physmem_map_idx][0];
39     free_mem_end = physmem_map[physmem_map_idx][1];
40     kdebug("mem pool %d, free_mem_start: 0x%lx, free_mem_end: 0x%lx\n",
41            physmem_map_idx,
42            free_mem_start,
43            free_mem_end);
44 #ifdef KSTACK_BASE
45     /* KSTACK_BASE should not locate in free_mem_start ~ free_mem_end */
46     BUG_ON(KSTACK_BASE >= phys_to_virt(free_mem_start)
47            && KSTACK_BASE < phys_to_virt(free_mem_end));
48 #endif
49     npages =
50         (free_mem_end - free_mem_start) / (PAGE_SIZE + sizeof(struct page));
51     free_page_start =
52         ROUND_UP(free_mem_start + npages * sizeof(struct page), PAGE_SIZE);
53 
54     /* Recalculate npages after alignment. */
55     npages1 = (free_mem_end - free_page_start) / PAGE_SIZE;
56     npages = npages < npages1 ? npages : npages1;
57 
58     page_meta_start = (struct page *)phys_to_virt(free_mem_start);
59     kdebug(
60         "page_meta_start: 0x%lx, npages: 0x%lx, meta_size: 0x%lx, free_page_start: 0x%lx\n",
61         page_meta_start,
62         npages,
63         sizeof(struct page),
64         free_page_start);
65 
66     /* Initialize the buddy allocator based on this free memory region. */
67     init_buddy(&global_mem[physmem_map_idx],
68                page_meta_start,
69                phys_to_virt(free_page_start),
70                npages);
71 }
72 
mm_init(void * physmem_info)73 void mm_init(void *physmem_info)
74 {
75     int physmem_map_idx;
76 
77     /* Step-1: parse the physmem_info to get each continuous range of the
78      * physmem. */
79     physmem_map_num = 0;
80     parse_mem_map(physmem_info);
81 
82     /* Step-2: init the buddy allocators for each continuous range of the
83      * physmem. */
84     for (physmem_map_idx = 0; physmem_map_idx < physmem_map_num;
85          ++physmem_map_idx)
86         init_buddy_for_one_physmem_map(physmem_map_idx);
87 
88     /* Step-3: init the slab allocator. */
89     init_slab();
90 }
91 
get_free_mem_size(void)92 unsigned long get_free_mem_size(void)
93 {
94     unsigned long size;
95     int i;
96 
97     size = get_free_mem_size_from_slab();
98     for (i = 0; i < physmem_map_num; ++i)
99         size += get_free_mem_size_from_buddy(&global_mem[i]);
100 
101     return size;
102 }
103 
get_total_mem_size(void)104 unsigned long get_total_mem_size(void)
105 {
106     unsigned long size = 0;
107     int i;
108 
109     for (i = 0; i < physmem_map_num; ++i)
110         size += get_total_mem_size_from_buddy(&global_mem[i]);
111 
112     return size;
113 }