1 /*
2 * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
3 * Licensed under the Mulan PSL v2.
4 * You can use this software according to the terms and conditions of the Mulan PSL v2.
5 * You may obtain a copy of Mulan PSL v2 at:
6 * http://license.coscl.org.cn/MulanPSL2
7 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
8 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
9 * PURPOSE.
10 * See the Mulan PSL v2 for more details.
11 */
12
13 #include "drv_io_share.h"
14 #include <chcore/memory.h>
15 #include <chcore/container/hashtable.h>
16 #include <pthread.h>
17 #include <chcore/syscall.h>
18 #include <malloc.h>
19
/* Number of buckets in the paddr -> mapping-entry hash table. */
#define HTABLE_SIZE 509
/* Maps hashed physical addresses to struct entry records for active mappings. */
static struct htable addr2ent;
/* Guards addr2ent and io_init; taken by every ioremap/iounmap call. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
/* Lazy-initialization flag for addr2ent (see __self_init). */
static bool io_init;
24
/*
 * One active ioremap mapping: records the (page-aligned) physical base,
 * the virtual address it was mapped at, the page-rounded size, and the
 * device PMO capability backing the mapping. Linked into addr2ent,
 * keyed by __hash(paddr).
 */
struct entry {
        paddr_t paddr;          /* page-aligned physical base address */
        vaddr_t vaddr;          /* virtual address returned by ioremap */
        unsigned long size;     /* mapping size, rounded up to PAGE_SIZE */
        cap_t pmo;              /* device PMO capability for this range */
        struct hlist_node node; /* hook into the addr2ent hash bucket */
};
32
__self_init(void)33 static void __self_init(void)
34 {
35 if (io_init) {
36 return;
37 }
38 pthread_mutex_lock(&lock);
39 if (!io_init) {
40 init_htable(&addr2ent, HTABLE_SIZE);
41 }
42 pthread_mutex_unlock(&lock);
43 }
44
__hash(paddr_t paddr)45 static inline u32 __hash(paddr_t paddr)
46 {
47 return (u32)(paddr >> 12);
48 }
49
/*
 * Look up the mapping entry matching both the physical and the virtual
 * address. Returns NULL when no such mapping is registered.
 * Caller must hold `lock`.
 */
static struct entry *__get_entry(paddr_t paddr, vaddr_t vaddr)
{
        struct hlist_head *head;
        struct entry *cur;

        head = htable_get_bucket(&addr2ent, __hash(paddr));
        for_each_in_hlist (cur, node, head) {
                if (cur->paddr == paddr && cur->vaddr == vaddr) {
                        return cur;
                }
        }

        return NULL;
}
64
ioremap(uintptr_t phys_addr,unsigned long size,int32_t prot)65 void *ioremap(uintptr_t phys_addr, unsigned long size, int32_t prot)
66 {
67 struct entry *entry;
68 void *vaddr;
69 paddr_t paddr;
70 cap_t pmo;
71 int perm;
72
73 __self_init();
74 pthread_mutex_lock(&lock);
75
76 entry = malloc(sizeof(*entry));
77 if (entry == NULL) {
78 goto out_unlock;
79 }
80
81 paddr = ROUND_DOWN((paddr_t)phys_addr, PAGE_SIZE);
82 size = ROUND_UP(size, PAGE_SIZE);
83
84 pmo = usys_create_device_pmo(paddr, size);
85 if (pmo <= 0) {
86 goto out_free_entry;
87 }
88
89 perm = 0;
90 if (prot & PROT_READ) {
91 perm |= VMR_READ;
92 }
93 if (prot & PROT_WRITE) {
94 perm |= VMR_WRITE;
95 }
96 vaddr = chcore_auto_map_pmo(pmo, size, perm);
97 if (vaddr == NULL) {
98 goto out_revoke_pmo;
99 }
100
101 init_hlist_node(&entry->node);
102 entry->paddr = paddr;
103 entry->vaddr = (vaddr_t)vaddr;
104 entry->size = size;
105 entry->pmo = pmo;
106 htable_add(&addr2ent, __hash(paddr), &entry->node);
107
108 return vaddr;
109
110 out_revoke_pmo:
111 usys_revoke_cap(pmo, false);
112 out_free_entry:
113 free(entry);
114 out_unlock:
115 pthread_mutex_unlock(&lock);
116 return NULL;
117 }
118
iounmap(uintptr_t pddr,const void * addr)119 int32_t iounmap(uintptr_t pddr, const void *addr)
120 {
121 struct entry *entry;
122 int ret = 0;
123
124 __self_init();
125
126 pthread_mutex_lock(&lock);
127
128 entry = __get_entry(pddr, (vaddr_t)addr);
129 if (entry == NULL) {
130 ret = -ENOENT;
131 goto out_unlock;
132 }
133
134 htable_del(&entry->node);
135 chcore_auto_unmap_pmo(entry->pmo, entry->vaddr, entry->size);
136 usys_revoke_cap(entry->pmo, false);
137 free(entry);
138
139 out_unlock:
140 pthread_mutex_unlock(&lock);
141 return ret;
142 }
143