// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/hyperhold/hp_iotab.c
 *
 * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd.
 */

#define pr_fmt(fmt) "[HYPERHOLD]" fmt

#include <linux/slab.h>
#include <linux/mm.h>

#include "hp_iotab.h"

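/* total memory charged to hpio objects and their page arrays, in bytes */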
atomic64_t hpio_mem = ATOMIC64_INIT(0);
u64 hpio_memory(void)
{
	return atomic64_read(&hpio_mem);
}

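/*
 * Table of all inflight hpio: @io_list holds them, @lock protects the list
 * and @io_cnt, and @empty_wq is woken once the table drains empty.
 */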
struct hp_iotab {
	struct list_head io_list;
	rwlock_t lock;
	u32 io_cnt;
	wait_queue_head_t empty_wq;
};

/* store all inflight hpio in iotab */
struct hp_iotab iotab = {
	.io_list = LIST_HEAD_INIT(iotab.io_list),
	.lock = __RW_LOCK_UNLOCKED(iotab.lock),
	.io_cnt = 0,
	.empty_wq = __WAIT_QUEUE_HEAD_INITIALIZER(iotab.empty_wq),
};

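/*
 * Find the hpio with @eid in @iotab and take a reference on it; the caller
 * must hold @iotab->lock.
 */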
static struct hpio *__iotab_search_get(struct hp_iotab *iotab, u32 eid)
{
	struct hpio *hpio = NULL;

	list_for_each_entry(hpio, &iotab->io_list, list)
		if (hpio->eid == eid && kref_get_unless_zero(&hpio->refcnt))
			return hpio;

	return NULL;
}

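/* locked lookup: return the inflight hpio for @eid with a reference, or NULL */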
static struct hpio *iotab_search_get(struct hp_iotab *iotab, u32 eid)
{
	struct hpio *hpio = NULL;
	unsigned long flags;

	read_lock_irqsave(&iotab->lock, flags);
	hpio = __iotab_search_get(iotab, eid);
	read_unlock_irqrestore(&iotab->lock, flags);

	pr_info("find hpio %p for eid %u.\n", hpio, eid);

	return hpio;
}

/*
 * Insert @hpio into @iotab; if a hpio with the same @eid already exists,
 * cancel the insertion, take a reference on the duplicate and return it.
 */
static struct hpio *iotab_insert(struct hp_iotab *iotab, struct hpio *hpio)
{
	struct hpio *dup = NULL;
	unsigned long flags;

	write_lock_irqsave(&iotab->lock, flags);
	dup = __iotab_search_get(iotab, hpio->eid);
	if (dup) {
		pr_info("found existing hpio %p for eid %u, insert hpio %p failed.\n",
				dup, hpio->eid, hpio);
		goto unlock;
	}
	list_add(&hpio->list, &iotab->io_list);
	iotab->io_cnt++;
	pr_info("insert new hpio %p for eid %u.\n", hpio, hpio->eid);
unlock:
	write_unlock_irqrestore(&iotab->lock, flags);

	return dup;
}

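/* remove @hpio from @iotab and wake up waiters once the table is empty */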
static void iotab_delete(struct hp_iotab *iotab, struct hpio *hpio)
{
	unsigned long flags;

	write_lock_irqsave(&iotab->lock, flags);
	list_del(&hpio->list);
	iotab->io_cnt--;
	if (!iotab->io_cnt)
		wake_up(&iotab->empty_wq);
	write_unlock_irqrestore(&iotab->lock, flags);

	pr_info("delete hpio %p for eid %u from iotab.\n", hpio, hpio->eid);
}

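/* put every page attached to @hpio and free the pages array itself */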
static void hpio_clear_pages(struct hpio *hpio)
{
	int i;

	if (!hpio->pages)
		return;

	for (i = 0; i < hpio->nr_page; i++)
		if (hpio->pages[i]) {
			put_page(hpio->pages[i]);
			atomic64_sub(PAGE_SIZE, &hpio_mem);
		}
	kfree(hpio->pages);
	atomic64_sub(sizeof(struct page *) * hpio->nr_page, &hpio_mem);
	hpio->nr_page = 0;
	hpio->pages = NULL;
}

/*
 * Allocate the pages array for @hpio; if @new_page, also allocate a fresh
 * page into each slot.
 */
static bool hpio_fill_pages(struct hpio *hpio, u32 nr_page, gfp_t gfp, bool new_page)
{
	int i;

	BUG_ON(hpio->pages);
	hpio->nr_page = nr_page;
	hpio->pages = kcalloc(hpio->nr_page, sizeof(struct page *), gfp);
	if (!hpio->pages)
		goto err;
	atomic64_add(sizeof(struct page *) * hpio->nr_page, &hpio_mem);

	if (!new_page)
		goto out;
	for (i = 0; i < hpio->nr_page; i++) {
		hpio->pages[i] = alloc_page(gfp);
		if (!hpio->pages[i])
			goto err;
		atomic64_add(PAGE_SIZE, &hpio_mem);
	}
out:
	return true;
err:
	hpio_clear_pages(hpio);

	return false;
}

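/* free @hpio and its pages; tolerates NULL and a partially built hpio */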
void hpio_free(struct hpio *hpio)
{
	if (!hpio)
		return;

	pr_info("free hpio = %p.\n", hpio);

	hpio_clear_pages(hpio);
	kfree(hpio);
	atomic64_sub(sizeof(struct hpio), &hpio_mem);
}

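/*
 * Allocate an hpio covering @nr_page pages for operation @op; with @new_page,
 * the page slots are filled with freshly allocated pages.
 */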
struct hpio *hpio_alloc(u32 nr_page, gfp_t gfp, unsigned int op, bool new_page)
{
	struct hpio *hpio = NULL;

	hpio = kzalloc(sizeof(struct hpio), gfp);
	if (!hpio)
		goto err;
	atomic64_add(sizeof(struct hpio), &hpio_mem);
	if (!hpio_fill_pages(hpio, nr_page, gfp, new_page))
		goto err;
	hpio->op = op;
	atomic_set(&hpio->state, HPIO_INIT);
	kref_init(&hpio->refcnt);
	init_completion(&hpio->wait);

	return hpio;
err:
	hpio_free(hpio);

	return NULL;
}

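/* take a reference on the inflight hpio for @eid, if there is one */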
struct hpio *hpio_get(u32 eid)
{
	return iotab_search_get(&iotab, eid);
}

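/*
 * Return the inflight hpio for @eid, allocating and inserting a new one if
 * none exists; a racing insert is resolved by adopting the duplicate that
 * iotab_insert() hands back.
 */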
struct hpio *hpio_get_alloc(u32 eid, u32 nr_page, gfp_t gfp, unsigned int op)
{
	struct hpio *hpio = NULL;
	struct hpio *dup = NULL;

	hpio = iotab_search_get(&iotab, eid);
	if (hpio) {
		pr_info("found existing hpio %p for eid %u.\n", hpio, eid);
		goto out;
	}
	hpio = hpio_alloc(nr_page, gfp, op, true);
	if (!hpio)
		goto out;
	hpio->eid = eid;

	pr_info("alloc hpio %p for eid %u.\n", hpio, eid);

	dup = iotab_insert(&iotab, hpio);
	if (dup) {
		hpio_free(hpio);
		hpio = dup;
	}
out:
	return hpio;
}

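/*
 * Illustrative caller sketch (not a function in this file; @eid, @nr_page and
 * the submit step are placeholders): look up or create the hpio for an
 * extent, wait for the I/O to finish, then drop the reference.
 *
 *	struct hpio *hpio = hpio_get_alloc(eid, nr_page, GFP_NOIO, REQ_OP_READ);
 *
 *	if (!hpio)
 *		return -ENOMEM;
 *	... submit the I/O; the completion path calls hpio_complete(hpio) ...
 *	hpio_wait(hpio);
 *	hpio_put(hpio);
 */

/*
 * Last reference dropped: remove @hpio from the iotab, give its extent back
 * through @free_extent if set, then free it.
 */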
static void hpio_release(struct kref *kref)
{
	struct hpio *hpio = container_of(kref, struct hpio, refcnt);

	iotab_delete(&iotab, hpio);
	if (hpio->free_extent)
		hpio->free_extent(hpio->eid);
	hpio_free(hpio);
}

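/* drop a reference; the final put tears the hpio down via hpio_release() */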
bool hpio_put(struct hpio *hpio)
{
	pr_info("put hpio %p for eid %u, ref = %u.\n", hpio, hpio->eid, kref_read(&hpio->refcnt));
	return kref_put(&hpio->refcnt, hpio_release);
}

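/* mark the I/O on @hpio done and wake up all of its waiters */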
void hpio_complete(struct hpio *hpio)
{
	pr_info("complete hpio %p for eid %u.\n", hpio, hpio->eid);
	complete_all(&hpio->wait);
}

void hpio_wait(struct hpio *hpio)
{
	wait_for_completion(&hpio->wait);
}

enum hpio_state hpio_get_state(struct hpio *hpio)
{
	return atomic_read(&hpio->state);
}

void hpio_set_state(struct hpio *hpio, enum hpio_state state)
{
	atomic_set(&hpio->state, state);
}

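/* atomically switch @hpio from state @from to @to; return true on success */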
bool hpio_change_state(struct hpio *hpio, enum hpio_state from, enum hpio_state to)
{
	return atomic_cmpxchg(&hpio->state, from, to) == from;
}

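/* log every hpio still inflight, for diagnosis before waiting on empty_wq */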
static void dump_iotab(struct hp_iotab *iotab)
{
	struct hpio *hpio = NULL;
	unsigned long flags;

	pr_info("dump inflight hpio in iotab.\n");
	read_lock_irqsave(&iotab->lock, flags);
	list_for_each_entry(hpio, &iotab->io_list, list)
		pr_info("hpio %p for eid %u is inflight.\n", hpio, hpio->eid);
	read_unlock_irqrestore(&iotab->lock, flags);
}

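/* block until every inflight hpio has been released from the iotab */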
void wait_for_iotab_empty(void)
{
	dump_iotab(&iotab);
	wait_event(iotab.empty_wq, !iotab.io_cnt);
}