/*
 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 */

/**
 * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
 * @wrk: the work description object
 */
static void update_fastmap_work_fn(struct work_struct *wrk)
{
	struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);

	ubi_update_fastmap(ubi);
	spin_lock(&ubi->wl_lock);
	ubi->fm_work_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
}

/**
 * find_anchor_wl_entry - find the wear-leveling entry to be used as anchor PEB.
 * @root: the RB-tree to search
 */
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
{
	struct rb_node *p;
	struct ubi_wl_entry *e, *victim = NULL;
	int max_ec = UBI_MAX_ERASECOUNTER;

	ubi_rb_for_each_entry(p, e, root, u.rb) {
		if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
			victim = e;
			max_ec = e->ec;
		}
	}

	return victim;
}

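/**
 * return_unused_peb - return a PEB to the free tree.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to put back
 */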
static inline void return_unused_peb(struct ubi_device *ubi,
				     struct ubi_wl_entry *e)
{
	wl_tree_add(e, &ubi->free);
	ubi->free_count++;
}

/**
 * return_unused_pool_pebs - returns unused PEBs to the free tree.
 * @ubi: UBI device description object
 * @pool: fastmap pool description object
 */
static void return_unused_pool_pebs(struct ubi_device *ubi,
				    struct ubi_fm_pool *pool)
{
	int i;
	struct ubi_wl_entry *e;

	for (i = pool->used; i < pool->size; i++) {
		e = ubi->lookuptbl[pool->pebs[i]];
		return_unused_peb(ubi, e);
	}
}

/**
 * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
 * @ubi: UBI device description object
 * @anchor: This PEB will be used as anchor PEB by fastmap
 *
 * The function returns a physical erase block and removes it from the wl
 * subsystem. If @anchor is set, a PEB with a number below UBI_FM_MAX_START
 * is selected; otherwise a PEB with a mean erase count is taken.
 * Must be called with wl_lock held!
 */
struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
	struct ubi_wl_entry *e = NULL;

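	/* Never dip into the PEBs reserved for bad eraseblock handling. */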
	if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
		goto out;

	if (anchor)
		e = find_anchor_wl_entry(&ubi->free);
	else
		e = find_mean_wl_entry(ubi, &ubi->free);

	if (!e)
		goto out;

	self_check_in_wl_tree(ubi, e, &ubi->free);

	/* remove it from the free list,
	 * the wl subsystem no longer knows this erase block */
	rb_erase(&e->u.rb, &ubi->free);
	ubi->free_count--;
out:
	return e;
}

/**
 * ubi_refill_pools - refills all fastmap PEB pools.
 * @ubi: UBI device description object
 */
void ubi_refill_pools(struct ubi_device *ubi)
{
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_wl_entry *e;
	int enough;

	spin_lock(&ubi->wl_lock);

	return_unused_pool_pebs(ubi, wl_pool);
	return_unused_pool_pebs(ubi, pool);

	wl_pool->size = 0;
	pool->size = 0;

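	/*
	 * Refill both pools in lock step: each iteration adds at most one
	 * PEB to each pool, and a pool that is already full bumps @enough.
	 * The loop ends once both pools are full (enough == 2) or the free
	 * tree cannot supply any more PEBs.
	 */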
	for (;;) {
		enough = 0;
		if (pool->size < pool->max_size) {
			if (!ubi->free.rb_node)
				break;

			e = wl_get_wle(ubi);
			if (!e)
				break;

			pool->pebs[pool->size] = e->pnum;
			pool->size++;
		} else
			enough++;

		if (wl_pool->size < wl_pool->max_size) {
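			/*
			 * Stop refilling the WL pool when the free count
			 * drops close to the bad-EB handling reserve.
			 */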
			if (!ubi->free.rb_node ||
			   (ubi->free_count - ubi->beb_rsvd_pebs < 5))
				break;

			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
			self_check_in_wl_tree(ubi, e, &ubi->free);
			rb_erase(&e->u.rb, &ubi->free);
			ubi->free_count--;

			wl_pool->pebs[wl_pool->size] = e->pnum;
			wl_pool->size++;
		} else
			enough++;

		if (enough == 2)
			break;
	}

	wl_pool->used = 0;
	pool->used = 0;

	spin_unlock(&ubi->wl_lock);
}

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	while (!ubi->free.rb_node && ubi->works_count) {
		dbg_wl("do one work synchronously");
		err = do_work(ubi);

		if (err)
			return err;
	}

	return 0;
}

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure.
 * Returns with ubi->fm_eba_sem held in read mode!
 */
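/*
 * A minimal caller sketch (hypothetical helper name, for illustration only).
 * Because ubi_wl_get_peb() returns with fm_eba_sem held in read mode on both
 * the success and the error path, the caller must always drop it:
 *
 *	pnum = ubi_wl_get_peb(ubi);
 *	if (pnum >= 0)
 *		err = write_data_to_peb(ubi, pnum); // hypothetical
 *	up_read(&ubi->fm_eba_sem);
 */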
int ubi_wl_get_peb(struct ubi_device *ubi)
{
	int ret, retried = 0;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;

again:
	down_read(&ubi->fm_eba_sem);
	spin_lock(&ubi->wl_lock);

	/* We check here also for the WL pool because at this point we can
	 * refill the WL pool synchronously. */
	if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->fm_eba_sem);
		ret = ubi_update_fastmap(ubi);
		if (ret) {
			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
			down_read(&ubi->fm_eba_sem);
			return -ENOSPC;
		}
		down_read(&ubi->fm_eba_sem);
		spin_lock(&ubi->wl_lock);
	}

	if (pool->used == pool->size) {
		spin_unlock(&ubi->wl_lock);
		if (retried) {
			ubi_err(ubi, "Unable to get a free PEB from user WL pool");
			ret = -ENOSPC;
			goto out;
		}
		retried = 1;
		up_read(&ubi->fm_eba_sem);
		ret = produce_free_peb(ubi);
		if (ret < 0) {
			down_read(&ubi->fm_eba_sem);
			goto out;
		}
		goto again;
	}

	ubi_assert(pool->used < pool->size);
	ret = pool->pebs[pool->used++];
	prot_queue_add(ubi, ubi->lookuptbl[ret]);
	spin_unlock(&ubi->wl_lock);
out:
	return ret;
}

/**
 * get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
 * @ubi: UBI device description object
 */
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
	int pnum;

	ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));

	if (pool->used == pool->size) {
		/* We cannot update the fastmap here because this
		 * function is called in atomic context.
		 * Let's fail here and refill/update it as soon as possible. */
		if (!ubi->fm_work_scheduled) {
			ubi->fm_work_scheduled = 1;
			schedule_work(&ubi->fm_work);
		}
		return NULL;
	}

	pnum = pool->pebs[pool->used++];
	return ubi->lookuptbl[pnum];
}

/**
 * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
 * @ubi: UBI device description object
 */
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
	struct ubi_work *wrk;
	struct ubi_wl_entry *anchor;

	spin_lock(&ubi->wl_lock);

	/* Do we already have an anchor? */
	if (ubi->fm_anchor) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	/* See if we can find an anchor PEB on the list of free PEBs */
	anchor = ubi_wl_get_fm_peb(ubi, 1);
	if (anchor) {
		ubi->fm_anchor = anchor;
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	/* No luck, trigger wear leveling to produce a new anchor PEB. */
	ubi->fm_do_produce_anchor = 1;
	if (ubi->wl_scheduled) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}
	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		spin_lock(&ubi->wl_lock);
		ubi->wl_scheduled = 0;
		spin_unlock(&ubi->wl_lock);
		return -ENOMEM;
	}

	wrk->func = &wear_leveling_worker;
	__schedule_ubi_work(ubi, wrk);
	return 0;
}

/**
 * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
 * sub-system.
 * see: ubi_wl_put_peb()
 *
 * @ubi: UBI device description object
 * @fm_e: physical eraseblock to return
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if this physical eraseblock has to be tortured
 */
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
		      int lnum, int torture)
{
	struct ubi_wl_entry *e;
	int vol_id, pnum = fm_e->pnum;

	dbg_wl("PEB %d", pnum);

	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];

	/* This can happen if we recovered from a fastmap the very
	 * first time and are now writing a new one. In this case the wl
	 * system has never seen any PEB used by the original fastmap.
	 */
	if (!e) {
		e = fm_e;
		ubi_assert(e->ec >= 0);
		ubi->lookuptbl[pnum] = e;
	}

	spin_unlock(&ubi->wl_lock);

	vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
	return schedule_erase(ubi, e, vol_id, lnum, torture, true);
}

/**
 * ubi_is_erase_work - checks whether a work is erase work.
 * @wrk: The work object to be checked
 */
int ubi_is_erase_work(struct ubi_work *wrk)
{
	return wrk->func == erase_worker;
}

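/**
 * ubi_fastmap_close - return unused fastmap PEBs and free the in-memory
 * fastmap structures.
 * @ubi: UBI device description object
 */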
static void ubi_fastmap_close(struct ubi_device *ubi)
{
	int i;

	return_unused_pool_pebs(ubi, &ubi->fm_pool);
	return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);

	if (ubi->fm_anchor) {
		return_unused_peb(ubi, ubi->fm_anchor);
		ubi->fm_anchor = NULL;
	}

	if (ubi->fm) {
		for (i = 0; i < ubi->fm->used_blocks; i++)
			kfree(ubi->fm->e[i]);
	}
	kfree(ubi->fm);
}

/**
 * may_reserve_for_fm - tests whether a PEB shall be reserved for fastmap.
 * See find_mean_wl_entry()
 *
 * @ubi: UBI device description object
 * @e: physical eraseblock candidate to check
 * @root: RB tree to test against.
 */
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
					       struct ubi_wl_entry *e,
					       struct rb_root *root)
{
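	/*
	 * If the candidate lies in the area a fastmap could occupy
	 * (pnum < UBI_FM_MAX_START) while a fastmap may still be written,
	 * skip it and take the next entry from the tree instead.
	 */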
	if (e && !ubi->fm_disabled && !ubi->fm &&
	    e->pnum < UBI_FM_MAX_START)
		e = rb_entry(rb_next(root->rb_node),
			     struct ubi_wl_entry, u.rb);

	return e;
}