/*
 * UWB reservation management.
 *
 * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kernel.h>
#include <linux/uwb.h>
#include <linux/random.h>

#include "uwb-internal.h"

static void uwb_rsv_timer(unsigned long arg);

static const char *rsv_states[] = {
	[UWB_RSV_STATE_NONE]                 = "none            ",
	[UWB_RSV_STATE_O_INITIATED]          = "o initiated     ",
	[UWB_RSV_STATE_O_PENDING]            = "o pending       ",
	[UWB_RSV_STATE_O_MODIFIED]           = "o modified      ",
	[UWB_RSV_STATE_O_ESTABLISHED]        = "o established   ",
	[UWB_RSV_STATE_O_TO_BE_MOVED]        = "o to be moved   ",
	[UWB_RSV_STATE_O_MOVE_EXPANDING]     = "o move expanding",
	[UWB_RSV_STATE_O_MOVE_COMBINING]     = "o move combining",
	[UWB_RSV_STATE_O_MOVE_REDUCING]      = "o move reducing ",
	[UWB_RSV_STATE_T_ACCEPTED]           = "t accepted      ",
	[UWB_RSV_STATE_T_CONFLICT]           = "t conflict      ",
	[UWB_RSV_STATE_T_PENDING]            = "t pending       ",
	[UWB_RSV_STATE_T_DENIED]             = "t denied        ",
	[UWB_RSV_STATE_T_RESIZED]            = "t resized       ",
	[UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = "t expanding acc ",
	[UWB_RSV_STATE_T_EXPANDING_CONFLICT] = "t expanding conf",
	[UWB_RSV_STATE_T_EXPANDING_PENDING]  = "t expanding pend",
	[UWB_RSV_STATE_T_EXPANDING_DENIED]   = "t expanding den ",
};

static const char *rsv_types[] = {
	[UWB_DRP_TYPE_ALIEN_BP] = "alien-bp",
	[UWB_DRP_TYPE_HARD]     = "hard",
	[UWB_DRP_TYPE_SOFT]     = "soft",
	[UWB_DRP_TYPE_PRIVATE]  = "private",
	[UWB_DRP_TYPE_PCA]      = "pca",
};

bool uwb_rsv_has_two_drp_ies(struct uwb_rsv *rsv)
{
	static const bool has_two_drp_ies[] = {
		[UWB_RSV_STATE_O_INITIATED]               = false,
		[UWB_RSV_STATE_O_PENDING]                 = false,
		[UWB_RSV_STATE_O_MODIFIED]                = false,
		[UWB_RSV_STATE_O_ESTABLISHED]             = false,
		[UWB_RSV_STATE_O_TO_BE_MOVED]             = false,
		[UWB_RSV_STATE_O_MOVE_COMBINING]          = false,
		[UWB_RSV_STATE_O_MOVE_REDUCING]           = false,
		[UWB_RSV_STATE_O_MOVE_EXPANDING]          = true,
		[UWB_RSV_STATE_T_ACCEPTED]                = false,
		[UWB_RSV_STATE_T_CONFLICT]                = false,
		[UWB_RSV_STATE_T_PENDING]                 = false,
		[UWB_RSV_STATE_T_DENIED]                  = false,
		[UWB_RSV_STATE_T_RESIZED]                 = false,
		[UWB_RSV_STATE_T_EXPANDING_ACCEPTED]      = true,
		[UWB_RSV_STATE_T_EXPANDING_CONFLICT]      = true,
		[UWB_RSV_STATE_T_EXPANDING_PENDING]       = true,
		[UWB_RSV_STATE_T_EXPANDING_DENIED]        = true,
	};

	return has_two_drp_ies[rsv->state];
}

/**
 * uwb_rsv_state_str - return a string for a reservation state
 * @state: the reservation state.
 */
const char *uwb_rsv_state_str(enum uwb_rsv_state state)
{
	if (state < UWB_RSV_STATE_NONE || state >= UWB_RSV_STATE_LAST)
		return "unknown";
	return rsv_states[state];
}
EXPORT_SYMBOL_GPL(uwb_rsv_state_str);

/**
 * uwb_rsv_type_str - return a string for a reservation type
 * @type: the reservation type
 */
const char *uwb_rsv_type_str(enum uwb_drp_type type)
{
	if (type < UWB_DRP_TYPE_ALIEN_BP || type > UWB_DRP_TYPE_PCA)
		return "invalid";
	return rsv_types[type];
}
EXPORT_SYMBOL_GPL(uwb_rsv_type_str);

void uwb_rsv_dump(char *text, struct uwb_rsv *rsv)
{
	struct device *dev = &rsv->rc->uwb_dev.dev;
	struct uwb_dev_addr devaddr;
	char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE];

	uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr);
	if (rsv->target.type == UWB_RSV_TARGET_DEV)
		devaddr = rsv->target.dev->dev_addr;
	else
		devaddr = rsv->target.devaddr;
	uwb_dev_addr_print(target, sizeof(target), &devaddr);

	dev_dbg(dev, "rsv %s %s -> %s: %s\n",
		text, owner, target, uwb_rsv_state_str(rsv->state));
}

static void uwb_rsv_release(struct kref *kref)
{
	struct uwb_rsv *rsv = container_of(kref, struct uwb_rsv, kref);

	kfree(rsv);
}

void uwb_rsv_get(struct uwb_rsv *rsv)
{
	kref_get(&rsv->kref);
}

void uwb_rsv_put(struct uwb_rsv *rsv)
{
	kref_put(&rsv->kref, uwb_rsv_release);
}

/*
 * Get a free stream index for a reservation.
 *
 * If the target is a DevAddr (e.g., a WUSB cluster reservation) then
 * the stream is allocated from a pool of per-RC stream indexes,
 * otherwise a unique stream index for the target is selected.
 */
static int uwb_rsv_get_stream(struct uwb_rsv *rsv)
{
	struct uwb_rc *rc = rsv->rc;
	struct device *dev = &rc->uwb_dev.dev;
	unsigned long *streams_bm;
	int stream;

	switch (rsv->target.type) {
	case UWB_RSV_TARGET_DEV:
		streams_bm = rsv->target.dev->streams;
		break;
	case UWB_RSV_TARGET_DEVADDR:
		streams_bm = rc->uwb_dev.streams;
		break;
	default:
		return -EINVAL;
	}

	stream = find_first_zero_bit(streams_bm, UWB_NUM_STREAMS);
	if (stream >= UWB_NUM_STREAMS)
		return -EBUSY;

	rsv->stream = stream;
	set_bit(stream, streams_bm);

	dev_dbg(dev, "get stream %d\n", rsv->stream);

	return 0;
}

static void uwb_rsv_put_stream(struct uwb_rsv *rsv)
{
	struct uwb_rc *rc = rsv->rc;
	struct device *dev = &rc->uwb_dev.dev;
	unsigned long *streams_bm;

	switch (rsv->target.type) {
	case UWB_RSV_TARGET_DEV:
		streams_bm = rsv->target.dev->streams;
		break;
	case UWB_RSV_TARGET_DEVADDR:
		streams_bm = rc->uwb_dev.streams;
		break;
	default:
		return;
	}

	clear_bit(rsv->stream, streams_bm);

	dev_dbg(dev, "put stream %d\n", rsv->stream);
}

void uwb_rsv_backoff_win_timer(unsigned long arg)
{
	struct uwb_drp_backoff_win *bow = (struct uwb_drp_backoff_win *)arg;
	struct uwb_rc *rc = container_of(bow, struct uwb_rc, bow);
	struct device *dev = &rc->uwb_dev.dev;

	bow->can_reserve_extra_mases = true;
	if (bow->total_expired <= 4) {
		bow->total_expired++;
	} else {
		/* after 4 backoff windows have expired we can exit from
		 * the backoff procedure */
		bow->total_expired = 0;
		bow->window = UWB_DRP_BACKOFF_WIN_MIN >> 1;
	}
	dev_dbg(dev, "backoff_win_timer total_expired=%d, n=%d\n", bow->total_expired, bow->n);

	/* try to relocate all the "to be moved" reservations */
	uwb_rsv_handle_drp_avail_change(rc);
}

void uwb_rsv_backoff_win_increment(struct uwb_rc *rc)
{
	struct uwb_drp_backoff_win *bow = &rc->bow;
	struct device *dev = &rc->uwb_dev.dev;
	unsigned timeout_us;

	dev_dbg(dev, "backoff_win_increment: window=%d\n", bow->window);

	bow->can_reserve_extra_mases = false;

	if ((bow->window << 1) == UWB_DRP_BACKOFF_WIN_MAX)
		return;

	bow->window <<= 1;
	bow->n = random32() & (bow->window - 1);
	dev_dbg(dev, "new_window=%d, n=%d\n", bow->window, bow->n);

	/* reset the timer-associated variables */
	timeout_us = bow->n * UWB_SUPERFRAME_LENGTH_US;
	bow->total_expired = 0;
	mod_timer(&bow->timer, jiffies + usecs_to_jiffies(timeout_us));
}

static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv)
{
	int sframes = UWB_MAX_LOST_BEACONS;

	/*
	 * Multicast reservations can become established within one
	 * superframe and should not be terminated if no response is
	 * received.
	 */
	if (rsv->is_multicast) {
		if (rsv->state == UWB_RSV_STATE_O_INITIATED
		    || rsv->state == UWB_RSV_STATE_O_MOVE_EXPANDING
		    || rsv->state == UWB_RSV_STATE_O_MOVE_COMBINING
		    || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING)
			sframes = 1;
		if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED)
			sframes = 0;

	}

	if (sframes > 0) {
		/*
		 * Add an additional 2 superframes to account for the
		 * time to send the SET DRP IE command.
		 */
		unsigned timeout_us = (sframes + 2) * UWB_SUPERFRAME_LENGTH_US;
		mod_timer(&rsv->timer, jiffies + usecs_to_jiffies(timeout_us));
	} else
		del_timer(&rsv->timer);
}

/*
 * Update a reservation's state and schedule an update of the
 * transmitted DRP IEs.
 */
static void uwb_rsv_state_update(struct uwb_rsv *rsv,
				 enum uwb_rsv_state new_state)
{
	rsv->state = new_state;
	rsv->ie_valid = false;

	uwb_rsv_dump("SU", rsv);

	uwb_rsv_stroke_timer(rsv);
	uwb_rsv_sched_update(rsv->rc);
}

static void uwb_rsv_callback(struct uwb_rsv *rsv)
{
	if (rsv->callback)
		rsv->callback(rsv);
}

void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state)
{
	struct uwb_rsv_move *mv = &rsv->mv;

	if (rsv->state == new_state) {
		switch (rsv->state) {
		case UWB_RSV_STATE_O_ESTABLISHED:
		case UWB_RSV_STATE_O_MOVE_EXPANDING:
		case UWB_RSV_STATE_O_MOVE_COMBINING:
		case UWB_RSV_STATE_O_MOVE_REDUCING:
		case UWB_RSV_STATE_T_ACCEPTED:
		case UWB_RSV_STATE_T_EXPANDING_ACCEPTED:
		case UWB_RSV_STATE_T_RESIZED:
		case UWB_RSV_STATE_NONE:
			uwb_rsv_stroke_timer(rsv);
			break;
		default:
			/* Expecting a state transition so leave timer
			   as-is. */
			break;
		}
		return;
	}

	uwb_rsv_dump("SC", rsv);

	switch (new_state) {
	case UWB_RSV_STATE_NONE:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_NONE);
		uwb_rsv_callback(rsv);
		break;
	case UWB_RSV_STATE_O_INITIATED:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_INITIATED);
		break;
	case UWB_RSV_STATE_O_PENDING:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_PENDING);
		break;
	case UWB_RSV_STATE_O_MODIFIED:
		/* the companion bitmap holds the MASes to drop */
		bitmap_andnot(rsv->mas.bm, rsv->mas.bm, mv->companion_mas.bm, UWB_NUM_MAS);
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MODIFIED);
		break;
	case UWB_RSV_STATE_O_ESTABLISHED:
		if (rsv->state == UWB_RSV_STATE_O_MODIFIED
		    || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING) {
			uwb_drp_avail_release(rsv->rc, &mv->companion_mas);
			rsv->needs_release_companion_mas = false;
		}
		uwb_drp_avail_reserve(rsv->rc, &rsv->mas);
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_ESTABLISHED);
		uwb_rsv_callback(rsv);
		break;
	case UWB_RSV_STATE_O_MOVE_EXPANDING:
		rsv->needs_release_companion_mas = true;
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING);
		break;
	case UWB_RSV_STATE_O_MOVE_COMBINING:
		rsv->needs_release_companion_mas = false;
		uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas);
		bitmap_or(rsv->mas.bm, rsv->mas.bm, mv->companion_mas.bm, UWB_NUM_MAS);
		rsv->mas.safe   += mv->companion_mas.safe;
		rsv->mas.unsafe += mv->companion_mas.unsafe;
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
		break;
	case UWB_RSV_STATE_O_MOVE_REDUCING:
		bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS);
		rsv->needs_release_companion_mas = true;
		rsv->mas.safe   = mv->final_mas.safe;
		rsv->mas.unsafe = mv->final_mas.unsafe;
		bitmap_copy(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS);
		bitmap_copy(rsv->mas.unsafe_bm, mv->final_mas.unsafe_bm, UWB_NUM_MAS);
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
		break;
	case UWB_RSV_STATE_T_ACCEPTED:
	case UWB_RSV_STATE_T_RESIZED:
		rsv->needs_release_companion_mas = false;
		uwb_drp_avail_reserve(rsv->rc, &rsv->mas);
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_ACCEPTED);
		uwb_rsv_callback(rsv);
		break;
	case UWB_RSV_STATE_T_DENIED:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_DENIED);
		break;
	case UWB_RSV_STATE_T_CONFLICT:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_CONFLICT);
		break;
	case UWB_RSV_STATE_T_PENDING:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_PENDING);
		break;
	case UWB_RSV_STATE_T_EXPANDING_ACCEPTED:
		rsv->needs_release_companion_mas = true;
		uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas);
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
		break;
	default:
		dev_err(&rsv->rc->uwb_dev.dev, "unhandled state: %s (%d)\n",
			uwb_rsv_state_str(new_state), new_state);
	}
}

static void uwb_rsv_handle_timeout_work(struct work_struct *work)
{
	struct uwb_rsv *rsv = container_of(work, struct uwb_rsv,
					   handle_timeout_work);
	struct uwb_rc *rc = rsv->rc;

	mutex_lock(&rc->rsvs_mutex);

	uwb_rsv_dump("TO", rsv);

	switch (rsv->state) {
	case UWB_RSV_STATE_O_INITIATED:
		if (rsv->is_multicast) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
			goto unlock;
		}
		break;
	case UWB_RSV_STATE_O_MOVE_EXPANDING:
		if (rsv->is_multicast) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
			goto unlock;
		}
		break;
	case UWB_RSV_STATE_O_MOVE_COMBINING:
		if (rsv->is_multicast) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
			goto unlock;
		}
		break;
	case UWB_RSV_STATE_O_MOVE_REDUCING:
		if (rsv->is_multicast) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
			goto unlock;
		}
		break;
	case UWB_RSV_STATE_O_ESTABLISHED:
		if (rsv->is_multicast)
			goto unlock;
		break;
	case UWB_RSV_STATE_T_EXPANDING_ACCEPTED:
		/*
		 * The timeout could be for either the main or the
		 * companion DRP; assume it's for the companion and
		 * drop that first.  A further timeout is required to
		 * drop the main.
		 */
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED);
		uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);
		goto unlock;
	default:
		break;
	}

	uwb_rsv_remove(rsv);

unlock:
	mutex_unlock(&rc->rsvs_mutex);
}

static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc)
{
	struct uwb_rsv *rsv;

	rsv = kzalloc(sizeof(struct uwb_rsv), GFP_KERNEL);
	if (!rsv)
		return NULL;

	INIT_LIST_HEAD(&rsv->rc_node);
	INIT_LIST_HEAD(&rsv->pal_node);
	kref_init(&rsv->kref);
	init_timer(&rsv->timer);
	rsv->timer.function = uwb_rsv_timer;
	rsv->timer.data     = (unsigned long)rsv;

	rsv->rc = rc;
	INIT_WORK(&rsv->handle_timeout_work, uwb_rsv_handle_timeout_work);

	return rsv;
}

/**
 * uwb_rsv_create - allocate and initialize a UWB reservation structure
 * @rc: the radio controller
 * @cb: callback to use when the reservation completes or terminates
 * @pal_priv: data private to the PAL to be passed in the callback
 *
 * The callback is called when the state of the reservation changes from:
 *
 *   - pending to accepted
 *   - pending to denied
 *   - accepted to terminated
 *   - pending to terminated
 */
struct uwb_rsv *uwb_rsv_create(struct uwb_rc *rc, uwb_rsv_cb_f cb, void *pal_priv)
{
	struct uwb_rsv *rsv;

	rsv = uwb_rsv_alloc(rc);
	if (!rsv)
		return NULL;

	rsv->callback = cb;
	rsv->pal_priv = pal_priv;

	return rsv;
}
EXPORT_SYMBOL_GPL(uwb_rsv_create);
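
/*
 * Illustrative sketch (hypothetical PAL code, not part of this driver):
 * allocating a reservation and registering a callback.  The names
 * my_pal_rsv_cb and my_pal are assumptions for the example.
 *
 *	struct uwb_rsv *rsv;
 *
 *	rsv = uwb_rsv_create(rc, my_pal_rsv_cb, my_pal);
 *	if (rsv == NULL)
 *		return -ENOMEM;
 */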

void uwb_rsv_remove(struct uwb_rsv *rsv)
{
	uwb_rsv_dump("RM", rsv);

	if (rsv->state != UWB_RSV_STATE_NONE)
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);

	if (rsv->needs_release_companion_mas)
		uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);
	uwb_drp_avail_release(rsv->rc, &rsv->mas);

	if (uwb_rsv_is_owner(rsv))
		uwb_rsv_put_stream(rsv);

	uwb_dev_put(rsv->owner);
	if (rsv->target.type == UWB_RSV_TARGET_DEV)
		uwb_dev_put(rsv->target.dev);

	list_del_init(&rsv->rc_node);
	uwb_rsv_put(rsv);
}

/**
 * uwb_rsv_destroy - free a UWB reservation structure
 * @rsv: the reservation to free
 *
 * The reservation must already be terminated.
 */
void uwb_rsv_destroy(struct uwb_rsv *rsv)
{
	uwb_rsv_put(rsv);
}
EXPORT_SYMBOL_GPL(uwb_rsv_destroy);

/**
 * uwb_rsv_establish - start a reservation establishment
 * @rsv: the reservation
 *
 * The PAL should fill in @rsv's owner, target, type, max_mas,
 * min_mas, max_interval and is_multicast fields.  If the target is a
 * uwb_dev it must be referenced.
 *
 * The reservation's callback will be called when the reservation is
 * accepted, denied or times out.
 */
int uwb_rsv_establish(struct uwb_rsv *rsv)
{
	struct uwb_rc *rc = rsv->rc;
	struct uwb_mas_bm available;
	int ret;

	mutex_lock(&rc->rsvs_mutex);
	ret = uwb_rsv_get_stream(rsv);
	if (ret)
		goto out;

	rsv->tiebreaker = random32() & 1;
	/* get available mas bitmap */
	uwb_drp_available(rc, &available);

	ret = uwb_rsv_find_best_allocation(rsv, &available, &rsv->mas);
	if (ret == UWB_RSV_ALLOC_NOT_FOUND) {
		ret = -EBUSY;
		uwb_rsv_put_stream(rsv);
		goto out;
	}

	ret = uwb_drp_avail_reserve_pending(rc, &rsv->mas);
	if (ret != 0) {
		uwb_rsv_put_stream(rsv);
		goto out;
	}

	uwb_rsv_get(rsv);
	list_add_tail(&rsv->rc_node, &rc->reservations);
	rsv->owner = &rc->uwb_dev;
	uwb_dev_get(rsv->owner);
	uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_INITIATED);
out:
	mutex_unlock(&rc->rsvs_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(uwb_rsv_establish);
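
/*
 * Illustrative sketch (hypothetical PAL code, not part of this driver):
 * filling in the request fields described above before calling
 * uwb_rsv_establish().  "peer" is an assumed struct uwb_dev the PAL
 * already holds a reference to, and the MAS numbers are arbitrary
 * example values.
 *
 *	int ret;
 *
 *	rsv->target.type  = UWB_RSV_TARGET_DEV;
 *	rsv->target.dev   = peer;
 *	rsv->type         = UWB_DRP_TYPE_HARD;
 *	rsv->max_mas      = 256;
 *	rsv->min_mas      = 16;
 *	rsv->max_interval = 2;
 *	rsv->is_multicast = false;
 *
 *	ret = uwb_rsv_establish(rsv);
 *	if (ret < 0)
 *		uwb_rsv_destroy(rsv);
 *
 * The PAL's callback then inspects rsv->state (e.g.
 * UWB_RSV_STATE_O_ESTABLISHED on success, or UWB_RSV_STATE_NONE once
 * the reservation is gone).
 */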

/**
 * uwb_rsv_modify - modify an already established reservation
 * @rsv: the reservation to modify
 * @max_mas: new maximum MAS to reserve
 * @min_mas: new minimum MAS to reserve
 * @max_interval: new max_interval to use
 *
 * FIXME: implement this once there are PALs that use it.
 */
int uwb_rsv_modify(struct uwb_rsv *rsv, int max_mas, int min_mas, int max_interval)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(uwb_rsv_modify);

/*
 * move an already established reservation (rc->rsvs_mutex must be
 * held when this function is called)
 */
int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available)
{
	struct uwb_rc *rc = rsv->rc;
	struct uwb_drp_backoff_win *bow = &rc->bow;
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_rsv_move *mv;
	int ret = 0;

	if (bow->can_reserve_extra_mases == false)
		return -EBUSY;

	mv = &rsv->mv;

	if (uwb_rsv_find_best_allocation(rsv, available, &mv->final_mas) == UWB_RSV_ALLOC_FOUND) {

		if (!bitmap_equal(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS)) {
			/* We want to move the reservation */
			bitmap_andnot(mv->companion_mas.bm, mv->final_mas.bm, rsv->mas.bm, UWB_NUM_MAS);
			uwb_drp_avail_reserve_pending(rc, &mv->companion_mas);
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING);
		}
	} else {
		dev_dbg(dev, "new allocation not found\n");
	}

	return ret;
}

/* Try to move every reservation in state O_ESTABLISHED, giving the MAS
 * allocator an availability that is the real one plus the MAS already
 * allocated to the reservation. */
void uwb_rsv_handle_drp_avail_change(struct uwb_rc *rc)
{
	struct uwb_drp_backoff_win *bow = &rc->bow;
	struct uwb_rsv *rsv;
	struct uwb_mas_bm mas;

	if (bow->can_reserve_extra_mases == false)
		return;

	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED ||
		    rsv->state == UWB_RSV_STATE_O_TO_BE_MOVED) {
			uwb_drp_available(rc, &mas);
			bitmap_or(mas.bm, mas.bm, rsv->mas.bm, UWB_NUM_MAS);
			uwb_rsv_try_move(rsv, &mas);
		}
	}

}

/**
 * uwb_rsv_terminate - terminate an established reservation
 * @rsv: the reservation to terminate
 *
 * A reservation is terminated by removing the DRP IE from the beacon;
 * the other end will consider the reservation to be terminated when
 * it does not see the DRP IE for at least mMaxLostBeacons.
 *
 * If applicable, the reference to the target uwb_dev will be released.
 */
void uwb_rsv_terminate(struct uwb_rsv *rsv)
{
	struct uwb_rc *rc = rsv->rc;

	mutex_lock(&rc->rsvs_mutex);

	if (rsv->state != UWB_RSV_STATE_NONE)
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);

	mutex_unlock(&rc->rsvs_mutex);
}
EXPORT_SYMBOL_GPL(uwb_rsv_terminate);
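
/*
 * Illustrative teardown sketch (hypothetical PAL code): once a PAL is
 * finished with a reservation it terminates it and then drops its
 * reference.
 *
 *	uwb_rsv_terminate(rsv);
 *	uwb_rsv_destroy(rsv);
 */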

/**
 * uwb_rsv_accept - accept a new reservation from a peer
 * @rsv:      the reservation
 * @cb:       call back for reservation changes
 * @pal_priv: data to be passed in the above call back
 *
 * Reservation requests from peers are denied unless a PAL accepts them
 * by calling this function.
 *
 * The PAL must call uwb_rsv_destroy() for all accepted reservations
 * before calling uwb_pal_unregister().
 */
void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv)
{
	uwb_rsv_get(rsv);

	rsv->callback = cb;
	rsv->pal_priv = pal_priv;
	rsv->state    = UWB_RSV_STATE_T_ACCEPTED;
}
EXPORT_SYMBOL_GPL(uwb_rsv_accept);
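
/*
 * Illustrative sketch (hypothetical PAL code): peer reservations are
 * offered to PALs via the struct uwb_pal new_rsv hook, and a PAL that
 * wants the reservation accepts it from there.  struct my_pal,
 * my_pal_rsv_cb and the acceptance test are assumptions for the
 * example.
 *
 *	static void my_pal_new_rsv(struct uwb_pal *pal, struct uwb_rsv *rsv)
 *	{
 *		struct my_pal *mp = container_of(pal, struct my_pal, pal);
 *
 *		if (rsv->type == UWB_DRP_TYPE_HARD)
 *			uwb_rsv_accept(rsv, my_pal_rsv_cb, mp);
 *	}
 */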

/*
 * Is a received DRP IE for this reservation?
 */
static bool uwb_rsv_match(struct uwb_rsv *rsv, struct uwb_dev *src,
			  struct uwb_ie_drp *drp_ie)
{
	struct uwb_dev_addr *rsv_src;
	int stream;

	stream = uwb_ie_drp_stream_index(drp_ie);

	if (rsv->stream != stream)
		return false;

	switch (rsv->target.type) {
	case UWB_RSV_TARGET_DEVADDR:
		return rsv->stream == stream;
	case UWB_RSV_TARGET_DEV:
		if (uwb_ie_drp_owner(drp_ie))
			rsv_src = &rsv->owner->dev_addr;
		else
			rsv_src = &rsv->target.dev->dev_addr;
		return uwb_dev_addr_cmp(&src->dev_addr, rsv_src) == 0;
	}
	return false;
}

static struct uwb_rsv *uwb_rsv_new_target(struct uwb_rc *rc,
					  struct uwb_dev *src,
					  struct uwb_ie_drp *drp_ie)
{
	struct uwb_rsv *rsv;
	struct uwb_pal *pal;
	enum uwb_rsv_state state;

	rsv = uwb_rsv_alloc(rc);
	if (!rsv)
		return NULL;

	rsv->rc          = rc;
	rsv->owner       = src;
	uwb_dev_get(rsv->owner);
	rsv->target.type = UWB_RSV_TARGET_DEV;
	rsv->target.dev  = &rc->uwb_dev;
	uwb_dev_get(&rc->uwb_dev);
	rsv->type        = uwb_ie_drp_type(drp_ie);
	rsv->stream      = uwb_ie_drp_stream_index(drp_ie);
	uwb_drp_ie_to_bm(&rsv->mas, drp_ie);

	/*
	 * See if any PALs are interested in this reservation. If not,
	 * deny the request.
	 */
	rsv->state = UWB_RSV_STATE_T_DENIED;
	mutex_lock(&rc->uwb_dev.mutex);
	list_for_each_entry(pal, &rc->pals, node) {
		if (pal->new_rsv)
			pal->new_rsv(pal, rsv);
		if (rsv->state == UWB_RSV_STATE_T_ACCEPTED)
			break;
	}
	mutex_unlock(&rc->uwb_dev.mutex);

	list_add_tail(&rsv->rc_node, &rc->reservations);
	state = rsv->state;
	rsv->state = UWB_RSV_STATE_NONE;

	/* FIXME: do something sensible here */
	if (state == UWB_RSV_STATE_T_ACCEPTED
	    && uwb_drp_avail_reserve_pending(rc, &rsv->mas) == -EBUSY) {
		/* FIXME: do something sensible here */
	} else {
		uwb_rsv_set_state(rsv, state);
	}

	return rsv;
}

/**
 * uwb_rsv_get_usable_mas - get the bitmap of the usable MAS of a reservation
 * @rsv: the reservation.
 * @mas: returns the available MAS.
 *
 * The usable MAS of a reservation may be less than the negotiated MAS
 * if alien BPs are present.
 */
void uwb_rsv_get_usable_mas(struct uwb_rsv *rsv, struct uwb_mas_bm *mas)
{
	bitmap_zero(mas->bm, UWB_NUM_MAS);
	bitmap_andnot(mas->bm, rsv->mas.bm, rsv->rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS);
}
EXPORT_SYMBOL_GPL(uwb_rsv_get_usable_mas);
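
/*
 * Illustrative sketch (hypothetical PAL code): before queueing traffic
 * a PAL can check how much of its reservation is currently usable.
 *
 *	struct uwb_mas_bm mas;
 *	int usable;
 *
 *	uwb_rsv_get_usable_mas(rsv, &mas);
 *	usable = bitmap_weight(mas.bm, UWB_NUM_MAS);
 */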

/**
 * uwb_rsv_find - find a reservation for a received DRP IE.
 * @rc: the radio controller
 * @src: source of the DRP IE
 * @drp_ie: the DRP IE
 *
 * If the reservation cannot be found and the DRP IE is from a peer
 * attempting to establish a new reservation, create a new reservation
 * and add it to the list.
 */
struct uwb_rsv *uwb_rsv_find(struct uwb_rc *rc, struct uwb_dev *src,
			     struct uwb_ie_drp *drp_ie)
{
	struct uwb_rsv *rsv;

	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (uwb_rsv_match(rsv, src, drp_ie))
			return rsv;
	}

	if (uwb_ie_drp_owner(drp_ie))
		return uwb_rsv_new_target(rc, src, drp_ie);

	return NULL;
}

/*
 * Go through all the reservations and check for timeouts and (if
 * necessary) update their DRP IEs.
 *
 * FIXME: look at building the SET_DRP_IE command here rather than
 * having to rescan the list in uwb_rc_send_all_drp_ie().
 */
static bool uwb_rsv_update_all(struct uwb_rc *rc)
{
	struct uwb_rsv *rsv, *t;
	bool ie_updated = false;

	list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) {
		if (!rsv->ie_valid) {
			uwb_drp_ie_update(rsv);
			ie_updated = true;
		}
	}

	return ie_updated;
}

void uwb_rsv_queue_update(struct uwb_rc *rc)
{
	unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;

	queue_delayed_work(rc->rsv_workq, &rc->rsv_update_work, usecs_to_jiffies(delay_us));
}

/**
 * uwb_rsv_sched_update - schedule an update of the DRP IEs
 * @rc: the radio controller.
 *
 * To improve performance and ensure correctness with [ECMA-368] the
 * number of SET-DRP-IE commands that are sent is limited.
 *
 * DRP IE updates come from two sources: DRP events from the hardware,
 * which all occur at the beginning of the superframe ('synchronous'
 * events), and reservation establishment/termination requests from
 * PALs or timers ('asynchronous' events).
 *
 * A delayed work ensures that all the synchronous events result in
 * one SET-DRP-IE command.
 *
 * Additional logic (the set_drp_ie_pending and rsv_updated_postponed
 * flags) will prevent an asynchronous event from starting a SET-DRP-IE
 * command if one is currently awaiting a response.
 *
 * FIXME: this does leave a window where an asynchronous event can delay
 * the SET-DRP-IE for a synchronous event by one superframe.
 */
void uwb_rsv_sched_update(struct uwb_rc *rc)
{
	spin_lock_bh(&rc->rsvs_lock);
	if (!delayed_work_pending(&rc->rsv_update_work)) {
		if (rc->set_drp_ie_pending > 0) {
			rc->set_drp_ie_pending++;
			goto unlock;
		}
		uwb_rsv_queue_update(rc);
	}
unlock:
	spin_unlock_bh(&rc->rsvs_lock);
}

/*
 * Update DRP IEs and, if necessary, the DRP Availability IE and send
 * the updated IEs to the radio controller.
 */
static void uwb_rsv_update_work(struct work_struct *work)
{
	struct uwb_rc *rc = container_of(work, struct uwb_rc,
					 rsv_update_work.work);
	bool ie_updated;

	mutex_lock(&rc->rsvs_mutex);

	ie_updated = uwb_rsv_update_all(rc);

	if (!rc->drp_avail.ie_valid) {
		uwb_drp_avail_ie_update(rc);
		ie_updated = true;
	}

	if (ie_updated && (rc->set_drp_ie_pending == 0))
		uwb_rc_send_all_drp_ie(rc);

	mutex_unlock(&rc->rsvs_mutex);
}

static void uwb_rsv_alien_bp_work(struct work_struct *work)
{
	struct uwb_rc *rc = container_of(work, struct uwb_rc,
					 rsv_alien_bp_work.work);
	struct uwb_rsv *rsv;

	mutex_lock(&rc->rsvs_mutex);

	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (rsv->type != UWB_DRP_TYPE_ALIEN_BP) {
			rsv->callback(rsv);
		}
	}

	mutex_unlock(&rc->rsvs_mutex);
}

static void uwb_rsv_timer(unsigned long arg)
{
	struct uwb_rsv *rsv = (struct uwb_rsv *)arg;

	queue_work(rsv->rc->rsv_workq, &rsv->handle_timeout_work);
}

/**
 * uwb_rsv_remove_all - remove all reservations
 * @rc: the radio controller
 *
 * A DRP IE update is not done.
 */
void uwb_rsv_remove_all(struct uwb_rc *rc)
{
	struct uwb_rsv *rsv, *t;

	mutex_lock(&rc->rsvs_mutex);
	list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) {
		if (rsv->state != UWB_RSV_STATE_NONE)
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
		del_timer_sync(&rsv->timer);
	}
	/* Cancel any postponed update. */
	rc->set_drp_ie_pending = 0;
	mutex_unlock(&rc->rsvs_mutex);

	cancel_delayed_work_sync(&rc->rsv_update_work);
	flush_workqueue(rc->rsv_workq);

	mutex_lock(&rc->rsvs_mutex);
	list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) {
		uwb_rsv_remove(rsv);
	}
	mutex_unlock(&rc->rsvs_mutex);
}

void uwb_rsv_init(struct uwb_rc *rc)
{
	INIT_LIST_HEAD(&rc->reservations);
	INIT_LIST_HEAD(&rc->cnflt_alien_list);
	mutex_init(&rc->rsvs_mutex);
	spin_lock_init(&rc->rsvs_lock);
	INIT_DELAYED_WORK(&rc->rsv_update_work, uwb_rsv_update_work);
	INIT_DELAYED_WORK(&rc->rsv_alien_bp_work, uwb_rsv_alien_bp_work);
	rc->bow.can_reserve_extra_mases = true;
	rc->bow.total_expired = 0;
	rc->bow.window = UWB_DRP_BACKOFF_WIN_MIN >> 1;
	init_timer(&rc->bow.timer);
	rc->bow.timer.function = uwb_rsv_backoff_win_timer;
	rc->bow.timer.data     = (unsigned long)&rc->bow;

	bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS);
}

int uwb_rsv_setup(struct uwb_rc *rc)
{
	char name[16];

	snprintf(name, sizeof(name), "%s_rsvd", dev_name(&rc->uwb_dev.dev));
	rc->rsv_workq = create_singlethread_workqueue(name);
	if (rc->rsv_workq == NULL)
		return -ENOMEM;

	return 0;
}

void uwb_rsv_cleanup(struct uwb_rc *rc)
{
	uwb_rsv_remove_all(rc);
	destroy_workqueue(rc->rsv_workq);
}