/*
 * Ultra Wide Band
 * Dynamic Reservation Protocol handling
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include "uwb-internal.h"


/* DRP Conflict Actions ([ECMA-368 2nd Edition] 17.4.6) */
enum uwb_drp_conflict_action {
	/* Reservation is maintained, no action needed */
	UWB_DRP_CONFLICT_MANTAIN = 0,

	/* the device shall not transmit frames in conflicting MASs in
	 * the following superframe. If the device is the reservation
	 * target, it shall also set the Reason Code in its DRP IE to
	 * Conflict in its beacon in the following superframe.
	 */
	UWB_DRP_CONFLICT_ACT1,

	/* the device shall not set the Reservation Status bit to ONE
	 * and shall not transmit frames in conflicting MASs. If the
	 * device is the reservation target, it shall also set the
	 * Reason Code in its DRP IE to Conflict.
	 */
	UWB_DRP_CONFLICT_ACT2,

	/* the device shall not transmit frames in conflicting MASs in
	 * the following superframe. It shall remove the conflicting
	 * MASs from the reservation or set the Reservation Status to
	 * ZERO in its beacon in the following superframe. If the
	 * device is the reservation target, it shall also set the
	 * Reason Code in its DRP IE to Conflict.
	 */
	UWB_DRP_CONFLICT_ACT3,
};


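/*
 * Completion callback for the asynchronous SET-DRP-IE command: report
 * a failed or timed-out command and clear the pending flag.  If
 * another update was requested while this command was in flight
 * (set_drp_ie_pending > 1), queue a new reservation update so the
 * radio controller eventually carries the latest DRP IEs.
 */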
static void uwb_rc_set_drp_cmd_done(struct uwb_rc *rc, void *arg,
				    struct uwb_rceb *reply, ssize_t reply_size)
{
	struct uwb_rc_evt_set_drp_ie *r = (struct uwb_rc_evt_set_drp_ie *)reply;

	if (r != NULL) {
		if (r->bResultCode != UWB_RC_RES_SUCCESS)
			dev_err(&rc->uwb_dev.dev, "SET-DRP-IE failed: %s (%d)\n",
				uwb_rc_strerror(r->bResultCode), r->bResultCode);
	} else
		dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: timeout\n");

	spin_lock_bh(&rc->rsvs_lock);
	if (rc->set_drp_ie_pending > 1) {
		rc->set_drp_ie_pending = 0;
		uwb_rsv_queue_update(rc);
	} else {
		rc->set_drp_ie_pending = 0;
	}
	spin_unlock_bh(&rc->rsvs_lock);
}

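/*
 * Send the DRP Availability IE followed by the DRP IE(s) of every
 * reservation to the radio controller in a single asynchronous
 * SET-DRP-IE command.  Returns the result of queuing the command
 * (a negative errno on allocation or queuing failure).
 */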
int uwb_rc_send_all_drp_ie(struct uwb_rc *rc)
{
	int result;
	struct uwb_rc_cmd_set_drp_ie *cmd;
	struct uwb_rsv *rsv;
	struct uwb_rsv_move *mv;
	int num_bytes = 0;
	u8 *IEDataptr;

	result = -ENOMEM;
	/* First traverse all reservations to determine memory needed. */
	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (rsv->drp_ie != NULL) {
			num_bytes += rsv->drp_ie->hdr.length + 2;
			if (uwb_rsv_has_two_drp_ies(rsv) &&
				(rsv->mv.companion_drp_ie != NULL)) {
				mv = &rsv->mv;
				num_bytes += mv->companion_drp_ie->hdr.length + 2;
			}
		}
	}
	num_bytes += sizeof(rc->drp_avail.ie);
	cmd = kzalloc(sizeof(*cmd) + num_bytes, GFP_KERNEL);
	if (cmd == NULL)
		goto error;
	cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
	cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_DRP_IE);
	cmd->wIELength = num_bytes;
	IEDataptr = (u8 *)&cmd->IEData[0];

	/* put DRP avail IE first */
	memcpy(IEDataptr, &rc->drp_avail.ie, sizeof(rc->drp_avail.ie));
	IEDataptr += sizeof(struct uwb_ie_drp_avail);

	/* Next traverse all reservations to place IEs in allocated memory. */
	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (rsv->drp_ie != NULL) {
			memcpy(IEDataptr, rsv->drp_ie,
			       rsv->drp_ie->hdr.length + 2);
			IEDataptr += rsv->drp_ie->hdr.length + 2;

			if (uwb_rsv_has_two_drp_ies(rsv) &&
				(rsv->mv.companion_drp_ie != NULL)) {
				mv = &rsv->mv;
				memcpy(IEDataptr, mv->companion_drp_ie,
				       mv->companion_drp_ie->hdr.length + 2);
				IEDataptr += mv->companion_drp_ie->hdr.length + 2;
			}
		}
	}

	result = uwb_rc_cmd_async(rc, "SET-DRP-IE", &cmd->rccb, sizeof(*cmd) + num_bytes,
				  UWB_RC_CET_GENERAL, UWB_RC_CMD_SET_DRP_IE,
				  uwb_rc_set_drp_cmd_done, NULL);

	rc->set_drp_ie_pending = 1;

	kfree(cmd);
error:
	return result;
}

/*
 * Evaluate the action to perform using conflict resolution rules
 *
 * Return a uwb_drp_conflict_action.
 */
static int evaluate_conflict_action(struct uwb_ie_drp *ext_drp_ie, int ext_beacon_slot,
				    struct uwb_rsv *rsv, int our_status)
{
	int our_tie_breaker = rsv->tiebreaker;
	int our_type        = rsv->type;
	int our_beacon_slot = rsv->rc->uwb_dev.beacon_slot;

	int ext_tie_breaker = uwb_ie_drp_tiebreaker(ext_drp_ie);
	int ext_status      = uwb_ie_drp_status(ext_drp_ie);
	int ext_type        = uwb_ie_drp_type(ext_drp_ie);


	/* [ECMA-368 2nd Edition] 17.4.6 */
	if (ext_type == UWB_DRP_TYPE_PCA && our_type == UWB_DRP_TYPE_PCA) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-1 */
	if (our_type == UWB_DRP_TYPE_ALIEN_BP) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-2 */
	if (ext_type == UWB_DRP_TYPE_ALIEN_BP) {
		/* here we know our_type != UWB_DRP_TYPE_ALIEN_BP */
		return UWB_DRP_CONFLICT_ACT1;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-3 */
	if (our_status == 0 && ext_status == 1) {
		return UWB_DRP_CONFLICT_ACT2;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-4 */
	if (our_status == 1 && ext_status == 0) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-5a */
	if (our_tie_breaker == ext_tie_breaker &&
	    our_beacon_slot <  ext_beacon_slot) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-5b */
	if (our_tie_breaker != ext_tie_breaker &&
	    our_beacon_slot >  ext_beacon_slot) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	if (our_status == 0) {
		if (our_tie_breaker == ext_tie_breaker) {
			/* [ECMA-368 2nd Edition] 17.4.6-6a */
			if (our_beacon_slot > ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT2;
			}
		} else {
			/* [ECMA-368 2nd Edition] 17.4.6-6b */
			if (our_beacon_slot < ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT2;
			}
		}
	} else {
		if (our_tie_breaker == ext_tie_breaker) {
			/* [ECMA-368 2nd Edition] 17.4.6-7a */
			if (our_beacon_slot > ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT3;
			}
		} else {
			/* [ECMA-368 2nd Edition] 17.4.6-7b */
			if (our_beacon_slot < ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT3;
			}
		}
	}
	return UWB_DRP_CONFLICT_MANTAIN;
}

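/*
 * Resolve a conflict on a reservation that is not being expanded.
 * Depending on the conflict resolution rules, the owner either tries
 * to move the reservation (ACT2) or drops the conflicting MASs from
 * it (ACT3), growing the DRP backoff window where required; a target
 * simply marks the reservation as conflicting.
 */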
static void handle_conflict_normal(struct uwb_ie_drp *drp_ie,
				   int ext_beacon_slot,
				   struct uwb_rsv *rsv,
				   struct uwb_mas_bm *conflicting_mas)
{
	struct uwb_rc *rc = rsv->rc;
	struct uwb_rsv_move *mv = &rsv->mv;
	struct uwb_drp_backoff_win *bow = &rc->bow;
	int action;

	action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, uwb_rsv_status(rsv));

	if (uwb_rsv_is_owner(rsv)) {
		switch (action) {
		case UWB_DRP_CONFLICT_ACT2:
			/* try move */
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_TO_BE_MOVED);
			if (bow->can_reserve_extra_mases == false)
				uwb_rsv_backoff_win_increment(rc);

			break;
		case UWB_DRP_CONFLICT_ACT3:
			uwb_rsv_backoff_win_increment(rc);
			/* drop the conflicting MASs, with reason code "modified" */
			/* put the MASs to be dropped in the companion bitmap */
			bitmap_and(mv->companion_mas.bm, rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS);
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
		default:
			break;
		}
	} else {
		switch (action) {
		case UWB_DRP_CONFLICT_ACT2:
		case UWB_DRP_CONFLICT_ACT3:
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
		default:
			break;
		}

	}

}

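/*
 * Resolve a conflict on a reservation that is being expanded.  If only
 * the companion (expansion) part conflicts, the owner gives up the
 * expansion and falls back to the established reservation, while a
 * target flags the expansion as conflicting.  If the base part of the
 * reservation also conflicts, the owner releases the expansion and
 * drops the conflicting MASs; a target marks the whole reservation as
 * conflicting.
 */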
static void handle_conflict_expanding(struct uwb_ie_drp *drp_ie, int ext_beacon_slot,
				      struct uwb_rsv *rsv, bool companion_only,
				      struct uwb_mas_bm *conflicting_mas)
{
	struct uwb_rc *rc = rsv->rc;
	struct uwb_drp_backoff_win *bow = &rc->bow;
	struct uwb_rsv_move *mv = &rsv->mv;
	int action;

	if (companion_only) {
		/* status of companion is 0 at this point */
		action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, 0);
		if (uwb_rsv_is_owner(rsv)) {
			switch (action) {
			case UWB_DRP_CONFLICT_ACT2:
			case UWB_DRP_CONFLICT_ACT3:
				uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
				rsv->needs_release_companion_mas = false;
				if (bow->can_reserve_extra_mases == false)
					uwb_rsv_backoff_win_increment(rc);
				uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);
			}
		} else { /* rsv is target */
			switch (action) {
			case UWB_DRP_CONFLICT_ACT2:
			case UWB_DRP_CONFLICT_ACT3:
				uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_CONFLICT);
				/* send_drp_avail_ie = true; */
			}
		}
	} else { /* the base part of the reservation is also conflicting */
		if (uwb_rsv_is_owner(rsv)) {
			uwb_rsv_backoff_win_increment(rc);
			/* remove the companion part */
			uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);

			/* drop the conflicting MASs, with reason code "modified" */

			/* put the MASs to be dropped in the companion bitmap */
			bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS);
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
		} else { /* it is a target rsv */
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
			/* send_drp_avail_ie = true; */
		}
	}
}

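/*
 * Handle a conflict between an external DRP IE and one local
 * reservation: dispatch to the expanding or the normal conflict
 * handler depending on whether the reservation currently has a
 * companion DRP IE and on which part of it overlaps the conflicting
 * MASs.
 */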
static void uwb_drp_handle_conflict_rsv(struct uwb_rc *rc, struct uwb_rsv *rsv,
					struct uwb_rc_evt_drp *drp_evt,
					struct uwb_ie_drp *drp_ie,
					struct uwb_mas_bm *conflicting_mas)
{
	struct uwb_rsv_move *mv;

	/* check if the conflicting reservation has two drp_ies */
	if (uwb_rsv_has_two_drp_ies(rsv)) {
		mv = &rsv->mv;
		if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) {
			handle_conflict_expanding(drp_ie, drp_evt->beacon_slot_number,
						  rsv, false, conflicting_mas);
		} else {
			if (bitmap_intersects(mv->companion_mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) {
				handle_conflict_expanding(drp_ie, drp_evt->beacon_slot_number,
							  rsv, true, conflicting_mas);
			}
		}
	} else if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) {
		handle_conflict_normal(drp_ie, drp_evt->beacon_slot_number, rsv, conflicting_mas);
	}
}

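/*
 * Check every local reservation against the conflicting MAS bitmap and
 * resolve any conflicts found.
 */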
static void uwb_drp_handle_all_conflict_rsv(struct uwb_rc *rc,
					    struct uwb_rc_evt_drp *drp_evt,
					    struct uwb_ie_drp *drp_ie,
					    struct uwb_mas_bm *conflicting_mas)
{
	struct uwb_rsv *rsv;

	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, conflicting_mas);
	}
}

/*
 * Based on the DRP IE, transition a target reservation to a new
 * state.
 */
static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv,
				   struct uwb_ie_drp *drp_ie, struct uwb_rc_evt_drp *drp_evt)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_rsv_move *mv = &rsv->mv;
	int status;
	enum uwb_drp_reason reason_code;
	struct uwb_mas_bm mas;

	status = uwb_ie_drp_status(drp_ie);
	reason_code = uwb_ie_drp_reason_code(drp_ie);
	uwb_drp_ie_to_bm(&mas, drp_ie);

	switch (reason_code) {
	case UWB_DRP_REASON_ACCEPTED:

		if (rsv->state == UWB_RSV_STATE_T_CONFLICT) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
			break;
		}

		if (rsv->state == UWB_RSV_STATE_T_EXPANDING_ACCEPTED) {
			/* drp_ie is companion */
			if (!bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS))
				/* stroke companion */
				uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
		} else {
			if (!bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) {
				if (uwb_drp_avail_reserve_pending(rc, &mas) == -EBUSY) {
					uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas);
				} else {
					/* accept the extra reservation */
					bitmap_copy(mv->companion_mas.bm, mas.bm, UWB_NUM_MAS);
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
				}
			} else {
				if (status) {
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED);
				}
			}

		}
		break;

	case UWB_DRP_REASON_MODIFIED:
		/* check to see if we have already modified the reservation */
		if (bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED);
			break;
		}

		/* find if the owner wants to expand or reduce */
		if (bitmap_subset(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) {
			/* owner is reducing */
			bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mas.bm, UWB_NUM_MAS);
			uwb_drp_avail_release(rsv->rc, &mv->companion_mas);
		}

		bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS);
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_RESIZED);
		break;
	default:
		dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
			 reason_code, status);
	}
}

/*
 * Based on the DRP IE, transition an owner reservation to a new
 * state.
 */
static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv,
				  struct uwb_dev *src, struct uwb_ie_drp *drp_ie,
				  struct uwb_rc_evt_drp *drp_evt)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_rsv_move *mv = &rsv->mv;
	int status;
	enum uwb_drp_reason reason_code;
	struct uwb_mas_bm mas;

	status = uwb_ie_drp_status(drp_ie);
	reason_code = uwb_ie_drp_reason_code(drp_ie);
	uwb_drp_ie_to_bm(&mas, drp_ie);

	if (status) {
		switch (reason_code) {
		case UWB_DRP_REASON_ACCEPTED:
			switch (rsv->state) {
			case UWB_RSV_STATE_O_PENDING:
			case UWB_RSV_STATE_O_INITIATED:
			case UWB_RSV_STATE_O_ESTABLISHED:
				uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
				break;
			case UWB_RSV_STATE_O_MODIFIED:
				if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) {
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
				} else {
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
				}
				break;

			case UWB_RSV_STATE_O_MOVE_REDUCING: /* shouldn't be a problem */
				if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) {
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
				} else {
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
				}
				break;
			case UWB_RSV_STATE_O_MOVE_EXPANDING:
				if (bitmap_equal(mas.bm, mv->companion_mas.bm, UWB_NUM_MAS)) {
					/* Companion reservation accepted */
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
				} else {
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING);
				}
				break;
			case UWB_RSV_STATE_O_MOVE_COMBINING:
				if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS))
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
				else
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
				break;
			default:
				break;
			}
			break;
		default:
			dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
				 reason_code, status);
		}
	} else {
		switch (reason_code) {
		case UWB_DRP_REASON_PENDING:
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_PENDING);
			break;
		case UWB_DRP_REASON_DENIED:
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
			break;
		case UWB_DRP_REASON_CONFLICT:
			/* resolve the conflict */
			bitmap_complement(mas.bm, src->last_availability_bm,
					  UWB_NUM_MAS);
			uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, &mas);
			break;
		default:
			dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
				 reason_code, status);
		}
	}
}

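/*
 * (Re)arm the expiry timer of an alien BP conflict entry: the entry is
 * kept alive for UWB_MAX_LOST_BEACONS superframes after the last time
 * the corresponding alien BP DRP IE was seen.
 */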
static void uwb_cnflt_alien_stroke_timer(struct uwb_cnflt_alien *cnflt)
{
	unsigned timeout_us = UWB_MAX_LOST_BEACONS * UWB_SUPERFRAME_LENGTH_US;
	mod_timer(&cnflt->timer, jiffies + usecs_to_jiffies(timeout_us));
}

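/*
 * Work item run when an alien BP conflict entry expires: remove the
 * entry from the list, rebuild the radio controller's global
 * conflicting-MAS bitmap from the remaining entries and schedule the
 * alien BP reservation update.
 */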
static void uwb_cnflt_update_work(struct work_struct *work)
{
	struct uwb_cnflt_alien *cnflt = container_of(work,
						     struct uwb_cnflt_alien,
						     cnflt_update_work);
	struct uwb_cnflt_alien *c;
	struct uwb_rc *rc = cnflt->rc;

	unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;

	mutex_lock(&rc->rsvs_mutex);

	list_del(&cnflt->rc_node);

	/* update rc global conflicting alien bitmap */
	bitmap_zero(rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS);

	list_for_each_entry(c, &rc->cnflt_alien_list, rc_node) {
		bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, c->mas.bm, UWB_NUM_MAS);
	}

	queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work, usecs_to_jiffies(delay_us));

	kfree(cnflt);
	mutex_unlock(&rc->rsvs_mutex);
}

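/* Timer callback: defer the conflict entry clean-up to the workqueue. */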
static void uwb_cnflt_timer(unsigned long arg)
{
	struct uwb_cnflt_alien *cnflt = (struct uwb_cnflt_alien *)arg;

	queue_work(cnflt->rc->rsv_workq, &cnflt->cnflt_update_work);
}

/*
 * We have received a DRP IE of type Alien BP and we need to make
 * sure we do not transmit in conflicting MASs.
 */
static void uwb_drp_handle_alien_drp(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_mas_bm mas;
	struct uwb_cnflt_alien *cnflt;
	char buf[72];
	unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;

	uwb_drp_ie_to_bm(&mas, drp_ie);
	bitmap_scnprintf(buf, sizeof(buf), mas.bm, UWB_NUM_MAS);

	list_for_each_entry(cnflt, &rc->cnflt_alien_list, rc_node) {
		if (bitmap_equal(cnflt->mas.bm, mas.bm, UWB_NUM_MAS)) {
			/* Existing alien BP reservation conflicting
			 * bitmap, just reset the timer */
			uwb_cnflt_alien_stroke_timer(cnflt);
			return;
		}
	}

	/* New alien BP reservation conflicting bitmap */

	/* alloc and initialize new uwb_cnflt_alien */
	cnflt = kzalloc(sizeof(struct uwb_cnflt_alien), GFP_KERNEL);
	if (!cnflt) {
		dev_err(dev, "failed to alloc uwb_cnflt_alien struct\n");
		return;
	}
	INIT_LIST_HEAD(&cnflt->rc_node);
	init_timer(&cnflt->timer);
	cnflt->timer.function = uwb_cnflt_timer;
	cnflt->timer.data     = (unsigned long)cnflt;

	cnflt->rc = rc;
	INIT_WORK(&cnflt->cnflt_update_work, uwb_cnflt_update_work);

	bitmap_copy(cnflt->mas.bm, mas.bm, UWB_NUM_MAS);

	list_add_tail(&cnflt->rc_node, &rc->cnflt_alien_list);

	/* update rc global conflicting alien bitmap */
	bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, mas.bm, UWB_NUM_MAS);

	queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work, usecs_to_jiffies(delay_us));

	/* start the timer */
	uwb_cnflt_alien_stroke_timer(cnflt);
}

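/*
 * Process a DRP IE for a reservation that does not involve this radio
 * controller: the MASs it reserves may still conflict with our own
 * reservations, so check all of them.
 */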
static void uwb_drp_process_not_involved(struct uwb_rc *rc,
					 struct uwb_rc_evt_drp *drp_evt,
					 struct uwb_ie_drp *drp_ie)
{
	struct uwb_mas_bm mas;

	uwb_drp_ie_to_bm(&mas, drp_ie);
	uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas);
}

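/*
 * Process a DRP IE addressed to this radio controller: look up the
 * matching reservation and run either the target or the owner state
 * machine on it.  DRP IEs for unknown or already terminated
 * reservations are ignored.
 */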
static void uwb_drp_process_involved(struct uwb_rc *rc, struct uwb_dev *src,
				     struct uwb_rc_evt_drp *drp_evt,
				     struct uwb_ie_drp *drp_ie)
{
	struct uwb_rsv *rsv;

	rsv = uwb_rsv_find(rc, src, drp_ie);
	if (!rsv) {
		/*
		 * No reservation? It's either for a recently
		 * terminated reservation; or the DRP IE couldn't be
		 * processed (e.g., an invalid IE or out of memory).
		 */
		return;
	}

	/*
	 * Do nothing with DRP IEs for reservations that have been
	 * terminated.
	 */
	if (rsv->state == UWB_RSV_STATE_NONE) {
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
		return;
	}

	if (uwb_ie_drp_owner(drp_ie))
		uwb_drp_process_target(rc, rsv, drp_ie, drp_evt);
	else
		uwb_drp_process_owner(rc, rsv, src, drp_ie, drp_evt);

}


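/*
 * Return true if the DevAddr in the DRP IE is our own, i.e. the
 * reservation described by the IE involves this radio controller.
 */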
static bool uwb_drp_involves_us(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie)
{
	return uwb_dev_addr_cmp(&rc->uwb_dev.dev_addr, &drp_ie->dev_addr) == 0;
}

/*
 * Process a received DRP IE.
 */
static void uwb_drp_process(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
			    struct uwb_dev *src, struct uwb_ie_drp *drp_ie)
{
	if (uwb_ie_drp_type(drp_ie) == UWB_DRP_TYPE_ALIEN_BP)
		uwb_drp_handle_alien_drp(rc, drp_ie);
	else if (uwb_drp_involves_us(rc, drp_ie))
		uwb_drp_process_involved(rc, src, drp_evt, drp_ie);
	else
		uwb_drp_process_not_involved(rc, drp_evt, drp_ie);
}

/*
 * Process a received DRP Availability IE
 */
static void uwb_drp_availability_process(struct uwb_rc *rc, struct uwb_dev *src,
					 struct uwb_ie_drp_avail *drp_availability_ie)
{
	bitmap_copy(src->last_availability_bm,
		    drp_availability_ie->bmp, UWB_NUM_MAS);
}

/*
 * Process all the DRP IEs (both DRP IEs and the DRP Availability IE)
 * from a device.
 */
static
void uwb_drp_process_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
			 size_t ielen, struct uwb_dev *src_dev)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_ie_hdr *ie_hdr;
	void *ptr;

	ptr = drp_evt->ie_data;
	for (;;) {
		ie_hdr = uwb_ie_next(&ptr, &ielen);
		if (!ie_hdr)
			break;

		switch (ie_hdr->element_id) {
		case UWB_IE_DRP_AVAILABILITY:
			uwb_drp_availability_process(rc, src_dev, (struct uwb_ie_drp_avail *)ie_hdr);
			break;
		case UWB_IE_DRP:
			uwb_drp_process(rc, drp_evt, src_dev, (struct uwb_ie_drp *)ie_hdr);
			break;
		default:
			dev_warn(dev, "unexpected IE in DRP notification\n");
			break;
		}
	}

	if (ielen > 0)
		dev_warn(dev, "%d octets remaining in DRP notification\n",
			 (int)ielen);
}

/**
 * uwbd_evt_handle_rc_drp - handle a DRP_IE event
 * @evt: the DRP_IE event from the radio controller
 *
 * This processes DRP notifications from the radio controller, either
 * initiating a new reservation or transitioning an existing
 * reservation into a different state.
 *
 * DRP notifications can occur for three different reasons:
 *
 * - UWB_DRP_NOTIF_DRP_IE_RECVD: one or more DRP IEs with the RC as
 *   the target or source have been received.
 *
 *   These DRP IEs could be new or for an existing reservation.
 *
 *   If the DRP IE for an existing reservation ceases to be
 *   received for at least mMaxLostBeacons, the reservation should be
 *   considered to be terminated.  Note that the TERMINATE reason (see
 *   below) may not always be signalled (e.g., the remote device has
 *   two or more reservations established with the RC).
 *
 * - UWB_DRP_NOTIF_CONFLICT: DRP IEs from any device in the beacon
 *   group conflict with the RC's reservations.
 *
 * - UWB_DRP_NOTIF_TERMINATE: DRP IEs are no longer being received
 *   from a device (i.e., it's terminated all reservations).
 *
 * Only the software state of the reservations is changed; the setting
 * of the radio controller's DRP IEs is done after all the events in
 * an event buffer are processed.  This saves waiting multiple times
 * for the SET_DRP_IE command to complete.
 */
int uwbd_evt_handle_rc_drp(struct uwb_event *evt)
{
	struct device *dev = &evt->rc->uwb_dev.dev;
	struct uwb_rc *rc = evt->rc;
	struct uwb_rc_evt_drp *drp_evt;
	size_t ielength, bytes_left;
	struct uwb_dev_addr src_addr;
	struct uwb_dev *src_dev;

	/* Is there enough data to decode the event (and any IEs in
	   its payload)? */
	if (evt->notif.size < sizeof(*drp_evt)) {
		dev_err(dev, "DRP event: Not enough data to decode event "
			"[%zu bytes left, %zu needed]\n",
			evt->notif.size, sizeof(*drp_evt));
		return 0;
	}
	bytes_left = evt->notif.size - sizeof(*drp_evt);
	drp_evt = container_of(evt->notif.rceb, struct uwb_rc_evt_drp, rceb);
	ielength = le16_to_cpu(drp_evt->ie_length);
	if (bytes_left != ielength) {
		dev_err(dev, "DRP event: Not enough data in payload [%zu "
			"bytes left, %zu declared in the event]\n",
			bytes_left, ielength);
		return 0;
	}

	memcpy(src_addr.data, &drp_evt->src_addr, sizeof(src_addr));
	src_dev = uwb_dev_get_by_devaddr(rc, &src_addr);
	if (!src_dev) {
		/*
		 * A DRP notification from an unrecognized device.
		 *
		 * This is probably from a WUSB device that doesn't
		 * have an EUI-48 and therefore doesn't show up in the
		 * UWB device database.  It's safe to simply ignore
		 * these.
		 */
		return 0;
	}

	mutex_lock(&rc->rsvs_mutex);

	/* We do not distinguish between the different notification reasons. */
	uwb_drp_process_all(rc, drp_evt, ielength, src_dev);

	mutex_unlock(&rc->rsvs_mutex);

	uwb_dev_put(src_dev);
	return 0;
}