/*********************************************************************
 *
 * Filename:      irlap.c
 * Version:       1.0
 * Description:   IrLAP implementation for Linux
 * Status:        Stable
 * Author:        Dag Brattli <dagb@cs.uit.no>
 * Created at:    Mon Aug  4 20:40:53 1997
 * Modified at:   Tue Dec 14 09:26:44 1999
 * Modified by:   Dag Brattli <dagb@cs.uit.no>
 *
 *     Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
 *     Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
 *
 *     This program is free software; you can redistribute it and/or
 *     modify it under the terms of the GNU General Public License as
 *     published by the Free Software Foundation; either version 2 of
 *     the License, or (at your option) any later version.
 *
 *     This program is distributed in the hope that it will be useful,
 *     but WITHOUT ANY WARRANTY; without even the implied warranty of
 *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *     GNU General Public License for more details.
 *
 *     You should have received a copy of the GNU General Public License
 *     along with this program; if not, write to the Free Software
 *     Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 *     MA 02111-1307 USA
 *
 ********************************************************************/

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/seq_file.h>

#include <net/irda/irda.h>
#include <net/irda/irda_device.h>
#include <net/irda/irqueue.h>
#include <net/irda/irlmp.h>
#include <net/irda/irlmp_frame.h>
#include <net/irda/irlap_frame.h>
#include <net/irda/irlap.h>
#include <net/irda/timer.h>
#include <net/irda/qos.h>

static hashbin_t *irlap = NULL;
int sysctl_slot_timeout = SLOT_TIMEOUT * 1000 / HZ;
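/* The sysctl value is expressed in milliseconds; it is converted back to
 * jiffies (ms * HZ / 1000) in irlap_discovery_request() before the slot
 * timer is armed. */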

/* This is the delay of a missed pf period before generating an event
 * to the application. The spec mandates 3 seconds, but in some cases
 * it's way too long. - Jean II */
int sysctl_warn_noreply_time = 3;

extern void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb);
static void __irlap_close(struct irlap_cb *self);
static void irlap_init_qos_capabilities(struct irlap_cb *self,
					struct qos_info *qos_user);

#ifdef CONFIG_IRDA_DEBUG
static const char *const lap_reasons[] = {
	"ERROR, NOT USED",
	"LAP_DISC_INDICATION",
	"LAP_NO_RESPONSE",
	"LAP_RESET_INDICATION",
	"LAP_FOUND_NONE",
	"LAP_MEDIA_BUSY",
	"LAP_PRIMARY_CONFLICT",
	"ERROR, NOT USED",
};
#endif	/* CONFIG_IRDA_DEBUG */

int __init irlap_init(void)
{
	/* Check that the compiler laid these structures out as expected;
	 * padding may sneak in on some ARM configurations, check with
	 * Russell King. */
	IRDA_ASSERT(sizeof(struct xid_frame) == 14, ;);
	IRDA_ASSERT(sizeof(struct test_frame) == 10, ;);
	IRDA_ASSERT(sizeof(struct ua_frame) == 10, ;);
	IRDA_ASSERT(sizeof(struct snrm_frame) == 11, ;);
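	/* These structs describe on-the-wire frame layouts; if the compiler
	 * pads them, every frame we build or parse would be laid out wrongly,
	 * so it's better to fail loudly here at init time. */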

	/* Allocate master array */
	irlap = hashbin_new(HB_LOCK);
	if (irlap == NULL) {
		IRDA_ERROR("%s: can't allocate irlap hashbin!\n",
			   __func__);
		return -ENOMEM;
	}

	return 0;
}

void irlap_cleanup(void)
{
	IRDA_ASSERT(irlap != NULL, return;);

	hashbin_delete(irlap, (FREE_FUNC) __irlap_close);
}

/*
 * Function irlap_open (driver)
 *
 *    Initialize IrLAP layer
 *
 */
struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos,
			    const char *hw_name)
{
	struct irlap_cb *self;

	IRDA_DEBUG(4, "%s()\n", __func__);

	/* Initialize the irlap structure. */
	self = kzalloc(sizeof(struct irlap_cb), GFP_KERNEL);
	if (self == NULL)
		return NULL;

	self->magic = LAP_MAGIC;

	/* Make a binding between the layers */
	self->netdev = dev;
	self->qos_dev = qos;
	/* Copy hardware name */
	if (hw_name != NULL) {
		strlcpy(self->hw_name, hw_name, sizeof(self->hw_name));
	} else {
		self->hw_name[0] = '\0';
	}

	dev->atalk_ptr = self;

	self->state = LAP_OFFLINE;

	/* Initialize transmit queue */
	skb_queue_head_init(&self->txq);
	skb_queue_head_init(&self->txq_ultra);
	skb_queue_head_init(&self->wx_list);

	/* My unique IrLAP device address! */
	/* We don't want the broadcast address, nor the NULL address
	 * (most often used to signify "invalid"), and we don't want an
	 * address already in use (otherwise connect won't be able
	 * to select the proper link). - Jean II */
	do {
		get_random_bytes(&self->saddr, sizeof(self->saddr));
	} while ((self->saddr == 0x0) || (self->saddr == BROADCAST) ||
		 (hashbin_lock_find(irlap, self->saddr, NULL)) );
	/* Copy to the driver */
	memcpy(dev->dev_addr, &self->saddr, 4);

	init_timer(&self->slot_timer);
	init_timer(&self->query_timer);
	init_timer(&self->discovery_timer);
	init_timer(&self->final_timer);
	init_timer(&self->poll_timer);
	init_timer(&self->wd_timer);
	init_timer(&self->backoff_timer);
	init_timer(&self->media_busy_timer);

	irlap_apply_default_connection_parameters(self);

	self->N3 = 3; /* # of connection attempts to try before giving up */

	self->state = LAP_NDM;

	hashbin_insert(irlap, (irda_queue_t *) self, self->saddr, NULL);

	irlmp_register_link(self, self->saddr, &self->notify);

	return self;
}
EXPORT_SYMBOL(irlap_open);

/*
 * Function __irlap_close (self)
 *
 *    Remove IrLAP and all allocated memory. Stop any pending timers.
 *
 */
static void __irlap_close(struct irlap_cb *self)
{
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	/* Stop timers */
	del_timer(&self->slot_timer);
	del_timer(&self->query_timer);
	del_timer(&self->discovery_timer);
	del_timer(&self->final_timer);
	del_timer(&self->poll_timer);
	del_timer(&self->wd_timer);
	del_timer(&self->backoff_timer);
	del_timer(&self->media_busy_timer);

	irlap_flush_all_queues(self);

	self->magic = 0;

	kfree(self);
}

/*
 * Function irlap_close (self)
 *
 *    Remove IrLAP instance
 *
 */
void irlap_close(struct irlap_cb *self)
{
	struct irlap_cb *lap;

	IRDA_DEBUG(4, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	/* We used to send a LAP_DISC_INDICATION here, but this was
	 * racy. It has been moved into irlmp_unregister_link()
	 * itself. Jean II */

	/* Kill the LAP and all LSAPs on top of it */
	irlmp_unregister_link(self->saddr);
	self->notify.instance = NULL;

	/* Be sure that we manage to remove ourselves from the hash */
	lap = hashbin_remove(irlap, self->saddr, NULL);
	if (!lap) {
		IRDA_DEBUG(1, "%s(), Didn't find myself!\n", __func__);
		return;
	}
	__irlap_close(lap);
}
EXPORT_SYMBOL(irlap_close);

/*
 * Function irlap_connect_indication (self, skb)
 *
 *    Another device is attempting to make a connection
 *
 */
void irlap_connect_indication(struct irlap_cb *self, struct sk_buff *skb)
{
	IRDA_DEBUG(4, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	irlap_init_qos_capabilities(self, NULL); /* No user QoS! */

	irlmp_link_connect_indication(self->notify.instance, self->saddr,
				      self->daddr, &self->qos_tx, skb);
}

/*
 * Function irlap_connect_response (self, skb)
 *
 *    Service user has accepted incoming connection
 *
 */
void irlap_connect_response(struct irlap_cb *self, struct sk_buff *userdata)
{
	IRDA_DEBUG(4, "%s()\n", __func__);

	irlap_do_event(self, CONNECT_RESPONSE, userdata, NULL);
}

/*
 * Function irlap_connect_request (self, daddr, qos_user, sniff)
 *
 *    Request connection with another device, sniffing is not implemented
 *    yet.
 *
 */
void irlap_connect_request(struct irlap_cb *self, __u32 daddr,
			   struct qos_info *qos_user, int sniff)
{
	IRDA_DEBUG(3, "%s(), daddr=0x%08x\n", __func__, daddr);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	self->daddr = daddr;

	/*
	 *  If the service user specifies QoS values for this connection,
	 *  then use them
	 */
	irlap_init_qos_capabilities(self, qos_user);

	if ((self->state == LAP_NDM) && !self->media_busy)
		irlap_do_event(self, CONNECT_REQUEST, NULL, NULL);
	else
		self->connect_pending = TRUE;
}

/*
 * Function irlap_connect_confirm (self, skb)
 *
 *    Connection request has been accepted
 *
 */
void irlap_connect_confirm(struct irlap_cb *self, struct sk_buff *skb)
{
	IRDA_DEBUG(4, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	irlmp_link_connect_confirm(self->notify.instance, &self->qos_tx, skb);
}

/*
 * Function irlap_data_indication (self, skb)
 *
 *    Received data frames from IR-port, so we just pass them up to
 *    IrLMP for further processing
 *
 */
void irlap_data_indication(struct irlap_cb *self, struct sk_buff *skb,
			   int unreliable)
{
	/* Hide LAP header from IrLMP layer */
	skb_pull(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);

	irlmp_link_data_indication(self->notify.instance, skb, unreliable);
}


/*
 * Function irlap_data_request (self, skb)
 *
 *    Queue data for transmission, must wait until XMIT state
 *
 */
void irlap_data_request(struct irlap_cb *self, struct sk_buff *skb,
			int unreliable)
{
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	IRDA_DEBUG(3, "%s()\n", __func__);

	IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER),
		    return;);
	skb_push(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);

	/*
	 *  Must set frame format now so that the rest of the code knows
	 *  if it's dealing with an I or a UI frame
	 */
	if (unreliable)
		skb->data[1] = UI_FRAME;
	else
		skb->data[1] = I_FRAME;
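	/* After the skb_push() above, data[0] is the LAP address octet and
	 * data[1] the control octet; here we only pre-tag the frame type so
	 * later code can tell I-frames from UI-frames, the rest of the
	 * header is filled in by the framing code. */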

	/* Don't forget to refcount it - see irlmp_connect_request(). */
	skb_get(skb);

	/* Add at the end of the queue (keep ordering) - Jean II */
	skb_queue_tail(&self->txq, skb);

	if ((self->state == LAP_XMIT_P) || (self->state == LAP_XMIT_S)) {
		/* If we are not already processing the Tx queue, trigger
		 * transmission immediately - Jean II */
		if ((skb_queue_len(&self->txq) <= 1) && (!self->local_busy))
			irlap_do_event(self, DATA_REQUEST, skb, NULL);
		/* Otherwise, the packets will be sent normally at the
		 * next pf-poll - Jean II */
	}
}

/*
 * Function irlap_unitdata_request (self, skb)
 *
 *    Send Ultra data. This is data that must be sent outside any connection
 *
 */
#ifdef CONFIG_IRDA_ULTRA
void irlap_unitdata_request(struct irlap_cb *self, struct sk_buff *skb)
{
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	IRDA_DEBUG(3, "%s()\n", __func__);

	IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER),
	       return;);
	skb_push(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);

	skb->data[0] = CBROADCAST;
	skb->data[1] = UI_FRAME;

	/* Don't need to refcount, see irlmp_connless_data_request() */

	skb_queue_tail(&self->txq_ultra, skb);

	irlap_do_event(self, SEND_UI_FRAME, NULL, NULL);
}
#endif /* CONFIG_IRDA_ULTRA */

/*
 * Function irlap_unitdata_indication (self, skb)
 *
 *    Receive Ultra data. This is data that is received outside any connection
 *
 */
#ifdef CONFIG_IRDA_ULTRA
void irlap_unitdata_indication(struct irlap_cb *self, struct sk_buff *skb)
{
	IRDA_DEBUG(1, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
	IRDA_ASSERT(skb != NULL, return;);

	/* Hide LAP header from IrLMP layer */
	skb_pull(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);

	irlmp_link_unitdata_indication(self->notify.instance, skb);
}
#endif /* CONFIG_IRDA_ULTRA */

/*
 * Function irlap_disconnect_request (void)
 *
 *    Request to disconnect connection by service user
 */
void irlap_disconnect_request(struct irlap_cb *self)
{
	IRDA_DEBUG(3, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	/* Don't disconnect until all data frames are successfully sent */
	if (!skb_queue_empty(&self->txq)) {
		self->disconnect_pending = TRUE;
		return;
	}

	/* Check if we are in the right state for disconnecting */
	switch (self->state) {
	case LAP_XMIT_P:        /* FALLTHROUGH */
	case LAP_XMIT_S:        /* FALLTHROUGH */
	case LAP_CONN:          /* FALLTHROUGH */
	case LAP_RESET_WAIT:    /* FALLTHROUGH */
	case LAP_RESET_CHECK:
		irlap_do_event(self, DISCONNECT_REQUEST, NULL, NULL);
		break;
	default:
		IRDA_DEBUG(2, "%s(), disconnect pending!\n", __func__);
		self->disconnect_pending = TRUE;
		break;
	}
}

/*
 * Function irlap_disconnect_indication (void)
 *
 *    Disconnect request from other device
 *
 */
void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON reason)
{
	IRDA_DEBUG(1, "%s(), reason=%s\n", __func__, lap_reasons[reason]);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	/* Flush queues */
	irlap_flush_all_queues(self);

	switch (reason) {
	case LAP_RESET_INDICATION:
		IRDA_DEBUG(1, "%s(), Sending reset request!\n", __func__);
		irlap_do_event(self, RESET_REQUEST, NULL, NULL);
		break;
	case LAP_NO_RESPONSE:	   /* FALLTHROUGH */
	case LAP_DISC_INDICATION:  /* FALLTHROUGH */
	case LAP_FOUND_NONE:       /* FALLTHROUGH */
	case LAP_MEDIA_BUSY:
		irlmp_link_disconnect_indication(self->notify.instance, self,
						 reason, NULL);
		break;
	default:
		IRDA_ERROR("%s: Unknown reason %d\n", __func__, reason);
	}
}

/*
 * Function irlap_discovery_request (gen_addr_bit)
 *
 *    Start one single discovery operation.
 *
 */
void irlap_discovery_request(struct irlap_cb *self, discovery_t *discovery)
{
	struct irlap_info info;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
	IRDA_ASSERT(discovery != NULL, return;);

	IRDA_DEBUG(4, "%s(), nslots = %d\n", __func__, discovery->nslots);

	IRDA_ASSERT((discovery->nslots == 1) || (discovery->nslots == 6) ||
		    (discovery->nslots == 8) || (discovery->nslots == 16),
		    return;);

	/* Discovery is only possible in NDM mode */
	if (self->state != LAP_NDM) {
		IRDA_DEBUG(4, "%s(), discovery only possible in NDM mode\n",
			   __func__);
		irlap_discovery_confirm(self, NULL);
		/* Note : in theory, if we are not in NDM, we could postpone
		 * the discovery like we do for connection request.
		 * In practice, it's not worth it. If the media was busy,
		 * it's likely next time around it won't be busy. If we are
		 * in REPLY state, we will get passive discovery info & event.
		 * Jean II */
		return;
	}

	/* Check if last discovery request finished in time, or if
	 * it was aborted due to the media busy flag. */
	if (self->discovery_log != NULL) {
		hashbin_delete(self->discovery_log, (FREE_FUNC) kfree);
		self->discovery_log = NULL;
	}

	/* All operations will occur at predictable times, no need to lock */
	self->discovery_log = hashbin_new(HB_NOLOCK);

	if (self->discovery_log == NULL) {
		IRDA_WARNING("%s(), Unable to allocate discovery log!\n",
			     __func__);
		return;
	}

	info.S = discovery->nslots; /* Number of slots */
	info.s = 0; /* Current slot */

	self->discovery_cmd = discovery;
	info.discovery = discovery;

	/* sysctl_slot_timeout bounds are checked in irsysctl.c - Jean II */
	self->slot_timeout = sysctl_slot_timeout * HZ / 1000;

	irlap_do_event(self, DISCOVERY_REQUEST, NULL, &info);
}

/*
 * Function irlap_discovery_confirm (log)
 *
 *    A device has been discovered in front of this station, we
 *    report directly to LMP.
 */
void irlap_discovery_confirm(struct irlap_cb *self, hashbin_t *discovery_log)
{
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	IRDA_ASSERT(self->notify.instance != NULL, return;);

	/*
	 * Check for successful discovery, since we are then allowed to clear
	 * the media busy condition (IrLAP 6.13.4 - p.94). This should allow
	 * us to make connection attempts much faster and easier (i.e. no
	 * collisions).
	 * Setting media busy to false will also generate an event allowing
	 * to process pending events in NDM state machine.
	 * Note : the spec doesn't define what a successful discovery is.
	 * If we want Ultra to work, it's successful even if there is
	 * nobody discovered - Jean II
	 */
	if (discovery_log)
		irda_device_set_media_busy(self->netdev, FALSE);

	/* Inform IrLMP */
	irlmp_link_discovery_confirm(self->notify.instance, discovery_log);
}

/*
 * Function irlap_discovery_indication (log)
 *
 *    Somebody is trying to discover us!
 *
 */
void irlap_discovery_indication(struct irlap_cb *self, discovery_t *discovery)
{
	IRDA_DEBUG(4, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
	IRDA_ASSERT(discovery != NULL, return;);

	IRDA_ASSERT(self->notify.instance != NULL, return;);

	/* A device is very likely to connect immediately after it performs
	 * a successful discovery. This means that in our case, we are much
	 * more likely to receive a connection request over the medium.
	 * So, we backoff to avoid collisions.
	 * IrLAP spec 6.13.4 suggests 100ms...
	 * Note : this little trick actually makes a *BIG* difference. If I set
	 * my Linux box with discovery enabled and one Ultra frame sent every
	 * second, my Palm has no trouble connecting to it every time!
	 * Jean II */
	irda_device_set_media_busy(self->netdev, SMALL);
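	/* AFAIK, SMALL (as opposed to TRUE) asks the device code to keep the
	 * media busy flag raised only for a short backoff, in the spirit of
	 * the ~100ms suggested above, rather than for the full media busy
	 * timeout. */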

	irlmp_link_discovery_indication(self->notify.instance, discovery);
}

/*
 * Function irlap_status_indication (quality_of_link)
 */
void irlap_status_indication(struct irlap_cb *self, int quality_of_link)
{
	switch (quality_of_link) {
	case STATUS_NO_ACTIVITY:
		IRDA_MESSAGE("IrLAP, no activity on link!\n");
		break;
	case STATUS_NOISY:
		IRDA_MESSAGE("IrLAP, noisy link!\n");
		break;
	default:
		break;
	}
	irlmp_status_indication(self->notify.instance,
				quality_of_link, LOCK_NO_CHANGE);
}

/*
 * Function irlap_reset_indication (void)
 */
void irlap_reset_indication(struct irlap_cb *self)
{
	IRDA_DEBUG(1, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	if (self->state == LAP_RESET_WAIT)
		irlap_do_event(self, RESET_REQUEST, NULL, NULL);
	else
		irlap_do_event(self, RESET_RESPONSE, NULL, NULL);
}

/*
 * Function irlap_reset_confirm (void)
 */
void irlap_reset_confirm(void)
{
	IRDA_DEBUG(1, "%s()\n", __func__);
}

/*
 * Function irlap_generate_rand_time_slot (S, s)
 *
 *    Generate a random time slot between s and S-1 where
 *    S = Number of slots (0 -> S-1)
 *    s = Current slot
 */
int irlap_generate_rand_time_slot(int S, int s)
{
	static int rand;
	int slot;

	IRDA_ASSERT((S - s) > 0, return 0;);

	rand += jiffies;
	rand ^= (rand << 12);
	rand ^= (rand >> 20);

	slot = s + rand % (S-s);
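	/* Example: with S = 6 slots and current slot s = 2, this maps a
	 * non-negative rand into the range [2, 5], i.e. one of the
	 * remaining slots. */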

	IRDA_ASSERT((slot >= s) && (slot < S), return 0;);

	return slot;
}

/*
 * Function irlap_update_nr_received (nr)
 *
 *    Remove all acknowledged frames in current window queue. This code is
 *    not intuitive and you should not try to change it. If you think it
 *    contains bugs, please mail a patch to the author instead.
 */
void irlap_update_nr_received(struct irlap_cb *self, int nr)
{
	struct sk_buff *skb = NULL;
	int count = 0;

	/*
	 * Remove all the ack-ed frames from the window queue.
	 */

	/*
	 *  Optimize for the common case. It is most likely that the receiver
	 *  will acknowledge all the frames we have sent! So in that case we
	 *  delete all frames stored in window.
	 */
	if (nr == self->vs) {
		while ((skb = skb_dequeue(&self->wx_list)) != NULL) {
			dev_kfree_skb(skb);
		}
		/* The last acked frame is the next to send minus one */
		self->va = nr - 1;
	} else {
		/* Remove all acknowledged frames in current window */
		while ((skb_peek(&self->wx_list) != NULL) &&
		       (((self->va+1) % 8) != nr))
		{
			skb = skb_dequeue(&self->wx_list);
			dev_kfree_skb(skb);

			self->va = (self->va + 1) % 8;
			count++;
		}
	}

	/* Advance window */
	self->window = self->window_size - skb_queue_len(&self->wx_list);
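	/* Example: with a negotiated window_size of 7 and two frames still
	 * unacked in wx_list, window becomes 5, i.e. we may send five more
	 * I-frames before having to turn the link around. */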
}

/*
 * Function irlap_validate_ns_received (ns)
 *
 *    Validate the next to send (ns) field from received frame.
 */
int irlap_validate_ns_received(struct irlap_cb *self, int ns)
{
	/*  ns as expected?  */
	if (ns == self->vr)
		return NS_EXPECTED;
	/*
	 *  Stations are allowed to treat invalid NS as unexpected NS
	 *  IrLAP, Recv ... with-invalid-Ns. p. 84
	 */
	return NS_UNEXPECTED;

	/* return NR_INVALID; */
}
/*
 * Function irlap_validate_nr_received (nr)
 *
 *    Validate the next to receive (nr) field from received frame.
 *
 */
int irlap_validate_nr_received(struct irlap_cb *self, int nr)
{
	/*  nr as expected?  */
	if (nr == self->vs) {
		IRDA_DEBUG(4, "%s(), expected!\n", __func__);
		return NR_EXPECTED;
	}

	/*
	 *  unexpected nr? (but within current window), first we check if the
	 *  ns numbers of the frames in the current window wrap.
	 */
	if (self->va < self->vs) {
		if ((nr >= self->va) && (nr <= self->vs))
			return NR_UNEXPECTED;
	} else {
		if ((nr >= self->va) || (nr <= self->vs))
			return NR_UNEXPECTED;
	}
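	/* Example of the wrapped case: with va = 5 and vs = 2, any nr in
	 * {5, 6, 7, 0, 1} is treated as unexpected-but-within-window
	 * (nr == 2 was already caught as expected above); anything else
	 * falls through as invalid. */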

	/* Invalid nr!  */
	return NR_INVALID;
}

/*
 * Function irlap_initiate_connection_state ()
 *
 *    Initialize the connection state parameters
 *
 */
void irlap_initiate_connection_state(struct irlap_cb *self)
{
	IRDA_DEBUG(4, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	/* Next to send and next to receive */
	self->vs = self->vr = 0;

	/* Last frame which got acked (0 - 1) % 8 */
	self->va = 7;

	self->window = 1;

	self->remote_busy = FALSE;
	self->retry_count = 0;
}

/*
 * Function irlap_wait_min_turn_around (self, qos)
 *
 *    Wait negotiated minimum turn around time, this function actually sets
 *    the number of XBOFs that must be sent before the next transmitted
 *    frame in order to delay for the specified amount of time. This is
 *    done to avoid using timers, and the forbidden udelay!
 */
void irlap_wait_min_turn_around(struct irlap_cb *self, struct qos_info *qos)
{
	__u32 min_turn_time;
	__u32 speed;

	/* Get QoS values.  */
	speed = qos->baud_rate.value;
	min_turn_time = qos->min_turn_time.value;

	/* No need to calculate XBOFs for speeds over 115200 bps */
	if (speed > 115200) {
		self->mtt_required = min_turn_time;
		return;
	}

	/*
	 *  Send additional BOF's for the next frame for the requested
	 *  min turn time, so now we must calculate how many chars (XBOF's) we
	 *  must send for the requested time period (min turn time)
	 */
	self->xbofs_delay = irlap_min_turn_time_in_bytes(speed, min_turn_time);
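	/* Rough example: at 9600 bps one character takes about 1.04 ms on
	 * the wire (10 bits per XBOF char), so a 10 ms min turn time comes
	 * out at roughly ten XBOFs; the exact figure is computed by
	 * irlap_min_turn_time_in_bytes(). */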
}

/*
 * Function irlap_flush_all_queues (void)
 *
 *    Flush all queues
 *
 */
void irlap_flush_all_queues(struct irlap_cb *self)
{
	struct sk_buff* skb;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	/* Free transmission queue */
	while ((skb = skb_dequeue(&self->txq)) != NULL)
		dev_kfree_skb(skb);

	while ((skb = skb_dequeue(&self->txq_ultra)) != NULL)
		dev_kfree_skb(skb);

	/* Free sliding window buffered packets */
	while ((skb = skb_dequeue(&self->wx_list)) != NULL)
		dev_kfree_skb(skb);
}

/*
 * Function irlap_change_speed (self, speed, now)
 *
 *    Change the speed of the IrDA port
 *
 */
static void irlap_change_speed(struct irlap_cb *self, __u32 speed, int now)
{
	struct sk_buff *skb;

	IRDA_DEBUG(0, "%s(), setting speed to %d\n", __func__, speed);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	self->speed = speed;

	/* Change speed now, or just piggyback speed on frames */
	if (now) {
		/* Send down empty frame to trigger speed change */
		skb = alloc_skb(0, GFP_ATOMIC);
		if (skb)
			irlap_queue_xmit(self, skb);
	}
}

/*
 * Function irlap_init_qos_capabilities (self, qos)
 *
 *    Initialize QoS for this IrLAP session. What we do is compute the
 *    intersection of the QoS capabilities for the user, driver and for
 *    IrLAP itself. Normally, IrLAP will not specify any values, but it can
 *    be used to restrict certain values.
 */
static void irlap_init_qos_capabilities(struct irlap_cb *self,
					struct qos_info *qos_user)
{
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
	IRDA_ASSERT(self->netdev != NULL, return;);

	/* Start out with the maximum QoS support possible */
	irda_init_max_qos_capabilies(&self->qos_rx);

	/* Apply drivers QoS capabilities */
	irda_qos_compute_intersection(&self->qos_rx, self->qos_dev);

	/*
	 *  Check for user supplied QoS parameters. The service user is only
	 *  allowed to supply these values. We check each parameter since the
	 *  user may not have set all of them.
	 */
	if (qos_user) {
		IRDA_DEBUG(1, "%s(), Found user specified QoS!\n", __func__);

		if (qos_user->baud_rate.bits)
			self->qos_rx.baud_rate.bits &= qos_user->baud_rate.bits;

		if (qos_user->max_turn_time.bits)
			self->qos_rx.max_turn_time.bits &= qos_user->max_turn_time.bits;
		if (qos_user->data_size.bits)
			self->qos_rx.data_size.bits &= qos_user->data_size.bits;

		if (qos_user->link_disc_time.bits)
			self->qos_rx.link_disc_time.bits &= qos_user->link_disc_time.bits;
	}

	/* Use 500ms in IrLAP for now */
	self->qos_rx.max_turn_time.bits &= 0x01;
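	/* Bit 0 of the max turn time parameter encodes the 500 ms value, so
	 * masking with 0x01 advertises 500 ms as the only max turn time we
	 * accept. */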

	/* Set data size */
	/*self->qos_rx.data_size.bits &= 0x03;*/

	irda_qos_bits_to_value(&self->qos_rx);
}

/*
 * Function irlap_apply_default_connection_parameters (self)
 *
 *    Use the default connection and transmission parameters
 */
void irlap_apply_default_connection_parameters(struct irlap_cb *self)
{
	IRDA_DEBUG(4, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	/* xbofs : Default value in NDM */
	self->next_bofs   = 12;
	self->bofs_count  = 12;

	/* NDM Speed is 9600 */
	irlap_change_speed(self, 9600, TRUE);

	/* Set mbusy when going to NDM state */
	irda_device_set_media_busy(self->netdev, TRUE);

	/*
	 * Generate random connection address for this session, which must
	 * be 7 bits wide and different from 0x00 and 0xfe
	 */
	while ((self->caddr == 0x00) || (self->caddr == 0xfe)) {
		get_random_bytes(&self->caddr, sizeof(self->caddr));
		self->caddr &= 0xfe;
	}
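	/* The '& 0xfe' keeps the 7-bit connection address in the upper bits
	 * of the byte; bit 0 of the address field is the C/R bit, which gets
	 * OR-ed in when frames are actually built. */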

	/* Use default values until connection has been negotiated */
	self->slot_timeout = sysctl_slot_timeout;
	self->final_timeout = FINAL_TIMEOUT;
	self->poll_timeout = POLL_TIMEOUT;
	self->wd_timeout = WD_TIMEOUT;

	/* Set some default values */
	self->qos_tx.baud_rate.value = 9600;
	self->qos_rx.baud_rate.value = 9600;
	self->qos_tx.max_turn_time.value = 0;
	self->qos_rx.max_turn_time.value = 0;
	self->qos_tx.min_turn_time.value = 0;
	self->qos_rx.min_turn_time.value = 0;
	self->qos_tx.data_size.value = 64;
	self->qos_rx.data_size.value = 64;
	self->qos_tx.window_size.value = 1;
	self->qos_rx.window_size.value = 1;
	self->qos_tx.additional_bofs.value = 12;
	self->qos_rx.additional_bofs.value = 12;
	self->qos_tx.link_disc_time.value = 0;
	self->qos_rx.link_disc_time.value = 0;

	irlap_flush_all_queues(self);

	self->disconnect_pending = FALSE;
	self->connect_pending = FALSE;
}

/*
 * Function irlap_apply_connection_parameters (qos, now)
 *
 *    Initialize IrLAP with the negotiated QoS values
 *
 * If 'now' is false, the speed and xbofs will be changed after the next
 * frame is sent.
 * If 'now' is true, the speed and xbofs are changed immediately
 */
void irlap_apply_connection_parameters(struct irlap_cb *self, int now)
{
	IRDA_DEBUG(4, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	/* Set the negotiated xbofs value */
	self->next_bofs   = self->qos_tx.additional_bofs.value;
	if (now)
		self->bofs_count = self->next_bofs;

	/* Set the negotiated link speed (may need the new xbofs value) */
	irlap_change_speed(self, self->qos_tx.baud_rate.value, now);

	self->window_size = self->qos_tx.window_size.value;
	self->window      = self->qos_tx.window_size.value;

#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
	/*
	 *  Calculate how many bytes it is possible to transmit before the
	 *  link must be turned around
	 */
	self->line_capacity =
		irlap_max_line_capacity(self->qos_tx.baud_rate.value,
					self->qos_tx.max_turn_time.value);
	self->bytes_left = self->line_capacity;
#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */


	/*
	 *  Initialize timeout values, some of the rules are listed on
	 *  page 92 in IrLAP.
	 */
	IRDA_ASSERT(self->qos_tx.max_turn_time.value != 0, return;);
	IRDA_ASSERT(self->qos_rx.max_turn_time.value != 0, return;);
	/* The poll timeout applies only to the primary station.
	 * It defines the maximum time the primary stays in XMIT mode
	 * before timing out and turning the link around (sending a RR).
	 * Or, this is how long we can hold on to the pf bit in primary mode.
	 * Therefore, it must be lower than or equal to our *OWN* max turn around.
	 * Jean II */
	self->poll_timeout = self->qos_tx.max_turn_time.value * HZ / 1000;
	/* The Final timeout applies only to the primary station.
	 * It defines the maximum time the primary waits (mostly in RECV mode)
	 * for an answer from the secondary station before polling it again.
	 * Therefore, it must be greater than or equal to our *PARTNER*
	 * max turn around time - Jean II */
	self->final_timeout = self->qos_rx.max_turn_time.value * HZ / 1000;
	/* The Watchdog Bit timeout applies only to the secondary station.
	 * It defines the maximum time the secondary waits (mostly in RECV mode)
	 * for a poll from the primary station before getting annoyed.
	 * Therefore, it must be greater than or equal to our *PARTNER*
	 * max turn around time - Jean II */
	self->wd_timeout = self->final_timeout * 2;
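	/* Worked example: with both sides advertising a 500 ms max turn
	 * time, poll_timeout and final_timeout both come out at HZ/2 jiffies
	 * and wd_timeout at one second. */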

	/*
	 * N1 and N2 are the maximum retry counts for *both* the final timer
	 * and the wd timer (with a factor 2) as defined above.
	 * After N1 retries of a timer, we give a warning to the user.
	 * After N2 retries, we consider the link dead and disconnect it.
	 * Jean II
	 */

	/*
	 *  Disable N1 if the Link Disconnect/Threshold Time equals the
	 *  no-reply warning time, and make N1 correspond to that warning
	 *  time (3 seconds by default) otherwise. See page 71 in IrLAP for
	 *  more details.
	 *  Actually, it's not always 3 seconds, as we allow to set
	 *  it via sysctl... Max maxtt is 500ms, and N1 needs to be a
	 *  multiple of 2, so 1 second is the minimum we can allow. - Jean II
	 */
	if (self->qos_tx.link_disc_time.value == sysctl_warn_noreply_time)
		/*
		 * If we set N1 to 0, it will trigger immediately, which is
		 * not what we want. What we really want is to disable it,
		 * Jean II
		 */
		self->N1 = -2; /* Disable - Need to be multiple of 2*/
	else
		self->N1 = sysctl_warn_noreply_time * 1000 /
		  self->qos_rx.max_turn_time.value;

	IRDA_DEBUG(4, "Setting N1 = %d\n", self->N1);

	/* Set N2 to match our own disconnect time */
	self->N2 = self->qos_tx.link_disc_time.value * 1000 /
		self->qos_rx.max_turn_time.value;
	IRDA_DEBUG(4, "Setting N2 = %d\n", self->N2);
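	/* Example: a negotiated link disconnect time of 12 s with a partner
	 * max turn time of 500 ms gives N2 = 24, i.e. roughly 24 unanswered
	 * polls before we give up on the link; with the default 3 s warning
	 * time, N1 = 6. */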
}

#ifdef CONFIG_PROC_FS
struct irlap_iter_state {
	int id;
};

static void *irlap_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct irlap_iter_state *iter = seq->private;
	struct irlap_cb *self;

	/* Protect our access to the lap list */
	spin_lock_irq(&irlap->hb_spinlock);
	iter->id = 0;

	for (self = (struct irlap_cb *) hashbin_get_first(irlap);
	     self; self = (struct irlap_cb *) hashbin_get_next(irlap)) {
		if (iter->id == *pos)
			break;
		++iter->id;
	}

	return self;
}

static void *irlap_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct irlap_iter_state *iter = seq->private;

	++*pos;
	++iter->id;
	return (void *) hashbin_get_next(irlap);
}

static void irlap_seq_stop(struct seq_file *seq, void *v)
{
	spin_unlock_irq(&irlap->hb_spinlock);
}

static int irlap_seq_show(struct seq_file *seq, void *v)
{
	const struct irlap_iter_state *iter = seq->private;
	const struct irlap_cb *self = v;

	IRDA_ASSERT(self->magic == LAP_MAGIC, return -EINVAL;);

	seq_printf(seq, "irlap%d ", iter->id);
	seq_printf(seq, "state: %s\n",
		   irlap_state[self->state]);

	seq_printf(seq, "  device name: %s, ",
		   (self->netdev) ? self->netdev->name : "bug");
	seq_printf(seq, "hardware name: %s\n", self->hw_name);

	seq_printf(seq, "  caddr: %#02x, ", self->caddr);
	seq_printf(seq, "saddr: %#08x, ", self->saddr);
	seq_printf(seq, "daddr: %#08x\n", self->daddr);

	seq_printf(seq, "  win size: %d, ",
		   self->window_size);
	seq_printf(seq, "win: %d, ", self->window);
#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
	seq_printf(seq, "line capacity: %d, ",
		   self->line_capacity);
	seq_printf(seq, "bytes left: %d\n", self->bytes_left);
#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
	seq_printf(seq, "  tx queue len: %d ",
		   skb_queue_len(&self->txq));
	seq_printf(seq, "win queue len: %d ",
		   skb_queue_len(&self->wx_list));
	seq_printf(seq, "rbusy: %s", self->remote_busy ?
		   "TRUE" : "FALSE");
	seq_printf(seq, " mbusy: %s\n", self->media_busy ?
		   "TRUE" : "FALSE");

	seq_printf(seq, "  retrans: %d ", self->retry_count);
	seq_printf(seq, "vs: %d ", self->vs);
	seq_printf(seq, "vr: %d ", self->vr);
	seq_printf(seq, "va: %d\n", self->va);

	seq_printf(seq, "  qos\tbps\tmaxtt\tdsize\twinsize\taddbofs\tmintt\tldisc\tcomp\n");

	seq_printf(seq, "  tx\t%d\t",
		   self->qos_tx.baud_rate.value);
	seq_printf(seq, "%d\t",
		   self->qos_tx.max_turn_time.value);
	seq_printf(seq, "%d\t",
		   self->qos_tx.data_size.value);
	seq_printf(seq, "%d\t",
		   self->qos_tx.window_size.value);
	seq_printf(seq, "%d\t",
		   self->qos_tx.additional_bofs.value);
	seq_printf(seq, "%d\t",
		   self->qos_tx.min_turn_time.value);
	seq_printf(seq, "%d\t",
		   self->qos_tx.link_disc_time.value);
	seq_printf(seq, "\n");

	seq_printf(seq, "  rx\t%d\t",
		   self->qos_rx.baud_rate.value);
	seq_printf(seq, "%d\t",
		   self->qos_rx.max_turn_time.value);
	seq_printf(seq, "%d\t",
		   self->qos_rx.data_size.value);
	seq_printf(seq, "%d\t",
		   self->qos_rx.window_size.value);
	seq_printf(seq, "%d\t",
		   self->qos_rx.additional_bofs.value);
	seq_printf(seq, "%d\t",
		   self->qos_rx.min_turn_time.value);
	seq_printf(seq, "%d\n",
		   self->qos_rx.link_disc_time.value);

	return 0;
}

static const struct seq_operations irlap_seq_ops = {
	.start  = irlap_seq_start,
	.next   = irlap_seq_next,
	.stop   = irlap_seq_stop,
	.show   = irlap_seq_show,
};

static int irlap_seq_open(struct inode *inode, struct file *file)
{
	if (irlap == NULL)
		return -EINVAL;

	return seq_open_private(file, &irlap_seq_ops,
			sizeof(struct irlap_iter_state));
}

const struct file_operations irlap_seq_fops = {
	.owner		= THIS_MODULE,
	.open           = irlap_seq_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release	= seq_release_private,
};

#endif /* CONFIG_PROC_FS */