1/* SCTP kernel reference Implementation
2 * (C) Copyright IBM Corp. 2001, 2004
3 * Copyright (c) 1999 Cisco, Inc.
4 * Copyright (c) 1999-2001 Motorola, Inc.
5 *
6 * This file is part of the SCTP kernel reference Implementation
7 *
8 * These functions work with the state functions in sctp_sm_statefuns.c
 * to implement the state operations.  These functions implement the
10 * steps which require modifying existing data structures.
11 *
12 * The SCTP reference implementation is free software;
13 * you can redistribute it and/or modify it under the terms of
14 * the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
17 *
18 * The SCTP reference implementation is distributed in the hope that it
19 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
20 *                 ************************
21 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
22 * See the GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with GNU CC; see the file COPYING.  If not, write to
26 * the Free Software Foundation, 59 Temple Place - Suite 330,
27 * Boston, MA 02111-1307, USA.
28 *
29 * Please send any bug reports or fixes you make to the
30 * email address(es):
31 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
32 *
33 * Or submit a bug report through the following website:
34 *    http://www.sf.net/projects/lksctp
35 *
36 * Written or modified by:
37 *    La Monte H.P. Yarroll <piggy@acm.org>
38 *    Karl Knutson          <karl@athena.chicago.il.us>
39 *    Jon Grimm             <jgrimm@austin.ibm.com>
40 *    Hui Huang		    <hui.huang@nokia.com>
41 *    Dajiang Zhang	    <dajiang.zhang@nokia.com>
42 *    Daisy Chang	    <daisyc@us.ibm.com>
43 *    Sridhar Samudrala	    <sri@us.ibm.com>
44 *    Ardelle Fan	    <ardelle.fan@intel.com>
45 *
 * We will try to fix any bugs reported to us; any fixes shared will
 * be incorporated into the next SCTP release.
48 */
49
50#include <linux/skbuff.h>
51#include <linux/types.h>
52#include <linux/socket.h>
53#include <linux/ip.h>
54#include <net/sock.h>
55#include <net/sctp/sctp.h>
56#include <net/sctp/sm.h>
57
58static int sctp_cmd_interpreter(sctp_event_t event_type,
59				sctp_subtype_t subtype,
60				sctp_state_t state,
61				struct sctp_endpoint *ep,
62				struct sctp_association *asoc,
63				void *event_arg,
64				sctp_disposition_t status,
65				sctp_cmd_seq_t *commands,
66				gfp_t gfp);
67static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
68			     sctp_state_t state,
69			     struct sctp_endpoint *ep,
70			     struct sctp_association *asoc,
71			     void *event_arg,
72			     sctp_disposition_t status,
73			     sctp_cmd_seq_t *commands,
74			     gfp_t gfp);
75
76/********************************************************************
77 * Helper functions
78 ********************************************************************/
79
80/* A helper function for delayed processing of INET ECN CE bit. */
81static void sctp_do_ecn_ce_work(struct sctp_association *asoc,
82				__u32 lowest_tsn)
83{
84	/* Save the TSN away for comparison when we receive CWR */
85
86	asoc->last_ecne_tsn = lowest_tsn;
87	asoc->need_ecne = 1;
88}
89
90/* Helper function for delayed processing of SCTP ECNE chunk.  */
91/* RFC 2960 Appendix A
92 *
93 * RFC 2481 details a specific bit for a sender to send in
94 * the header of its next outbound TCP segment to indicate to
95 * its peer that it has reduced its congestion window.  This
96 * is termed the CWR bit.  For SCTP the same indication is made
97 * by including the CWR chunk.  This chunk contains one data
98 * element, i.e. the TSN number that was sent in the ECNE chunk.
99 * This element represents the lowest TSN number in the datagram
100 * that was originally marked with the CE bit.
101 */
102static struct sctp_chunk *sctp_do_ecn_ecne_work(struct sctp_association *asoc,
103					   __u32 lowest_tsn,
104					   struct sctp_chunk *chunk)
105{
106	struct sctp_chunk *repl;
107
108	/* Our previously transmitted packet ran into some congestion
109	 * so we should take action by reducing cwnd and ssthresh
	 * and then ACK our peer that we've done so by
111	 * sending a CWR.
112	 */
113
114	/* First, try to determine if we want to actually lower
115	 * our cwnd variables.  Only lower them if the ECNE looks more
116	 * recent than the last response.
117	 */
118	if (TSN_lt(asoc->last_cwr_tsn, lowest_tsn)) {
119		struct sctp_transport *transport;
120
121		/* Find which transport's congestion variables
122		 * need to be adjusted.
123		 */
124		transport = sctp_assoc_lookup_tsn(asoc, lowest_tsn);
125
126		/* Update the congestion variables. */
127		if (transport)
128			sctp_transport_lower_cwnd(transport,
129						  SCTP_LOWER_CWND_ECNE);
130		asoc->last_cwr_tsn = lowest_tsn;
131	}
132
133	/* Always try to quiet the other end.  In case of lost CWR,
134	 * resend last_cwr_tsn.
135	 */
136	repl = sctp_make_cwr(asoc, asoc->last_cwr_tsn, chunk);
137
138	/* If we run out of memory, it will look like a lost CWR.  We'll
139	 * get back in sync eventually.
140	 */
141	return repl;
142}
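
/* The TSN_lt() comparison above is serial-number arithmetic, so it stays
 * correct across the 32-bit TSN wrap.  A small worked example, assuming the
 * signed-difference TSN_lt() macro from <net/sctp/sm.h>:
 *
 *	TSN_lt(5, 9)		is true
 *	TSN_lt(9, 5)		is false
 *	TSN_lt(0xffffffff, 1)	is true  (1 is "newer" across the wrap)
 *
 * so an ECNE carrying a post-wrap TSN still looks more recent than a
 * last_cwr_tsn recorded just before the wrap, and the cwnd is lowered.
 */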
143
144/* Helper function to do delayed processing of ECN CWR chunk.  */
145static void sctp_do_ecn_cwr_work(struct sctp_association *asoc,
146				 __u32 lowest_tsn)
147{
148	/* Turn off ECNE getting auto-prepended to every outgoing
149	 * packet
150	 */
151	asoc->need_ecne = 0;
152}
153
154/* Generate SACK if necessary.  We call this at the end of a packet.  */
155static int sctp_gen_sack(struct sctp_association *asoc, int force,
156			 sctp_cmd_seq_t *commands)
157{
158	__u32 ctsn, max_tsn_seen;
159	struct sctp_chunk *sack;
160	struct sctp_transport *trans = asoc->peer.last_data_from;
161	int error = 0;
162
163	if (force ||
164	    (!trans && (asoc->param_flags & SPP_SACKDELAY_DISABLE)) ||
165	    (trans && (trans->param_flags & SPP_SACKDELAY_DISABLE)))
166		asoc->peer.sack_needed = 1;
167
168	ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
169	max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);
170
171	/* From 12.2 Parameters necessary per association (i.e. the TCB):
172	 *
173	 * Ack State : This flag indicates if the next received packet
174	 * 	     : is to be responded to with a SACK. ...
175	 *	     : When DATA chunks are out of order, SACK's
176	 *           : are not delayed (see Section 6).
177	 *
178	 * [This is actually not mentioned in Section 6, but we
179	 * implement it here anyway. --piggy]
180	 */
181	if (max_tsn_seen != ctsn)
182		asoc->peer.sack_needed = 1;
183
184	/* From 6.2  Acknowledgement on Reception of DATA Chunks:
185	 *
186	 * Section 4.2 of [RFC2581] SHOULD be followed. Specifically,
187	 * an acknowledgement SHOULD be generated for at least every
188	 * second packet (not every second DATA chunk) received, and
189	 * SHOULD be generated within 200 ms of the arrival of any
190	 * unacknowledged DATA chunk. ...
191	 */
192	if (!asoc->peer.sack_needed) {
193		/* We will need a SACK for the next packet.  */
194		asoc->peer.sack_needed = 1;
195
196		/* Set the SACK delay timeout based on the
		 * SACK delay for the last transport that
		 * data was received from, or the default
199		 * for the association.
200		 */
201		if (trans)
202			asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
203				trans->sackdelay;
204		else
205			asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
206				asoc->sackdelay;
207
208		/* Restart the SACK timer. */
209		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
210				SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
211	} else {
212		if (asoc->a_rwnd > asoc->rwnd)
213			asoc->a_rwnd = asoc->rwnd;
214		sack = sctp_make_sack(asoc);
215		if (!sack)
216			goto nomem;
217
218		asoc->peer.sack_needed = 0;
219
220		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(sack));
221
222		/* Stop the SACK timer.  */
223		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
224				SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
225	}
226
227	return error;
228nomem:
229	error = -ENOMEM;
230	return error;
231}
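
/* Callers never invoke sctp_gen_sack() directly; a state function queues a
 * GEN_SACK command and the interpreter below lands here at end-of-packet.
 * A minimal sketch of the producer side, assuming the SCTP_FORCE() and
 * SCTP_NOFORCE() argument wrappers from <net/sctp/command.h>:
 *
 *	sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_NOFORCE());
 *	sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
 *
 * With NOFORCE the delayed-SACK rules above decide whether to SACK now or
 * arm the SACK timer; with FORCE a SACK is emitted for this packet.
 */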
232
233/* When the T3-RTX timer expires, it calls this function to create the
234 * relevant state machine event.
235 */
236void sctp_generate_t3_rtx_event(unsigned long peer)
237{
238	int error;
239	struct sctp_transport *transport = (struct sctp_transport *) peer;
240	struct sctp_association *asoc = transport->asoc;
241
	/* Check whether a user task currently has the socket locked.  */
243
244	sctp_bh_lock_sock(asoc->base.sk);
245	if (sock_owned_by_user(asoc->base.sk)) {
246		SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __FUNCTION__);
247
248		/* Try again later.  */
249		if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20)))
250			sctp_transport_hold(transport);
251		goto out_unlock;
252	}
253
254	/* Is this transport really dead and just waiting around for
255	 * the timer to let go of the reference?
256	 */
257	if (transport->dead)
258		goto out_unlock;
259
260	/* Run through the state machine.  */
261	error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT,
262			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX),
263			   asoc->state,
264			   asoc->ep, asoc,
265			   transport, GFP_ATOMIC);
266
267	if (error)
268		asoc->base.sk->sk_err = -error;
269
270out_unlock:
271	sctp_bh_unlock_sock(asoc->base.sk);
272	sctp_transport_put(transport);
273}
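
/* The hold/put pairing around these timers follows one rule used throughout
 * this file: whoever arms a timer that was not already pending takes a
 * reference, and whoever makes a pending timer go away drops it.  A
 * condensed sketch of the pattern:
 *
 *	if (!mod_timer(&t->T3_rtx_timer, jiffies + timeout))
 *		sctp_transport_hold(t);		(armed an idle timer)
 *	...
 *	if (del_timer(&t->T3_rtx_timer))
 *		sctp_transport_put(t);		(cancelled while pending)
 *
 * The expiry handler itself, as above, always ends in sctp_transport_put()
 * to drop the reference taken at the arming site.
 */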
274
/* This is a generic interface for producing timeout events.  It works
276 * for timeouts which use the association as their parameter.
277 */
278static void sctp_generate_timeout_event(struct sctp_association *asoc,
279					sctp_event_timeout_t timeout_type)
280{
281	int error = 0;
282
283	sctp_bh_lock_sock(asoc->base.sk);
284	if (sock_owned_by_user(asoc->base.sk)) {
285		SCTP_DEBUG_PRINTK("%s:Sock is busy: timer %d\n",
286				  __FUNCTION__,
287				  timeout_type);
288
289		/* Try again later.  */
290		if (!mod_timer(&asoc->timers[timeout_type], jiffies + (HZ/20)))
291			sctp_association_hold(asoc);
292		goto out_unlock;
293	}
294
295	/* Is this association really dead and just waiting around for
296	 * the timer to let go of the reference?
297	 */
298	if (asoc->base.dead)
299		goto out_unlock;
300
301	/* Run through the state machine.  */
302	error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT,
303			   SCTP_ST_TIMEOUT(timeout_type),
304			   asoc->state, asoc->ep, asoc,
305			   (void *)timeout_type, GFP_ATOMIC);
306
307	if (error)
308		asoc->base.sk->sk_err = -error;
309
310out_unlock:
311	sctp_bh_unlock_sock(asoc->base.sk);
312	sctp_association_put(asoc);
313}
314
315static void sctp_generate_t1_cookie_event(unsigned long data)
316{
317	struct sctp_association *asoc = (struct sctp_association *) data;
318	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_COOKIE);
319}
320
321static void sctp_generate_t1_init_event(unsigned long data)
322{
323	struct sctp_association *asoc = (struct sctp_association *) data;
324	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_INIT);
325}
326
327static void sctp_generate_t2_shutdown_event(unsigned long data)
328{
329	struct sctp_association *asoc = (struct sctp_association *) data;
330	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T2_SHUTDOWN);
331}
332
333static void sctp_generate_t4_rto_event(unsigned long data)
334{
335	struct sctp_association *asoc = (struct sctp_association *) data;
336	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T4_RTO);
337}
338
339static void sctp_generate_t5_shutdown_guard_event(unsigned long data)
340{
341	struct sctp_association *asoc = (struct sctp_association *)data;
342	sctp_generate_timeout_event(asoc,
343				    SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD);
344
345} /* sctp_generate_t5_shutdown_guard_event() */
346
347static void sctp_generate_autoclose_event(unsigned long data)
348{
349	struct sctp_association *asoc = (struct sctp_association *) data;
350	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_AUTOCLOSE);
351}
352
/* Generate a heartbeat event.  If the sock is busy, reschedule.  Make
354 * sure that the transport is still valid.
355 */
356void sctp_generate_heartbeat_event(unsigned long data)
357{
358	int error = 0;
359	struct sctp_transport *transport = (struct sctp_transport *) data;
360	struct sctp_association *asoc = transport->asoc;
361
362	sctp_bh_lock_sock(asoc->base.sk);
363	if (sock_owned_by_user(asoc->base.sk)) {
364		SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __FUNCTION__);
365
366		/* Try again later.  */
367		if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20)))
368			sctp_transport_hold(transport);
369		goto out_unlock;
370	}
371
	/* Is this transport really dead and just waiting around for
	 * the timer to let go of the reference?
	 */
375	if (transport->dead)
376		goto out_unlock;
377
378	error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT,
379			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
380			   asoc->state, asoc->ep, asoc,
381			   transport, GFP_ATOMIC);
382
	if (error)
		asoc->base.sk->sk_err = -error;
385
386out_unlock:
387	sctp_bh_unlock_sock(asoc->base.sk);
388	sctp_transport_put(transport);
389}
390
391/* Inject a SACK Timeout event into the state machine.  */
392static void sctp_generate_sack_event(unsigned long data)
393{
394	struct sctp_association *asoc = (struct sctp_association *) data;
395	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK);
396}
397
398sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
399	NULL,
400	sctp_generate_t1_cookie_event,
401	sctp_generate_t1_init_event,
402	sctp_generate_t2_shutdown_event,
403	NULL,
404	sctp_generate_t4_rto_event,
405	sctp_generate_t5_shutdown_guard_event,
406	NULL,
407	sctp_generate_sack_event,
408	sctp_generate_autoclose_event,
409};
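
/* Per-association timers are wired to this table when the association is
 * set up; roughly (a sketch of what sctp_association_init() is expected to
 * do in net/sctp/associola.c):
 *
 *	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
 *		init_timer(&asoc->timers[i]);
 *		asoc->timers[i].function = sctp_timer_events[i];
 *		asoc->timers[i].data = (unsigned long) asoc;
 *	}
 *
 * The NULL slots are SCTP_EVENT_TIMEOUT_NONE and the per-transport timers
 * (T3-RTX and HEARTBEAT), whose handlers above take a struct sctp_transport
 * rather than the association.
 */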
410
411
412/* RFC 2960 8.2 Path Failure Detection
413 *
 * When its peer endpoint is multi-homed, an endpoint should keep an
415 * error counter for each of the destination transport addresses of the
416 * peer endpoint.
417 *
418 * Each time the T3-rtx timer expires on any address, or when a
419 * HEARTBEAT sent to an idle address is not acknowledged within a RTO,
420 * the error counter of that destination address will be incremented.
421 * When the value in the error counter exceeds the protocol parameter
422 * 'Path.Max.Retrans' of that destination address, the endpoint should
423 * mark the destination transport address as inactive, and a
424 * notification SHOULD be sent to the upper layer.
425 *
426 */
427static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
428					 struct sctp_transport *transport)
429{
430	/* The check for association's overall error counter exceeding the
431	 * threshold is done in the state function.
432	 */
433	/* When probing UNCONFIRMED addresses, the association overall
	 * error count is NOT incremented.
435	 */
436	if (transport->state != SCTP_UNCONFIRMED)
437		asoc->overall_error_count++;
438
439	if (transport->state != SCTP_INACTIVE &&
440	    (transport->error_count++ >= transport->pathmaxrxt)) {
441		SCTP_DEBUG_PRINTK_IPADDR("transport_strike:association %p",
442					 " transport IP: port:%d failed.\n",
443					 asoc,
444					 (&transport->ipaddr),
445					 ntohs(transport->ipaddr.v4.sin_port));
446		sctp_assoc_control_transport(asoc, transport,
447					     SCTP_TRANSPORT_DOWN,
448					     SCTP_FAILED_THRESHOLD);
449	}
450
451	/* E2) For the destination address for which the timer
452	 * expires, set RTO <- RTO * 2 ("back off the timer").  The
453	 * maximum value discussed in rule C7 above (RTO.max) may be
454	 * used to provide an upper bound to this doubling operation.
455	 */
456	transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
457}
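
/* A worked example of rule E2, assuming the RFC 2960 defaults of
 * RTO.Initial = 3 seconds and RTO.Max = 60 seconds (jiffies units elided):
 *
 *	1st expiry: rto = min(3 * 2, 60)  = 6
 *	2nd expiry: rto = min(6 * 2, 60)  = 12
 *	3rd expiry: rto = min(12 * 2, 60) = 24
 *	4th expiry: rto = min(24 * 2, 60) = 48
 *	5th expiry: rto = min(48 * 2, 60) = 60, and it stays capped there.
 *
 * A later successful RTT measurement recomputes rto from SRTT/RTTVAR.
 */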
458
459/* Worker routine to handle INIT command failure.  */
460static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands,
461				 struct sctp_association *asoc,
462				 unsigned error)
463{
464	struct sctp_ulpevent *event;
465
	event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_CANT_STR_ASSOC,
467						(__u16)error, 0, 0, NULL,
468						GFP_ATOMIC);
469
470	if (event)
471		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
472				SCTP_ULPEVENT(event));
473
474	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
475			SCTP_STATE(SCTP_STATE_CLOSED));
476
477	/* SEND_FAILED sent later when cleaning up the association. */
478	asoc->outqueue.error = error;
479	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
480}
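
/* The SCTP_CANT_STR_ASSOC event queued above is what a subscribed
 * application sees when association setup fails.  A sketch of the
 * user-visible notification (field names per the SCTP sockets API):
 *
 *	struct sctp_assoc_change *sac;
 *	...
 *	sac->sac_state == SCTP_CANT_STR_ASSOC
 *	sac->sac_error == the protocol error recorded in outqueue.error
 */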
481
482/* Worker routine to handle SCTP_CMD_ASSOC_FAILED.  */
483static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands,
484				  struct sctp_association *asoc,
485				  sctp_event_t event_type,
486				  sctp_subtype_t subtype,
487				  struct sctp_chunk *chunk,
488				  unsigned error)
489{
490	struct sctp_ulpevent *event;
491
492	/* Cancel any partial delivery in progress. */
493	sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
494
495	if (event_type == SCTP_EVENT_T_CHUNK && subtype.chunk == SCTP_CID_ABORT)
496		event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
497						(__u16)error, 0, 0, chunk,
498						GFP_ATOMIC);
499	else
500		event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
501						(__u16)error, 0, 0, NULL,
502						GFP_ATOMIC);
503	if (event)
504		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
505				SCTP_ULPEVENT(event));
506
507	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
508			SCTP_STATE(SCTP_STATE_CLOSED));
509
510	/* SEND_FAILED sent later when cleaning up the association. */
511	asoc->outqueue.error = error;
512	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
513}
514
/* Process an init chunk (may be a real INIT/INIT-ACK or an embedded INIT
 * inside the cookie).  In reality, this is only used for INIT-ACK processing
517 * since all other cases use "temporary" associations and can do all
518 * their work in statefuns directly.
519 */
520static int sctp_cmd_process_init(sctp_cmd_seq_t *commands,
521				 struct sctp_association *asoc,
522				 struct sctp_chunk *chunk,
523				 sctp_init_chunk_t *peer_init,
524				 gfp_t gfp)
525{
526	int error;
527
	/* We only process the init as a side effect in a single
529	 * case.   This is when we process the INIT-ACK.   If we
530	 * fail during INIT processing (due to malloc problems),
531	 * just return the error and stop processing the stack.
532	 */
533	if (!sctp_process_init(asoc, chunk->chunk_hdr->type,
534			       sctp_source(chunk), peer_init, gfp))
535		error = -ENOMEM;
536	else
537		error = 0;
538
539	return error;
540}
541
542/* Helper function to break out starting up of heartbeat timers.  */
543static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds,
544				     struct sctp_association *asoc)
545{
546	struct sctp_transport *t;
547	struct list_head *pos;
548
549	/* Start a heartbeat timer for each transport on the association.
	 * Hold a reference on the transport to make sure none of
551	 * the needed data structures go away.
552	 */
553	list_for_each(pos, &asoc->peer.transport_addr_list) {
554		t = list_entry(pos, struct sctp_transport, transports);
555
556		if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
557			sctp_transport_hold(t);
558	}
559}
560
561static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds,
562				    struct sctp_association *asoc)
563{
564	struct sctp_transport *t;
565	struct list_head *pos;
566
567	/* Stop all heartbeat timers. */
568
569	list_for_each(pos, &asoc->peer.transport_addr_list) {
570		t = list_entry(pos, struct sctp_transport, transports);
571		if (del_timer(&t->hb_timer))
572			sctp_transport_put(t);
573	}
574}
575
576/* Helper function to stop any pending T3-RTX timers */
577static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds,
578					struct sctp_association *asoc)
579{
580	struct sctp_transport *t;
581	struct list_head *pos;
582
583	list_for_each(pos, &asoc->peer.transport_addr_list) {
584		t = list_entry(pos, struct sctp_transport, transports);
585		if (timer_pending(&t->T3_rtx_timer) &&
586		    del_timer(&t->T3_rtx_timer)) {
587			sctp_transport_put(t);
588		}
589	}
590}
591
592
593/* Helper function to update the heartbeat timer. */
594static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
595				     struct sctp_association *asoc,
596				     struct sctp_transport *t)
597{
598	/* Update the heartbeat timer.  */
599	if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
600		sctp_transport_hold(t);
601}
602
/* Helper function to handle the reception of a HEARTBEAT ACK.  */
604static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
605				  struct sctp_association *asoc,
606				  struct sctp_transport *t,
607				  struct sctp_chunk *chunk)
608{
609	sctp_sender_hb_info_t *hbinfo;
610
611	/* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
612	 * HEARTBEAT should clear the error counter of the destination
613	 * transport address to which the HEARTBEAT was sent.
614	 * The association's overall error count is also cleared.
615	 */
616	t->error_count = 0;
617	t->asoc->overall_error_count = 0;
618
619	/* Mark the destination transport address as active if it is not so
620	 * marked.
621	 */
622	if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED))
623		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
624					     SCTP_HEARTBEAT_SUCCESS);
625
626	/* The receiver of the HEARTBEAT ACK should also perform an
627	 * RTT measurement for that destination transport address
628	 * using the time value carried in the HEARTBEAT ACK chunk.
629	 * If the transport's rto_pending variable has been cleared,
630	 * it was most likely due to a retransmit.  However, we want
631	 * to re-enable it to properly update the rto.
632	 */
633	if (t->rto_pending == 0)
634		t->rto_pending = 1;
635
636	hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;
637	sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));
638
639	/* Update the heartbeat timer.  */
640	if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
641		sctp_transport_hold(t);
642}
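
/* sctp_transport_update_rto() is expected to fold the sample taken above
 * into the transport's RTO using the RFC 2960 6.3.1 smoothing (a sketch,
 * with the recommended constants ALPHA = 1/8 and BETA = 1/4):
 *
 *	RTTVAR = (1 - BETA)  * RTTVAR + BETA  * |SRTT - RTT|
 *	SRTT   = (1 - ALPHA) * SRTT   + ALPHA * RTT
 *	RTO    = SRTT + 4 * RTTVAR, clamped to [rto_min, rto_max]
 *
 * Here RTT is simply (jiffies - hbinfo->sent_at), since the HEARTBEAT
 * carried the send timestamp and the ACK echoed it back.
 */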
643
/* Helper function to do a transport reset at the expiry of the heartbeat
645 * timer.
646 */
647static void sctp_cmd_transport_reset(sctp_cmd_seq_t *cmds,
648				     struct sctp_association *asoc,
649				     struct sctp_transport *t)
650{
651	sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE);
652
653	/* Mark one strike against a transport.  */
654	sctp_do_8_2_transport_strike(asoc, t);
655}
656
/* Helper function to handle the SCTP_CMD_PROCESS_SACK command.  */
658static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
659				 struct sctp_association *asoc,
660				 struct sctp_sackhdr *sackh)
661{
662	int err;
663
664	if (sctp_outq_sack(&asoc->outqueue, sackh)) {
665		/* There are no more TSNs awaiting SACK.  */
666		err = sctp_do_sm(SCTP_EVENT_T_OTHER,
667				 SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
668				 asoc->state, asoc->ep, asoc, NULL,
669				 GFP_ATOMIC);
670	} else {
		/* The window may have opened, so we need
		 * to check whether we have DATA to transmit.
		 */
674		err = sctp_outq_flush(&asoc->outqueue, 0);
675	}
676
677	return err;
678}
679
680/* Helper function to set the timeout value for T2-SHUTDOWN timer and to set
681 * the transport for a shutdown chunk.
682 */
683static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds,
684			      struct sctp_association *asoc,
685			      struct sctp_chunk *chunk)
686{
687	struct sctp_transport *t;
688
689	t = sctp_assoc_choose_shutdown_transport(asoc);
690	asoc->shutdown_last_sent_to = t;
691	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto;
692	chunk->transport = t;
693}
694
695/* Helper function to change the state of an association. */
696static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds,
697			       struct sctp_association *asoc,
698			       sctp_state_t state)
699{
700	struct sock *sk = asoc->base.sk;
701
702	asoc->state = state;
703
704	SCTP_DEBUG_PRINTK("sctp_cmd_new_state: asoc %p[%s]\n",
705			  asoc, sctp_state_tbl[state]);
706
707	if (sctp_style(sk, TCP)) {
708		/* Change the sk->sk_state of a TCP-style socket that has
		 * successfully completed a connect() call.
710		 */
711		if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED))
712			sk->sk_state = SCTP_SS_ESTABLISHED;
713
714		/* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */
715		if (sctp_state(asoc, SHUTDOWN_RECEIVED) &&
716		    sctp_sstate(sk, ESTABLISHED))
717			sk->sk_shutdown |= RCV_SHUTDOWN;
718	}
719
720	if (sctp_state(asoc, COOKIE_WAIT)) {
721		/* Reset init timeouts since they may have been
722		 * increased due to timer expirations.
723		 */
724		asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] =
725						asoc->rto_initial;
726		asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] =
727						asoc->rto_initial;
728	}
729
730	if (sctp_state(asoc, ESTABLISHED) ||
731	    sctp_state(asoc, CLOSED) ||
732	    sctp_state(asoc, SHUTDOWN_RECEIVED)) {
733		/* Wake up any processes waiting in the asoc's wait queue in
734		 * sctp_wait_for_connect() or sctp_wait_for_sndbuf().
735		 */
736		if (waitqueue_active(&asoc->wait))
737			wake_up_interruptible(&asoc->wait);
738
739		/* Wake up any processes waiting in the sk's sleep queue of
740		 * a TCP-style or UDP-style peeled-off socket in
741		 * sctp_wait_for_accept() or sctp_wait_for_packet().
742		 * For a UDP-style socket, the waiters are woken up by the
743		 * notifications.
744		 */
745		if (!sctp_style(sk, UDP))
746			sk->sk_state_change(sk);
747	}
748}
749
750/* Helper function to delete an association. */
751static void sctp_cmd_delete_tcb(sctp_cmd_seq_t *cmds,
752				struct sctp_association *asoc)
753{
754	struct sock *sk = asoc->base.sk;
755
756	/* If it is a non-temporary association belonging to a TCP-style
757	 * listening socket that is not closed, do not free it so that accept()
758	 * can pick it up later.
759	 */
760	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING) &&
761	    (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK))
762		return;
763
764	sctp_unhash_established(asoc);
765	sctp_association_free(asoc);
766}
767
768/*
769 * ADDIP Section 4.1 ASCONF Chunk Procedures
770 * A4) Start a T-4 RTO timer, using the RTO value of the selected
 * destination address (we use the active path instead of the primary path
 * just because the primary path may be inactive).
773 */
774static void sctp_cmd_setup_t4(sctp_cmd_seq_t *cmds,
775				struct sctp_association *asoc,
776				struct sctp_chunk *chunk)
777{
778	struct sctp_transport *t;
779
780	t = asoc->peer.active_path;
781	asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = t->rto;
782	chunk->transport = t;
783}
784
785/* Process an incoming Operation Error Chunk. */
786static void sctp_cmd_process_operr(sctp_cmd_seq_t *cmds,
787				   struct sctp_association *asoc,
788				   struct sctp_chunk *chunk)
789{
790	struct sctp_operr_chunk *operr_chunk;
791	struct sctp_errhdr *err_hdr;
792
793	operr_chunk = (struct sctp_operr_chunk *)chunk->chunk_hdr;
794	err_hdr = &operr_chunk->err_hdr;
795
796	switch (err_hdr->cause) {
797	case SCTP_ERROR_UNKNOWN_CHUNK:
798	{
799		struct sctp_chunkhdr *unk_chunk_hdr;
800
801		unk_chunk_hdr = (struct sctp_chunkhdr *)err_hdr->variable;
802		switch (unk_chunk_hdr->type) {
803		/* ADDIP 4.1 A9) If the peer responds to an ASCONF with an
		 * ERROR chunk reporting that it did not recognize the ASCONF
805		 * chunk type, the sender of the ASCONF MUST NOT send any
806		 * further ASCONF chunks and MUST stop its T-4 timer.
807		 */
808		case SCTP_CID_ASCONF:
809			asoc->peer.asconf_capable = 0;
810			sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP,
811					SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
812			break;
813		default:
814			break;
815		}
816		break;
817	}
818	default:
819		break;
820	}
821}
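
/* For the unknown-chunk case handled above, the cause TLV being walked has
 * this layout (a sketch of RFC 2960 3.3.10.6, "Unrecognized Chunk Type"):
 *
 *	err_hdr->cause     = SCTP_ERROR_UNKNOWN_CHUNK (cause code 6)
 *	err_hdr->length    = length of the whole cause TLV
 *	err_hdr->variable  = the offending chunk's header, which is why it
 *			     can be cast to struct sctp_chunkhdr to recover
 *			     the chunk type (e.g. SCTP_CID_ASCONF).
 */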
822
823/* Process variable FWDTSN chunk information. */
824static void sctp_cmd_process_fwdtsn(struct sctp_ulpq *ulpq,
825				    struct sctp_chunk *chunk)
826{
827	struct sctp_fwdtsn_skip *skip;
828	/* Walk through all the skipped SSNs */
829	sctp_walk_fwdtsn(skip, chunk) {
830		sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
831	}
832
833	return;
834}
835
/* Helper function to remove the association's non-primary peer
837 * transports.
838 */
839static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
840{
841	struct sctp_transport *t;
842	struct list_head *pos;
843	struct list_head *temp;
844
845	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
846		t = list_entry(pos, struct sctp_transport, transports);
847		if (!sctp_cmp_addr_exact(&t->ipaddr,
848					 &asoc->peer.primary_addr)) {
849			sctp_assoc_del_peer(asoc, &t->ipaddr);
850		}
851	}
852
853	return;
854}
855
856/* Helper function to set sk_err on a 1-1 style socket. */
857static void sctp_cmd_set_sk_err(struct sctp_association *asoc, int error)
858{
859	struct sock *sk = asoc->base.sk;
860
861	if (!sctp_style(sk, UDP))
862		sk->sk_err = error;
863}
864
865/* Helper function to generate an association change event */
866static void sctp_cmd_assoc_change(sctp_cmd_seq_t *commands,
867				 struct sctp_association *asoc,
868				 u8 state)
869{
870	struct sctp_ulpevent *ev;
871
872	ev = sctp_ulpevent_make_assoc_change(asoc, 0, state, 0,
873					    asoc->c.sinit_num_ostreams,
874					    asoc->c.sinit_max_instreams,
875					    NULL, GFP_ATOMIC);
876	if (ev)
877		sctp_ulpq_tail_event(&asoc->ulpq, ev);
878}
879
880/* Helper function to generate an adaptation indication event */
881static void sctp_cmd_adaptation_ind(sctp_cmd_seq_t *commands,
882				    struct sctp_association *asoc)
883{
884	struct sctp_ulpevent *ev;
885
886	ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);
887
888	if (ev)
889		sctp_ulpq_tail_event(&asoc->ulpq, ev);
890}
891
892/* These three macros allow us to pull the debugging code out of the
893 * main flow of sctp_do_sm() to keep attention focused on the real
894 * functionality there.
895 */
896#define DEBUG_PRE \
897	SCTP_DEBUG_PRINTK("sctp_do_sm prefn: " \
898			  "ep %p, %s, %s, asoc %p[%s], %s\n", \
899			  ep, sctp_evttype_tbl[event_type], \
900			  (*debug_fn)(subtype), asoc, \
901			  sctp_state_tbl[state], state_fn->name)
902
903#define DEBUG_POST \
904	SCTP_DEBUG_PRINTK("sctp_do_sm postfn: " \
905			  "asoc %p, status: %s\n", \
906			  asoc, sctp_status_tbl[status])
907
908#define DEBUG_POST_SFX \
909	SCTP_DEBUG_PRINTK("sctp_do_sm post sfx: error %d, asoc %p[%s]\n", \
910			  error, asoc, \
911			  sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \
912			  sctp_assoc2id(asoc)))?asoc->state:SCTP_STATE_CLOSED])
913
914/*
915 * This is the master state machine processing function.
916 *
917 * If you want to understand all of lksctp, this is a
918 * good place to start.
919 */
920int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype,
921	       sctp_state_t state,
922	       struct sctp_endpoint *ep,
923	       struct sctp_association *asoc,
924	       void *event_arg,
925	       gfp_t gfp)
926{
927	sctp_cmd_seq_t commands;
928	const sctp_sm_table_entry_t *state_fn;
929	sctp_disposition_t status;
930	int error = 0;
931	typedef const char *(printfn_t)(sctp_subtype_t);
932
933	static printfn_t *table[] = {
934		NULL, sctp_cname, sctp_tname, sctp_oname, sctp_pname,
935	};
936	printfn_t *debug_fn  __attribute__ ((unused)) = table[event_type];
937
938	/* Look up the state function, run it, and then process the
939	 * side effects.  These three steps are the heart of lksctp.
940	 */
941	state_fn = sctp_sm_lookup_event(event_type, state, subtype);
942
943	sctp_init_cmd_seq(&commands);
944
945	DEBUG_PRE;
946	status = (*state_fn->fn)(ep, asoc, subtype, event_arg, &commands);
947	DEBUG_POST;
948
949	error = sctp_side_effects(event_type, subtype, state,
950				  ep, asoc, event_arg, status,
951				  &commands, gfp);
952	DEBUG_POST_SFX;
953
954	return error;
955}
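
/* For orientation, a sketch of how the receive path is expected to enter
 * the state machine for an inbound chunk (cf. the timer handlers above,
 * which do the same for timeout events):
 *
 *	sctp_subtype_t subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
 *
 *	error = sctp_do_sm(SCTP_EVENT_T_CHUNK, subtype, asoc->state,
 *			   asoc->ep, asoc, chunk, GFP_ATOMIC);
 *
 * The looked-up state function runs first, queueing commands; the side
 * effects below then execute them against the association.
 */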
956
957#undef DEBUG_PRE
958#undef DEBUG_POST
959
960/*****************************************************************
 * This is the master state function side effect processing function.
962 *****************************************************************/
963static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
964			     sctp_state_t state,
965			     struct sctp_endpoint *ep,
966			     struct sctp_association *asoc,
967			     void *event_arg,
968			     sctp_disposition_t status,
969			     sctp_cmd_seq_t *commands,
970			     gfp_t gfp)
971{
972	int error;
973
974	if (0 != (error = sctp_cmd_interpreter(event_type, subtype, state,
975					       ep, asoc,
976					       event_arg, status,
977					       commands, gfp)))
978		goto bail;
979
980	switch (status) {
981	case SCTP_DISPOSITION_DISCARD:
982		SCTP_DEBUG_PRINTK("Ignored sctp protocol event - state %d, "
983				  "event_type %d, event_id %d\n",
984				  state, event_type, subtype.chunk);
985		break;
986
987	case SCTP_DISPOSITION_NOMEM:
988		/* We ran out of memory, so we need to discard this
989		 * packet.
990		 */
991		/* BUG--we should now recover some memory, probably by
992		 * reneging...
993		 */
994		error = -ENOMEM;
995		break;
996
997	case SCTP_DISPOSITION_DELETE_TCB:
998		/* This should now be a command. */
999		break;
1000
1001	case SCTP_DISPOSITION_CONSUME:
1002	case SCTP_DISPOSITION_ABORT:
1003		/*
1004		 * We should no longer have much work to do here as the
1005		 * real work has been done as explicit commands above.
1006		 */
1007		break;
1008
1009	case SCTP_DISPOSITION_VIOLATION:
1010		printk(KERN_ERR "sctp protocol violation state %d "
1011		       "chunkid %d\n", state, subtype.chunk);
1012		break;
1013
1014	case SCTP_DISPOSITION_NOT_IMPL:
1015		printk(KERN_WARNING "sctp unimplemented feature in state %d, "
1016		       "event_type %d, event_id %d\n",
1017		       state, event_type, subtype.chunk);
1018		break;
1019
1020	case SCTP_DISPOSITION_BUG:
1021		printk(KERN_ERR "sctp bug in state %d, "
1022		       "event_type %d, event_id %d\n",
1023		       state, event_type, subtype.chunk);
1024		BUG();
1025		break;
1026
1027	default:
1028		printk(KERN_ERR "sctp impossible disposition %d "
1029		       "in state %d, event_type %d, event_id %d\n",
1030		       status, state, event_type, subtype.chunk);
1031		BUG();
1032		break;
1033	}
1034
1035bail:
1036	return error;
1037}
1038
1039/********************************************************************
1040 * 2nd Level Abstractions
1041 ********************************************************************/
1042
1043/* This is the side-effect interpreter.  */
1044static int sctp_cmd_interpreter(sctp_event_t event_type,
1045				sctp_subtype_t subtype,
1046				sctp_state_t state,
1047				struct sctp_endpoint *ep,
1048				struct sctp_association *asoc,
1049				void *event_arg,
1050				sctp_disposition_t status,
1051				sctp_cmd_seq_t *commands,
1052				gfp_t gfp)
1053{
1054	int error = 0;
1055	int force;
1056	sctp_cmd_t *cmd;
1057	struct sctp_chunk *new_obj;
1058	struct sctp_chunk *chunk = NULL;
1059	struct sctp_packet *packet;
1060	struct list_head *pos;
1061	struct timer_list *timer;
1062	unsigned long timeout;
1063	struct sctp_transport *t;
1064	struct sctp_sackhdr sackh;
1065	int local_cork = 0;
1066
1067	if (SCTP_EVENT_T_TIMEOUT != event_type)
1068		chunk = (struct sctp_chunk *) event_arg;
1069
1070	/* Note:  This whole file is a huge candidate for rework.
1071	 * For example, each command could either have its own handler, so
1072	 * the loop would look like:
1073	 *     while (cmds)
1074	 *         cmd->handle(x, y, z)
1075	 * --jgrimm
1076	 */
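	/* A sketch of that rework, with a hypothetical handler type and
	 * handler table (neither exists in this file):
	 *
	 *	typedef int (*sctp_cmd_handler_t)(struct sctp_association *,
	 *					  sctp_cmd_t *, gfp_t);
	 *	static sctp_cmd_handler_t handlers[SCTP_CMD_LAST + 1];
	 *
	 *	while (NULL != (cmd = sctp_next_cmd(commands)))
	 *		error = handlers[cmd->verb](asoc, cmd, gfp);
	 */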
1077	while (NULL != (cmd = sctp_next_cmd(commands))) {
1078		switch (cmd->verb) {
1079		case SCTP_CMD_NOP:
1080			/* Do nothing. */
1081			break;
1082
1083		case SCTP_CMD_NEW_ASOC:
1084			/* Register a new association.  */
1085			if (local_cork) {
1086				sctp_outq_uncork(&asoc->outqueue);
1087				local_cork = 0;
1088			}
1089			asoc = cmd->obj.ptr;
1090			/* Register with the endpoint.  */
1091			sctp_endpoint_add_asoc(ep, asoc);
1092			sctp_hash_established(asoc);
1093			break;
1094
1095		case SCTP_CMD_UPDATE_ASSOC:
			sctp_assoc_update(asoc, cmd->obj.ptr);
			break;

		case SCTP_CMD_PURGE_OUTQUEUE:
			sctp_outq_teardown(&asoc->outqueue);
			break;
1102
1103		case SCTP_CMD_DELETE_TCB:
1104			if (local_cork) {
1105				sctp_outq_uncork(&asoc->outqueue);
1106				local_cork = 0;
1107			}
1108			/* Delete the current association.  */
1109			sctp_cmd_delete_tcb(commands, asoc);
1110			asoc = NULL;
1111			break;
1112
1113		case SCTP_CMD_NEW_STATE:
1114			/* Enter a new state.  */
1115			sctp_cmd_new_state(commands, asoc, cmd->obj.state);
1116			break;
1117
1118		case SCTP_CMD_REPORT_TSN:
1119			/* Record the arrival of a TSN.  */
1120			sctp_tsnmap_mark(&asoc->peer.tsn_map, cmd->obj.u32);
1121			break;
1122
1123		case SCTP_CMD_REPORT_FWDTSN:
			/* Move the Cumulative TSN Ack ahead. */
1125			sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32);
1126
1127			/* Abort any in progress partial delivery. */
1128			sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
1129			break;
1130
1131		case SCTP_CMD_PROCESS_FWDTSN:
1132			sctp_cmd_process_fwdtsn(&asoc->ulpq, cmd->obj.ptr);
1133			break;
1134
1135		case SCTP_CMD_GEN_SACK:
1136			/* Generate a Selective ACK.
1137			 * The argument tells us whether to just count
1138			 * the packet and MAYBE generate a SACK, or
1139			 * force a SACK out.
1140			 */
1141			force = cmd->obj.i32;
1142			error = sctp_gen_sack(asoc, force, commands);
1143			break;
1144
1145		case SCTP_CMD_PROCESS_SACK:
1146			/* Process an inbound SACK.  */
1147			error = sctp_cmd_process_sack(commands, asoc,
1148						      cmd->obj.ptr);
1149			break;
1150
1151		case SCTP_CMD_GEN_INIT_ACK:
1152			/* Generate an INIT ACK chunk.  */
1153			new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
1154						     0);
1155			if (!new_obj)
1156				goto nomem;
1157
1158			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
1159					SCTP_CHUNK(new_obj));
1160			break;
1161
1162		case SCTP_CMD_PEER_INIT:
1163			/* Process a unified INIT from the peer.
1164			 * Note: Only used during INIT-ACK processing.  If
			 * there is an error, just return to the outer
1166			 * layer which will bail.
1167			 */
1168			error = sctp_cmd_process_init(commands, asoc, chunk,
1169						      cmd->obj.ptr, gfp);
1170			break;
1171
1172		case SCTP_CMD_GEN_COOKIE_ECHO:
1173			/* Generate a COOKIE ECHO chunk.  */
1174			new_obj = sctp_make_cookie_echo(asoc, chunk);
1175			if (!new_obj) {
1176				if (cmd->obj.ptr)
1177					sctp_chunk_free(cmd->obj.ptr);
1178				goto nomem;
1179			}
1180			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
1181					SCTP_CHUNK(new_obj));
1182
1183			/* If there is an ERROR chunk to be sent along with
1184			 * the COOKIE_ECHO, send it, too.
1185			 */
1186			if (cmd->obj.ptr)
1187				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
1188						SCTP_CHUNK(cmd->obj.ptr));
1189
1190			if ((asoc->peer.retran_path !=
1191			     asoc->peer.primary_path) &&
1192			    (asoc->init_err_counter > 0)) {
1193				sctp_add_cmd_sf(commands,
1194						SCTP_CMD_FORCE_PRIM_RETRAN,
1195						SCTP_NULL());
1196			}
1197
1198			break;
1199
1200		case SCTP_CMD_GEN_SHUTDOWN:
1201			/* Generate SHUTDOWN when in SHUTDOWN_SENT state.
1202			 * Reset error counts.
1203			 */
1204			asoc->overall_error_count = 0;
1205
1206			/* Generate a SHUTDOWN chunk.  */
1207			new_obj = sctp_make_shutdown(asoc, chunk);
1208			if (!new_obj)
1209				goto nomem;
1210			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
1211					SCTP_CHUNK(new_obj));
1212			break;
1213
1214		case SCTP_CMD_CHUNK_ULP:
1215			/* Send a chunk to the sockets layer.  */
1216			SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n",
1217					  "chunk_up:", cmd->obj.ptr,
1218					  "ulpq:", &asoc->ulpq);
1219			sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.ptr,
1220					    GFP_ATOMIC);
1221			break;
1222
1223		case SCTP_CMD_EVENT_ULP:
1224			/* Send a notification to the sockets layer.  */
1225			SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n",
1226					  "event_up:",cmd->obj.ptr,
1227					  "ulpq:",&asoc->ulpq);
1228			sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ptr);
1229			break;
1230
1231		case SCTP_CMD_REPLY:
			/* If the caller has not already corked, cork now. */
1233			if (!asoc->outqueue.cork) {
1234				sctp_outq_cork(&asoc->outqueue);
1235				local_cork = 1;
1236			}
1237			/* Send a chunk to our peer.  */
1238			error = sctp_outq_tail(&asoc->outqueue, cmd->obj.ptr);
1239			break;
1240
1241		case SCTP_CMD_SEND_PKT:
1242			/* Send a full packet to our peer.  */
1243			packet = cmd->obj.ptr;
1244			sctp_packet_transmit(packet);
1245			sctp_ootb_pkt_free(packet);
1246			break;
1247
1248		case SCTP_CMD_RETRAN:
1249			/* Mark a transport for retransmission.  */
1250			sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
1251					SCTP_RTXR_T3_RTX);
1252			break;
1253
1254		case SCTP_CMD_TRANSMIT:
1255			/* Kick start transmission. */
1256			error = sctp_outq_uncork(&asoc->outqueue);
1257			local_cork = 0;
1258			break;
1259
1260		case SCTP_CMD_ECN_CE:
1261			/* Do delayed CE processing.   */
1262			sctp_do_ecn_ce_work(asoc, cmd->obj.u32);
1263			break;
1264
1265		case SCTP_CMD_ECN_ECNE:
1266			/* Do delayed ECNE processing. */
1267			new_obj = sctp_do_ecn_ecne_work(asoc, cmd->obj.u32,
1268							chunk);
1269			if (new_obj)
1270				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
1271						SCTP_CHUNK(new_obj));
1272			break;
1273
1274		case SCTP_CMD_ECN_CWR:
1275			/* Do delayed CWR processing.  */
1276			sctp_do_ecn_cwr_work(asoc, cmd->obj.u32);
1277			break;
1278
1279		case SCTP_CMD_SETUP_T2:
1280			sctp_cmd_setup_t2(commands, asoc, cmd->obj.ptr);
1281			break;
1282
1283		case SCTP_CMD_TIMER_START:
1284			timer = &asoc->timers[cmd->obj.to];
1285			timeout = asoc->timeouts[cmd->obj.to];
1286			BUG_ON(!timeout);
1287
1288			timer->expires = jiffies + timeout;
1289			sctp_association_hold(asoc);
1290			add_timer(timer);
1291			break;
1292
1293		case SCTP_CMD_TIMER_RESTART:
1294			timer = &asoc->timers[cmd->obj.to];
1295			timeout = asoc->timeouts[cmd->obj.to];
1296			if (!mod_timer(timer, jiffies + timeout))
1297				sctp_association_hold(asoc);
1298			break;
1299
1300		case SCTP_CMD_TIMER_STOP:
1301			timer = &asoc->timers[cmd->obj.to];
1302			if (timer_pending(timer) && del_timer(timer))
1303				sctp_association_put(asoc);
1304			break;
1305
1306		case SCTP_CMD_INIT_CHOOSE_TRANSPORT:
1307			chunk = cmd->obj.ptr;
1308			t = sctp_assoc_choose_init_transport(asoc);
1309			asoc->init_last_sent_to = t;
1310			chunk->transport = t;
1311			t->init_sent_count++;
1312			break;
1313
1314		case SCTP_CMD_INIT_RESTART:
1315			/* Do the needed accounting and updates
1316			 * associated with restarting an initialization
1317			 * timer. Only multiply the timeout by two if
1318			 * all transports have been tried at the current
1319			 * timeout.
1320			 */
1321			t = asoc->init_last_sent_to;
1322			asoc->init_err_counter++;
1323
1324			if (t->init_sent_count > (asoc->init_cycle + 1)) {
1325				asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] *= 2;
1326				if (asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] >
1327				    asoc->max_init_timeo) {
1328					asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] =
1329						asoc->max_init_timeo;
1330				}
1331				asoc->init_cycle++;
1332				SCTP_DEBUG_PRINTK(
1333					"T1 INIT Timeout adjustment"
1334					" init_err_counter: %d"
1335					" cycle: %d"
1336					" timeout: %ld\n",
1337					asoc->init_err_counter,
1338					asoc->init_cycle,
1339					asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT]);
1340			}
1341
1342			sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
1343					SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
1344			break;
1345
1346		case SCTP_CMD_COOKIEECHO_RESTART:
			/* Do the needed accounting and updates
			 * associated with restarting the COOKIE-ECHO
			 * (T1-COOKIE) timer.  Unlike the INIT case,
			 * the timeout is doubled on every restart,
			 * capped at max_init_timeo.
			 */
1353			asoc->init_err_counter++;
1354
1355			asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] *= 2;
1356			if (asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] >
1357			    asoc->max_init_timeo) {
1358				asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] =
1359					asoc->max_init_timeo;
1360			}
1361			SCTP_DEBUG_PRINTK(
1362				"T1 COOKIE Timeout adjustment"
1363				" init_err_counter: %d"
1364				" timeout: %ld\n",
1365				asoc->init_err_counter,
1366				asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE]);
1367
1368			/* If we've sent any data bundled with
1369			 * COOKIE-ECHO we need to resend.
1370			 */
1371			list_for_each(pos, &asoc->peer.transport_addr_list) {
1372				t = list_entry(pos, struct sctp_transport,
1373					       transports);
1374				sctp_retransmit_mark(&asoc->outqueue, t, 0);
1375			}
1376
1377			sctp_add_cmd_sf(commands,
1378					SCTP_CMD_TIMER_RESTART,
1379					SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
1380			break;
1381
1382		case SCTP_CMD_INIT_FAILED:
1383			sctp_cmd_init_failed(commands, asoc, cmd->obj.err);
1384			break;
1385
1386		case SCTP_CMD_ASSOC_FAILED:
1387			sctp_cmd_assoc_failed(commands, asoc, event_type,
1388					      subtype, chunk, cmd->obj.err);
1389			break;
1390
1391		case SCTP_CMD_INIT_COUNTER_INC:
1392			asoc->init_err_counter++;
1393			break;
1394
1395		case SCTP_CMD_INIT_COUNTER_RESET:
1396			asoc->init_err_counter = 0;
1397			asoc->init_cycle = 0;
1398			break;
1399
1400		case SCTP_CMD_REPORT_DUP:
1401			sctp_tsnmap_mark_dup(&asoc->peer.tsn_map,
1402					     cmd->obj.u32);
1403			break;
1404
1405		case SCTP_CMD_REPORT_BAD_TAG:
1406			SCTP_DEBUG_PRINTK("vtag mismatch!\n");
1407			break;
1408
1409		case SCTP_CMD_STRIKE:
1410			/* Mark one strike against a transport.  */
1411			sctp_do_8_2_transport_strike(asoc, cmd->obj.transport);
1412			break;
1413
1414		case SCTP_CMD_TRANSPORT_RESET:
1415			t = cmd->obj.transport;
1416			sctp_cmd_transport_reset(commands, asoc, t);
1417			break;
1418
1419		case SCTP_CMD_TRANSPORT_ON:
1420			t = cmd->obj.transport;
1421			sctp_cmd_transport_on(commands, asoc, t, chunk);
1422			break;
1423
1424		case SCTP_CMD_HB_TIMERS_START:
1425			sctp_cmd_hb_timers_start(commands, asoc);
1426			break;
1427
1428		case SCTP_CMD_HB_TIMER_UPDATE:
1429			t = cmd->obj.transport;
1430			sctp_cmd_hb_timer_update(commands, asoc, t);
1431			break;
1432
1433		case SCTP_CMD_HB_TIMERS_STOP:
1434			sctp_cmd_hb_timers_stop(commands, asoc);
1435			break;
1436
1437		case SCTP_CMD_REPORT_ERROR:
1438			error = cmd->obj.error;
1439			break;
1440
1441		case SCTP_CMD_PROCESS_CTSN:
1442			/* Dummy up a SACK for processing. */
1443			sackh.cum_tsn_ack = cmd->obj.be32;
1444			sackh.a_rwnd = 0;
1445			sackh.num_gap_ack_blocks = 0;
1446			sackh.num_dup_tsns = 0;
1447			sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK,
1448					SCTP_SACKH(&sackh));
1449			break;
1450
1451		case SCTP_CMD_DISCARD_PACKET:
1452			/* We need to discard the whole packet.  */
1453			chunk->pdiscard = 1;
1454			break;
1455
1456		case SCTP_CMD_RTO_PENDING:
1457			t = cmd->obj.transport;
1458			t->rto_pending = 1;
1459			break;
1460
1461		case SCTP_CMD_PART_DELIVER:
1462			sctp_ulpq_partial_delivery(&asoc->ulpq, cmd->obj.ptr,
1463						   GFP_ATOMIC);
1464			break;
1465
1466		case SCTP_CMD_RENEGE:
1467			sctp_ulpq_renege(&asoc->ulpq, cmd->obj.ptr,
1468					 GFP_ATOMIC);
1469			break;
1470
1471		case SCTP_CMD_SETUP_T4:
1472			sctp_cmd_setup_t4(commands, asoc, cmd->obj.ptr);
1473			break;
1474
1475		case SCTP_CMD_PROCESS_OPERR:
1476			sctp_cmd_process_operr(commands, asoc, chunk);
1477			break;
1478		case SCTP_CMD_CLEAR_INIT_TAG:
1479			asoc->peer.i.init_tag = 0;
1480			break;
1481		case SCTP_CMD_DEL_NON_PRIMARY:
1482			sctp_cmd_del_non_primary(asoc);
1483			break;
1484		case SCTP_CMD_T3_RTX_TIMERS_STOP:
1485			sctp_cmd_t3_rtx_timers_stop(commands, asoc);
1486			break;
1487		case SCTP_CMD_FORCE_PRIM_RETRAN:
1488			t = asoc->peer.retran_path;
1489			asoc->peer.retran_path = asoc->peer.primary_path;
1490			error = sctp_outq_uncork(&asoc->outqueue);
1491			local_cork = 0;
1492			asoc->peer.retran_path = t;
1493			break;
1494		case SCTP_CMD_SET_SK_ERR:
1495			sctp_cmd_set_sk_err(asoc, cmd->obj.error);
1496			break;
1497		case SCTP_CMD_ASSOC_CHANGE:
1498			sctp_cmd_assoc_change(commands, asoc,
1499					      cmd->obj.u8);
1500			break;
1501		case SCTP_CMD_ADAPTATION_IND:
1502			sctp_cmd_adaptation_ind(commands, asoc);
1503			break;
1504
1505		default:
1506			printk(KERN_WARNING "Impossible command: %u, %p\n",
1507			       cmd->verb, cmd->obj.ptr);
1508			break;
1509		}
1510
1511		if (error)
1512			break;
1513	}
1514
1515out:
1516	if (local_cork)
1517		sctp_outq_uncork(&asoc->outqueue);
1518	return error;
1519nomem:
1520	error = -ENOMEM;
1521	goto out;
1522}
1523
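/* For reference, the producer side of the command sequence consumed above
 * lives in sctp_sm_statefuns.c.  A minimal sketch of a state function
 * queueing work for this interpreter, using the same wrappers seen in the
 * switch above:
 *
 *	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
 *	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
 *			SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
 *	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
 *			SCTP_STATE(SCTP_STATE_ESTABLISHED));
 *	return SCTP_DISPOSITION_CONSUME;
 *
 * sctp_do_sm() then hands the filled sequence to sctp_side_effects(), which
 * drives it through sctp_cmd_interpreter() above.
 */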