/*
 * net/tipc/msg.c: TIPC message header routines
 *
 * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <net/sock.h>
#include "core.h"
#include "msg.h"
#include "addr.h"
#include "name_table.h"
#include "crypto.h"

#define BUF_ALIGN(x) ALIGN(x, 4)
#define MAX_FORWARD_SIZE 1024
#ifdef CONFIG_TIPC_CRYPTO
#define BUF_HEADROOM ALIGN(((LL_MAX_HEADER + 48) + EHDR_MAX_SIZE), 16)
#define BUF_OVERHEAD (BUF_HEADROOM + TIPC_AES_GCM_TAG_SIZE)
#else
#define BUF_HEADROOM (LL_MAX_HEADER + 48)
#define BUF_OVERHEAD BUF_HEADROOM
#endif
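/* MTU that keeps the buffer data, the buffer overhead and the
 * skb_shared_info block within a single memory page
 */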
const int one_page_mtu = PAGE_SIZE - SKB_DATA_ALIGN(BUF_OVERHEAD) -
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

/**
 * tipc_buf_acquire - creates a TIPC message buffer
 * @size: message size (including TIPC header)
 * @gfp: memory allocation flags
 *
 * Return: a new buffer with data pointers set to the specified size.
 *
 * NOTE:
 * Headroom is reserved to allow prepending of a data link header.
 * There may also be unrequested tailroom present at the buffer's end.
 */
struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = alloc_skb_fclone(BUF_OVERHEAD + size, gfp);
	if (skb) {
		skb_reserve(skb, BUF_HEADROOM);
		skb_put(skb, size);
		skb->next = NULL;
	}
	return skb;
}
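
/* A minimal usage sketch (illustrative only; "own_addr" and "dnode" are
 * hypothetical placeholders): allocate a buffer sized for the header, then
 * initialize that header with tipc_msg_init() below.
 *
 *	struct sk_buff *skb = tipc_buf_acquire(BASIC_H_SIZE, GFP_ATOMIC);
 *
 *	if (skb)
 *		tipc_msg_init(own_addr, buf_msg(skb), TIPC_LOW_IMPORTANCE,
 *			      TIPC_CONN_MSG, BASIC_H_SIZE, dnode);
 */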
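/**
 * tipc_msg_init - initialize a TIPC message header
 * @own_node: originating node address
 * @m: header storage area to be initialized
 * @user: message user id
 * @type: message type
 * @hsize: header size
 * @dnode: destination node address
 *
 * Zeroes the header area and fills in the fields common to all headers.
 * Origin and destination node are only set for headers larger than
 * SHORT_H_SIZE, since the short form does not carry them.
 */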
void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type,
		   u32 hsize, u32 dnode)
{
	memset(m, 0, hsize);
	msg_set_version(m);
	msg_set_user(m, user);
	msg_set_hdr_sz(m, hsize);
	msg_set_size(m, hsize);
	msg_set_prevnode(m, own_node);
	msg_set_type(m, type);
	if (hsize > SHORT_H_SIZE) {
		msg_set_orignode(m, own_node);
		msg_set_destnode(m, dnode);
	}
}
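/**
 * tipc_msg_create - allocate and initialize a self-contained message buffer
 * @user: message user id
 * @type: message type
 * @hdr_sz: header size
 * @data_sz: size of the data area following the header
 * @dnode: destination node address
 * @onode: originating node address
 * @dport: destination port
 * @oport: originating port
 * @errcode: error code to be set in the header, if any
 *
 * Return: a new buffer of size @hdr_sz + @data_sz with an initialized
 * header, or NULL if allocation fails
 */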
struct sk_buff *tipc_msg_create(uint user, uint type,
				uint hdr_sz, uint data_sz, u32 dnode,
				u32 onode, u32 dport, u32 oport, int errcode)
{
	struct tipc_msg *msg;
	struct sk_buff *buf;

	buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC);
	if (unlikely(!buf))
		return NULL;

	msg = buf_msg(buf);
	tipc_msg_init(onode, msg, user, type, hdr_sz, dnode);
	msg_set_size(msg, hdr_sz + data_sz);
	msg_set_origport(msg, oport);
	msg_set_destport(msg, dport);
	msg_set_errcode(msg, errcode);
	return buf;
}
/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
 * @*headbuf: in:  NULL for first frag, otherwise value returned from prev call
 *            out: set when successful non-complete reassembly, otherwise NULL
 * @*buf:     in:  the buffer to append. Always defined
 *            out: head buf after successful complete reassembly, otherwise NULL
 * Returns 1 when reassembly complete, otherwise 0
 */
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
{
	struct sk_buff *head = *headbuf;
	struct sk_buff *frag = *buf;
	struct sk_buff *tail = NULL;
	struct tipc_msg *msg;
	u32 fragid;
	int delta;
	bool headstolen;

	if (!frag)
		goto err;

	msg = buf_msg(frag);
	fragid = msg_type(msg);
	frag->next = NULL;
	skb_pull(frag, msg_hdr_sz(msg));

	if (fragid == FIRST_FRAGMENT) {
		if (unlikely(head))
			goto err;
		*buf = NULL;
		if (skb_has_frag_list(frag) && __skb_linearize(frag))
			goto err;
		frag = skb_unshare(frag, GFP_ATOMIC);
		if (unlikely(!frag))
			goto err;
		head = *headbuf = frag;
		TIPC_SKB_CB(head)->tail = NULL;
		return 0;
	}

	if (!head)
		goto err;

	if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
		kfree_skb_partial(frag, headstolen);
	} else {
		tail = TIPC_SKB_CB(head)->tail;
		if (!skb_has_frag_list(head))
			skb_shinfo(head)->frag_list = frag;
		else
			tail->next = frag;
		head->truesize += frag->truesize;
		head->data_len += frag->len;
		head->len += frag->len;
		TIPC_SKB_CB(head)->tail = frag;
	}

	if (fragid == LAST_FRAGMENT) {
		TIPC_SKB_CB(head)->validated = 0;
		if (unlikely(!tipc_msg_validate(&head)))
			goto err;
		*buf = head;
		TIPC_SKB_CB(head)->tail = NULL;
		*headbuf = NULL;
		return 1;
	}
	*buf = NULL;
	return 0;
err:
	kfree_skb(*buf);
	kfree_skb(*headbuf);
	*buf = *headbuf = NULL;
	return 0;
}
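
/* A sketch of the intended reassembly loop (illustrative only;
 * "next_fragment" and "deliver" are hypothetical helpers): the caller keeps
 * a persistent head pointer across calls and hands over each arriving
 * fragment; on completion the fragment pointer holds the full message.
 *
 *	struct sk_buff *head = NULL, *frag;
 *
 *	while ((frag = next_fragment())) {
 *		if (tipc_buf_append(&head, &frag))
 *			deliver(frag);
 *	}
 */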

/**
 * tipc_msg_append(): Append data to tail of an existing buffer queue
 * @_hdr: header to be used
 * @m: the data to be appended
 * @dlen: size of data to be appended
 * @mss: max allowable size of buffer
 * @txq: queue to append to
 *
 * Return: the number of 1k blocks appended or errno value
 */
int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen,
		    int mss, struct sk_buff_head *txq)
{
	struct sk_buff *skb;
	int accounted, total, curr;
	int mlen, cpy, rem = dlen;
	struct tipc_msg *hdr;

	skb = skb_peek_tail(txq);
	accounted = skb ? msg_blocks(buf_msg(skb)) : 0;
	total = accounted;

	do {
		if (!skb || skb->len >= mss) {
			skb = tipc_buf_acquire(mss, GFP_KERNEL);
			if (unlikely(!skb))
				return -ENOMEM;
			skb_orphan(skb);
			skb_trim(skb, MIN_H_SIZE);
			hdr = buf_msg(skb);
			skb_copy_to_linear_data(skb, _hdr, MIN_H_SIZE);
			msg_set_hdr_sz(hdr, MIN_H_SIZE);
			msg_set_size(hdr, MIN_H_SIZE);
			__skb_queue_tail(txq, skb);
			total += 1;
		}
		hdr = buf_msg(skb);
		curr = msg_blocks(hdr);
		mlen = msg_size(hdr);
		cpy = min_t(size_t, rem, mss - mlen);
		if (cpy != copy_from_iter(skb->data + mlen, cpy, &m->msg_iter))
			return -EFAULT;
		msg_set_size(hdr, mlen + cpy);
		skb_put(skb, cpy);
		rem -= cpy;
		total += msg_blocks(hdr) - curr;
	} while (rem > 0);
	return total - accounted;
}

/* tipc_msg_validate - validate basic format of received message
 *
 * This routine ensures a TIPC message has an acceptable header, and at least
 * as much data as the header indicates it should.  The routine also ensures
 * that the entire message header is stored in the main fragment of the message
 * buffer, to simplify future access to message header fields.
 *
 * Note: Having extra info present in the message header or data areas is OK.
 * TIPC will ignore the excess, under the assumption that it is optional info
 * introduced by a later release of the protocol.
 */
bool tipc_msg_validate(struct sk_buff **_skb)
{
	struct sk_buff *skb = *_skb;
	struct tipc_msg *hdr;
	int msz, hsz;

	/* Ensure that flow control ratio condition is satisfied */
	if (unlikely(skb->truesize / buf_roundup_len(skb) >= 4)) {
		skb = skb_copy_expand(skb, BUF_HEADROOM, 0, GFP_ATOMIC);
		if (!skb)
			return false;
		kfree_skb(*_skb);
		*_skb = skb;
	}

	if (unlikely(TIPC_SKB_CB(skb)->validated))
		return true;

	if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
		return false;

	hsz = msg_hdr_sz(buf_msg(skb));
	if (unlikely(hsz < MIN_H_SIZE || hsz > MAX_H_SIZE))
		return false;
	if (unlikely(!pskb_may_pull(skb, hsz)))
		return false;

	hdr = buf_msg(skb);
	if (unlikely(msg_version(hdr) != TIPC_VERSION))
		return false;

	msz = msg_size(hdr);
	if (unlikely(msz < hsz))
		return false;
	if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE))
		return false;
	if (unlikely(skb->len < msz))
		return false;

	TIPC_SKB_CB(skb)->validated = 1;
	return true;
}

/**
 * tipc_msg_fragment - build a fragment skb list for TIPC message
 *
 * @skb: TIPC message skb
 * @hdr: internal msg header to be put on the top of the fragments
 * @pktmax: max size of a fragment incl. the header
 * @frags: returned fragment skb list
 *
 * Return: 0 if the fragmentation is successful, otherwise: -EINVAL
 * or -ENOMEM
 */
int tipc_msg_fragment(struct sk_buff *skb, const struct tipc_msg *hdr,
		      int pktmax, struct sk_buff_head *frags)
{
	int pktno, nof_fragms, dsz, dmax, eat;
	struct tipc_msg *_hdr;
	struct sk_buff *_skb;
	u8 *data;

	/* Non-linear buffer? */
	if (skb_linearize(skb))
		return -ENOMEM;

	data = (u8 *)skb->data;
	dsz = msg_size(buf_msg(skb));
	dmax = pktmax - INT_H_SIZE;
	if (dsz <= dmax || !dmax)
		return -EINVAL;

	nof_fragms = dsz / dmax + 1;
	for (pktno = 1; pktno <= nof_fragms; pktno++) {
		if (pktno < nof_fragms)
			eat = dmax;
		else
			eat = dsz % dmax;
		/* Allocate a new fragment */
		_skb = tipc_buf_acquire(INT_H_SIZE + eat, GFP_ATOMIC);
		if (!_skb)
			goto error;
		skb_orphan(_skb);
		__skb_queue_tail(frags, _skb);
		/* Copy header & data to the fragment */
		skb_copy_to_linear_data(_skb, hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(_skb, INT_H_SIZE, data, eat);
		data += eat;
		/* Update the fragment's header */
		_hdr = buf_msg(_skb);
		msg_set_fragm_no(_hdr, pktno);
		msg_set_nof_fragms(_hdr, nof_fragms);
		msg_set_size(_hdr, INT_H_SIZE + eat);
	}
	return 0;

error:
	__skb_queue_purge(frags);
	__skb_queue_head_init(frags);
	return -ENOMEM;
}

/**
 * tipc_msg_build - create buffer chain containing specified header and data
 * @mhdr: Message header, to be prepended to data
 * @m: User message
 * @offset: buffer offset for fragmented messages (FIXME)
 * @dsz: Total length of user data
 * @pktmax: Max packet size that can be used
 * @list: Buffer or chain of buffers to be returned to caller
 *
 * Note that the recursive call we are making here is safe, since it can
 * logically go only one further level down.
 *
 * Return: message data size or errno: -ENOMEM, -EFAULT
 */
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
		   int dsz, int pktmax, struct sk_buff_head *list)
{
	int mhsz = msg_hdr_sz(mhdr);
	struct tipc_msg pkthdr;
	int msz = mhsz + dsz;
	int pktrem = pktmax;
	struct sk_buff *skb;
	int drem = dsz;
	int pktno = 1;
	char *pktpos;
	int pktsz;
	int rc;

	msg_set_size(mhdr, msz);

	/* No fragmentation needed? */
	if (likely(msz <= pktmax)) {
		skb = tipc_buf_acquire(msz, GFP_KERNEL);

		/* Fall back to smaller MTU if node local message */
		if (unlikely(!skb)) {
			if (pktmax != MAX_MSG_SIZE)
				return -ENOMEM;
			rc = tipc_msg_build(mhdr, m, offset, dsz,
					    one_page_mtu, list);
			if (rc != dsz)
				return rc;
			if (tipc_msg_assemble(list))
				return dsz;
			return -ENOMEM;
		}
		skb_orphan(skb);
		__skb_queue_tail(list, skb);
		skb_copy_to_linear_data(skb, mhdr, mhsz);
		pktpos = skb->data + mhsz;
		if (copy_from_iter_full(pktpos, dsz, &m->msg_iter))
			return dsz;
		rc = -EFAULT;
		goto error;
	}

	/* Prepare reusable fragment header */
	tipc_msg_init(msg_prevnode(mhdr), &pkthdr, MSG_FRAGMENTER,
		      FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr));
	msg_set_size(&pkthdr, pktmax);
	msg_set_fragm_no(&pkthdr, pktno);
	msg_set_importance(&pkthdr, msg_importance(mhdr));

	/* Prepare first fragment */
	skb = tipc_buf_acquire(pktmax, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_orphan(skb);
	__skb_queue_tail(list, skb);
	pktpos = skb->data;
	skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
	pktpos += INT_H_SIZE;
	pktrem -= INT_H_SIZE;
	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
	pktpos += mhsz;
	pktrem -= mhsz;

	do {
		if (drem < pktrem)
			pktrem = drem;

		if (!copy_from_iter_full(pktpos, pktrem, &m->msg_iter)) {
			rc = -EFAULT;
			goto error;
		}
		drem -= pktrem;

		if (!drem)
			break;

		/* Prepare new fragment: */
		if (drem < (pktmax - INT_H_SIZE))
			pktsz = drem + INT_H_SIZE;
		else
			pktsz = pktmax;
		skb = tipc_buf_acquire(pktsz, GFP_KERNEL);
		if (!skb) {
			rc = -ENOMEM;
			goto error;
		}
		skb_orphan(skb);
		__skb_queue_tail(list, skb);
		msg_set_type(&pkthdr, FRAGMENT);
		msg_set_size(&pkthdr, pktsz);
		msg_set_fragm_no(&pkthdr, ++pktno);
		skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
		pktpos = skb->data + INT_H_SIZE;
		pktrem = pktsz - INT_H_SIZE;

	} while (1);
	msg_set_type(buf_msg(skb), LAST_FRAGMENT);
	return dsz;
error:
	__skb_queue_purge(list);
	__skb_queue_head_init(list);
	return rc;
}

/**
 * tipc_msg_bundle - Append contents of a buffer to tail of an existing one
 * @bskb: the bundle buffer to append to
 * @msg: message to be appended
 * @max: max allowable size for the bundle buffer
 *
 * Return: "true" if bundling has been performed, otherwise "false"
 */
static bool tipc_msg_bundle(struct sk_buff *bskb, struct tipc_msg *msg,
			    u32 max)
{
	struct tipc_msg *bmsg = buf_msg(bskb);
	u32 msz, bsz, offset, pad;

	msz = msg_size(msg);
	bsz = msg_size(bmsg);
	offset = BUF_ALIGN(bsz);
	pad = offset - bsz;

	if (unlikely(skb_tailroom(bskb) < (pad + msz)))
		return false;
	if (unlikely(max < (offset + msz)))
		return false;

	skb_put(bskb, pad + msz);
	skb_copy_to_linear_data_offset(bskb, offset, msg, msz);
	msg_set_size(bmsg, offset + msz);
	msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
	return true;
}

/**
 * tipc_msg_try_bundle - Try to bundle a new message to the last one
 * @tskb: the last/target message to which the new one will be appended
 * @skb: the new message skb pointer
 * @mss: max message size (header inclusive)
 * @dnode: destination node for the message
 * @new_bundle: if this call made a new bundle or not
 *
 * Return: "true" if the new message skb is a candidate for bundling, now or
 * later; if bundling was performed this time, the skb is consumed and the
 * pointer set to NULL. Otherwise "false", meaning the skb cannot be bundled
 * at all.
 */
bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss,
			 u32 dnode, bool *new_bundle)
{
	struct tipc_msg *msg, *inner, *outer;
	u32 tsz;

	/* First, check if the new buffer is suitable for bundling */
	msg = buf_msg(*skb);
	if (msg_user(msg) == MSG_FRAGMENTER)
		return false;
	if (msg_user(msg) == TUNNEL_PROTOCOL)
		return false;
	if (msg_user(msg) == BCAST_PROTOCOL)
		return false;
	if (mss <= INT_H_SIZE + msg_size(msg))
		return false;

	/* OK, but is there an existing last/target buffer to bundle with? */
	if (unlikely(!tskb))
		return true;

	/* Is it a bundle already? Try to bundle the new message to it */
	if (msg_user(buf_msg(tskb)) == MSG_BUNDLER) {
		*new_bundle = false;
		goto bundle;
	}

	/* Make a new bundle of the two messages if possible */
	tsz = msg_size(buf_msg(tskb));
	if (unlikely(mss < BUF_ALIGN(INT_H_SIZE + tsz) + msg_size(msg)))
		return true;
	if (unlikely(pskb_expand_head(tskb, INT_H_SIZE, mss - tsz - INT_H_SIZE,
				      GFP_ATOMIC)))
		return true;
	inner = buf_msg(tskb);
	skb_push(tskb, INT_H_SIZE);
	outer = buf_msg(tskb);
	tipc_msg_init(msg_prevnode(inner), outer, MSG_BUNDLER, 0, INT_H_SIZE,
		      dnode);
	msg_set_importance(outer, msg_importance(inner));
	msg_set_size(outer, INT_H_SIZE + tsz);
	msg_set_msgcnt(outer, 1);
	*new_bundle = true;

bundle:
	if (likely(tipc_msg_bundle(tskb, msg, mss))) {
		consume_skb(*skb);
		*skb = NULL;
	}
	return true;
}

/**
 * tipc_msg_extract(): extract bundled inner packet from buffer
 * @skb: buffer to be extracted from; consumed when the last packet has been
 *       extracted or on error
 * @iskb: extracted inner buffer, to be returned
 * @pos: position in outer message of msg to be extracted; updated to the
 *       position of the next inner msg
 * Return: true when there is an extracted buffer, otherwise false
 */
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
{
	struct tipc_msg *hdr, *ihdr;
	int imsz;

	*iskb = NULL;
	if (unlikely(skb_linearize(skb)))
		goto none;

	hdr = buf_msg(skb);
	if (unlikely(*pos > (msg_data_sz(hdr) - MIN_H_SIZE)))
		goto none;

	ihdr = (struct tipc_msg *)(msg_data(hdr) + *pos);
	imsz = msg_size(ihdr);

	if ((*pos + imsz) > msg_data_sz(hdr))
		goto none;

	*iskb = tipc_buf_acquire(imsz, GFP_ATOMIC);
	if (!*iskb)
		goto none;

	skb_copy_to_linear_data(*iskb, ihdr, imsz);
	if (unlikely(!tipc_msg_validate(iskb)))
		goto none;

	*pos += BUF_ALIGN(imsz);
	return true;
none:
	kfree_skb(skb);
	kfree_skb(*iskb);
	*iskb = NULL;
	return false;
}
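
/* A sketch of the intended unbundling loop (illustrative only; "deliver" is
 * a hypothetical consumer): extract inner messages until the call fails, at
 * which point the outer buffer has been consumed.
 *
 *	int pos = 0;
 *	struct sk_buff *iskb;
 *
 *	while (tipc_msg_extract(skb, &iskb, &pos))
 *		deliver(iskb);
 */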

/**
 * tipc_msg_reverse(): swap source and destination addresses and add error code
 * @own_node: originating node id for reversed message
 * @skb:  buffer containing message to be reversed; will be consumed
 * @err:  error code to be set in message, if any
 * Replaces consumed buffer with new one when successful
 * Return: true if success, otherwise false
 */
bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
{
	struct sk_buff *_skb = *skb;
	struct tipc_msg *_hdr, *hdr;
	int hlen, dlen;

	if (skb_linearize(_skb))
		goto exit;
	_hdr = buf_msg(_skb);
	dlen = min_t(uint, msg_data_sz(_hdr), MAX_FORWARD_SIZE);
	hlen = msg_hdr_sz(_hdr);

	if (msg_dest_droppable(_hdr))
		goto exit;
	if (msg_errcode(_hdr))
		goto exit;

	/* Never return SHORT header */
	if (hlen == SHORT_H_SIZE)
		hlen = BASIC_H_SIZE;

	/* Don't return data along with SYN+ - sender has a clone */
	if (msg_is_syn(_hdr) && err == TIPC_ERR_OVERLOAD)
		dlen = 0;

	/* Allocate new buffer to return */
	*skb = tipc_buf_acquire(hlen + dlen, GFP_ATOMIC);
	if (!*skb)
		goto exit;
	memcpy((*skb)->data, _skb->data, msg_hdr_sz(_hdr));
	memcpy((*skb)->data + hlen, msg_data(_hdr), dlen);

	/* Build reverse header in new buffer */
	hdr = buf_msg(*skb);
	msg_set_hdr_sz(hdr, hlen);
	msg_set_errcode(hdr, err);
	msg_set_non_seq(hdr, 0);
	msg_set_origport(hdr, msg_destport(_hdr));
	msg_set_destport(hdr, msg_origport(_hdr));
	msg_set_destnode(hdr, msg_prevnode(_hdr));
	msg_set_prevnode(hdr, own_node);
	msg_set_orignode(hdr, own_node);
	msg_set_size(hdr, hlen + dlen);
	skb_orphan(_skb);
	kfree_skb(_skb);
	return true;
exit:
	kfree_skb(_skb);
	*skb = NULL;
	return false;
}

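/**
 * tipc_msg_skb_clone - clone a buffer chain into a new queue
 * @msg: queue of buffers to be cloned
 * @cpy: queue to hold the cloned buffers
 *
 * Return: true if all buffers were cloned, otherwise false; on failure the
 * clones already made are purged from @cpy
 */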
bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy)
{
	struct sk_buff *skb, *_skb;

	skb_queue_walk(msg, skb) {
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb) {
			__skb_queue_purge(cpy);
			pr_err_ratelimited("Failed to clone buffer chain\n");
			return false;
		}
		__skb_queue_tail(cpy, _skb);
	}
	return true;
}

/**
 * tipc_msg_lookup_dest(): try to find new destination for named message
 * @net: pointer to associated network namespace
 * @skb: the buffer containing the message.
 * @err: error code to be used by caller if lookup fails
 * Does not consume buffer
 * Return: true if a destination is found, false otherwise
 */
bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
{
	struct tipc_msg *msg = buf_msg(skb);
	u32 scope = msg_lookup_scope(msg);
	u32 self = tipc_own_addr(net);
	u32 inst = msg_nameinst(msg);
	struct tipc_socket_addr sk;
	struct tipc_uaddr ua;

	if (!msg_isdata(msg))
		return false;
	if (!msg_named(msg))
		return false;
	if (msg_errcode(msg))
		return false;
	*err = TIPC_ERR_NO_NAME;
	if (skb_linearize(skb))
		return false;
	msg = buf_msg(skb);
	if (msg_reroute_cnt(msg))
		return false;
	tipc_uaddr(&ua, TIPC_SERVICE_RANGE, scope,
		   msg_nametype(msg), inst, inst);
	sk.node = tipc_scope2node(net, scope);
	if (!tipc_nametbl_lookup_anycast(net, &ua, &sk))
		return false;
	msg_incr_reroute_cnt(msg);
	if (sk.node != self)
		msg_set_prevnode(msg, self);
	msg_set_destnode(msg, sk.node);
	msg_set_destport(msg, sk.ref);
	*err = TIPC_OK;

	return true;
}

/* tipc_msg_assemble() - assemble chain of fragments into one message
 */
bool tipc_msg_assemble(struct sk_buff_head *list)
{
	struct sk_buff *skb, *tmp = NULL;

	if (skb_queue_len(list) == 1)
		return true;

	while ((skb = __skb_dequeue(list))) {
		skb->next = NULL;
		if (tipc_buf_append(&tmp, &skb)) {
			__skb_queue_tail(list, skb);
			return true;
		}
		if (!tmp)
			break;
	}
	__skb_queue_purge(list);
	__skb_queue_head_init(list);
	pr_warn("Failed to assemble buffer\n");
	return false;
}

/* tipc_msg_reassemble() - clone a buffer chain of fragments and
 *                         reassemble the clones into one message
 */
bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq)
{
	struct sk_buff *skb, *_skb;
	struct sk_buff *frag = NULL;
	struct sk_buff *head = NULL;
	int hdr_len;

	/* Copy header if single buffer */
	if (skb_queue_len(list) == 1) {
		skb = skb_peek(list);
		hdr_len = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
		_skb = __pskb_copy(skb, hdr_len, GFP_ATOMIC);
		if (!_skb)
			return false;
		__skb_queue_tail(rcvq, _skb);
		return true;
	}

	/* Clone all fragments and reassemble */
	skb_queue_walk(list, skb) {
		frag = skb_clone(skb, GFP_ATOMIC);
		if (!frag)
			goto error;
		frag->next = NULL;
		if (tipc_buf_append(&head, &frag))
			break;
		if (!head)
			goto error;
	}
	__skb_queue_tail(rcvq, frag);
	return true;
error:
	pr_warn("Failed to clone local mcast rcv buffer\n");
	kfree_skb(head);
	return false;
}
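/**
 * tipc_msg_pskb_copy - make writable copies of a buffer chain, with the
 *                      destination node updated in each copy
 * @dst: destination node address to set in the copied headers
 * @msg: queue of buffers to be copied
 * @cpy: queue to hold the copies
 *
 * Return: true if all buffers were copied, otherwise false; on failure the
 * copies already made are purged from @cpy
 */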
bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
			struct sk_buff_head *cpy)
{
	struct sk_buff *skb, *_skb;

	skb_queue_walk(msg, skb) {
		_skb = pskb_copy(skb, GFP_ATOMIC);
		if (!_skb) {
			__skb_queue_purge(cpy);
			return false;
		}
		msg_set_destnode(buf_msg(_skb), dst);
		__skb_queue_tail(cpy, _skb);
	}
	return true;
}

/* __tipc_skb_queue_sorted(): sort pkt into list according to sequence number
 * @list: list to be appended to
 * @seqno: sequence number of buffer to add
 * @skb: buffer to add
 * Return: true if the buffer was queued, otherwise false, in which case the
 * buffer was a duplicate and has been freed
 */
bool __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
			     struct sk_buff *skb)
{
	struct sk_buff *_skb, *tmp;

	if (skb_queue_empty(list) || less(seqno, buf_seqno(skb_peek(list)))) {
		__skb_queue_head(list, skb);
		return true;
	}

	if (more(seqno, buf_seqno(skb_peek_tail(list)))) {
		__skb_queue_tail(list, skb);
		return true;
	}

	skb_queue_walk_safe(list, _skb, tmp) {
		if (more(seqno, buf_seqno(_skb)))
			continue;
		if (seqno == buf_seqno(_skb))
			break;
		__skb_queue_before(list, _skb, skb);
		return true;
	}
	kfree_skb(skb);
	return false;
}
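/**
 * tipc_skb_reject - reverse a message and queue it for transmission
 * @net: pointer to associated network namespace
 * @err: error code to be set in the rejected message
 * @skb: buffer containing the message to be rejected; consumed
 * @xmitq: queue to hold the reversed message, if any
 *
 * The buffer is dropped silently if it cannot be reversed.
 */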
void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb,
		     struct sk_buff_head *xmitq)
{
	if (tipc_msg_reverse(tipc_own_addr(net), &skb, err))
		__skb_queue_tail(xmitq, skb);
}