1/**
2 * @file
3 * Packet buffer management
4 */
5
6
7/*
8 * Copyright (c) 2017, ETH Zurich.
9 * All rights reserved.
10 *
11 * This file is distributed under the terms in the attached LICENSE file.
12 * If you do not find this file, copies can be found by writing to:
13 * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
14 *
15 *
16 * This file contains modified pbuf functions:
17 *
18 *  pbuf_alloced_custom
19 *  pbuf_alloc
20 *  pbuf_realloc
21 *  pbuf_free
22 *
23 */
24
25
26
27
28/**
29 * @defgroup pbuf Packet buffers (PBUF)
30 * @ingroup infrastructure
31 *
32 * Packets are built from the pbuf data structure. It supports dynamic
33 * memory allocation for packet contents or can reference externally
34 * managed packet contents both in RAM and ROM. Quick allocation for
35 * incoming packets is provided through pools with fixed sized pbufs.
36 *
37 * A packet may span over multiple pbufs, chained as a singly linked
38 * list. This is called a "pbuf chain".
39 *
40 * Multiple packets may be queued, also using this singly linked list.
41 * This is called a "packet queue".
42 *
43 * So, a packet queue consists of one or more pbuf chains, each of
44 * which consist of one or more pbufs. CURRENTLY, PACKET QUEUES ARE
45 * NOT SUPPORTED!!! Use helper structs to queue multiple packets.
46 *
47 * The differences between a pbuf chain and a packet queue are very
48 * precise but subtle.
49 *
50 * The last pbuf of a packet has a ->tot_len field that equals the
51 * ->len field. It can be found by traversing the list. If the last
52 * pbuf of a packet has a ->next field other than NULL, more packets
53 * are on the queue.
54 *
 * Therefore, looping through the pbufs of a single packet has the
 * loop end condition (tot_len == p->len), NOT (next == NULL).
57 *
58 * Example of custom pbuf usage for zero-copy RX:
59  @code{.c}
60typedef struct my_custom_pbuf
61{
62   struct pbuf_custom p;
63   void* dma_descriptor;
64} my_custom_pbuf_t;
65
66LWIP_MEMPOOL_DECLARE(RX_POOL, 10, sizeof(my_custom_pbuf_t), "Zero-copy RX PBUF pool");
67
68void my_pbuf_free_custom(void* p)
69{
  my_custom_pbuf_t* my_pbuf = (my_custom_pbuf_t*)p;
71
72  LOCK_INTERRUPTS();
73  free_rx_dma_descriptor(my_pbuf->dma_descriptor);
74  LWIP_MEMPOOL_FREE(RX_POOL, my_pbuf);
75  UNLOCK_INTERRUPTS();
76}
77
78void eth_rx_irq()
79{
80  dma_descriptor*   dma_desc = get_RX_DMA_descriptor_from_ethernet();
81  my_custom_pbuf_t* my_pbuf  = (my_custom_pbuf_t*)LWIP_MEMPOOL_ALLOC(RX_POOL);
82
83  my_pbuf->p.custom_free_function = my_pbuf_free_custom;
84  my_pbuf->dma_descriptor         = dma_desc;
85
86  invalidate_cpu_cache(dma_desc->rx_data, dma_desc->rx_length);
87
88  struct pbuf* p = pbuf_alloced_custom(PBUF_RAW,
89     dma_desc->rx_length,
90     PBUF_REF,
91     &my_pbuf->p,
92     dma_desc->rx_data,
93     dma_desc->max_buffer_size);
94
95  if(netif->input(p, netif) != ERR_OK) {
96    pbuf_free(p);
97  }
98}
99  @endcode
100 */
101
102/*
103 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
104 * All rights reserved.
105 *
106 * Redistribution and use in source and binary forms, with or without modification,
107 * are permitted provided that the following conditions are met:
108 *
109 * 1. Redistributions of source code must retain the above copyright notice,
110 *    this list of conditions and the following disclaimer.
111 * 2. Redistributions in binary form must reproduce the above copyright notice,
112 *    this list of conditions and the following disclaimer in the documentation
113 *    and/or other materials provided with the distribution.
114 * 3. The name of the author may not be used to endorse or promote products
115 *    derived from this software without specific prior written permission.
116 *
117 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
118 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
119 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
120 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
121 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
122 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
123 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
124 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
125 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
126 * OF SUCH DAMAGE.
127 *
128 * This file is part of the lwIP TCP/IP stack.
129 *
130 * Author: Adam Dunkels <adam@sics.se>
131 *
132 *
133 *
134 *
135 */
136
137#include "lwip/opt.h"
138
139#include "lwip/stats.h"
140#include "lwip/def.h"
141#include "lwip/mem.h"
142#include "lwip/memp.h"
143#include "lwip/pbuf.h"
144#include "lwip/sys.h"
145#if LWIP_TCP && TCP_QUEUE_OOSEQ
146#include "lwip/priv/tcp_priv.h"
147#endif
148#if LWIP_CHECKSUM_ON_COPY
149#include "lwip/inet_chksum.h"
150#endif
151
152#include "networking_internal.h"
153
154#include <string.h>
155
/* Size of the pbuf header, rounded up to the platform memory alignment. */
#define SIZEOF_STRUCT_PBUF        LWIP_MEM_ALIGN_SIZE(sizeof(struct pbuf))
/* Since the pool is created in memp, PBUF_POOL_BUFSIZE will be automatically
   aligned there. Therefore, PBUF_POOL_BUFSIZE_ALIGNED can be used here. */
#define PBUF_POOL_BUFSIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(PBUF_POOL_BUFSIZE)

/* When TCP out-of-sequence reclaim is compiled out, PBUF_POOL_IS_EMPTY()
 * expands to nothing; otherwise it queues a reclaim of ooseq segments. */
#if !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ
#define PBUF_POOL_IS_EMPTY()
#else /* !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ */

#if !NO_SYS
#ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL
#include "lwip/tcpip.h"
/* Schedule pbuf_free_ooseq() on the tcpip thread; if the callback cannot
 * be queued, clear the pending flag again so a later attempt can retry.
 * Relies on 'old_level' being declared by the caller (pbuf_pool_is_empty). */
#define PBUF_POOL_FREE_OOSEQ_QUEUE_CALL()  do { \
  if (tcpip_callback_with_block(pbuf_free_ooseq_callback, NULL, 0) != ERR_OK) { \
      SYS_ARCH_PROTECT(old_level); \
      pbuf_free_ooseq_pending = 0; \
      SYS_ARCH_UNPROTECT(old_level); \
  } } while(0)
#endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */
#endif /* !NO_SYS */

/* Nonzero while a pbuf_free_ooseq() call is queued but not yet executed. */
volatile u8_t pbuf_free_ooseq_pending;
#define PBUF_POOL_IS_EMPTY() pbuf_pool_is_empty()
179
180/**
181 * Attempt to reclaim some memory from queued out-of-sequence TCP segments
182 * if we run out of pool pbufs. It's better to give priority to new packets
183 * if we're running out.
184 *
185 * This must be done in the correct thread context therefore this function
186 * can only be used with NO_SYS=0 and through tcpip_callback.
187 */
188#if !NO_SYS
189static
190#endif /* !NO_SYS */
191void
192pbuf_free_ooseq(void)
193{
194  struct tcp_pcb* pcb;
195  SYS_ARCH_SET(pbuf_free_ooseq_pending, 0);
196
197  for (pcb = tcp_active_pcbs; NULL != pcb; pcb = pcb->next) {
198    if (NULL != pcb->ooseq) {
199      /** Free the ooseq pbufs of one PCB only */
200      LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free_ooseq: freeing out-of-sequence pbufs\n"));
201      tcp_segs_free(pcb->ooseq);
202      pcb->ooseq = NULL;
203      return;
204    }
205  }
206}
207
#if !NO_SYS
/**
 * Just a callback function for tcpip_callback() that calls pbuf_free_ooseq().
 *
 * @param arg unused (required by the tcpip_callback_fn signature)
 */
static void
pbuf_free_ooseq_callback(void *arg)
{
  LWIP_UNUSED_ARG(arg);
  pbuf_free_ooseq();
}
#endif /* !NO_SYS */
219
/** Queue a call to pbuf_free_ooseq if not already queued.
 *
 * Two compile-time variants: without PBUF_POOL_FREE_OOSEQ_QUEUE_CALL the
 * pending flag is simply set (NO_SYS polling picks it up); with it, the
 * flag is tested-and-set under SYS_ARCH protection so at most one callback
 * is in flight at a time.
 */
static void
pbuf_pool_is_empty(void)
{
#ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL
  SYS_ARCH_SET(pbuf_free_ooseq_pending, 1);
#else /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */
  u8_t queued;
  SYS_ARCH_DECL_PROTECT(old_level);
  /* atomically test-and-set the pending flag */
  SYS_ARCH_PROTECT(old_level);
  queued = pbuf_free_ooseq_pending;
  pbuf_free_ooseq_pending = 1;
  SYS_ARCH_UNPROTECT(old_level);

  if (!queued) {
    /* queue a call to pbuf_free_ooseq if not already queued */
    PBUF_POOL_FREE_OOSEQ_QUEUE_CALL();
  }
#endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */
}
240#endif /* !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ */
241
242
243/**
244 * @ingroup pbuf
245 * Allocates a pbuf of the given type (possibly a chain for PBUF_POOL type).
246 *
247 * The actual memory allocated for the pbuf is determined by the
248 * layer at which the pbuf is allocated and the requested size
249 * (from the size parameter).
250 *
251 * @param layer flag to define header size
252 * @param length size of the pbuf's payload
253 * @param type this parameter decides how and where the pbuf
254 * should be allocated as follows:
255 *
256 * - PBUF_RAM: buffer memory for pbuf is allocated as one large
257 *             chunk. This includes protocol headers as well.
258 * - PBUF_ROM: no buffer memory is allocated for the pbuf, even for
259 *             protocol headers. Additional headers must be prepended
260 *             by allocating another pbuf and chain in to the front of
261 *             the ROM pbuf. It is assumed that the memory used is really
262 *             similar to ROM in that it is immutable and will not be
263 *             changed. Memory which is dynamic should generally not
264 *             be attached to PBUF_ROM pbufs. Use PBUF_REF instead.
265 * - PBUF_REF: no buffer memory is allocated for the pbuf, even for
266 *             protocol headers. It is assumed that the pbuf is only
267 *             being used in a single thread. If the pbuf gets queued,
268 *             then pbuf_take should be called to copy the buffer.
269 * - PBUF_POOL: the pbuf is allocated as a pbuf chain, with pbufs from
270 *              the pbuf pool that is allocated during pbuf_init().
271 *
272 * @return the allocated pbuf. If multiple pbufs where allocated, this
273 * is the first pbuf of a pbuf chain.
274 */
struct pbuf *
pbuf_alloc(pbuf_layer layer, u16_t length, pbuf_type type)
{
  struct pbuf *p, *q, *r;
  u16_t offset;
  s32_t rem_len; /* remaining length */
  LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F")\n", length));

  /* determine header offset */
  switch (layer) {
  case PBUF_TRANSPORT:
    /* add room for transport (often TCP) layer header */
    offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN;
    break;
  case PBUF_IP:
    /* add room for IP layer header */
    offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN;
    break;
  case PBUF_LINK:
    /* add room for link layer header */
    offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN;
    break;
  case PBUF_RAW_TX:
    /* add room for encapsulating link layer headers (e.g. 802.11) */
    offset = PBUF_LINK_ENCAPSULATION_HLEN;
    break;
  case PBUF_RAW:
    /* no offset (e.g. RX buffers or chain successors) */
    offset = 0;
    break;
  default:
    LWIP_ASSERT("pbuf_alloc: bad pbuf layer", 0);
    return NULL;
  }

  switch (type) {
  case PBUF_POOL:
    /* allocate head of pbuf chain into p */

    /* Port change: pbufs come from the networking state's buffer pool
     * (net_buf_alloc) instead of lwIP's MEMP_PBUF_POOL. */
    p  = net_buf_alloc(state.pool);
    LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc: allocated pbuf %p\n", (void *)p));
    if (p == NULL) {
      PBUF_POOL_IS_EMPTY();
      return NULL;
    }
    p->type = type;
    p->next = NULL;

    /* make the payload pointer point 'offset' bytes into pbuf data memory */
    /* NOTE(review): offset is applied relative to the payload pointer the
     * allocator returned, not reset to the buffer start -- assumes
     * net_buf_alloc() hands out pbufs with payload at the buffer base;
     * confirm against net_buf_alloc(). */
    p->payload = LWIP_MEM_ALIGN((void *)((u8_t *)p->payload + offset));
    LWIP_ASSERT("pbuf_alloc: pbuf p->payload properly aligned",
            ((mem_ptr_t)p->payload % MEM_ALIGNMENT) == 0);
    /* the total length of the pbuf chain is the requested size */
    p->tot_len = length;
    /* set the length of the first pbuf in the chain */
    p->len = LWIP_MIN(length, PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset));
    LWIP_ASSERT("check p->payload + p->len does not overflow pbuf",
                ((u8_t*)p->payload + p->len <=
                 (u8_t*)p + SIZEOF_STRUCT_PBUF + PBUF_POOL_BUFSIZE_ALIGNED));
    LWIP_ASSERT("PBUF_POOL_BUFSIZE must be bigger than MEM_ALIGNMENT",
      (PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset)) > 0 );
    /* set reference count (needed here in case we fail) */
    p->ref = 1;

    /* now allocate the tail of the pbuf chain */

    /* remember first pbuf for linkage in next iteration */
    r = p;
    /* remaining length to be allocated */
    rem_len = length - p->len;
    /* any remaining pbufs to be allocated? */
    while (rem_len > 0) {
      q  = net_buf_alloc(state.pool);
      if (q == NULL) {
        PBUF_POOL_IS_EMPTY();
        /* free chain so far allocated */
        pbuf_free(p);
        /* bail out unsuccessfully */
        return NULL;
      }
      q->type = type;
      q->flags = 0;
      q->next = NULL;
      /* make previous pbuf point to this pbuf */
      r->next = q;
      /* set total length of this pbuf and next in chain */
      LWIP_ASSERT("rem_len < max_u16_t", rem_len < 0xffff);
      q->tot_len = (u16_t)rem_len;
      /* this pbuf length is pool size, unless smaller sized tail */
      q->len = LWIP_MIN((u16_t)rem_len, PBUF_POOL_BUFSIZE_ALIGNED);
      LWIP_ASSERT("pbuf_alloc: pbuf q->payload properly aligned",
              ((mem_ptr_t)q->payload % MEM_ALIGNMENT) == 0);
      LWIP_ASSERT("check p->payload + p->len does not overflow pbuf",
                  ((u8_t*)p->payload + p->len <=
                   (u8_t*)p + SIZEOF_STRUCT_PBUF + PBUF_POOL_BUFSIZE_ALIGNED));
      q->ref = 1;
      /* calculate remaining length to be allocated */
      rem_len -= q->len;
      /* remember this pbuf for linkage in next iteration */
      r = q;
    }
    /* end of chain */
    /*r->next = NULL;*/

    break;
  case PBUF_RAM:
    {
      mem_size_t alloc_len = LWIP_MEM_ALIGN_SIZE(SIZEOF_STRUCT_PBUF + offset) + LWIP_MEM_ALIGN_SIZE(length);

      /* bug #50040: Check for integer overflow when calculating alloc_len */
      if (alloc_len < LWIP_MEM_ALIGN_SIZE(length)) {
        return NULL;
      }

      /* If pbuf is to be allocated in RAM, allocate memory for it. */
      /* NOTE(review): alloc_len is not passed to net_buf_alloc() -- buffers
       * come from a fixed-size pool, so the caller must ensure 'length'
       * plus headers fits one pool buffer; confirm against net_buf_alloc(). */
      p  = net_buf_alloc(state.pool);
    }

    if (p == NULL) {
      return NULL;
    }
    /* Set up internal structure of the pbuf. */
    p->payload = LWIP_MEM_ALIGN((void *)((u8_t *)p->payload + offset));
    p->len = p->tot_len = length;
    p->next = NULL;
    p->type = type;

    LWIP_ASSERT("pbuf_alloc: pbuf->payload properly aligned",
           ((mem_ptr_t)p->payload % MEM_ALIGNMENT) == 0);
    break;
  /* pbuf references existing (non-volatile static constant) ROM payload? */
  case PBUF_ROM:
  /* pbuf references existing (externally allocated) RAM payload? */
  case PBUF_REF:
    /* only allocate memory for the pbuf structure */
    p = (struct pbuf *)memp_malloc(MEMP_PBUF);
    if (p == NULL) {
      LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
                  ("pbuf_alloc: Could not allocate MEMP_PBUF for PBUF_%s.\n",
                  (type == PBUF_ROM) ? "ROM" : "REF"));
      return NULL;
    }
    /* caller must set this field properly, afterwards */
    p->payload = NULL;
    p->len = p->tot_len = length;
    p->next = NULL;
    p->type = type;
    break;
  default:
    LWIP_ASSERT("pbuf_alloc: erroneous type", 0);
    return NULL;
  }
  /* set reference count */
  p->ref = 1;
  /* NOTE(review): unconditionally overrides the per-branch type assigned
   * above (including PBUF_ROM). Presumably all buffers in this port are
   * treated as externally managed so pbuf_free() takes the custom-free
   * path -- confirm against pbuf_free() and the net_buf allocator. */
  p->type = PBUF_REF;
  /* NOTE(review): stock lwIP sets p->flags = 0 here; this port leaves the
   * allocator's flags untouched, presumably to keep PBUF_FLAG_IS_CUSTOM
   * intact -- confirm. */
  LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F") == %p\n", length, (void *)p));

  /* NOTE(review): this fails whenever the PBUF_POOL branch built a chain of
   * more than one pbuf -- the port appears to assume every packet fits a
   * single pool buffer; confirm. */
  assert(p->next == NULL);
  return p;
}
436
437#if LWIP_SUPPORT_CUSTOM_PBUF
438/**
439 * @ingroup pbuf
440 * Initialize a custom pbuf (already allocated).
441 *
442 * @param l flag to define header size
443 * @param length size of the pbuf's payload
444 * @param type type of the pbuf (only used to treat the pbuf accordingly, as
445 *        this function allocates no memory)
446 * @param p pointer to the custom pbuf to initialize (already allocated)
447 * @param payload_mem pointer to the buffer that is used for payload and headers,
448 *        must be at least big enough to hold 'length' plus the header size,
449 *        may be NULL if set later.
450 *        ATTENTION: The caller is responsible for correct alignment of this buffer!!
451 * @param payload_mem_len the size of the 'payload_mem' buffer, must be at least
452 *        big enough to hold 'length' plus the header size
453 */
454struct pbuf*
455pbuf_alloced_custom(pbuf_layer l, u16_t length, pbuf_type type, struct pbuf_custom *p,
456                    void *payload_mem, u16_t payload_mem_len)
457{
458  u16_t offset;
459  LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloced_custom(length=%"U16_F")\n", length));
460
461  /* determine header offset */
462  switch (l) {
463  case PBUF_TRANSPORT:
464    /* add room for transport (often TCP) layer header */
465    offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN;
466    break;
467  case PBUF_IP:
468    /* add room for IP layer header */
469    offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN;
470    break;
471  case PBUF_LINK:
472    /* add room for link layer header */
473    offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN;
474    break;
475  case PBUF_RAW_TX:
476    /* add room for encapsulating link layer headers (e.g. 802.11) */
477    offset = PBUF_LINK_ENCAPSULATION_HLEN;
478    break;
479  case PBUF_RAW:
480    offset = 0;
481    break;
482  default:
483    LWIP_ASSERT("pbuf_alloced_custom: bad pbuf layer", 0);
484    return NULL;
485  }
486
487  if (LWIP_MEM_ALIGN_SIZE(offset) + length > payload_mem_len) {
488    LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_WARNING, ("pbuf_alloced_custom(length=%"U16_F") buffer too short\n", length));
489    return NULL;
490  }
491
492  p->pbuf.next = NULL;
493  if (payload_mem != NULL) {
494    p->pbuf.payload = (u8_t *)payload_mem + LWIP_MEM_ALIGN_SIZE(offset);
495  } else {
496    p->pbuf.payload = NULL;
497  }
498  p->pbuf.flags = PBUF_FLAG_IS_CUSTOM;
499  p->pbuf.len = p->pbuf.tot_len = length;
500  p->pbuf.type = PBUF_REF;
501  p->pbuf.ref = 1;
502  return &p->pbuf;
503}
504#endif /* LWIP_SUPPORT_CUSTOM_PBUF */
505
506/**
507 * @ingroup pbuf
508 * Shrink a pbuf chain to a desired length.
509 *
510 * @param p pbuf to shrink.
511 * @param new_len desired new length of pbuf chain
512 *
513 * Depending on the desired length, the first few pbufs in a chain might
514 * be skipped and left unchanged. The new last pbuf in the chain will be
515 * resized, and any remaining pbufs will be freed.
516 *
517 * @note If the pbuf is ROM/REF, only the ->tot_len and ->len fields are adjusted.
518 * @note May not be called on a packet queue.
519 *
520 * @note Despite its name, pbuf_realloc cannot grow the size of a pbuf (chain).
521 */
void
pbuf_realloc(struct pbuf *p, u16_t new_len)
{
  struct pbuf *q;
  u16_t rem_len; /* remaining length */
  s32_t grow;

  LWIP_ASSERT("pbuf_realloc: p != NULL", p != NULL);
  LWIP_ASSERT("pbuf_realloc: sane p->type", p->type == PBUF_POOL ||
              p->type == PBUF_ROM ||
              p->type == PBUF_RAM ||
              p->type == PBUF_REF);

  /* desired length larger than current length? */
  if (new_len >= p->tot_len) {
    /* enlarging not yet supported */
    return;
  }

  /* grow = new_len - p->tot_len; always negative at this point since the
   * enlarging case returned early above */
  grow = new_len - p->tot_len;

  /* first, step over any pbufs that should remain in the chain */
  rem_len = new_len;
  q = p;
  /* should this pbuf be kept? */
  while (rem_len > q->len) {
    /* decrease remaining length by pbuf length */
    rem_len -= q->len;
    /* decrease total length indicator */
    LWIP_ASSERT("grow < max_u16_t", grow < 0xffff);
    q->tot_len += (u16_t)grow;
    /* proceed to next pbuf in chain */
    q = q->next;
    LWIP_ASSERT("pbuf_realloc: q != NULL", q != NULL);
  }
  /* we have now reached the new last pbuf (in q) */
  /* rem_len == desired length for pbuf q */

  /* shrink allocated memory for PBUF_RAM */
  /* (other types merely adjust their length fields */
  if ((q->type == PBUF_RAM) && (rem_len != q->len)
#if LWIP_SUPPORT_CUSTOM_PBUF
      && ((q->flags & PBUF_FLAG_IS_CUSTOM) == 0)
#endif /* LWIP_SUPPORT_CUSTOM_PBUF */
     ) {
    /* Port change: buffers come from a fixed-size pool and must never be
     * trimmed with mem_trim(); reaching this path is treated as fatal.
     * The mem_trim() call below is retained from stock lwIP but is
     * presumably unreachable after the panic -- confirm USER_PANIC
     * semantics. */
    USER_PANIC("SHOULD Not trim memory\n");
    /* reallocate and adjust the length of the pbuf that will be split */
    q = (struct pbuf *)mem_trim(q, (u16_t)((u8_t *)q->payload - (u8_t *)q) + rem_len);
    LWIP_ASSERT("mem_trim returned q == NULL", q != NULL);
  }
  /* adjust length fields for new last pbuf */
  q->len = rem_len;
  q->tot_len = q->len;

  /* any remaining pbufs in chain? */
  if (q->next != NULL) {
    /* free remaining pbufs in chain */
    pbuf_free(q->next);
  }
  /* q is last packet in chain */
  q->next = NULL;
}
586
587
588/**
589 * @ingroup pbuf
590 * Dereference a pbuf chain or queue and deallocate any no-longer-used
591 * pbufs at the head of this chain or queue.
592 *
593 * Decrements the pbuf reference count. If it reaches zero, the pbuf is
594 * deallocated.
595 *
596 * For a pbuf chain, this is repeated for each pbuf in the chain,
597 * up to the first pbuf which has a non-zero reference count after
598 * decrementing. So, when all reference counts are one, the whole
599 * chain is free'd.
600 *
601 * @param p The pbuf (chain) to be dereferenced.
602 *
603 * @return the number of pbufs that were de-allocated
604 * from the head of the chain.
605 *
606 * @note MUST NOT be called on a packet queue (Not verified to work yet).
607 * @note the reference counter of a pbuf equals the number of pointers
608 * that refer to the pbuf (or into the pbuf).
609 *
610 * @internal examples:
611 *
612 * Assuming existing chains a->b->c with the following reference
613 * counts, calling pbuf_free(a) results in:
614 *
615 * 1->2->3 becomes ...1->3
616 * 3->3->3 becomes 2->3->3
617 * 1->1->2 becomes ......1
618 * 2->1->1 becomes 1->1->1
619 * 1->1->1 becomes .......
620 *
621 */
u8_t
pbuf_free(struct pbuf *p)
{
  u16_t type;
  struct pbuf *q;
  u8_t count;

  if (p == NULL) {
    LWIP_ASSERT("p != NULL", p != NULL);
    /* if assertions are disabled, proceed with debug output */
    LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
      ("pbuf_free(p == NULL) was called.\n"));
    return 0;
  }
  LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free(%p)\n", (void *)p));

  PERF_START;

  LWIP_ASSERT("pbuf_free: sane type",
    p->type == PBUF_RAM || p->type == PBUF_ROM ||
    p->type == PBUF_REF || p->type == PBUF_POOL);

  count = 0;
  /* de-allocate all consecutive pbufs from the head of the chain that
   * obtain a zero reference count after decrementing*/
  while (p != NULL) {
    u16_t ref;
    SYS_ARCH_DECL_PROTECT(old_level);
    /* Since decrementing ref cannot be guaranteed to be a single machine operation
     * we must protect it. We put the new ref into a local variable to prevent
     * further protection. */
    SYS_ARCH_PROTECT(old_level);
    /* all pbufs in a chain are referenced at least once */
    LWIP_ASSERT("pbuf_free: p->ref > 0", p->ref > 0);
    /* decrease reference count (number of pointers to pbuf) */
    ref = --(p->ref);
    SYS_ARCH_UNPROTECT(old_level);
    /* this pbuf is no longer referenced to? */
    if (ref == 0) {
      /* remember next pbuf in chain for next iteration */
      q = p->next;
      LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: deallocating %p\n", (void *)p));
      type = p->type;
#if LWIP_SUPPORT_CUSTOM_PBUF
      /* is this a custom pbuf? */
      if ((p->flags & PBUF_FLAG_IS_CUSTOM) != 0) {
        struct pbuf_custom *pc = (struct pbuf_custom*)p;
        LWIP_ASSERT("pc->custom_free_function != NULL", pc->custom_free_function != NULL);
        pc->custom_free_function(p);
      } else
#endif /* LWIP_SUPPORT_CUSTOM_PBUF */
      {
          /* Port change: every pbuf in this port is expected to carry
           * PBUF_FLAG_IS_CUSTOM and be released via its custom free
           * function above; falling through to the stock lwIP
           * deallocation paths is treated as fatal. The memp_free/
           * mem_free calls below are retained from stock lwIP but are
           * presumably unreachable after the panic -- confirm USER_PANIC
           * semantics. */
          USER_PANIC("SHOULD NOT REACH HERE...\n");
        /* is this a pbuf from the pool? */
        if (type == PBUF_POOL) {
          memp_free(MEMP_PBUF_POOL, p);
        /* is this a ROM or RAM referencing pbuf? */
        } else if (type == PBUF_ROM || type == PBUF_REF) {
          memp_free(MEMP_PBUF, p);
        /* type == PBUF_RAM */
        } else {
          mem_free(p);
        }
      }
      count++;
      /* proceed to next pbuf */
      p = q;
    /* p->ref > 0, this pbuf is still referenced to */
    /* (and so the remaining pbufs in chain as well) */
    } else {
      LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: %p has ref %"U16_F", ending here.\n", (void *)p, ref));
      /* stop walking through the chain */
      p = NULL;
    }
  }
  PERF_STOP("pbuf_free");
  /* return number of de-allocated pbufs */
  return count;
}
701