1/** 2 * @file 3 * Packet buffer management 4 */ 5 6/** 7 * @defgroup pbuf Packet buffers (PBUF) 8 * @ingroup infrastructure 9 * 10 * Packets are built from the pbuf data structure. It supports dynamic 11 * memory allocation for packet contents or can reference externally 12 * managed packet contents both in RAM and ROM. Quick allocation for 13 * incoming packets is provided through pools with fixed sized pbufs. 14 * 15 * A packet may span over multiple pbufs, chained as a singly linked 16 * list. This is called a "pbuf chain". 17 * 18 * Multiple packets may be queued, also using this singly linked list. 19 * This is called a "packet queue". 20 * 21 * So, a packet queue consists of one or more pbuf chains, each of 22 * which consist of one or more pbufs. CURRENTLY, PACKET QUEUES ARE 23 * NOT SUPPORTED!!! Use helper structs to queue multiple packets. 24 * 25 * The differences between a pbuf chain and a packet queue are very 26 * precise but subtle. 27 * 28 * The last pbuf of a packet has a ->tot_len field that equals the 29 * ->len field. It can be found by traversing the list. If the last 30 * pbuf of a packet has a ->next field other than NULL, more packets 31 * are on the queue. 32 * 33 * Therefore, looping through a pbuf of a single packet, has an 34 * loop end condition (tot_len == p->len), NOT (next == NULL). 
35 * 36 * Example of custom pbuf usage for zero-copy RX: 37 @code{.c} 38typedef struct my_custom_pbuf 39{ 40 struct pbuf_custom p; 41 void* dma_descriptor; 42} my_custom_pbuf_t; 43 44LWIP_MEMPOOL_DECLARE(RX_POOL, 10, sizeof(my_custom_pbuf_t), "Zero-copy RX PBUF pool"); 45 46void my_pbuf_free_custom(void* p) 47{ 48 my_custom_pbuf_t* my_puf = (my_custom_pbuf_t*)p; 49 50 LOCK_INTERRUPTS(); 51 free_rx_dma_descriptor(my_pbuf->dma_descriptor); 52 LWIP_MEMPOOL_FREE(RX_POOL, my_pbuf); 53 UNLOCK_INTERRUPTS(); 54} 55 56void eth_rx_irq() 57{ 58 dma_descriptor* dma_desc = get_RX_DMA_descriptor_from_ethernet(); 59 my_custom_pbuf_t* my_pbuf = (my_custom_pbuf_t*)LWIP_MEMPOOL_ALLOC(RX_POOL); 60 61 my_pbuf->p.custom_free_function = my_pbuf_free_custom; 62 my_pbuf->dma_descriptor = dma_desc; 63 64 invalidate_cpu_cache(dma_desc->rx_data, dma_desc->rx_length); 65 66 struct pbuf* p = pbuf_alloced_custom(PBUF_RAW, 67 dma_desc->rx_length, 68 PBUF_REF, 69 &my_pbuf->p, 70 dma_desc->rx_data, 71 dma_desc->max_buffer_size); 72 73 if(netif->input(p, netif) != ERR_OK) { 74 pbuf_free(p); 75 } 76} 77 @endcode 78 */ 79 80/* 81 * Copyright (c) 2001-2004 Swedish Institute of Computer Science. 82 * All rights reserved. 83 * 84 * Redistribution and use in source and binary forms, with or without modification, 85 * are permitted provided that the following conditions are met: 86 * 87 * 1. Redistributions of source code must retain the above copyright notice, 88 * this list of conditions and the following disclaimer. 89 * 2. Redistributions in binary form must reproduce the above copyright notice, 90 * this list of conditions and the following disclaimer in the documentation 91 * and/or other materials provided with the distribution. 92 * 3. The name of the author may not be used to endorse or promote products 93 * derived from this software without specific prior written permission. 
94 * 95 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 96 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 97 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT 98 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 99 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT 100 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 101 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 102 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 103 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY 104 * OF SUCH DAMAGE. 105 * 106 * This file is part of the lwIP TCP/IP stack. 107 * 108 * Author: Adam Dunkels <adam@sics.se> 109 * 110 */ 111 112#include "lwip/opt.h" 113 114#include "lwip/stats.h" 115#include "lwip/def.h" 116#include "lwip/mem.h" 117#include "lwip/memp.h" 118#include "lwip/pbuf.h" 119#include "lwip/sys.h" 120#if LWIP_TCP && TCP_QUEUE_OOSEQ 121#include "lwip/priv/tcp_priv.h" 122#endif 123#if LWIP_CHECKSUM_ON_COPY 124#include "lwip/inet_chksum.h" 125#endif 126 127#include <string.h> 128 129#define SIZEOF_STRUCT_PBUF LWIP_MEM_ALIGN_SIZE(sizeof(struct pbuf)) 130/* Since the pool is created in memp, PBUF_POOL_BUFSIZE will be automatically 131 aligned there. Therefore, PBUF_POOL_BUFSIZE_ALIGNED can be used here. 
 */
#define PBUF_POOL_BUFSIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(PBUF_POOL_BUFSIZE)

#if !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ
/* OOSEQ reclaiming disabled: the pool-empty hook compiles to nothing */
#define PBUF_POOL_IS_EMPTY()
#else /* !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ */

#if !NO_SYS
#ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL
#include "lwip/tcpip.h"
/* Queue a call into the tcpip thread to free out-of-sequence segments.
   On queueing failure, the 'pending' flag is cleared again (under SYS_ARCH
   protection) so a later pool-empty event can retry. NOTE: relies on the
   caller (pbuf_pool_is_empty) having declared 'old_level'. */
#define PBUF_POOL_FREE_OOSEQ_QUEUE_CALL() do { \
  if (tcpip_callback_with_block(pbuf_free_ooseq_callback, NULL, 0) != ERR_OK) { \
      SYS_ARCH_PROTECT(old_level); \
      pbuf_free_ooseq_pending = 0; \
      SYS_ARCH_UNPROTECT(old_level); \
  } } while(0)
#endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */
#endif /* !NO_SYS */

/* Nonzero while a pbuf_free_ooseq() call is pending; prevents double-queueing */
volatile u8_t pbuf_free_ooseq_pending;
#define PBUF_POOL_IS_EMPTY() pbuf_pool_is_empty()

/**
 * Attempt to reclaim some memory from queued out-of-sequence TCP segments
 * if we run out of pool pbufs. It's better to give priority to new packets
 * if we're running out.
 *
 * This must be done in the correct thread context therefore this function
 * can only be used with NO_SYS=0 and through tcpip_callback.
 */
#if !NO_SYS
static
#endif /* !NO_SYS */
void
pbuf_free_ooseq(void)
{
  struct tcp_pcb* pcb;
  SYS_ARCH_SET(pbuf_free_ooseq_pending, 0);

  for (pcb = tcp_active_pcbs; NULL != pcb; pcb = pcb->next) {
    if (NULL != pcb->ooseq) {
      /** Free the ooseq pbufs of one PCB only */
      LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free_ooseq: freeing out-of-sequence pbufs\n"));
      tcp_segs_free(pcb->ooseq);
      pcb->ooseq = NULL;
      return;
    }
  }
}

#if !NO_SYS
/**
 * Just a callback function for tcpip_callback() that calls pbuf_free_ooseq().
 */
static void
pbuf_free_ooseq_callback(void *arg)
{
  LWIP_UNUSED_ARG(arg);
  pbuf_free_ooseq();
}
#endif /* !NO_SYS */

/** Queue a call to pbuf_free_ooseq if not already queued.
 */
static void
pbuf_pool_is_empty(void)
{
#ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL
  /* No queueing mechanism (e.g. NO_SYS): just record that a free is wanted */
  SYS_ARCH_SET(pbuf_free_ooseq_pending, 1);
#else /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */
  u8_t queued;
  SYS_ARCH_DECL_PROTECT(old_level);
  SYS_ARCH_PROTECT(old_level);
  /* test-and-set the pending flag atomically w.r.t. other contexts */
  queued = pbuf_free_ooseq_pending;
  pbuf_free_ooseq_pending = 1;
  SYS_ARCH_UNPROTECT(old_level);

  if (!queued) {
    /* queue a call to pbuf_free_ooseq if not already queued */
    PBUF_POOL_FREE_OOSEQ_QUEUE_CALL();
  }
#endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */
}
#endif /* !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ */

/**
 * @ingroup pbuf
 * Allocates a pbuf of the given type (possibly a chain for PBUF_POOL type).
 *
 * The actual memory allocated for the pbuf is determined by the
 * layer at which the pbuf is allocated and the requested size
 * (from the size parameter).
 *
 * @param layer flag to define header size
 * @param length size of the pbuf's payload
 * @param type this parameter decides how and where the pbuf
 * should be allocated as follows:
 *
 * - PBUF_RAM: buffer memory for pbuf is allocated as one large
 *             chunk. This includes protocol headers as well.
 * - PBUF_ROM: no buffer memory is allocated for the pbuf, even for
 *             protocol headers. Additional headers must be prepended
 *             by allocating another pbuf and chain in to the front of
 *             the ROM pbuf. It is assumed that the memory used is really
 *             similar to ROM in that it is immutable and will not be
 *             changed. Memory which is dynamic should generally not
 *             be attached to PBUF_ROM pbufs. Use PBUF_REF instead.
 * - PBUF_REF: no buffer memory is allocated for the pbuf, even for
 *             protocol headers. It is assumed that the pbuf is only
 *             being used in a single thread. If the pbuf gets queued,
 *             then pbuf_take should be called to copy the buffer.
241 * - PBUF_POOL: the pbuf is allocated as a pbuf chain, with pbufs from 242 * the pbuf pool that is allocated during pbuf_init(). 243 * 244 * @return the allocated pbuf. If multiple pbufs where allocated, this 245 * is the first pbuf of a pbuf chain. 246 */ 247struct pbuf * 248pbuf_alloc(pbuf_layer layer, u16_t length, pbuf_type type) 249{ 250 struct pbuf *p, *q, *r; 251 u16_t offset; 252 s32_t rem_len; /* remaining length */ 253 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F")\n", length)); 254 255 /* determine header offset */ 256 switch (layer) { 257 case PBUF_TRANSPORT: 258 /* add room for transport (often TCP) layer header */ 259 offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN; 260 break; 261 case PBUF_IP: 262 /* add room for IP layer header */ 263 offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN; 264 break; 265 case PBUF_LINK: 266 /* add room for link layer header */ 267 offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN; 268 break; 269 case PBUF_RAW_TX: 270 /* add room for encapsulating link layer headers (e.g. 802.11) */ 271 offset = PBUF_LINK_ENCAPSULATION_HLEN; 272 break; 273 case PBUF_RAW: 274 /* no offset (e.g. 
RX buffers or chain successors) */ 275 offset = 0; 276 break; 277 default: 278 LWIP_ASSERT("pbuf_alloc: bad pbuf layer", 0); 279 return NULL; 280 } 281 282 switch (type) { 283 case PBUF_POOL: 284 /* allocate head of pbuf chain into p */ 285 p = (struct pbuf *)memp_malloc(MEMP_PBUF_POOL); 286 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc: allocated pbuf %p\n", (void *)p)); 287 if (p == NULL) { 288 PBUF_POOL_IS_EMPTY(); 289 return NULL; 290 } 291 p->type = type; 292 p->next = NULL; 293 294 /* make the payload pointer point 'offset' bytes into pbuf data memory */ 295 p->payload = LWIP_MEM_ALIGN((void *)((u8_t *)p + (SIZEOF_STRUCT_PBUF + offset))); 296 LWIP_ASSERT("pbuf_alloc: pbuf p->payload properly aligned", 297 ((mem_ptr_t)p->payload % MEM_ALIGNMENT) == 0); 298 /* the total length of the pbuf chain is the requested size */ 299 p->tot_len = length; 300 /* set the length of the first pbuf in the chain */ 301 p->len = LWIP_MIN(length, PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset)); 302 LWIP_ASSERT("check p->payload + p->len does not overflow pbuf", 303 ((u8_t*)p->payload + p->len <= 304 (u8_t*)p + SIZEOF_STRUCT_PBUF + PBUF_POOL_BUFSIZE_ALIGNED)); 305 LWIP_ASSERT("PBUF_POOL_BUFSIZE must be bigger than MEM_ALIGNMENT", 306 (PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset)) > 0 ); 307 /* set reference count (needed here in case we fail) */ 308 p->ref = 1; 309 310 /* now allocate the tail of the pbuf chain */ 311 312 /* remember first pbuf for linkage in next iteration */ 313 r = p; 314 /* remaining length to be allocated */ 315 rem_len = length - p->len; 316 /* any remaining pbufs to be allocated? 
*/ 317 while (rem_len > 0) { 318 q = (struct pbuf *)memp_malloc(MEMP_PBUF_POOL); 319 if (q == NULL) { 320 PBUF_POOL_IS_EMPTY(); 321 /* free chain so far allocated */ 322 pbuf_free(p); 323 /* bail out unsuccessfully */ 324 return NULL; 325 } 326 q->type = type; 327 q->flags = 0; 328 q->next = NULL; 329 /* make previous pbuf point to this pbuf */ 330 r->next = q; 331 /* set total length of this pbuf and next in chain */ 332 LWIP_ASSERT("rem_len < max_u16_t", rem_len < 0xffff); 333 q->tot_len = (u16_t)rem_len; 334 /* this pbuf length is pool size, unless smaller sized tail */ 335 q->len = LWIP_MIN((u16_t)rem_len, PBUF_POOL_BUFSIZE_ALIGNED); 336 q->payload = (void *)((u8_t *)q + SIZEOF_STRUCT_PBUF); 337 LWIP_ASSERT("pbuf_alloc: pbuf q->payload properly aligned", 338 ((mem_ptr_t)q->payload % MEM_ALIGNMENT) == 0); 339 LWIP_ASSERT("check p->payload + p->len does not overflow pbuf", 340 ((u8_t*)p->payload + p->len <= 341 (u8_t*)p + SIZEOF_STRUCT_PBUF + PBUF_POOL_BUFSIZE_ALIGNED)); 342 q->ref = 1; 343 /* calculate remaining length to be allocated */ 344 rem_len -= q->len; 345 /* remember this pbuf for linkage in next iteration */ 346 r = q; 347 } 348 /* end of chain */ 349 /*r->next = NULL;*/ 350 351 break; 352 case PBUF_RAM: 353 { 354 mem_size_t alloc_len = LWIP_MEM_ALIGN_SIZE(SIZEOF_STRUCT_PBUF + offset) + LWIP_MEM_ALIGN_SIZE(length); 355 356 /* bug #50040: Check for integer overflow when calculating alloc_len */ 357 if (alloc_len < LWIP_MEM_ALIGN_SIZE(length)) { 358 return NULL; 359 } 360 361 /* If pbuf is to be allocated in RAM, allocate memory for it. */ 362 p = (struct pbuf*)mem_malloc(alloc_len); 363 } 364 365 if (p == NULL) { 366 return NULL; 367 } 368 /* Set up internal structure of the pbuf. 
*/ 369 p->payload = LWIP_MEM_ALIGN((void *)((u8_t *)p + SIZEOF_STRUCT_PBUF + offset)); 370 p->len = p->tot_len = length; 371 p->next = NULL; 372 p->type = type; 373 374 LWIP_ASSERT("pbuf_alloc: pbuf->payload properly aligned", 375 ((mem_ptr_t)p->payload % MEM_ALIGNMENT) == 0); 376 break; 377 /* pbuf references existing (non-volatile static constant) ROM payload? */ 378 case PBUF_ROM: 379 /* pbuf references existing (externally allocated) RAM payload? */ 380 case PBUF_REF: 381 /* only allocate memory for the pbuf structure */ 382 p = (struct pbuf *)memp_malloc(MEMP_PBUF); 383 if (p == NULL) { 384 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS, 385 ("pbuf_alloc: Could not allocate MEMP_PBUF for PBUF_%s.\n", 386 (type == PBUF_ROM) ? "ROM" : "REF")); 387 return NULL; 388 } 389 /* caller must set this field properly, afterwards */ 390 p->payload = NULL; 391 p->len = p->tot_len = length; 392 p->next = NULL; 393 p->type = type; 394 break; 395 default: 396 LWIP_ASSERT("pbuf_alloc: erroneous type", 0); 397 return NULL; 398 } 399 /* set reference count */ 400 p->ref = 1; 401 /* set flags */ 402 p->flags = 0; 403 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F") == %p\n", length, (void *)p)); 404 return p; 405} 406 407#if LWIP_SUPPORT_CUSTOM_PBUF 408/** 409 * @ingroup pbuf 410 * Initialize a custom pbuf (already allocated). 411 * 412 * @param l flag to define header size 413 * @param length size of the pbuf's payload 414 * @param type type of the pbuf (only used to treat the pbuf accordingly, as 415 * this function allocates no memory) 416 * @param p pointer to the custom pbuf to initialize (already allocated) 417 * @param payload_mem pointer to the buffer that is used for payload and headers, 418 * must be at least big enough to hold 'length' plus the header size, 419 * may be NULL if set later. 420 * ATTENTION: The caller is responsible for correct alignment of this buffer!! 
421 * @param payload_mem_len the size of the 'payload_mem' buffer, must be at least 422 * big enough to hold 'length' plus the header size 423 */ 424struct pbuf* 425pbuf_alloced_custom(pbuf_layer l, u16_t length, pbuf_type type, struct pbuf_custom *p, 426 void *payload_mem, u16_t payload_mem_len) 427{ 428 u16_t offset; 429 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloced_custom(length=%"U16_F")\n", length)); 430 431 /* determine header offset */ 432 switch (l) { 433 case PBUF_TRANSPORT: 434 /* add room for transport (often TCP) layer header */ 435 offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN; 436 break; 437 case PBUF_IP: 438 /* add room for IP layer header */ 439 offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN; 440 break; 441 case PBUF_LINK: 442 /* add room for link layer header */ 443 offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN; 444 break; 445 case PBUF_RAW_TX: 446 /* add room for encapsulating link layer headers (e.g. 802.11) */ 447 offset = PBUF_LINK_ENCAPSULATION_HLEN; 448 break; 449 case PBUF_RAW: 450 offset = 0; 451 break; 452 default: 453 LWIP_ASSERT("pbuf_alloced_custom: bad pbuf layer", 0); 454 return NULL; 455 } 456 457 if (LWIP_MEM_ALIGN_SIZE(offset) + length > payload_mem_len) { 458 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_WARNING, ("pbuf_alloced_custom(length=%"U16_F") buffer too short\n", length)); 459 return NULL; 460 } 461 462 p->pbuf.next = NULL; 463 if (payload_mem != NULL) { 464 p->pbuf.payload = (u8_t *)payload_mem + LWIP_MEM_ALIGN_SIZE(offset); 465 } else { 466 p->pbuf.payload = NULL; 467 } 468 p->pbuf.flags = PBUF_FLAG_IS_CUSTOM; 469 p->pbuf.len = p->pbuf.tot_len = length; 470 p->pbuf.type = type; 471 p->pbuf.ref = 1; 472 return &p->pbuf; 473} 474#endif /* LWIP_SUPPORT_CUSTOM_PBUF */ 475 476/** 477 * @ingroup pbuf 478 * Shrink a pbuf chain to a desired length. 479 * 480 * @param p pbuf to shrink. 
 * @param new_len desired new length of pbuf chain
 *
 * Depending on the desired length, the first few pbufs in a chain might
 * be skipped and left unchanged. The new last pbuf in the chain will be
 * resized, and any remaining pbufs will be freed.
 *
 * @note If the pbuf is ROM/REF, only the ->tot_len and ->len fields are adjusted.
 * @note May not be called on a packet queue.
 *
 * @note Despite its name, pbuf_realloc cannot grow the size of a pbuf (chain).
 */
void
pbuf_realloc(struct pbuf *p, u16_t new_len)
{
  struct pbuf *q;
  u16_t rem_len; /* remaining length */
  s32_t grow;

  LWIP_ASSERT("pbuf_realloc: p != NULL", p != NULL);
  LWIP_ASSERT("pbuf_realloc: sane p->type", p->type == PBUF_POOL ||
              p->type == PBUF_ROM ||
              p->type == PBUF_RAM ||
              p->type == PBUF_REF);

  /* desired length larger than current length? */
  if (new_len >= p->tot_len) {
    /* enlarging not yet supported */
    return;
  }

  /* the pbuf chain length changes by (new_len - p->tot_len) bytes
   * (always negative here, since we only shrink: new_len < tot_len) */
  grow = new_len - p->tot_len;

  /* first, step over any pbufs that should remain in the chain */
  rem_len = new_len;
  q = p;
  /* should this pbuf be kept? */
  while (rem_len > q->len) {
    /* decrease remaining length by pbuf length */
    rem_len -= q->len;
    /* decrease total length indicator (adding the negative 'grow') */
    LWIP_ASSERT("grow < max_u16_t", grow < 0xffff);
    q->tot_len += (u16_t)grow;
    /* proceed to next pbuf in chain */
    q = q->next;
    /* chain invariant guarantees a successor while rem_len > q->len */
    LWIP_ASSERT("pbuf_realloc: q != NULL", q != NULL);
  }
  /* we have now reached the new last pbuf (in q) */
  /* rem_len == desired length for pbuf q */

  /* shrink allocated memory for PBUF_RAM */
  /* (other types merely adjust their length fields */
  if ((q->type == PBUF_RAM) && (rem_len != q->len)
#if LWIP_SUPPORT_CUSTOM_PBUF
      /* custom pbufs own their memory; never hand them to mem_trim() */
      && ((q->flags & PBUF_FLAG_IS_CUSTOM) == 0)
#endif /* LWIP_SUPPORT_CUSTOM_PBUF */
     ) {
    /* reallocate and adjust the length of the pbuf that will be split;
       the trimmed size counts the struct pbuf + header gap before payload */
    q = (struct pbuf *)mem_trim(q, (u16_t)((u8_t *)q->payload - (u8_t *)q) + rem_len);
    LWIP_ASSERT("mem_trim returned q == NULL", q != NULL);
  }
  /* adjust length fields for new last pbuf */
  q->len = rem_len;
  q->tot_len = q->len;

  /* any remaining pbufs in chain? */
  if (q->next != NULL) {
    /* free remaining pbufs in chain */
    pbuf_free(q->next);
  }
  /* q is last packet in chain */
  q->next = NULL;

}

/**
 * Adjusts the payload pointer to hide or reveal headers in the payload.
 * @see pbuf_header.
 *
 * @param p pbuf to change the header size.
 * @param header_size_increment Number of bytes to increment header size.
 * @param force Allow 'header_size_increment > 0' for PBUF_REF/PBUF_ROM types
 *
 * @return non-zero on failure, zero on success.
566 * 567 */ 568static u8_t 569pbuf_header_impl(struct pbuf *p, s16_t header_size_increment, u8_t force) 570{ 571 u16_t type; 572 void *payload; 573 u16_t increment_magnitude; 574 575 LWIP_ASSERT("p != NULL", p != NULL); 576 if ((header_size_increment == 0) || (p == NULL)) { 577 return 0; 578 } 579 580 if (header_size_increment < 0) { 581 increment_magnitude = (u16_t)-header_size_increment; 582 /* Check that we aren't going to move off the end of the pbuf */ 583 LWIP_ERROR("increment_magnitude <= p->len", (increment_magnitude <= p->len), return 1;); 584 } else { 585 increment_magnitude = (u16_t)header_size_increment; 586#if 0 587 /* Can't assert these as some callers speculatively call 588 pbuf_header() to see if it's OK. Will return 1 below instead. */ 589 /* Check that we've got the correct type of pbuf to work with */ 590 LWIP_ASSERT("p->type == PBUF_RAM || p->type == PBUF_POOL", 591 p->type == PBUF_RAM || p->type == PBUF_POOL); 592 /* Check that we aren't going to move off the beginning of the pbuf */ 593 LWIP_ASSERT("p->payload - increment_magnitude >= p + SIZEOF_STRUCT_PBUF", 594 (u8_t *)p->payload - increment_magnitude >= (u8_t *)p + SIZEOF_STRUCT_PBUF); 595#endif 596 } 597 598 type = p->type; 599 /* remember current payload pointer */ 600 payload = p->payload; 601 602 /* pbuf types containing payloads? */ 603 if (type == PBUF_RAM || type == PBUF_POOL) { 604 /* set new payload pointer */ 605 p->payload = (u8_t *)p->payload - header_size_increment; 606 /* boundary check fails? */ 607 if ((u8_t *)p->payload < (u8_t *)p + SIZEOF_STRUCT_PBUF) { 608 LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, 609 ("pbuf_header: failed as %p < %p (not enough space for new header size)\n", 610 (void *)p->payload, (void *)((u8_t *)p + SIZEOF_STRUCT_PBUF))); 611 /* restore old payload pointer */ 612 p->payload = payload; 613 /* bail out unsuccessfully */ 614 return 1; 615 } 616 /* pbuf types referring to external payloads? 
*/ 617 } else if (type == PBUF_REF || type == PBUF_ROM) { 618 /* hide a header in the payload? */ 619 if ((header_size_increment < 0) && (increment_magnitude <= p->len)) { 620 /* increase payload pointer */ 621 p->payload = (u8_t *)p->payload - header_size_increment; 622 } else if ((header_size_increment > 0) && force) { 623 p->payload = (u8_t *)p->payload - header_size_increment; 624 } else { 625 /* cannot expand payload to front (yet!) 626 * bail out unsuccessfully */ 627 return 1; 628 } 629 } else { 630 /* Unknown type */ 631 LWIP_ASSERT("bad pbuf type", 0); 632 return 1; 633 } 634 /* modify pbuf length fields */ 635 p->len += header_size_increment; 636 p->tot_len += header_size_increment; 637 638 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_header: old %p new %p (%"S16_F")\n", 639 (void *)payload, (void *)p->payload, header_size_increment)); 640 641 return 0; 642} 643 644/** 645 * Adjusts the payload pointer to hide or reveal headers in the payload. 646 * 647 * Adjusts the ->payload pointer so that space for a header 648 * (dis)appears in the pbuf payload. 649 * 650 * The ->payload, ->tot_len and ->len fields are adjusted. 651 * 652 * @param p pbuf to change the header size. 653 * @param header_size_increment Number of bytes to increment header size which 654 * increases the size of the pbuf. New space is on the front. 655 * (Using a negative value decreases the header size.) 656 * If hdr_size_inc is 0, this function does nothing and returns successful. 657 * 658 * PBUF_ROM and PBUF_REF type buffers cannot have their sizes increased, so 659 * the call will fail. A check is made that the increase in header size does 660 * not move the payload pointer in front of the start of the buffer. 661 * @return non-zero on failure, zero on success. 
662 * 663 */ 664u8_t 665pbuf_header(struct pbuf *p, s16_t header_size_increment) 666{ 667 return pbuf_header_impl(p, header_size_increment, 0); 668} 669 670/** 671 * Same as pbuf_header but does not check if 'header_size > 0' is allowed. 672 * This is used internally only, to allow PBUF_REF for RX. 673 */ 674u8_t 675pbuf_header_force(struct pbuf *p, s16_t header_size_increment) 676{ 677 return pbuf_header_impl(p, header_size_increment, 1); 678} 679 680/** 681 * @ingroup pbuf 682 * Dereference a pbuf chain or queue and deallocate any no-longer-used 683 * pbufs at the head of this chain or queue. 684 * 685 * Decrements the pbuf reference count. If it reaches zero, the pbuf is 686 * deallocated. 687 * 688 * For a pbuf chain, this is repeated for each pbuf in the chain, 689 * up to the first pbuf which has a non-zero reference count after 690 * decrementing. So, when all reference counts are one, the whole 691 * chain is free'd. 692 * 693 * @param p The pbuf (chain) to be dereferenced. 694 * 695 * @return the number of pbufs that were de-allocated 696 * from the head of the chain. 697 * 698 * @note MUST NOT be called on a packet queue (Not verified to work yet). 699 * @note the reference counter of a pbuf equals the number of pointers 700 * that refer to the pbuf (or into the pbuf). 701 * 702 * @internal examples: 703 * 704 * Assuming existing chains a->b->c with the following reference 705 * counts, calling pbuf_free(a) results in: 706 * 707 * 1->2->3 becomes ...1->3 708 * 3->3->3 becomes 2->3->3 709 * 1->1->2 becomes ......1 710 * 2->1->1 becomes 1->1->1 711 * 1->1->1 becomes ....... 
 *
 */
u8_t
pbuf_free(struct pbuf *p)
{
  u16_t type;
  struct pbuf *q;
  u8_t count;

  if (p == NULL) {
    LWIP_ASSERT("p != NULL", p != NULL);
    /* if assertions are disabled, proceed with debug output */
    LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
                ("pbuf_free(p == NULL) was called.\n"));
    return 0;
  }
  LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free(%p)\n", (void *)p));

  PERF_START;

  LWIP_ASSERT("pbuf_free: sane type",
              p->type == PBUF_RAM || p->type == PBUF_ROM ||
              p->type == PBUF_REF || p->type == PBUF_POOL);

  count = 0;
  /* de-allocate all consecutive pbufs from the head of the chain that
   * obtain a zero reference count after decrementing*/
  while (p != NULL) {
    u16_t ref;
    SYS_ARCH_DECL_PROTECT(old_level);
    /* Since decrementing ref cannot be guaranteed to be a single machine operation
     * we must protect it. We put the new ref into a local variable to prevent
     * further protection. */
    SYS_ARCH_PROTECT(old_level);
    /* all pbufs in a chain are referenced at least once */
    LWIP_ASSERT("pbuf_free: p->ref > 0", p->ref > 0);
    /* decrease reference count (number of pointers to pbuf) */
    ref = --(p->ref);
    SYS_ARCH_UNPROTECT(old_level);
    /* this pbuf is no longer referenced to? */
    if (ref == 0) {
      /* remember next pbuf in chain for next iteration */
      q = p->next;
      LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: deallocating %p\n", (void *)p));
      type = p->type;
#if LWIP_SUPPORT_CUSTOM_PBUF
      /* is this a custom pbuf? its owner supplied the free function */
      if ((p->flags & PBUF_FLAG_IS_CUSTOM) != 0) {
        struct pbuf_custom *pc = (struct pbuf_custom*)p;
        LWIP_ASSERT("pc->custom_free_function != NULL", pc->custom_free_function != NULL);
        pc->custom_free_function(p);
      } else
#endif /* LWIP_SUPPORT_CUSTOM_PBUF */
      {
        /* return the memory to whichever allocator produced it */
        /* is this a pbuf from the pool? */
        if (type == PBUF_POOL) {
          memp_free(MEMP_PBUF_POOL, p);
        /* is this a ROM or RAM referencing pbuf? */
        } else if (type == PBUF_ROM || type == PBUF_REF) {
          memp_free(MEMP_PBUF, p);
        /* type == PBUF_RAM */
        } else {
          mem_free(p);
        }
      }
      count++;
      /* proceed to next pbuf */
      p = q;
    /* p->ref > 0, this pbuf is still referenced to */
    /* (and so the remaining pbufs in chain as well) */
    } else {
      LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: %p has ref %"U16_F", ending here.\n", (void *)p, ref));
      /* stop walking through the chain */
      p = NULL;
    }
  }
  PERF_STOP("pbuf_free");
  /* return number of de-allocated pbufs */
  return count;
}

/**
 * Count number of pbufs in a chain
 *
 * @param p first pbuf of chain
 * @return the number of pbufs in a chain
 */
u16_t
pbuf_clen(const struct pbuf *p)
{
  u16_t len;

  len = 0;
  while (p != NULL) {
    ++len;
    p = p->next;
  }
  return len;
}

/**
 * @ingroup pbuf
 * Increment the reference count of the pbuf.
 *
 * @param p pbuf to increase reference counter of
 *
 */
void
pbuf_ref(struct pbuf *p)
{
  /* pbuf given? */
  if (p != NULL) {
    /* protected increment: may race with pbuf_free() in other contexts */
    SYS_ARCH_INC(p->ref, 1);
    LWIP_ASSERT("pbuf ref overflow", p->ref > 0);
  }
}

/**
 * @ingroup pbuf
 * Concatenate two pbufs (each may be a pbuf chain) and take over
 * the caller's reference of the tail pbuf.
 *
 * @note The caller MAY NOT reference the tail pbuf afterwards.
 * Use pbuf_chain() for that purpose.
836 * 837 * @see pbuf_chain() 838 */ 839void 840pbuf_cat(struct pbuf *h, struct pbuf *t) 841{ 842 struct pbuf *p; 843 844 LWIP_ERROR("(h != NULL) && (t != NULL) (programmer violates API)", 845 ((h != NULL) && (t != NULL)), return;); 846 847 /* proceed to last pbuf of chain */ 848 for (p = h; p->next != NULL; p = p->next) { 849 /* add total length of second chain to all totals of first chain */ 850 p->tot_len += t->tot_len; 851 } 852 /* { p is last pbuf of first h chain, p->next == NULL } */ 853 LWIP_ASSERT("p->tot_len == p->len (of last pbuf in chain)", p->tot_len == p->len); 854 LWIP_ASSERT("p->next == NULL", p->next == NULL); 855 /* add total length of second chain to last pbuf total of first chain */ 856 p->tot_len += t->tot_len; 857 /* chain last pbuf of head (p) with first of tail (t) */ 858 p->next = t; 859 /* p->next now references t, but the caller will drop its reference to t, 860 * so netto there is no change to the reference count of t. 861 */ 862} 863 864/** 865 * @ingroup pbuf 866 * Chain two pbufs (or pbuf chains) together. 867 * 868 * The caller MUST call pbuf_free(t) once it has stopped 869 * using it. Use pbuf_cat() instead if you no longer use t. 870 * 871 * @param h head pbuf (chain) 872 * @param t tail pbuf (chain) 873 * @note The pbufs MUST belong to the same packet. 874 * @note MAY NOT be called on a packet queue. 875 * 876 * The ->tot_len fields of all pbufs of the head chain are adjusted. 877 * The ->next field of the last pbuf of the head chain is adjusted. 878 * The ->ref field of the first pbuf of the tail chain is adjusted. 879 * 880 */ 881void 882pbuf_chain(struct pbuf *h, struct pbuf *t) 883{ 884 pbuf_cat(h, t); 885 /* t is now referenced by h */ 886 pbuf_ref(t); 887 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_chain: %p references %p\n", (void *)h, (void *)t)); 888} 889 890/** 891 * Dechains the first pbuf from its succeeding pbufs in the chain. 892 * 893 * Makes p->tot_len field equal to p->len. 
894 * @param p pbuf to dechain 895 * @return remainder of the pbuf chain, or NULL if it was de-allocated. 896 * @note May not be called on a packet queue. 897 */ 898struct pbuf * 899pbuf_dechain(struct pbuf *p) 900{ 901 struct pbuf *q; 902 u8_t tail_gone = 1; 903 /* tail */ 904 q = p->next; 905 /* pbuf has successor in chain? */ 906 if (q != NULL) { 907 /* assert tot_len invariant: (p->tot_len == p->len + (p->next? p->next->tot_len: 0) */ 908 LWIP_ASSERT("p->tot_len == p->len + q->tot_len", q->tot_len == p->tot_len - p->len); 909 /* enforce invariant if assertion is disabled */ 910 q->tot_len = p->tot_len - p->len; 911 /* decouple pbuf from remainder */ 912 p->next = NULL; 913 /* total length of pbuf p is its own length only */ 914 p->tot_len = p->len; 915 /* q is no longer referenced by p, free it */ 916 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_dechain: unreferencing %p\n", (void *)q)); 917 tail_gone = pbuf_free(q); 918 if (tail_gone > 0) { 919 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, 920 ("pbuf_dechain: deallocated %p (as it is no longer referenced)\n", (void *)q)); 921 } 922 /* return remaining tail or NULL if deallocated */ 923 } 924 /* assert tot_len invariant: (p->tot_len == p->len + (p->next? p->next->tot_len: 0) */ 925 LWIP_ASSERT("p->tot_len == p->len", p->tot_len == p->len); 926 return ((tail_gone > 0) ? NULL : q); 927} 928 929/** 930 * @ingroup pbuf 931 * Create PBUF_RAM copies of pbufs. 932 * 933 * Used to queue packets on behalf of the lwIP stack, such as 934 * ARP based queueing. 935 * 936 * @note You MUST explicitly use p = pbuf_take(p); 937 * 938 * @note Only one packet is copied, no packet queue! 
 *
 * @param p_to pbuf destination of the copy
 * @param p_from pbuf source of the copy
 *
 * @return ERR_OK if pbuf was copied
 *         ERR_ARG if one of the pbufs is NULL or p_to is not big
 *                  enough to hold p_from
 */
err_t
pbuf_copy(struct pbuf *p_to, const struct pbuf *p_from)
{
  /* current write/read positions inside the current p_to/p_from pbufs */
  u16_t offset_to=0, offset_from=0, len;

  LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_copy(%p, %p)\n",
    (const void*)p_to, (const void*)p_from));

  /* is the target big enough to hold the source? */
  LWIP_ERROR("pbuf_copy: target not big enough to hold source", ((p_to != NULL) &&
             (p_from != NULL) && (p_to->tot_len >= p_from->tot_len)), return ERR_ARG;);

  /* iterate through pbuf chain */
  do
  {
    /* copy one part of the original chain */
    if ((p_to->len - offset_to) >= (p_from->len - offset_from)) {
      /* complete current p_from fits into current p_to */
      len = p_from->len - offset_from;
    } else {
      /* current p_from does not fit into current p_to */
      len = p_to->len - offset_to;
    }
    MEMCPY((u8_t*)p_to->payload + offset_to, (u8_t*)p_from->payload + offset_from, len);
    offset_to += len;
    offset_from += len;
    LWIP_ASSERT("offset_to <= p_to->len", offset_to <= p_to->len);
    LWIP_ASSERT("offset_from <= p_from->len", offset_from <= p_from->len);
    if (offset_from >= p_from->len) {
      /* on to next p_from (if any) */
      offset_from = 0;
      p_from = p_from->next;
    }
    if (offset_to == p_to->len) {
      /* on to next p_to (if any) */
      offset_to = 0;
      p_to = p_to->next;
      /* running out of p_to before p_from is exhausted is an error */
      LWIP_ERROR("p_to != NULL", (p_to != NULL) || (p_from == NULL) , return ERR_ARG;);
    }

    if ((p_from != NULL) && (p_from->len == p_from->tot_len)) {
      /* don't copy more than one packet! */
      LWIP_ERROR("pbuf_copy() does not allow packet queues!",
                 (p_from->next == NULL), return ERR_VAL;);
    }
    if ((p_to != NULL) && (p_to->len == p_to->tot_len)) {
      /* don't copy more than one packet!
       */
      LWIP_ERROR("pbuf_copy() does not allow packet queues!",
                 (p_to->next == NULL), return ERR_VAL;);
    }
  } while (p_from);
  LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_copy: end of chain reached.\n"));
  return ERR_OK;
}

/**
 * @ingroup pbuf
 * Copy (part of) the contents of a packet buffer
 * to an application supplied buffer.
 *
 * @param buf the pbuf from which to copy data
 * @param dataptr the application supplied buffer
 * @param len length of data to copy (dataptr must be big enough). No more
 * than buf->tot_len will be copied, irrespective of len
 * @param offset offset into the packet buffer from where to begin copying len bytes
 * @return the number of bytes copied, or 0 on failure
 */
u16_t
pbuf_copy_partial(const struct pbuf *buf, void *dataptr, u16_t len, u16_t offset)
{
  const struct pbuf *p;
  u16_t left;         /* write position inside dataptr */
  u16_t buf_copy_len; /* bytes to copy from the current pbuf */
  u16_t copied_total = 0;

  LWIP_ERROR("pbuf_copy_partial: invalid buf", (buf != NULL), return 0;);
  LWIP_ERROR("pbuf_copy_partial: invalid dataptr", (dataptr != NULL), return 0;);

  left = 0;

  /* duplicate guard in case LWIP_ERROR is configured not to return */
  if ((buf == NULL) || (dataptr == NULL)) {
    return 0;
  }

  /* Note some systems use byte copy if dataptr or one of the pbuf payload pointers are unaligned. */
  for (p = buf; len != 0 && p != NULL; p = p->next) {
    if ((offset != 0) && (offset >= p->len)) {
      /* don't copy from this buffer -> on to the next */
      offset -= p->len;
    } else {
      /* copy from this buffer. maybe only partially.
       */
      buf_copy_len = p->len - offset;
      if (buf_copy_len > len) {
        buf_copy_len = len;
      }
      /* copy the necessary parts of the buffer */
      MEMCPY(&((char*)dataptr)[left], &((char*)p->payload)[offset], buf_copy_len);
      copied_total += buf_copy_len;
      left += buf_copy_len;
      len -= buf_copy_len;
      /* offset only applies to the first pbuf copied from */
      offset = 0;
    }
  }
  return copied_total;
}

#if LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE
/**
 * This method modifies a 'pbuf chain', so that its total length is
 * smaller than 64K. The remainder of the original pbuf chain is stored
 * in *rest.
 * This function never creates new pbufs, but splits an existing chain
 * in two parts. The tot_len of the modified packet queue will likely be
 * smaller than 64K.
 * 'packet queues' are not supported by this function.
 *
 * @param p the pbuf queue to be split
 * @param rest pointer to store the remainder (after the first 64K)
 */
void pbuf_split_64k(struct pbuf *p, struct pbuf **rest)
{
  *rest = NULL;
  if ((p != NULL) && (p->next != NULL)) {
    u16_t tot_len_front = p->len;
    struct pbuf *i = p;
    struct pbuf *r = p->next;

    /* continue until the total length (summed up as u16_t) overflows */
    while ((r != NULL) && ((u16_t)(tot_len_front + r->len) > tot_len_front)) {
      tot_len_front += r->len;
      i = r;
      r = r->next;
    }
    /* i now points to last packet of the first segment. Set next
       pointer to NULL */
    i->next = NULL;

    if (r != NULL) {
      /* Update the tot_len field in the first part */
      for (i = p; i != NULL; i = i->next) {
        i->tot_len -= r->tot_len;
        LWIP_ASSERT("tot_len/len mismatch in last pbuf",
                    (i->next != NULL) || (i->tot_len == i->len));
      }
      /* propagate the FIN flag so the split-off part still carries it */
      if (p->flags & PBUF_FLAG_TCP_FIN) {
        r->flags |= PBUF_FLAG_TCP_FIN;
      }

      /* tot_len field in rest does not need modifications */
      /* reference counters do not need modifications */
      *rest = r;
    }
  }
}
#endif /* LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */

/* Actual implementation of pbuf_skip() but returning const pointer...
 *
 * Walks the chain until offset in_offset falls inside a pbuf; returns that
 * pbuf (or NULL if the chain is too short) and, via out_offset, the
 * remaining offset inside the returned pbuf.
 */
static const struct pbuf*
pbuf_skip_const(const struct pbuf* in, u16_t in_offset, u16_t* out_offset)
{
  u16_t offset_left = in_offset;
  const struct pbuf* q = in;

  /* get the correct pbuf */
  while ((q != NULL) && (q->len <= offset_left)) {
    offset_left -= q->len;
    q = q->next;
  }
  if (out_offset != NULL) {
    *out_offset = offset_left;
  }
  return q;
}

/**
 * @ingroup pbuf
 * Skip a number of bytes at the start of a pbuf
 *
 * @param in input pbuf
 * @param in_offset offset to skip
 * @param out_offset resulting offset in the returned pbuf
 * @return the pbuf in the queue where the offset is
 */
struct pbuf*
pbuf_skip(struct pbuf* in, u16_t in_offset, u16_t* out_offset)
{
  const struct pbuf* out = pbuf_skip_const(in, in_offset, out_offset);
  return LWIP_CONST_CAST(struct pbuf*, out);
}

/**
 * @ingroup pbuf
 * Copy application supplied data into a pbuf.
 * This function can only be used to copy the equivalent of buf->tot_len data.
 *
 * @param buf pbuf to fill with data
 * @param dataptr application supplied data buffer
 * @param len length of the application supplied data buffer
 *
 * @return ERR_OK if successful, ERR_MEM if the pbuf is not big enough
 */
err_t
pbuf_take(struct pbuf *buf, const void *dataptr, u16_t len)
{
  struct pbuf *p;
  u16_t buf_copy_len;
  u16_t total_copy_len = len;
  u16_t copied_total = 0;

  LWIP_ERROR("pbuf_take: invalid buf", (buf != NULL), return ERR_ARG;);
  LWIP_ERROR("pbuf_take: invalid dataptr", (dataptr != NULL), return ERR_ARG;);
  LWIP_ERROR("pbuf_take: buf not large enough", (buf->tot_len >= len), return ERR_MEM;);

  /* duplicate guard in case LWIP_ERROR is configured not to return */
  if ((buf == NULL) || (dataptr == NULL) || (buf->tot_len < len)) {
    return ERR_ARG;
  }

  /* Note some systems use byte copy if dataptr or one of the pbuf payload pointers are unaligned. */
  for (p = buf; total_copy_len != 0; p = p->next) {
    LWIP_ASSERT("pbuf_take: invalid pbuf", p != NULL);
    buf_copy_len = total_copy_len;
    if (buf_copy_len > p->len) {
      /* this pbuf cannot hold all remaining data */
      buf_copy_len = p->len;
    }
    /* copy the necessary parts of the buffer */
    MEMCPY(p->payload, &((const char*)dataptr)[copied_total], buf_copy_len);
    total_copy_len -= buf_copy_len;
    copied_total += buf_copy_len;
  }
  LWIP_ASSERT("did not copy all data", total_copy_len == 0 && copied_total == len);
  return ERR_OK;
}

/**
 * @ingroup pbuf
 * Same as pbuf_take() but puts data at an offset
 *
 * @param buf pbuf to fill with data
 * @param dataptr application supplied data buffer
 * @param len length of the application supplied data buffer
 * @param offset offset in pbuf where to copy dataptr to
 *
 * @return ERR_OK if successful, ERR_MEM if the pbuf is not big enough
 */
err_t
pbuf_take_at(struct pbuf *buf, const void *dataptr, u16_t len, u16_t offset)
{
  u16_t target_offset;
  /* find the pbuf (and offset inside it) where writing starts */
  struct pbuf* q = pbuf_skip(buf, offset, &target_offset);

  /* return requested data if pbuf is OK */
  if ((q != NULL) && (q->tot_len >= target_offset + len)) {
    u16_t remaining_len = len;
    const u8_t* src_ptr = (const u8_t*)dataptr;
    /* copy the part that goes into the first pbuf */
    u16_t first_copy_len = LWIP_MIN(q->len - target_offset, len);
    MEMCPY(((u8_t*)q->payload) + target_offset, dataptr, first_copy_len);
    remaining_len -= first_copy_len;
    src_ptr += first_copy_len;
    if (remaining_len > 0) {
      /* the rest starts at offset 0 of the next pbuf */
      return pbuf_take(q->next, src_ptr, remaining_len);
    }
    return ERR_OK;
  }
  return ERR_MEM;
}

/**
 * @ingroup pbuf
 * Creates a single pbuf out of a queue of pbufs.
 *
 * @remark: Either the source pbuf 'p' is freed by this function or the original
 *          pbuf 'p' is returned, therefore the caller has to check the result!
 *
 * @param p the source pbuf
 * @param layer pbuf_layer of the new pbuf
 *
 * @return a new, single pbuf (p->next is NULL)
 *         or the old pbuf if allocation fails
 */
struct pbuf*
pbuf_coalesce(struct pbuf *p, pbuf_layer layer)
{
  struct pbuf *q;
  err_t err;
  /* already a single pbuf: nothing to do */
  if (p->next == NULL) {
    return p;
  }
  q = pbuf_alloc(layer, p->tot_len, PBUF_RAM);
  if (q == NULL) {
    /* @todo: what do we do now? */
    return p;
  }
  err = pbuf_copy(q, p);
  LWIP_UNUSED_ARG(err); /* in case of LWIP_NOASSERT */
  LWIP_ASSERT("pbuf_copy failed", err == ERR_OK);
  pbuf_free(p);
  return q;
}

#if LWIP_CHECKSUM_ON_COPY
/**
 * Copies data into a single pbuf (*not* into a pbuf queue!)
and updates 1251 * the checksum while copying 1252 * 1253 * @param p the pbuf to copy data into 1254 * @param start_offset offset of p->payload where to copy the data to 1255 * @param dataptr data to copy into the pbuf 1256 * @param len length of data to copy into the pbuf 1257 * @param chksum pointer to the checksum which is updated 1258 * @return ERR_OK if successful, another error if the data does not fit 1259 * within the (first) pbuf (no pbuf queues!) 1260 */ 1261err_t 1262pbuf_fill_chksum(struct pbuf *p, u16_t start_offset, const void *dataptr, 1263 u16_t len, u16_t *chksum) 1264{ 1265 u32_t acc; 1266 u16_t copy_chksum; 1267 char *dst_ptr; 1268 LWIP_ASSERT("p != NULL", p != NULL); 1269 LWIP_ASSERT("dataptr != NULL", dataptr != NULL); 1270 LWIP_ASSERT("chksum != NULL", chksum != NULL); 1271 LWIP_ASSERT("len != 0", len != 0); 1272 1273 if ((start_offset >= p->len) || (start_offset + len > p->len)) { 1274 return ERR_ARG; 1275 } 1276 1277 dst_ptr = ((char*)p->payload) + start_offset; 1278 copy_chksum = LWIP_CHKSUM_COPY(dst_ptr, dataptr, len); 1279 if ((start_offset & 1) != 0) { 1280 copy_chksum = SWAP_BYTES_IN_WORD(copy_chksum); 1281 } 1282 acc = *chksum; 1283 acc += copy_chksum; 1284 *chksum = FOLD_U32T(acc); 1285 return ERR_OK; 1286} 1287#endif /* LWIP_CHECKSUM_ON_COPY */ 1288 1289/** 1290 * @ingroup pbuf 1291 * Get one byte from the specified position in a pbuf 1292 * WARNING: returns zero for offset >= p->tot_len 1293 * 1294 * @param p pbuf to parse 1295 * @param offset offset into p of the byte to return 1296 * @return byte at an offset into p OR ZERO IF 'offset' >= p->tot_len 1297 */ 1298u8_t 1299pbuf_get_at(const struct pbuf* p, u16_t offset) 1300{ 1301 int ret = pbuf_try_get_at(p, offset); 1302 if (ret >= 0) { 1303 return (u8_t)ret; 1304 } 1305 return 0; 1306} 1307 1308/** 1309 * @ingroup pbuf 1310 * Get one byte from the specified position in a pbuf 1311 * 1312 * @param p pbuf to parse 1313 * @param offset offset into p of the byte to return 1314 * 
@return byte at an offset into p [0..0xFF] OR negative if 'offset' >= p->tot_len
 */
int
pbuf_try_get_at(const struct pbuf* p, u16_t offset)
{
  u16_t q_idx;
  const struct pbuf* q = pbuf_skip_const(p, offset, &q_idx);

  /* return requested data if pbuf is OK */
  if ((q != NULL) && (q->len > q_idx)) {
    return ((u8_t*)q->payload)[q_idx];
  }
  return -1;
}

/**
 * @ingroup pbuf
 * Put one byte to the specified position in a pbuf
 * WARNING: silently ignores offset >= p->tot_len
 *
 * @param p pbuf to fill
 * @param offset offset into p of the byte to write
 * @param data byte to write at an offset into p
 */
void
pbuf_put_at(struct pbuf* p, u16_t offset, u8_t data)
{
  u16_t q_idx;
  struct pbuf* q = pbuf_skip(p, offset, &q_idx);

  /* write requested data if pbuf is OK */
  if ((q != NULL) && (q->len > q_idx)) {
    ((u8_t*)q->payload)[q_idx] = data;
  }
}

/**
 * @ingroup pbuf
 * Compare pbuf contents at specified offset with memory s2, both of length n
 *
 * @param p pbuf to compare
 * @param offset offset into p at which to start comparing
 * @param s2 buffer to compare
 * @param n length of buffer to compare
 * @return zero if equal, nonzero otherwise
 *         (0xffff if p is too short, diffoffset+1 otherwise)
 */
u16_t
pbuf_memcmp(const struct pbuf* p, u16_t offset, const void* s2, u16_t n)
{
  u16_t start = offset;
  const struct pbuf* q = p;
  u16_t i;

  /* pbuf long enough to perform check? */
  if(p->tot_len < (offset + n)) {
    return 0xffff;
  }

  /* get the correct pbuf from chain. We know it succeeds because of p->tot_len check above.
   */
  while ((q != NULL) && (q->len <= start)) {
    start -= q->len;
    q = q->next;
  }

  /* return requested data if pbuf is OK */
  for (i = 0; i < n; i++) {
    /* We know pbuf_get_at() succeeds because of p->tot_len check above. */
    u8_t a = pbuf_get_at(q, start + i);
    u8_t b = ((const u8_t*)s2)[i];
    if (a != b) {
      return i+1;
    }
  }
  return 0;
}

/**
 * @ingroup pbuf
 * Find occurrence of mem (with length mem_len) in pbuf p, starting at offset
 * start_offset.
 *
 * @param p pbuf to search, maximum length is 0xFFFE since 0xFFFF is used as
 *        return value 'not found'
 * @param mem search for the contents of this buffer
 * @param mem_len length of 'mem'
 * @param start_offset offset into p at which to start searching
 * @return 0xFFFF if substr was not found in p or the index where it was found
 */
u16_t
pbuf_memfind(const struct pbuf* p, const void* mem, u16_t mem_len, u16_t start_offset)
{
  u16_t i;
  /* note: may wrap (u16_t) when mem_len > tot_len, but the guard below
     then skips the loop, so max is never used in that case */
  u16_t max = p->tot_len - mem_len;
  if (p->tot_len >= mem_len + start_offset) {
    for (i = start_offset; i <= max; i++) {
      u16_t plus = pbuf_memcmp(p, i, mem, mem_len);
      if (plus == 0) {
        return i;
      }
    }
  }
  return 0xFFFF;
}

/**
 * Find occurrence of substr with length substr_len in pbuf p, start at offset
 * start_offset
 * WARNING: in contrast to strstr(), this one does not stop at the first \0 in
 * the pbuf/source string!
1424 * 1425 * @param p pbuf to search, maximum length is 0xFFFE since 0xFFFF is used as 1426 * return value 'not found' 1427 * @param substr string to search for in p, maximum length is 0xFFFE 1428 * @return 0xFFFF if substr was not found in p or the index where it was found 1429 */ 1430u16_t 1431pbuf_strstr(const struct pbuf* p, const char* substr) 1432{ 1433 size_t substr_len; 1434 if ((substr == NULL) || (substr[0] == 0) || (p->tot_len == 0xFFFF)) { 1435 return 0xFFFF; 1436 } 1437 substr_len = strlen(substr); 1438 if (substr_len >= 0xFFFF) { 1439 return 0xFFFF; 1440 } 1441 return pbuf_memfind(p, substr, (u16_t)substr_len, 0); 1442} 1443