1/** 2 * @file 3 * Packet buffer management 4 * 5 * Packets are built from the pbuf data structure. It supports dynamic 6 * memory allocation for packet contents or can reference externally 7 * managed packet contents both in RAM and ROM. Quick allocation for 8 * incoming packets is provided through pools with fixed sized pbufs. 9 * 10 * A packet may span over multiple pbufs, chained as a singly linked 11 * list. This is called a "pbuf chain". 12 * 13 * Multiple packets may be queued, also using this singly linked list. 14 * This is called a "packet queue". 15 * 16 * So, a packet queue consists of one or more pbuf chains, each of 17 * which consist of one or more pbufs. CURRENTLY, PACKET QUEUES ARE 18 * NOT SUPPORTED!!! Use helper structs to queue multiple packets. 19 * 20 * The differences between a pbuf chain and a packet queue are very 21 * precise but subtle. 22 * 23 * The last pbuf of a packet has a ->tot_len field that equals the 24 * ->len field. It can be found by traversing the list. If the last 25 * pbuf of a packet has a ->next field other than NULL, more packets 26 * are on the queue. 27 * 28 * Therefore, looping through a pbuf of a single packet, has an 29 * loop end condition (tot_len == p->len), NOT (next == NULL). 30 */ 31 32/* 33 * Copyright (c) 2001-2004 Swedish Institute of Computer Science. 34 * All rights reserved. 35 * 36 * Redistribution and use in source and binary forms, with or without modification, 37 * are permitted provided that the following conditions are met: 38 * 39 * 1. Redistributions of source code must retain the above copyright notice, 40 * this list of conditions and the following disclaimer. 41 * 2. Redistributions in binary form must reproduce the above copyright notice, 42 * this list of conditions and the following disclaimer in the documentation 43 * and/or other materials provided with the distribution. 44 * 3. 
The name of the author may not be used to endorse or promote products 45 * derived from this software without specific prior written permission. 46 * 47 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 48 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 49 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT 50 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 51 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT 52 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 53 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 54 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 55 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY 56 * OF SUCH DAMAGE. 57 * 58 * This file is part of the lwIP TCP/IP stack. 59 * 60 * Author: Adam Dunkels <adam@sics.se> 61 * 62 */ 63 64#include <barrelfish/barrelfish.h> 65 66#include "lwip/opt.h" 67 68#include "lwip/stats.h" 69#include "lwip/def.h" 70#include "lwip/mem.h" 71#include "lwip/memp.h" 72#include "lwip/pbuf.h" 73#include "lwip/sys.h" 74#include "arch/perf.h" 75#if TCP_QUEUE_OOSEQ 76#include "lwip/tcp.h" 77#endif 78 79#include <string.h> 80#include <assert.h> 81#include <trace/trace.h> 82#include <trace_definitions/trace_defs.h> 83 84 85/* Enable tracing based on the global settings. */ 86#if CONFIG_TRACE && NETWORK_STACK_TRACE 87#define LWIP_TRACE_MODE 1 88#endif // CONFIG_TRACE && NETWORK_STACK_TRACE 89 90 91 92#define SIZEOF_STRUCT_PBUF LWIP_MEM_ALIGN_SIZE(sizeof(struct pbuf)) 93/* Since the pool is created in memp, PBUF_POOL_BUFSIZE will be automatically 94 aligned there. Therefore, PBUF_POOL_BUFSIZE_ALIGNED can be used here. 
*/ 95#define PBUF_POOL_BUFSIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(PBUF_POOL_BUFSIZE) 96 97#if TCP_QUEUE_OOSEQ 98#define ALLOC_POOL_PBUF(p) do { (p) = alloc_pool_pbuf(); } while (0) 99#else 100#define ALLOC_POOL_PBUF(p) do { (p) = NULL } while (0) 101#endif 102 103 104uint64_t pbuf_free_RX_packets = 0; 105uint64_t pbuf_free_TX_packets = 0; 106uint64_t pbuf_alloc_RX_packets = 0; 107uint64_t pbuf_alloc_TX_packets = 0; 108 109 110#define INSTRUMENT_PBUF_CALLS 1 111 112#if INSTRUMENT_PBUF_CALLS 113 114#define MAX_INSTRUMENTED_CALLS 256 115#define MAX_INSTRUMENTED_STATS 8 116 117struct func_call_list { 118 char func_name[256]; 119 uint64_t stats[MAX_INSTRUMENTED_STATS]; 120}; 121 122typedef struct func_call_list * func_call_list_t; 123 124struct func_call_list pbuf_free_calls[MAX_INSTRUMENTED_CALLS]; 125struct func_call_list pbuf_alloc_calls[MAX_INSTRUMENTED_CALLS]; 126 127static void show_list(func_call_list_t list_name) 128{ 129 int i = 0, j = 0, k = 0; 130 131 for (i = 0; i < MAX_INSTRUMENTED_CALLS; ++i) { 132 if (i == 0 || list_name[i].stats[0] == 1 ) { 133 printf("%4d %-35s ", i, (i == 0)? 
"TOTAL" :list_name[i].func_name); 134 135 for (j = 1; j < MAX_INSTRUMENTED_STATS; ++j ) { 136 bool print_value = false; 137 if (list_name[i].stats[j] == 0) { 138 for (k = j; k < MAX_INSTRUMENTED_STATS; ++k) { 139 if (list_name[i].stats[k] != 0) { 140 print_value = true; 141 break; 142 } 143 } 144 } else { 145 print_value = true; 146 } 147 if (print_value) { 148 printf(" %7"PRIu64" ", list_name[i].stats[j]); 149 } 150 } 151 printf("\n"); 152 } 153 } 154} 155 156void show_pbuf_alloc_stats(void); 157void show_pbuf_alloc_stats(void) 158{ 159 func_call_list_t list_name = pbuf_alloc_calls; 160 pbuf_alloc_TX_packets = list_name[0].stats[3]; 161 pbuf_alloc_RX_packets = list_name[0].stats[4]; 162 printf("\n\n"); 163 printf("%4s %-35s " " %7s " " %7s " " %7s " " %7s \n" , 164 "idx", "pbuf_alloc_stats", "pbfref", "RAM_T", "POOL_S", "RAM_S"); 165 166 show_list(list_name); 167 printf("\n\n"); 168} 169 170void show_pbuf_free_stats(void); 171void show_pbuf_free_stats(void) 172{ 173 func_call_list_t list_name = pbuf_free_calls; 174 pbuf_free_TX_packets = list_name[0].stats[3]; 175 pbuf_free_RX_packets = list_name[0].stats[4]; 176 printf("\n\n"); 177 printf("%4s %-35s " " %7s " " %7s " " %7s " " %7s \n" , 178 "idx", "pbuf_free stats", "POOL_T", "RAM_T", "POOL_S", "RAM_S"); 179 show_list(list_name); 180 printf("\n\n"); 181} 182 183static int locate_key(func_call_list_t list_name, char *key) 184{ 185 int i = 0; 186 for (i = 1; i < MAX_INSTRUMENTED_CALLS; ++i) { 187 if (list_name[i].stats[0] != 1) { 188 continue; 189 } 190 if (strncmp(list_name[i].func_name, key, 256) == 0) { 191 return i; 192 } 193 } 194 return -1; 195} 196 197static int add_key(func_call_list_t list_name, char *key) 198{ 199 int i = 0; 200 for (i = 1; i < MAX_INSTRUMENTED_CALLS; ++i) { 201 if (list_name[i].stats[0] != 1) { 202 list_name[i].stats[0] = 1; 203 strncpy(list_name[i].func_name, key, 256); 204 return i; 205 } 206 } 207 return -1; 208} 209 210static void increment_stats(func_call_list_t list_name, int idx, 
int type) 211{ 212 assert(type < MAX_INSTRUMENTED_STATS); 213 list_name[idx].stats[type] = list_name[idx].stats[type] + 1; 214 215 // Increasing total sum 216 list_name[0].stats[type] = list_name[0].stats[type] + 1; 217} 218 219 220 221static void increment_calls(func_call_list_t list_name, int type, 222 const char *func_name, int line_no) 223{ 224 char key[256]; 225 snprintf(key, sizeof(key), "%s:%d", func_name, line_no); 226 int idx = locate_key(list_name, key); 227 if (idx == -1) { 228 idx = add_key(list_name, key); 229 } 230 if (idx == -1) { 231 USER_PANIC("Function call tracking table is full!, dropping key %s\n", key); 232 abort(); 233 } 234 increment_stats(list_name, idx, type); 235} 236 237#endif // INSTRUMENT_PBUF_CALLS 238 239 240 241 242#if TCP_QUEUE_OOSEQ 243 244static bool try_free_segs(void) 245{ 246 struct tcp_pcb *pcb; 247 for (pcb = tcp_active_pcbs; NULL != pcb; pcb = pcb->next) { 248 if (NULL != pcb->ooseq) { 249 tcp_segs_free(pcb->ooseq); 250 pcb->ooseq = NULL; 251 return true; 252 } 253 } 254 return false; 255} 256 257/** 258 * Attempt to reclaim some memory from queued out-of-sequence TCP segments 259 * if we run out of pool pbufs. It's better to give priority to new packets 260 * if we're running out. 261 * 262 * @return the allocated pbuf. 
 */
static struct pbuf *alloc_pool_pbuf(void)
{
    struct pbuf *p = NULL;
    void *payload = NULL;
    bool try_again = false;
    bool alloc_failed = false;


    /* Try to get both the pbuf header and its payload buffer; on failure,
     * reclaim ooseq TCP segments and retry until nothing more can be freed.
     * Whichever of the two allocations already succeeded is kept across
     * retries. */
    do {
        if (payload == NULL) {
            payload = memp_malloc(MEMP_PBUF_POOL);
            /* pbuf_free() relies on payload buffers being aligned to
             * PBUF_POOL_BUFSIZE to recover the buffer start from an
             * offset payload pointer (NULL trivially passes this check) */
            assert((uintptr_t) payload % PBUF_POOL_BUFSIZE == 0);
        }
        if (p == NULL) {
            p = memp_malloc(MEMP_PBUF);
        }

        alloc_failed = (p == NULL || payload == NULL);

        if (alloc_failed) {
            if (p == NULL) {
                printf("p = memp_malloc(MEMP_PBUF) failed\n");
            }

            if (payload == NULL) {
                printf("payload = memp_malloc(MEMP_PBUF_POOL) failed\n");
            }
            try_again = try_free_segs();
        }
    } while (alloc_failed && try_again);

    /* give back the half-finished allocation, if any, before failing */
    if (alloc_failed) {
        if (p != NULL) {
            memp_free(MEMP_PBUF, p);
            p = NULL;
        }
        if (payload != NULL) {
            memp_free(MEMP_PBUF_POOL, payload);
        }
        //USER_PANIC("alloc_pool_pbuf: failed!");
        printf("alloc_pool_pbuf: failed!\n");
        return NULL;
    }

    p->payload = payload;
    return p;
}
#endif /* TCP_QUEUE_OOSEQ */


/** @return number of pbuf payload buffers currently free in the pool. */
uint16_t free_pbuf_pool_count(void)
{
    return memp_pbuf_peek();
}


#define PBUF_FIXED_SIZE 1
/* FIXME: get rid of PBUF_FIXED_SIZE */

/* Success counters for allocation/free, split by pbuf type.  The
 * *_called variants count invocations of pbuf_free, the others count
 * pbufs actually deallocated. */
uint64_t pbuf_alloc_all = 0;
uint64_t pbuf_alloc_pool = 0;
uint64_t pbuf_alloc_ram = 0;
uint64_t pbuf_free_all = 0;
uint64_t pbuf_free_pool = 0;
uint64_t pbuf_free_ram = 0;
uint64_t pbuf_free_all_called = 0;
uint64_t pbuf_free_pool_called = 0;
uint64_t pbuf_free_ram_called = 0;


uint64_t pbuf_realloc_called = 0;

/**
 * Allocates a pbuf of the given type (possibly a chain for PBUF_POOL type).
 *
 * The actual memory allocated for the pbuf is determined by the
 * layer at which the pbuf is allocated and the requested size
 * (from the size parameter).
 *
 * @param layer flag to define header size
 * @param length size of the pbuf's payload
 * @param type this parameter decides how and where the pbuf
 * should be allocated as follows:
 *
 * - PBUF_RAM: buffer memory for pbuf is allocated as one large
 *             chunk. This includes protocol headers as well.
 * - PBUF_ROM: no buffer memory is allocated for the pbuf, even for
 *             protocol headers. Additional headers must be prepended
 *             by allocating another pbuf and chain in to the front of
 *             the ROM pbuf. It is assumed that the memory used is really
 *             similar to ROM in that it is immutable and will not be
 *             changed. Memory which is dynamic should generally not
 *             be attached to PBUF_ROM pbufs. Use PBUF_REF instead.
 * - PBUF_REF: no buffer memory is allocated for the pbuf, even for
 *             protocol headers. It is assumed that the pbuf is only
 *             being used in a single thread. If the pbuf gets queued,
 *             then pbuf_take should be called to copy the buffer.
 * - PBUF_POOL: the pbuf is allocated as a pbuf chain, with pbufs from
 *              the pbuf pool that is allocated during pbuf_init().
 *
 * @return the allocated pbuf. If multiple pbufs where allocated, this
 * is the first pbuf of a pbuf chain.
 */
//struct pbuf *pbuf_alloc(pbuf_layer layer, u16_t length, pbuf_type type)
struct pbuf *pbuf_alloc_tagged(pbuf_layer layer, u16_t length, pbuf_type type,
                               const char *func_name, int line_no)
{
    struct pbuf *p, *q, *r;
    u16_t offset;
    s32_t rem_len;              /* remaining length */

    LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE | 3,
                ("pbuf_alloc(length=%" U16_F ")\n", length));

#ifdef PBUF_FIXED_SIZE
    // printf("pbuf_alloc(length=%"U16_F")\n", length);
    /* this Barrelfish port uses fixed-size buffers only; a request larger
     * than one buffer cannot be satisfied (no chaining, see below) */
    assert(length <= PBUF_PKT_SIZE); /* It is typically equal to 1514, but adding extra for safety */
#endif // PBUF_FIXED_SIZE
    /* determine header offset */
    p = q = r = NULL;

    /* intentional fallthroughs: each layer reserves its own header room
     * plus that of every layer below it */
    offset = 0;
    switch (layer) {
        case PBUF_TRANSPORT:
            /* add room for transport (often TCP) layer header */
            offset += PBUF_TRANSPORT_HLEN;
            /* FALLTHROUGH */
        case PBUF_IP:
            /* add room for IP layer header */
            offset += PBUF_IP_HLEN;
            /* FALLTHROUGH */
        case PBUF_LINK:
            /* add room for link layer header */
            offset += PBUF_LINK_HLEN;
            break;
        case PBUF_RAW:
            break;
        default:
            LWIP_ASSERT("pbuf_alloc: bad pbuf layer", 0);
            return NULL;
    }

    switch (type) {
        case PBUF_POOL:
            /* allocate head of pbuf chain into p */
            ALLOC_POOL_PBUF(p);
            LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE | 3,
                        ("pbuf_alloc: allocated pbuf %p\n", (void *) p));
            if (p == NULL) {

                printf("\npbuf_alloc(): no more memory available.\n");
                return NULL;
            }
#if INSTRUMENT_PBUF_CALLS
            /* column 3 = successful POOL allocations */
            increment_calls(pbuf_alloc_calls, 3, func_name, line_no);
#endif // INSTRUMENT_PBUF_CALLS
            p->type = type;
            p->next = NULL;
            /* make the payload pointer point 'offset' bytes into pbuf data memory */
            p->payload =
                LWIP_MEM_ALIGN((void *) ((u8_t *) p->payload + offset));
            LWIP_ASSERT("pbuf_alloc: pbuf p->payload properly aligned",
                        ((mem_ptr_t) p->payload % MEM_ALIGNMENT) == 0);
            /* the total length of the pbuf chain is the requested size */
            p->tot_len = length;
#ifdef PBUF_FIXED_SIZE
            p->buff_len = PBUF_PKT_SIZE; // This doesn't matter that much as this if PBUF_POOL
#endif // PBUF_FIXED_SIZE
            /* set the length of the first pbuf in the chain */
            p->len =
                LWIP_MIN(length,
                         PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset));
            LWIP_ASSERT("check p->payload + p->len does not overflow pbuf",
                        (offset + p->len <= PBUF_POOL_BUFSIZE_ALIGNED));
            LWIP_ASSERT("PBUF_POOL_BUFSIZE must be bigger than MEM_ALIGNMENT",
                        (PBUF_POOL_BUFSIZE_ALIGNED -
                         LWIP_MEM_ALIGN_SIZE(offset)) > 0);
            /* set reference count (needed here in case we fail) */
            p->ref = 1;

            // stats about how many successfull allocs have happened
            ++pbuf_alloc_pool;
            ++pbuf_alloc_all;
            /* now allocate the tail of the pbuf chain */

            /* remember first pbuf for linkage in next iteration */
            r = p;
            /* remaining length to be allocated */
            rem_len = length - p->len;
            /* NOTE(review): this assert forbids chains entirely in this
             * port, making the while loop below dead code — kept for the
             * day fragmentation is re-enabled */
            assert(rem_len <= 0);       // making sure that there is no fragmentation
            LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE | 3,
                        ("pbuf_alloc: remaining length to be allocated %" PRIu32
                         "\n", rem_len));
            /* any remaining pbufs to be allocated? */
            while (rem_len > 0) {
                ALLOC_POOL_PBUF(q);
                if (q == NULL) {
                    /* free chain so far allocated */
                    pbuf_free(p);
                    /* bail out unsuccesfully */
                    LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE | 3,
                                ("pbuf_alloc: remaining length alloc failed %"
                                 PRIu32 "\n", rem_len));

                    return NULL;
                }
                q->type = type;
                q->flags = 0;
                q->next = NULL;
                /* make previous pbuf point to this pbuf */
                r->next = q;
                /* set total length of this pbuf and next in chain */
                LWIP_ASSERT("rem_len < max_u16_t", rem_len < 0xffff);
                q->tot_len = (u16_t) rem_len;
                /* this pbuf length is pool size, unless smaller sized tail */
                q->len = LWIP_MIN((u16_t) rem_len, PBUF_POOL_BUFSIZE_ALIGNED);
                LWIP_ASSERT("pbuf_alloc: pbuf q->payload properly aligned",
                            ((mem_ptr_t) q->payload % MEM_ALIGNMENT) == 0);
                LWIP_ASSERT("check p->payload + p->len does not overflow pbuf",
                            ((u8_t *) p->payload + p->len <=
                             (u8_t *) p + SIZEOF_STRUCT_PBUF +
                             PBUF_POOL_BUFSIZE_ALIGNED));
                q->ref = 1;
                /* calculate remaining length to be allocated */
                rem_len -= q->len;
                /* remember this pbuf for linkage in next iteration */
                r = q;
            }
            /* end of chain */
            /*r->next = NULL; */

            break;
        case PBUF_RAM:
            /* If pbuf is to be allocated in RAM, allocate memory for it. */
#ifdef PBUF_FIXED_SIZE
            /* fixed-size port: PBUF_RAM is served from the same pool as
             * PBUF_POOL instead of the heap */
            assert(length + offset <= PBUF_POOL_BUFSIZE_ALIGNED);
            p = alloc_pool_pbuf();
#else // PBUF_FIXED_SIZE
            p =
                (struct pbuf *)
                mem_malloc(LWIP_MEM_ALIGN_SIZE(SIZEOF_STRUCT_PBUF + offset) +
                           LWIP_MEM_ALIGN_SIZE(length));
#endif // PBUF_FIXED_SIZE
            if (p == NULL) {
                return NULL;
            }

#if INSTRUMENT_PBUF_CALLS
            /* column 4 = successful RAM allocations */
            increment_calls(pbuf_alloc_calls, 4, func_name, line_no);

#endif // INSTRUMENT_PBUF_CALLS
            /* Set up internal structure of the pbuf. */
            p->payload = LWIP_MEM_ALIGN((u8_t *) p->payload + offset);
            p->len = p->tot_len = length;
            p->next = NULL;
            p->type = type;
#ifdef PBUF_FIXED_SIZE
            p->buff_len = PBUF_PKT_SIZE;
#else
            p->buff_len = length;
#endif // PBUF_FIXED_SIZE

            ++pbuf_alloc_ram;
            ++pbuf_alloc_all;

            LWIP_ASSERT("pbuf_alloc: pbuf->payload properly aligned",
                        ((mem_ptr_t) p->payload % MEM_ALIGNMENT) == 0);
            break;
            /* pbuf references existing (non-volatile static constant) ROM payload? */
        case PBUF_ROM:
            /* pbuf references existing (externally allocated) RAM payload? */
        case PBUF_REF:
            /* only allocate memory for the pbuf structure */
            p = memp_malloc(MEMP_PBUF);
            if (p == NULL) {
                LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE | 2,
                            ("pbuf_alloc: Could not allocate MEMP_PBUF for PBUF_%s.\n",
                             (type == PBUF_ROM) ? "ROM" : "REF"));
                return NULL;
            }
            /* caller must set this field properly, afterwards */
            p->payload = NULL;
            p->len = p->tot_len = length;
            p->next = NULL;
            p->type = type;
            break;
        default:
            LWIP_ASSERT("pbuf_alloc: erroneous type", 0);
            return NULL;
    }
    /* set reference count */
    p->ref = 1;
    /* set flags */
    p->flags = 0;
    p->nicflags = 0;
/*
  LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE | 3, ("pbuf_alloc(length=%"U16_F") == %p\n", length, (void *)p));
*/
#if LWIP_TRACE_MODE
    trace_event(TRACE_SUBSYS_NNET, TRACE_EVENT_NNET_LWIPPBA2, (uint32_t)(uintptr_t)p);
#endif // LWIP_TRACE_MODE

    return p;
}


/**
 * Shrink a pbuf chain to a desired length.
 *
 * @param p pbuf to shrink.
 * @param new_len desired new length of pbuf chain
 *
 * Depending on the desired length, the first few pbufs in a chain might
 * be skipped and left unchanged. The new last pbuf in the chain will be
 * resized, and any remaining pbufs will be freed.
 *
 * @note If the pbuf is ROM/REF, only the ->tot_len and ->len fields are adjusted.
 * @note May not be called on a packet queue.
 *
 * @note Despite its name, pbuf_realloc cannot grow the size of a pbuf (chain).
 */
void pbuf_realloc(struct pbuf *p, u16_t new_len)
{
    struct pbuf *q;
    u16_t rem_len;              /* remaining length */
    s32_t grow;

    LWIP_ASSERT("pbuf_realloc: p != NULL", p != NULL);
    LWIP_ASSERT("pbuf_realloc: sane p->type", p->type == PBUF_POOL ||
                p->type == PBUF_ROM ||
                p->type == PBUF_RAM || p->type == PBUF_REF);

    /* desired length larger than current length? */
    if (new_len >= p->tot_len) {
        /* enlarging not yet supported */
        return;
    }
#ifdef PBUF_FIXED_SIZE
    /* FIXME: this code should not be used in new buff management. */
    assert(new_len <= PBUF_PKT_SIZE);
#endif // PBUF_FIXED_SIZE
    /* the pbuf chain grows by (new_len - p->tot_len) bytes
     * (which may be negative in case of shrinking) */
    /* always negative here, since new_len < p->tot_len was checked above */
    grow = new_len - p->tot_len;

    /* first, step over any pbufs that should remain in the chain */
    rem_len = new_len;
    q = p;
    /* should this pbuf be kept? */
    while (rem_len > q->len) {
        /* decrease remaining length by pbuf length */
        rem_len -= q->len;
        /* decrease total length indicator */
        LWIP_ASSERT("grow < max_u16_t", grow < 0xffff);
        q->tot_len += (u16_t) grow;
        /* proceed to next pbuf in chain */
        q = q->next;
        LWIP_ASSERT("pbuf_realloc: q != NULL", q != NULL);
    }
    /* we have now reached the new last pbuf (in q) */
    /* rem_len == desired length for pbuf q */

    /* shrink allocated memory for PBUF_RAM */
    /* (other types merely adjust their length fields */
    if ((q->type == PBUF_RAM) && (rem_len != q->len)) {
        /* reallocate and adjust the length of the pbuf that will be split */
        /* size = header bytes up to the payload pointer + rem_len payload;
         * mem_realloc shrinks in place, so q stays valid */
        q = mem_realloc(q, (u8_t *) q->payload - (u8_t *) q + rem_len);
        LWIP_ASSERT("mem_realloc give q == NULL", q != NULL);
    }
    /* adjust length fields for new last pbuf */
    q->len = rem_len;
    q->tot_len = q->len;

    /* any remaining pbufs in chain? */
    if (q->next != NULL) {
        /* free remaining pbufs in chain */
        pbuf_free(q->next);
    }
    /* q is last packet in chain */
    q->next = NULL;
    ++pbuf_realloc_called;
}

/**
 * Adjusts the payload pointer to hide or reveal headers in the payload.
 *
 * Adjusts the ->payload pointer so that space for a header
 * (dis)appears in the pbuf payload.
 *
 * The ->payload, ->tot_len and ->len fields are adjusted.
 *
 * @param p pbuf to change the header size.
 * @param header_size_increment Number of bytes to increment header size which
 * increases the size of the pbuf. New space is on the front.
 * (Using a negative value decreases the header size.)
 * If hdr_size_inc is 0, this function does nothing and returns succesful.
 *
 * PBUF_ROM and PBUF_REF type buffers cannot have their sizes increased, so
 * the call will fail. A check is made that the increase in header size does
 * not move the payload pointer in front of the start of the buffer.
664 * @return non-zero on failure, zero on success. 665 * 666 */ 667u8_t pbuf_header(struct pbuf *p, s16_t header_size_increment) 668{ 669 u16_t type; 670 void *payload; 671 u16_t increment_magnitude; 672 673 LWIP_ASSERT("p != NULL", p != NULL); 674 if ((header_size_increment == 0) || (p == NULL)) 675 return 0; 676 677 if (header_size_increment < 0) { 678 increment_magnitude = -header_size_increment; 679 /* Check that we aren't going to move off the end of the pbuf */ 680 681 if (increment_magnitude > p->len) { 682 printf("ERROR: incr mag %u <= len %d tot_len %u tp %u, fg %u\n", 683 increment_magnitude, p->len, p->tot_len, p->type, p->flags); 684// abort(); 685 return -1; 686 } 687 /* Following error is converted into above if condition with abort */ 688 LWIP_ERROR("increment_magnitude <= p->len", 689 (increment_magnitude <= p->len), return 1; 690 ); 691 692 693 } else { 694 increment_magnitude = header_size_increment; 695#if 0 696 /* Can't assert these as some callers speculatively call 697 pbuf_header() to see if it's OK. Will return 1 below instead. */ 698 /* Check that we've got the correct type of pbuf to work with */ 699 LWIP_ASSERT("p->type == PBUF_RAM || p->type == PBUF_POOL", 700 p->type == PBUF_RAM || p->type == PBUF_POOL); 701 /* Check that we aren't going to move off the beginning of the pbuf */ 702 LWIP_ASSERT 703 ("p->payload - increment_magnitude >= p + SIZEOF_STRUCT_PBUF", 704 (u8_t *) p->payload - increment_magnitude >= 705 (u8_t *) p + SIZEOF_STRUCT_PBUF); 706#endif 707 } 708 709 type = p->type; 710 /* remember current payload pointer */ 711 payload = p->payload; 712 713 /* pbuf types containing payloads? */ 714 if (type == PBUF_RAM || type == PBUF_POOL) { 715 /* set new payload pointer */ 716 p->payload = (u8_t *) p->payload - header_size_increment; 717 /* boundary check fails? 
*/ 718 if ((uintptr_t) payload / PBUF_POOL_BUFSIZE != 719 ((uintptr_t) p->payload) / PBUF_POOL_BUFSIZE) 720 { 721 LWIP_DEBUGF(PBUF_DEBUG | 2, 722 ("pbuf_header: failed as %p < %p (not enough space for new header size)\n", 723 (void *) p->payload, (void *) (p + 1))); 724 /* restore old payload pointer */ 725 p->payload = payload; 726 /* bail out unsuccesfully */ 727 return 1; 728 } 729 /* pbuf types refering to external payloads? */ 730 } else if (type == PBUF_REF || type == PBUF_ROM) { 731 /* hide a header in the payload? */ 732 if ((header_size_increment < 0) && (increment_magnitude <= p->len)) { 733 /* increase payload pointer */ 734 p->payload = (u8_t *) p->payload - header_size_increment; 735 } else { 736 /* cannot expand payload to front (yet!) 737 * bail out unsuccesfully */ 738 return 1; 739 } 740 } else { 741 /* Unknown type */ 742 LWIP_ASSERT("bad pbuf type", 0); 743 return 1; 744 } 745 /* modify pbuf length fields */ 746 p->len += header_size_increment; 747 p->tot_len += header_size_increment; 748 749 LWIP_DEBUGF(PBUF_DEBUG, ("pbuf_header: old %p new %p (%" S16_F ")\n", 750 (void *) payload, (void *) p->payload, 751 header_size_increment)); 752 753 return 0; 754} 755 756/** 757 * Dereference a pbuf chain or queue and deallocate any no-longer-used 758 * pbufs at the head of this chain or queue. 759 * 760 * Decrements the pbuf reference count. If it reaches zero, the pbuf is 761 * deallocated. 762 * 763 * For a pbuf chain, this is repeated for each pbuf in the chain, 764 * up to the first pbuf which has a non-zero reference count after 765 * decrementing. So, when all reference counts are one, the whole 766 * chain is free'd. 767 * 768 * @param p The pbuf (chain) to be dereferenced. 769 * 770 * @return the number of pbufs that were de-allocated 771 * from the head of the chain. 772 * 773 * @note MUST NOT be called on a packet queue (Not verified to work yet). 
774 * @note the reference counter of a pbuf equals the number of pointers 775 * that refer to the pbuf (or into the pbuf). 776 * 777 * @internal examples: 778 * 779 * Assuming existing chains a->b->c with the following reference 780 * counts, calling pbuf_free(a) results in: 781 * 782 * 1->2->3 becomes ...1->3 783 * 3->3->3 becomes 2->3->3 784 * 1->1->2 becomes ......1 785 * 2->1->1 becomes 1->1->1 786 * 1->1->1 becomes ....... 787 * 788 */ 789//u8_t pbuf_free(struct pbuf * p) 790u8_t pbuf_free_tagged(struct pbuf * p, const char *func_name, int line_no) 791{ 792 u16_t type; 793 struct pbuf *q; 794 u8_t count; 795#if LWIP_TRACE_MODE 796 struct pbuf *p_bak = p; 797#endif // LWIP_TRACE_MODE 798 799 if (p == NULL) { 800 LWIP_ASSERT("p != NULL", p != NULL); 801 /* if assertions are disabled, proceed with debug output */ 802 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE | 2, 803 ("pbuf_free(p == NULL) was called.\n")); 804 return 0; 805 } 806 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE | 3, 807 ("pbuf_free(%p)\n", (void *) p)); 808 809 PERF_START; 810 811 LWIP_ASSERT("pbuf_free: sane type", 812 p->type == PBUF_RAM || p->type == PBUF_ROM || 813 p->type == PBUF_REF || p->type == PBUF_POOL); 814 815#if INSTRUMENT_PBUF_CALLS 816 if (p->type == PBUF_POOL) { 817 ++pbuf_free_pool_called; 818 ++pbuf_free_all_called; 819 increment_calls(pbuf_free_calls, 1, func_name, line_no); 820 } 821 if (p->type == PBUF_RAM) { 822 ++pbuf_free_ram_called; 823 ++pbuf_free_all_called; 824 increment_calls(pbuf_free_calls, 2, func_name, line_no); 825 } 826#endif // INSTRUMENT_PBUF_CALLS 827 828 count = 0; 829 /* de-allocate all consecutive pbufs from the head of the chain that 830 * obtain a zero reference count after decrementing*/ 831 while (p != NULL) { 832 u16_t ref; 833 834 SYS_ARCH_DECL_PROTECT(old_level); 835 /* Since decrementing ref cannot be guaranteed to be a single machine operation 836 * we must protect it. We put the new ref into a local variable to prevent 837 * further protection. 
*/ 838 SYS_ARCH_PROTECT(old_level); 839 /* all pbufs in a chain are referenced at least once */ 840 841 /* decrease reference count (number of pointers to pbuf) */ 842 if (p->ref <= 0) { 843 printf("pbuf_free:[%p] p->ref value is %u\n",p, p->ref); 844 printf("callstack: %p %p %p %p\n", 845 __builtin_return_address(0), 846 __builtin_return_address(1), 847 __builtin_return_address(2), 848 __builtin_return_address(3)); 849 /* FIXME: This state represents that something is seriously wrong, 850 * This may lead to releasing the memory twice 851 * or invalid memory accesses in future. */ 852 /* abort(); */ 853 // LWIP_ASSERT("pbuf_free: p->ref > 0", p->ref > 0); 854 ref = 0; 855 } else { 856 ref = --(p->ref); 857 } 858 859// printf("pbuf_free: p->ref value is %u and type %u(%u)\n", 860// p->ref, p->type, PBUF_POOL); 861 SYS_ARCH_UNPROTECT(old_level); 862 /* this pbuf is no longer referenced to? */ 863 if (ref == 0) { 864 /* remember next pbuf in chain for next iteration */ 865 q = p->next; 866 LWIP_DEBUGF(PBUF_DEBUG | 2, 867 ("pbuf_free: deallocating %p\n", (void *) p)); 868 type = p->type; 869 p->nicflags = 0; 870// printf("pbuf_free: deallocating %p\n", (void *) p); 871 /* is this a pbuf from the pool? */ 872 if (type == PBUF_POOL || type == PBUF_RAM) { 873// printf("pbuf_free: %p: PBUF_POOL\n", (void *) p); 874 // Is a bit hacky, but it should work as long as we allocate 875 // objects aligned to their size, is necessary because of the 876 // possible offset of the payload. 
877// assert(type == PBUF_POOL); 878 uintptr_t pl = (uintptr_t) p->payload; 879 pl -= pl % PBUF_POOL_BUFSIZE; 880 881 memp_free(MEMP_PBUF_POOL, (void*) pl); 882 memp_free(MEMP_PBUF, p); 883 ++pbuf_free_all; 884#if INSTRUMENT_PBUF_CALLS 885 if (p->type == PBUF_POOL) { 886 ++pbuf_free_pool; 887 increment_calls(pbuf_free_calls, 3, func_name, line_no); 888 } 889 if (p->type == PBUF_RAM) { 890 ++pbuf_free_ram; 891 increment_calls(pbuf_free_calls, 4, func_name, line_no); 892 } 893#endif // INSTRUMENT_PBUF_CALLS 894 895 /* is this a ROM or RAM referencing pbuf? */ 896 } else if (type == PBUF_ROM || type == PBUF_REF) { 897// printf("pbuf_free: %p: PBUF_ROM || PBUF_REF\n", (void *) p); 898 memp_free(MEMP_PBUF, p); 899 /* type == PBUF_RAM */ 900 } else { 901 assert(!"Should never be executed"); 902 } 903 count++; 904 /* proceed to next pbuf */ 905 p = q; 906 /* p->ref > 0, this pbuf is still referenced to */ 907 /* (and so the remaining pbufs in chain as well) */ 908 } else { 909 LWIP_DEBUGF(PBUF_DEBUG | 2, 910 ("pbuf_free: %p has ref %" U16_F ", ending here.\n", 911 (void *) p, ref)); 912// printf("pbuf_free: %p has ref %" U16_F ", ending here.\n", 913// (void *) p, ref); 914 /* stop walking through the chain */ 915 p = NULL; 916 } 917 } 918 PERF_STOP("pbuf_free"); 919 /* return number of de-allocated pbufs */ 920// printf("pbuf_free: finished with [%p] and count %"PRIu8"\n", p, count); 921 922#if LWIP_TRACE_MODE 923 trace_event(TRACE_SUBSYS_NNET, TRACE_EVENT_NNET_LWIPPBF2, (uint32_t)(uintptr_t)p_bak); 924#endif // LWIP_TRACE_MODE 925 926 return count; 927} 928 929/** 930 * Count number of pbufs in a chain 931 * 932 * @param p first pbuf of chain 933 * @return the number of pbufs in a chain 934 */ 935 936u8_t pbuf_clen(struct pbuf * p) 937{ 938 u8_t len; 939 940 len = 0; 941 while (p != NULL) { 942 ++len; 943 p = p->next; 944 } 945 return len; 946} 947 948/** 949 * Increment the reference count of the pbuf. 
950 * 951 * @param p pbuf to increase reference counter of 952 * 953 */ 954//void pbuf_ref(struct pbuf *p) 955void pbuf_ref_tagged(struct pbuf *p, const char *func_name, int line_no) 956{ 957 SYS_ARCH_DECL_PROTECT(old_level); 958 /* pbuf given? */ 959 if (p != NULL) { 960 SYS_ARCH_PROTECT(old_level); 961 ++(p->ref); 962#if INSTRUMENT_PBUF_CALLS 963 increment_calls(pbuf_alloc_calls, 1, func_name, line_no); 964#endif // INSTRUMENT_PBUF_CALLS 965 SYS_ARCH_UNPROTECT(old_level); 966 } 967} 968 969/** 970 * Concatenate two pbufs (each may be a pbuf chain) and take over 971 * the caller's reference of the tail pbuf. 972 * 973 * @note The caller MAY NOT reference the tail pbuf afterwards. 974 * Use pbuf_chain() for that purpose. 975 * 976 * @see pbuf_chain() 977 */ 978 979void pbuf_cat(struct pbuf *h, struct pbuf *t) 980{ 981 struct pbuf *p; 982 983 LWIP_ERROR("(h != NULL) && (t != NULL) (programmer violates API)", 984 ((h != NULL) && (t != NULL)), return; 985 ); 986 987 /* proceed to last pbuf of chain */ 988 for (p = h; p->next != NULL; p = p->next) { 989 /* add total length of second chain to all totals of first chain */ 990 p->tot_len += t->tot_len; 991 } 992 /* { p is last pbuf of first h chain, p->next == NULL } */ 993 LWIP_ASSERT("p->tot_len == p->len (of last pbuf in chain)", 994 p->tot_len == p->len); 995 LWIP_ASSERT("p->next == NULL", p->next == NULL); 996 /* add total length of second chain to last pbuf total of first chain */ 997 p->tot_len += t->tot_len; 998 /* chain last pbuf of head (p) with first of tail (t) */ 999 p->next = t; 1000 /* p->next now references t, but the caller will drop its reference to t, 1001 * so netto there is no change to the reference count of t. 1002 */ 1003} 1004 1005/** 1006 * Chain two pbufs (or pbuf chains) together. 1007 * 1008 * The caller MUST call pbuf_free(t) once it has stopped 1009 * using it. Use pbuf_cat() instead if you no longer use t. 
 *
 * @param h head pbuf (chain)
 * @param t tail pbuf (chain)
 * @note The pbufs MUST belong to the same packet.
 * @note MAY NOT be called on a packet queue.
 *
 * The ->tot_len fields of all pbufs of the head chain are adjusted.
 * The ->next field of the last pbuf of the head chain is adjusted.
 * The ->ref field of the first pbuf of the tail chain is adjusted.
 *
 */
void pbuf_chain(struct pbuf *h, struct pbuf *t)
{
    /* link t onto the end of h (updates every tot_len in h) */
    pbuf_cat(h, t);
    /* t is now referenced by h, so take an extra reference on behalf of h;
     * the caller keeps its own reference and must pbuf_free(t) later */
    pbuf_ref(t);
    LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_FRESH | 2,
                ("pbuf_chain: %p references %p\n", (void *) h, (void *) t));
}

/**
 * Dechains the first pbuf from its succeeding pbufs in the chain.
 *
 * Makes p->tot_len field equal to p->len.
 * @param p pbuf to dechain
 * @return remainder of the pbuf chain, or NULL if it was de-allocated.
 * @note May not be called on a packet queue.
 */
struct pbuf *pbuf_dechain(struct pbuf *p)
{
    struct pbuf *q;
    u8_t tail_gone = 1;

    /* tail */
    q = p->next;
    /* pbuf has successor in chain? */
    if (q != NULL) {
        /* assert tot_len invariant: (p->tot_len == p->len + (p->next?
           p->next->tot_len: 0) */
        LWIP_ASSERT("p->tot_len == p->len + q->tot_len",
                    q->tot_len == p->tot_len - p->len);
        /* enforce invariant if assertion is disabled */
        q->tot_len = p->tot_len - p->len;
        /* decouple pbuf from remainder */
        p->next = NULL;
        /* total length of pbuf p is its own length only */
        p->tot_len = p->len;
        /* q is no longer referenced by p, free it */
        LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_STATE,
                    ("pbuf_dechain: unreferencing %p\n", (void *) q));
        /* tail_gone > 0 means the tail's refcount hit zero and it was
         * actually deallocated, so q must not be returned to the caller */
        tail_gone = pbuf_free(q);
        if (tail_gone > 0) {
            LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_STATE,
                        ("pbuf_dechain: deallocated %p (as it is no longer referenced)\n",
                         (void *) q));
        }
        /* return remaining tail or NULL if deallocated */
    }
    /* assert tot_len invariant: (p->tot_len == p->len + (p->next? p->next->tot_len: 0) */
    LWIP_ASSERT("p->tot_len == p->len", p->tot_len == p->len);
    return ((tail_gone > 0) ? NULL : q);
}

/**
 *
 * Create PBUF_RAM copies of pbufs.
 *
 * Used to queue packets on behalf of the lwIP stack, such as
 * ARP based queueing.
 *
 * @note You MUST explicitly use p = pbuf_take(p);
 *
 * @note Only one packet is copied, no packet queue!
 *
 * @param p_to pbuf destination of the copy
 * @param p_from pbuf source of the copy
 *
 * @return ERR_OK if pbuf was copied
 *         ERR_ARG if one of the pbufs is NULL or p_to is not big
 *                 enough to hold p_from
 */
err_t pbuf_copy(struct pbuf * p_to, struct pbuf * p_from)
{
    /* offsets track the copy position inside the current pbuf of each
     * chain, since source and destination pbufs need not be equally sized */
    u16_t offset_to = 0, offset_from = 0, len;

    LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE | 3, ("pbuf_copy(%p, %p)\n",
                                                  (void *) p_to,
                                                  (void *) p_from));

    /* is the target big enough to hold the source? */
    LWIP_ERROR("pbuf_copy: target not big enough to hold source",
               ((p_to != NULL) && (p_from != NULL)
                && (p_to->tot_len >= p_from->tot_len)), return ERR_ARG;
    );

    /* iterate through pbuf chain */
    do {
        LWIP_ASSERT("p_to != NULL", p_to != NULL);
        /* copy one part of the original chain */
        if ((p_to->len - offset_to) >= (p_from->len - offset_from)) {
            /* complete current p_from fits into current p_to */
            len = p_from->len - offset_from;
        } else {
            /* current p_from does not fit into current p_to */
            len = p_to->len - offset_to;
        }
        MEMCPY((u8_t *) p_to->payload + offset_to,
               (u8_t *) p_from->payload + offset_from, len);
        offset_to += len;
        offset_from += len;
        LWIP_ASSERT("offset_to <= p_to->len", offset_to <= p_to->len);
        if (offset_to == p_to->len) {
            /* on to next p_to (if any) */
            offset_to = 0;
            p_to = p_to->next;
        }
        LWIP_ASSERT("offset_from <= p_from->len", offset_from <= p_from->len);
        if (offset_from >= p_from->len) {
            /* on to next p_from (if any) */
            offset_from = 0;
            p_from = p_from->next;
        }

        /* len == tot_len marks the last pbuf of a packet; a non-NULL ->next
         * past that point would mean a packet queue, which is unsupported */
        if ((p_from != NULL) && (p_from->len == p_from->tot_len)) {
            /* don't copy more than one packet! */
            LWIP_ERROR("pbuf_copy() does not allow packet queues!\n",
                       (p_from->next == NULL), return ERR_VAL;
            );
        }
        if ((p_to != NULL) && (p_to->len == p_to->tot_len)) {
            /* don't copy more than one packet! */
            LWIP_ERROR("pbuf_copy() does not allow packet queues!\n",
                       (p_to->next == NULL), return ERR_VAL;
            );
        }
    } while (p_from);
    LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE | 1,
                ("pbuf_copy: end of chain reached.\n"));
    return ERR_OK;
}

/**
 * Copy (part of) the contents of a packet buffer
 * to an application supplied buffer.
1153 * 1154 * @param buf the pbuf from which to copy data 1155 * @param dataptr the application supplied buffer 1156 * @param len length of data to copy (dataptr must be big enough). No more 1157 * than buf->tot_len will be copied, irrespective of len 1158 * @param offset offset into the packet buffer from where to begin copying len bytes 1159 * @return the number of bytes copied, or 0 on failure 1160 */ 1161u16_t 1162pbuf_copy_partial(struct pbuf * buf, void *dataptr, u16_t len, u16_t offset) 1163{ 1164 struct pbuf *p; 1165 u16_t left; 1166 u16_t buf_copy_len; 1167 u16_t copied_total = 0; 1168 1169 LWIP_ERROR("pbuf_copy_partial: invalid buf", (buf != NULL), return 0; 1170 ); 1171 LWIP_ERROR("pbuf_copy_partial: invalid dataptr", (dataptr != NULL), 1172 return 0; 1173 ); 1174 1175 left = 0; 1176 1177 if ((buf == NULL) || (dataptr == NULL)) { 1178 return 0; 1179 } 1180 1181 /* Note some systems use byte copy if dataptr or one of the pbuf payload pointers are unaligned. */ 1182 for (p = buf; len != 0 && p != NULL; p = p->next) { 1183 if ((offset != 0) && (offset >= p->len)) { 1184 /* don't copy from this buffer -> on to the next */ 1185 offset -= p->len; 1186 } else { 1187 /* copy from this buffer. maybe only partially. */ 1188 buf_copy_len = p->len - offset; 1189 if (buf_copy_len > len) 1190 buf_copy_len = len; 1191 /* copy the necessary parts of the buffer */ 1192 MEMCPY(&((char *) dataptr)[left], &((char *) p->payload)[offset], 1193 buf_copy_len); 1194 copied_total += buf_copy_len; 1195 left += buf_copy_len; 1196 len -= buf_copy_len; 1197 offset = 0; 1198 } 1199 } 1200 return copied_total; 1201} 1202 1203/** 1204 * Copy application supplied data into a pbuf. 1205 * This function can only be used to copy the equivalent of buf->tot_len data. 
1206 * 1207 * @param buf pbuf to fill with data 1208 * @param dataptr application supplied data buffer 1209 * @param len length of the application supplied data buffer 1210 * 1211 * @return ERR_OK if successful, ERR_MEM if the pbuf is not big enough 1212 */ 1213err_t pbuf_take(struct pbuf * buf, const void *dataptr, u16_t len) 1214{ 1215 struct pbuf *p; 1216 u16_t buf_copy_len; 1217 u16_t total_copy_len = len; 1218 u16_t copied_total = 0; 1219 1220 LWIP_ERROR("pbuf_take: invalid buf", (buf != NULL), return 0; 1221 ); 1222 LWIP_ERROR("pbuf_take: invalid dataptr", (dataptr != NULL), return 0; 1223 ); 1224 1225 if ((buf == NULL) || (dataptr == NULL) || (buf->tot_len < len)) { 1226 return ERR_ARG; 1227 } 1228 1229 /* Note some systems use byte copy if dataptr or one of the pbuf payload pointers are unaligned. */ 1230 for (p = buf; total_copy_len != 0; p = p->next) { 1231 LWIP_ASSERT("pbuf_take: invalid pbuf", p != NULL); 1232 buf_copy_len = total_copy_len; 1233 if (buf_copy_len > p->len) { 1234 /* this pbuf cannot hold all remaining data */ 1235 buf_copy_len = p->len; 1236 } 1237 /* copy the necessary parts of the buffer */ 1238 MEMCPY(p->payload, &((char *) dataptr)[copied_total], buf_copy_len); 1239 total_copy_len -= buf_copy_len; 1240 copied_total += buf_copy_len; 1241 } 1242 LWIP_ASSERT("did not copy all data", total_copy_len == 0 1243 && copied_total == len); 1244 return ERR_OK; 1245} 1246 1247/** 1248 * Creates a single pbuf out of a queue of pbufs. 1249 * 1250 * @remark: The source pbuf 'p' is not freed by this function because that can 1251 * be illegal in some places! 
1252 * 1253 * @param p the source pbuf 1254 * @param layer pbuf_layer of the new pbuf 1255 * 1256 * @return a new, single pbuf (p->next is NULL) 1257 * or the old pbuf if allocation fails 1258 */ 1259struct pbuf *pbuf_coalesce(struct pbuf *p, pbuf_layer layer) 1260{ 1261 struct pbuf *q; 1262 err_t err; 1263 1264 if (p->next == NULL) { 1265 return p; 1266 } 1267 q = pbuf_alloc(layer, p->tot_len, PBUF_RAM); 1268 if (q == NULL) { 1269 /* @todo: what do we do now? */ 1270 return p; 1271 } 1272 err = pbuf_copy(q, p); 1273 LWIP_ASSERT("pbuf_copy failed", err == ERR_OK); 1274 pbuf_free(p); 1275 return q; 1276} 1277