cvmx-atomic.h (210286) | cvmx-atomic.h (215990) |
---|---|
1/***********************license start*************** | 1/***********************license start*************** |
2 * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights 3 * reserved. | 2 * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights 3 * reserved. |
4 * 5 * | 4 * 5 * |
6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are 8 * met: | 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are 8 * met: |
9 * | 9 * |
10 * * Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. | 10 * * Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. |
12 * | 12 * |
13 * * Redistributions in binary form must reproduce the above 14 * copyright notice, this list of conditions and the following 15 * disclaimer in the documentation and/or other materials provided 16 * with the distribution. 17 * 18 * * Neither the name of Cavium Networks nor the names of 19 * its contributors may be used to endorse or promote products 20 * derived from this software without specific prior written 21 * permission. 22 * 23 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" 24 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS 25 * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH 26 * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY 27 * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT 28 * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES 29 * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR 30 * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET 31 * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT 32 * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU. 33 * 34 * 35 * For any questions regarding licensing please contact marketing@caviumnetworks.com 36 * | 13 * * Redistributions in binary form must reproduce the above 14 * copyright notice, this list of conditions and the following 15 * disclaimer in the documentation and/or other materials provided 16 * with the distribution. 17 18 * * Neither the name of Cavium Networks nor the names of 19 * its contributors may be used to endorse or promote products 20 * derived from this software without specific prior written 21 * permission. 22 23 * This Software, including technical data, may be subject to U.S. export control 24 * laws, including the U.S. Export Administration Act and its associated 25 * regulations, and may be subject to export or import regulations in other 26 * countries. 27 28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" 29 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR 30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO 31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR 32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM 33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE, 34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF 35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR 36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR 37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU. |
37 ***********************license end**************************************/ 38 39 40 41 42 43 | 38 ***********************license end**************************************/ 39 40 41 42 43 44 |
|
44/** 45 * @file 46 * 47 * This file provides atomic operations 48 * | 46/** 47 * @file 48 * 49 * This file provides atomic operations 50 * |
49 * <hr>$Revision: 41586 $<hr> | 51 * <hr>$Revision: 49448 $<hr> |
50 * 51 * 52 */ 53 54 55#ifndef __CVMX_ATOMIC_H__ 56#define __CVMX_ATOMIC_H__ 57 --- 290 unchanged lines hidden (view full) --- 348 * @param incr amount to increment memory location by (signed) 349 * 350 * @return Value of memory location before increment 351 */ 352static inline int64_t cvmx_atomic_fetch_and_add64_nosync(int64_t *ptr, int64_t incr) 353{ 354 uint64_t tmp, ret; 355 | 52 * 53 * 54 */ 55 56 57#ifndef __CVMX_ATOMIC_H__ 58#define __CVMX_ATOMIC_H__ 59 --- 290 unchanged lines hidden (view full) --- 350 * @param incr amount to increment memory location by (signed) 351 * 352 * @return Value of memory location before increment 353 */ 354static inline int64_t cvmx_atomic_fetch_and_add64_nosync(int64_t *ptr, int64_t incr) 355{ 356 uint64_t tmp, ret; 357 |
356 __asm__ __volatile__( 357 ".set noreorder \n" 358 "1: lld %[tmp], %[val] \n" 359 " move %[ret], %[tmp] \n" 360 " daddu %[tmp], %[inc] \n" 361 " scd %[tmp], %[val] \n" 362 " beqz %[tmp], 1b \n" 363 " nop \n" 364 ".set reorder \n" 365 : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret) 366 : [inc] "r" (incr) 367 : "memory"); | 358#if !defined(__FreeBSD__) || !defined(_KERNEL) 359 if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) 360 { 361 CVMX_PUSH_OCTEON2; 362 if (__builtin_constant_p(incr) && incr == 1) 363 { 364 __asm__ __volatile__( 365 "laid %0,(%2)" 366 : "=r" (ret), "+m" (ptr) : "r" (ptr) : "memory"); 367 } 368 else if (__builtin_constant_p(incr) && incr == -1) 369 { 370 __asm__ __volatile__( 371 "ladd %0,(%2)" 372 : "=r" (ret), "+m" (ptr) : "r" (ptr) : "memory"); 373 } 374 else 375 { 376 __asm__ __volatile__( 377 "laad %0,(%2),%3" 378 : "=r" (ret), "+m" (ptr) : "r" (ptr), "r" (incr) : "memory"); 379 } 380 CVMX_POP_OCTEON2; 381 } 382 else 383 { 384#endif 385 __asm__ __volatile__( 386 ".set noreorder \n" 387 "1: lld %[tmp], %[val] \n" 388 " move %[ret], %[tmp] \n" 389 " daddu %[tmp], %[inc] \n" 390 " scd %[tmp], %[val] \n" 391 " beqz %[tmp], 1b \n" 392 " nop \n" 393 ".set reorder \n" 394 : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret) 395 : [inc] "r" (incr) 396 : "memory"); 397#if !defined(__FreeBSD__) || !defined(_KERNEL) 398 } 399#endif |
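In the newer revision, cvmx_atomic_fetch_and_add64_nosync gains an Octeon II (CN6XXX) fast path: a compile-time-constant increment of 1 or -1 emits the dedicated atomic increment/decrement instructions (laid/ladd), any other value uses the general atomic-add form (laad), and only older chips fall back to the LLD/SCD retry loop. A minimal caller-side sketch for an Octeon target follows; the counter variable and take_ticket() helper are illustrative, and only cvmx_atomic_fetch_and_add64_nosync comes from this header.

```c
#include <stdint.h>
#include <stdio.h>
#include "cvmx-atomic.h"   /* cvmx_atomic_fetch_and_add64_nosync(), Octeon targets only */

/* Shared counter; the 64-bit atomics require natural (8-byte) alignment. */
static int64_t packet_count;

/* Each caller gets the value the counter held before its own increment,
 * so concurrent cores receive distinct "tickets" without taking a lock.  */
static int64_t take_ticket(void)
{
    return cvmx_atomic_fetch_and_add64_nosync(&packet_count, 1);
}

int main(void)
{
    int64_t before = take_ticket();
    printf("counter was %lld, is now %lld\n",
           (long long)before, (long long)(before + 1));
    return 0;
}
```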
368 369 return (ret); 370} 371 372/** 373 * Atomically adds a signed value to a 64 bit (aligned) memory location, 374 * and returns previous value. 375 * --- 27 unchanged lines hidden (view full) --- 403 * @param incr amount to increment memory location by (signed) 404 * 405 * @return Value of memory location before increment 406 */ 407static inline int32_t cvmx_atomic_fetch_and_add32_nosync(int32_t *ptr, int32_t incr) 408{ 409 uint32_t tmp, ret; 410 | 400 401 return (ret); 402} 403 404/** 405 * Atomically adds a signed value to a 64 bit (aligned) memory location, 406 * and returns previous value. 407 * --- 27 unchanged lines hidden (view full) --- 435 * @param incr amount to increment memory location by (signed) 436 * 437 * @return Value of memory location before increment 438 */ 439static inline int32_t cvmx_atomic_fetch_and_add32_nosync(int32_t *ptr, int32_t incr) 440{ 441 uint32_t tmp, ret; 442 |
411 __asm__ __volatile__( 412 ".set noreorder \n" 413 "1: ll %[tmp], %[val] \n" 414 " move %[ret], %[tmp] \n" 415 " addu %[tmp], %[inc] \n" 416 " sc %[tmp], %[val] \n" 417 " beqz %[tmp], 1b \n" 418 " nop \n" 419 ".set reorder \n" 420 : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret) 421 : [inc] "r" (incr) 422 : "memory"); | 443#if !defined(__FreeBSD__) || !defined(_KERNEL) 444 if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) 445 { 446 CVMX_PUSH_OCTEON2; 447 if (__builtin_constant_p(incr) && incr == 1) 448 { 449 __asm__ __volatile__( 450 "lai %0,(%2)" 451 : "=r" (ret), "+m" (ptr) : "r" (ptr) : "memory"); 452 } 453 else if (__builtin_constant_p(incr) && incr == -1) 454 { 455 __asm__ __volatile__( 456 "lad %0,(%2)" 457 : "=r" (ret), "+m" (ptr) : "r" (ptr) : "memory"); 458 } 459 else 460 { 461 __asm__ __volatile__( 462 "laa %0,(%2),%3" 463 : "=r" (ret), "+m" (ptr) : "r" (ptr), "r" (incr) : "memory"); 464 } 465 CVMX_POP_OCTEON2; 466 } 467 else 468 { 469#endif 470 __asm__ __volatile__( 471 ".set noreorder \n" 472 "1: ll %[tmp], %[val] \n" 473 " move %[ret], %[tmp] \n" 474 " addu %[tmp], %[inc] \n" 475 " sc %[tmp], %[val] \n" 476 " beqz %[tmp], 1b \n" 477 " nop \n" 478 ".set reorder \n" 479 : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret) 480 : [inc] "r" (incr) 481 : "memory"); 482#if !defined(__FreeBSD__) || !defined(_KERNEL) 483 } 484#endif |
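The 32-bit variant applies the same dispatch: __builtin_constant_p lets the compiler prove at compile time that the increment is the literal 1 or -1 and keep only the matching branch (lai or lad), while a runtime increment uses laa. The selection idea itself is portable GNU C; the sketch below demonstrates it on any host compiler and contains nothing Octeon-specific — the DESCRIBE_INCR macro is purely illustrative.

```c
#include <stdio.h>

/* Mirrors the dispatch used above: a provably-constant 1 or -1 selects a
 * special form, anything else falls through to the general add form.     */
#define DESCRIBE_INCR(incr)                                               \
    ((__builtin_constant_p(incr) && (incr) == 1)  ? "increment form" :    \
     (__builtin_constant_p(incr) && (incr) == -1) ? "decrement form" :    \
                                                    "general add form")

int main(void)
{
    int n = 7;
    printf("%s\n", DESCRIBE_INCR(1));    /* literal  1 -> increment form */
    printf("%s\n", DESCRIBE_INCR(-1));   /* literal -1 -> decrement form */
    printf("%s\n", DESCRIBE_INCR(n));    /* not 1 / -1 -> general add    */
    return 0;
}
```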
423 424 return (ret); 425} 426 427/** 428 * Atomically adds a signed value to a 32 bit (aligned) memory location, 429 * and returns previous value. 430 * --- 102 unchanged lines hidden (view full) --- 533 " nor %[msk], 0 \n" 534 "1: lld %[tmp], %[val] \n" 535 " move %[ret], %[tmp] \n" 536 " and %[tmp], %[msk] \n" 537 " scd %[tmp], %[val] \n" 538 " beqz %[tmp], 1b \n" 539 " nop \n" 540 ".set reorder \n" | 485 486 return (ret); 487} 488 489/** 490 * Atomically adds a signed value to a 32 bit (aligned) memory location, 491 * and returns previous value. 492 * --- 102 unchanged lines hidden (view full) --- 595 " nor %[msk], 0 \n" 596 "1: lld %[tmp], %[val] \n" 597 " move %[ret], %[tmp] \n" 598 " and %[tmp], %[msk] \n" 599 " scd %[tmp], %[val] \n" 600 " beqz %[tmp], 1b \n" 601 " nop \n" 602 ".set reorder \n" |
541 : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret) 542 : [msk] "r" (mask) 543 : "memory"); | 603 : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret), [msk] "+r" (mask) 604 : : "memory"); |
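The only change to the 64-bit bit-clear helper is its constraint list. The asm template executes "nor %[msk], 0", which overwrites the mask register, yet the old code declared mask as a plain input "r", letting the compiler assume the register still held the caller's mask afterwards. The new revision moves it into the output list as "+r" (read/write), the correct way to declare an operand the template modifies. A self-contained, MIPS-only sketch of the pattern follows; the invert_mask helper is illustrative.

```c
#include <stdint.h>

/* MIPS GNU C inline asm sketch: an operand the template writes must be
 * declared "+r" (read/write); a plain "r" input may be cached and reused
 * by the compiler even though the asm has already clobbered it.          */
static inline uint64_t invert_mask(uint64_t mask)
{
    __asm__ __volatile__(
        "nor %[msk], %[msk], $0 \n"      /* mask = ~mask */
        : [msk] "+r" (mask));            /* modified in place by the asm */
    return mask;
}
```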
544 545 return (ret); 546} 547 548/** 549 * Atomically clear bits in a 32 bit (aligned) memory location, 550 * and returns previous value. 551 * --- 15 unchanged lines hidden (view full) --- 567 " nor %[msk], 0 \n" 568 "1: ll %[tmp], %[val] \n" 569 " move %[ret], %[tmp] \n" 570 " and %[tmp], %[msk] \n" 571 " sc %[tmp], %[val] \n" 572 " beqz %[tmp], 1b \n" 573 " nop \n" 574 ".set reorder \n" | 605 606 return (ret); 607} 608 609/** 610 * Atomically clear bits in a 32 bit (aligned) memory location, 611 * and returns previous value. 612 * --- 15 unchanged lines hidden (view full) --- 628 " nor %[msk], 0 \n" 629 "1: ll %[tmp], %[val] \n" 630 " move %[ret], %[tmp] \n" 631 " and %[tmp], %[msk] \n" 632 " sc %[tmp], %[val] \n" 633 " beqz %[tmp], 1b \n" 634 " nop \n" 635 ".set reorder \n" |
575 : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret) 576 : [msk] "r" (mask) 577 : "memory"); | 636 : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret), [msk] "+r" (mask) 637 : : "memory"); |
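The 32-bit bit-clear helper receives the identical constraint fix. For readers skimming the asm, the operation both loops implement is a fetch-and-clear: the location is ANDed with the complement of the mask and the value it held beforehand is returned. The plain-C model below documents those semantics only; it is deliberately not atomic and the model_* name is not part of the header.

```c
#include <stdint.h>

/* Non-atomic model of the ll/sc loops above: clear the bits in `mask`
 * and hand back the previous contents of the location.                 */
static uint32_t model_fetch_and_bclr32(uint32_t *ptr, uint32_t mask)
{
    uint32_t old = *ptr;
    *ptr = old & ~mask;      /* "nor msk, 0" followed by "and" in the real loop */
    return old;
}
```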
578 579 return (ret); 580} 581 582/** 583 * Atomically swaps value in 64 bit (aligned) memory location, 584 * and returns previous value. 585 * --- 5 unchanged lines hidden (view full) --- 591 * @param new_val new value to write 592 * 593 * @return Value of memory location before swap operation 594 */ 595static inline uint64_t cvmx_atomic_swap64_nosync(uint64_t *ptr, uint64_t new_val) 596{ 597 uint64_t tmp, ret; 598 | 638 639 return (ret); 640} 641 642/** 643 * Atomically swaps value in 64 bit (aligned) memory location, 644 * and returns previous value. 645 * --- 5 unchanged lines hidden (view full) --- 651 * @param new_val new value to write 652 * 653 * @return Value of memory location before swap operation 654 */ 655static inline uint64_t cvmx_atomic_swap64_nosync(uint64_t *ptr, uint64_t new_val) 656{ 657 uint64_t tmp, ret; 658 |
599 __asm__ __volatile__( 600 ".set noreorder \n" 601 "1: lld %[ret], %[val] \n" 602 " move %[tmp], %[new_val] \n" 603 " scd %[tmp], %[val] \n" 604 " beqz %[tmp], 1b \n" 605 " nop \n" 606 ".set reorder \n" 607 : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret) 608 : [new_val] "r" (new_val) 609 : "memory"); | 659#if !defined(__FreeBSD__) || !defined(_KERNEL) 660 if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) 661 { 662 CVMX_PUSH_OCTEON2; 663 if (__builtin_constant_p(new_val) && new_val == 0) 664 { 665 __asm__ __volatile__( 666 "lacd %0,(%1)" 667 : "=r" (ret) : "r" (ptr) : "memory"); 668 } 669 else if (__builtin_constant_p(new_val) && new_val == ~0ull) 670 { 671 __asm__ __volatile__( 672 "lasd %0,(%1)" 673 : "=r" (ret) : "r" (ptr) : "memory"); 674 } 675 else 676 { 677 __asm__ __volatile__( 678 "lawd %0,(%1),%2" 679 : "=r" (ret) : "r" (ptr), "r" (new_val) : "memory"); 680 } 681 CVMX_POP_OCTEON2; 682 } 683 else 684 { 685#endif 686 __asm__ __volatile__( 687 ".set noreorder \n" 688 "1: lld %[ret], %[val] \n" 689 " move %[tmp], %[new_val] \n" 690 " scd %[tmp], %[val] \n" 691 " beqz %[tmp], 1b \n" 692 " nop \n" 693 ".set reorder \n" 694 : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret) 695 : [new_val] "r" (new_val) 696 : "memory"); 697#if !defined(__FreeBSD__) || !defined(_KERNEL) 698 } 699#endif |
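cvmx_atomic_swap64_nosync gets the same treatment as the adders: on CN6XXX a constant 0 becomes lacd (swap-with-zero), a constant of all ones becomes lasd (swap-with-ones), and any other value uses lawd, with the LLD/SCD loop kept for older parts. Swapping in a constant 0 is a common hand-off idiom; the mailbox example below is hypothetical, and only the swap call itself comes from this header.

```c
#include <stdint.h>
#include "cvmx-atomic.h"   /* cvmx_atomic_swap64_nosync(), Octeon targets only */

/* Hypothetical single-slot mailbox: a producer stores a token, a consumer
 * claims it by swapping the slot back to 0.  The constant 0 here is exactly
 * the case the CN6XXX path turns into a single "lacd" instruction.         */
static uint64_t mailbox;

static uint64_t claim_token(void)
{
    return cvmx_atomic_swap64_nosync(&mailbox, 0);   /* old value; slot is now 0 */
}
```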
610 611 return (ret); 612} 613 614/** 615 * Atomically swaps value in 32 bit (aligned) memory location, 616 * and returns previous value. 617 * --- 5 unchanged lines hidden (view full) --- 623 * @param new_val new value to write 624 * 625 * @return Value of memory location before swap operation 626 */ 627static inline uint32_t cvmx_atomic_swap32_nosync(uint32_t *ptr, uint32_t new_val) 628{ 629 uint32_t tmp, ret; 630 | 700 701 return (ret); 702} 703 704/** 705 * Atomically swaps value in 32 bit (aligned) memory location, 706 * and returns previous value. 707 * --- 5 unchanged lines hidden (view full) --- 713 * @param new_val new value to write 714 * 715 * @return Value of memory location before swap operation 716 */ 717static inline uint32_t cvmx_atomic_swap32_nosync(uint32_t *ptr, uint32_t new_val) 718{ 719 uint32_t tmp, ret; 720 |
631 __asm__ __volatile__( 632 ".set noreorder \n" 633 "1: ll %[ret], %[val] \n" 634 " move %[tmp], %[new_val] \n" 635 " sc %[tmp], %[val] \n" 636 " beqz %[tmp], 1b \n" 637 " nop \n" 638 ".set reorder \n" 639 : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret) 640 : [new_val] "r" (new_val) 641 : "memory"); | 721#if !defined(__FreeBSD__) || !defined(_KERNEL) 722 if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) 723 { 724 CVMX_PUSH_OCTEON2; 725 if (__builtin_constant_p(new_val) && new_val == 0) 726 { 727 __asm__ __volatile__( 728 "lac %0,(%1)" 729 : "=r" (ret) : "r" (ptr) : "memory"); 730 } 731 else if (__builtin_constant_p(new_val) && new_val == ~0u) 732 { 733 __asm__ __volatile__( 734 "las %0,(%1)" 735 : "=r" (ret) : "r" (ptr) : "memory"); 736 } 737 else 738 { 739 __asm__ __volatile__( 740 "law %0,(%1),%2" 741 : "=r" (ret) : "r" (ptr), "r" (new_val) : "memory"); 742 } 743 CVMX_POP_OCTEON2; 744 } 745 else 746 { 747#endif 748 __asm__ __volatile__( 749 ".set noreorder \n" 750 "1: ll %[ret], %[val] \n" 751 " move %[tmp], %[new_val] \n" 752 " sc %[tmp], %[val] \n" 753 " beqz %[tmp], 1b \n" 754 " nop \n" 755 ".set reorder \n" 756 : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret) 757 : [new_val] "r" (new_val) 758 : "memory"); 759#if !defined(__FreeBSD__) || !defined(_KERNEL) 760 } 761#endif |
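The 32-bit swap mirrors this with lac/las/law. One caution when building locks on top of it: the _nosync suffix indicates these helpers do not order surrounding memory accesses, so the test-and-set sketch below would still need explicit barriers in real code. busy_lock/busy_unlock and the lock_word variable are illustrative only.

```c
#include <stdint.h>
#include "cvmx-atomic.h"   /* cvmx_atomic_swap32_nosync(), Octeon targets only */

static uint32_t lock_word;   /* 0 = free, 1 = held */

/* Test-and-set: swapping in 1 returns the previous value, so a result of 0
 * means this caller took the lock.  No memory ordering is implied here.    */
static void busy_lock(void)
{
    while (cvmx_atomic_swap32_nosync(&lock_word, 1) != 0)
        ;   /* spin while another core holds it */
}

static void busy_unlock(void)
{
    cvmx_atomic_swap32_nosync(&lock_word, 0);
}
```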
642 643 return (ret); 644} 645 646/** 647 * This atomic operation is now named cvmx_atomic_compare_and_store32_nosync 648 * and the (deprecated) macro is provided for backward compatibility. 649 * @deprecated --- 17 unchanged lines hidden --- | 762 763 return (ret); 764} 765 766/** 767 * This atomic operation is now named cvmx_atomic_compare_and_store32_nosync 768 * and the (deprecated) macro is provided for backward compatibility. 769 * @deprecated --- 17 unchanged lines hidden --- |
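The tail of the file (elided from this diff) keeps a deprecated macro aliasing the old name to cvmx_atomic_compare_and_store32_nosync. Assuming the conventional (ptr, old_val, new_val) signature that returns nonzero when the store succeeded — the definition itself is in the hidden lines, so treat this as an assumption to verify against the full header — a typical compare-and-store retry loop looks like the sketch below; update_max is illustrative.

```c
#include <stdint.h>
#include "cvmx-atomic.h"

/* Assumed signature (not visible in this diff excerpt):
 *   cvmx_atomic_compare_and_store32_nosync(ptr, old_val, new_val)
 * returning nonzero iff *ptr still equalled old_val and new_val was stored.
 * Lock-free running maximum built on that assumption.                      */
static void update_max(uint32_t *max_seen, uint32_t sample)
{
    uint32_t cur;
    do {
        cur = *(volatile uint32_t *)max_seen;   /* fresh read on every retry */
        if (sample <= cur)
            return;                             /* nothing to update         */
    } while (!cvmx_atomic_compare_and_store32_nosync(max_seen, cur, sample));
}
```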