/* Modified by Broadcom Corp. Portions Copyright (c) Broadcom Corp, 2012. */
/*
 *  linux/arch/arm/lib/csumpartial.S
 *
 *  Copyright (C) 1995-1998 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>

		.text
		/*
		 * FIX: the flags string was "a" (alloc only), which leaves
		 * this code section non-executable.  "ax" marks it
		 * alloc + execute and %progbits gives it the normal code
		 * section type, matching the default .text attributes.
		 */
		.section .text.fastpath_host, "ax", %progbits

/*
 * Function: __u32 csum_partial(const char *src, int len, __u32 sum)
 * Params  : r0 = buffer, r1 = len, r2 = checksum
 * Returns : r0 = new checksum (16-bit ones-complement folded by caller)
 *
 * Register roles (aliases below):
 *   buf/r0 = running source pointer, len/r1 = bytes remaining,
 *   sum/r2 = 32-bit accumulator, td0-td3 = load temporaries.
 * The carry flag threads the ones-complement addition between the
 * adcs instructions, so every instruction placed inside a summing
 * sequence must leave C untouched (hence sub, not subs, and the
 * "should not change C" notes below).
 */

buf	.req	r0
len	.req	r1
sum	.req	r2
td0	.req	r3
td1	.req	r4		@ save before use
td2	.req	r5		@ save before use
td3	.req	lr

		/*
		 * len == 0: nothing was summed, return the incoming checksum.
		 * Entered from .Lless8 after csum_partial pushed {buf, lr}:
		 * skip the saved buf word, then pop the return address.
		 */
.Lzero:		mov	r0, sum
		add	sp, sp, #4
		ldr	pc, [sp], #4

		/*
		 * Handle 0 to 7 bytes, with any alignment of source and
		 * destination pointers.  Note that when we get here, C = 0
		 */
.Lless8:	teq	len, #0			@ check for zero count
		beq	.Lzero

		/* we must have at least one byte. */
		tst	buf, #1			@ odd address?
		movne	sum, sum, ror #8	@ rotate so the odd byte sums into the right lane
		ldrneb	td0, [buf], #1
		subne	len, len, #1
		adcnes	sum, sum, td0, put_byte_1

.Lless4:	tst	len, #6
		beq	.Lless8_byte

		/* we are now half-word aligned */

.Lless8_wordlp:
#if __LINUX_ARM_ARCH__ >= 4
		ldrh	td0, [buf], #2
		sub	len, len, #2		@ sub (not subs): keep the carry chain intact
#else
		/* pre-ARMv4 has no ldrh: assemble the halfword from bytes */
		ldrb	td0, [buf], #1
		ldrb	td3, [buf], #1
		sub	len, len, #2
#ifndef __ARMEB__
		orr	td0, td0, td3, lsl #8	@ little-endian byte order
#else
		orr	td0, td3, td0, lsl #8	@ big-endian byte order
#endif
#endif
		adcs	sum, sum, td0
		tst	len, #6
		bne	.Lless8_wordlp

.Lless8_byte:	tst	len, #1			@ odd number of bytes
		ldrneb	td0, [buf], #1		@ include last byte
		adcnes	sum, sum, td0, put_byte_0	@ update checksum

		/*
		 * Fold in the final carry, then undo the byte rotation that
		 * was applied on entry for an odd-aligned buffer.  The saved
		 * original buf pointer is popped from the stack to test that
		 * initial alignment.
		 */
.Ldone:		adc	r0, sum, #0		@ collect up the last carry
		ldr	td0, [sp], #4
		tst	td0, #1			@ check buffer alignment
		movne	r0, r0, ror #8		@ rotate checksum by 8 bits
		ldr	pc, [sp], #4		@ return

		/*
		 * Align buf to a 32-bit boundary, summing the 1-3 skipped
		 * bytes on the way.  Called with blne; returns via lr.
		 * Caller guarantees len >= 8, so the loads are safe.
		 */
.Lnot_aligned:	tst	buf, #1			@ odd address
		ldrneb	td0, [buf], #1		@ make even
		subne	len, len, #1
		adcnes	sum, sum, td0, put_byte_1	@ update checksum

		tst	buf, #2			@ 32-bit aligned?
#if __LINUX_ARM_ARCH__ >= 4
		ldrneh	td0, [buf], #2		@ make 32-bit aligned
		subne	len, len, #2
#else
		ldrneb	td0, [buf], #1
		ldrneb	ip, [buf], #1
		subne	len, len, #2
#ifndef __ARMEB__
		orrne	td0, td0, ip, lsl #8
#else
		orrne	td0, ip, td0, lsl #8
#endif
#endif
		adcnes	sum, sum, td0		@ update checksum
		mov	pc, lr

ENTRY(csum_partial)
		stmfd	sp!, {buf, lr}		@ save original buf for .Ldone's alignment test
		cmp	len, #8			@ Ensure that we have at least
		blo	.Lless8			@ 8 bytes to copy.

		tst	buf, #1
		movne	sum, sum, ror #8	@ odd start: pre-rotate, undone in .Ldone

		adds	sum, sum, #0		@ C = 0
		tst	buf, #3			@ Test destination alignment
		blne	.Lnot_aligned		@ align destination, return here

		/* main loop: sum 32 bytes per iteration while len >= 32 */
1:		bics	ip, len, #31
		beq	3f

		stmfd	sp!, {r4 - r5}		@ td1/td2 are callee-saved
2:		ldmia	buf!, {td0, td1, td2, td3}
		adcs	sum, sum, td0
		adcs	sum, sum, td1
		adcs	sum, sum, td2
		adcs	sum, sum, td3
		ldmia	buf!, {td0, td1, td2, td3}
		adcs	sum, sum, td0
		adcs	sum, sum, td1
		adcs	sum, sum, td2
		adcs	sum, sum, td3
		sub	ip, ip, #32		@ sub/teq/ldm leave C alone for the adcs chain
		teq	ip, #0
		bne	2b
		ldmfd	sp!, {r4 - r5}

		/* sum remaining whole words (4-28 bytes) */
3:		tst	len, #0x1c		@ should not change C
		beq	.Lless4

4:		ldr	td0, [buf], #4
		sub	len, len, #4
		adcs	sum, sum, td0
		tst	len, #0x1c
		bne	4b
		b	.Lless4			@ finish the sub-word tail
ENDPROC(csum_partial)