e_aes_cbc_hmac_sha1.c revision 325337
/* ====================================================================
 * Copyright (c) 2011-2013 The OpenSSL Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    licensing@OpenSSL.org.
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ====================================================================
 */

#include <openssl/opensslconf.h>

#include <stdio.h>
#include <string.h>

#if !defined(OPENSSL_NO_AES) && !defined(OPENSSL_NO_SHA1)

# include <openssl/evp.h>
# include <openssl/objects.h>
# include <openssl/aes.h>
# include <openssl/sha.h>
# include <openssl/rand.h>
# include "modes_lcl.h"
# include "constant_time_locl.h"

# ifndef EVP_CIPH_FLAG_AEAD_CIPHER
#  define EVP_CIPH_FLAG_AEAD_CIPHER 0x200000
#  define EVP_CTRL_AEAD_TLS1_AAD 0x16
#  define EVP_CTRL_AEAD_SET_MAC_KEY 0x17
# endif

# if !defined(EVP_CIPH_FLAG_DEFAULT_ASN1)
#  define EVP_CIPH_FLAG_DEFAULT_ASN1 0
# endif

# if !defined(EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK)
#  define EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK 0
# endif

# define TLS1_1_VERSION 0x0302

typedef struct {
    AES_KEY ks;
    SHA_CTX head, tail, md;
    size_t payload_length;      /* AAD length in decrypt case */
    union {
        unsigned int tls_ver;
        unsigned char tls_aad[16]; /* 13 used */
    } aux;
} EVP_AES_HMAC_SHA1;

# define NO_PAYLOAD_LENGTH ((size_t)-1)

# if defined(AES_ASM) && ( \
        defined(__x86_64) || defined(__x86_64__) || \
        defined(_M_AMD64) || defined(_M_X64) || \
        defined(__INTEL__) )

extern unsigned int OPENSSL_ia32cap_P[];
#  define AESNI_CAPABLE (1<<(57-32))

int aesni_set_encrypt_key(const unsigned char *userKey, int bits,
                          AES_KEY *key);
int aesni_set_decrypt_key(const unsigned char *userKey, int bits,
                          AES_KEY *key);

void aesni_cbc_encrypt(const unsigned char *in,
                       unsigned char *out,
                       size_t length,
                       const AES_KEY *key, unsigned char *ivec, int enc);

void aesni_cbc_sha1_enc(const void *inp, void *out, size_t blocks,
                        const AES_KEY *key, unsigned char iv[16],
                        SHA_CTX *ctx, const void *in0);

void aesni256_cbc_sha1_dec(const void *inp, void *out, size_t blocks,
                           const AES_KEY *key, unsigned char iv[16],
                           SHA_CTX *ctx, const void *in0);

#  define data(ctx) ((EVP_AES_HMAC_SHA1 *)(ctx)->cipher_data)

static int aesni_cbc_hmac_sha1_init_key(EVP_CIPHER_CTX *ctx,
                                        const unsigned char *inkey,
                                        const unsigned char *iv, int enc)
{
    EVP_AES_HMAC_SHA1 *key = data(ctx);
    int ret;

    if (enc)
        ret = aesni_set_encrypt_key(inkey, ctx->key_len * 8, &key->ks);
    else
        ret = aesni_set_decrypt_key(inkey, ctx->key_len * 8, &key->ks);

    SHA1_Init(&key->head);      /* handy when benchmarking */
    key->tail = key->head;
    key->md = key->head;

    key->payload_length = NO_PAYLOAD_LENGTH;

    return ret < 0 ? 0 : 1;
}

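/*
 * STITCHED_CALL selects the "stitched" assembly path below, where
 * aesni_cbc_sha1_enc interleaves AES-CBC encryption and SHA-1 hashing
 * of the same buffer so that both proceed in parallel on one core.
 */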
#  define STITCHED_CALL
#  undef STITCHED_DECRYPT_CALL

#  if !defined(STITCHED_CALL)
#   define aes_off 0
#  endif

void sha1_block_data_order(void *c, const void *p, size_t len);

static void sha1_update(SHA_CTX *c, const void *data, size_t len)
{
    const unsigned char *ptr = data;
    size_t res;

    if ((res = c->num)) {
        res = SHA_CBLOCK - res;
        if (len < res)
            res = len;
        SHA1_Update(c, ptr, res);
        ptr += res;
        len -= res;
    }

    res = len % SHA_CBLOCK;
    len -= res;

    if (len) {
        sha1_block_data_order(c, ptr, len / SHA_CBLOCK);

        ptr += len;
        c->Nh += len >> 29;
        c->Nl += len <<= 3;
        if (c->Nl < (unsigned int)len)
            c->Nh++;
    }

    if (res)
        SHA1_Update(c, ptr, res);
}

#  ifdef SHA1_Update
#   undef SHA1_Update
#  endif
#  define SHA1_Update sha1_update

#  if !defined(OPENSSL_NO_MULTIBLOCK) && EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK

typedef struct {
    unsigned int A[8], B[8], C[8], D[8], E[8];
} SHA1_MB_CTX;
typedef struct {
    const unsigned char *ptr;
    int blocks;
} HASH_DESC;

void sha1_multi_block(SHA1_MB_CTX *, const HASH_DESC *, int);

typedef struct {
    const unsigned char *inp;
    unsigned char *out;
    int blocks;
    u64 iv[2];
} CIPH_DESC;

void aesni_multi_cbc_encrypt(CIPH_DESC *, void *, int);

static size_t tls1_1_multi_block_encrypt(EVP_AES_HMAC_SHA1 *key,
                                         unsigned char *out,
                                         const unsigned char *inp,
                                         size_t inp_len, int n4x)
{                               /* n4x is 1 or 2 */
    HASH_DESC hash_d[8], edges[8];
    CIPH_DESC ciph_d[8];
    unsigned char storage[sizeof(SHA1_MB_CTX) + 32];
    union {
        u64 q[16];
        u32 d[32];
        u8 c[128];
    } blocks[8];
    SHA1_MB_CTX *ctx;
    unsigned int frag, last, packlen, i, x4 = 4 * n4x, minblocks, processed =
        0;
    size_t ret = 0;
    u8 *IVs;
#   if defined(BSWAP8)
    u64 seqnum;
#   endif

    /* ask for IVs in bulk */
    if (RAND_bytes((IVs = blocks[0].c), 16 * x4) <= 0)
        return 0;

    ctx = (SHA1_MB_CTX *) (storage + 32 - ((size_t)storage % 32)); /* align */

    frag = (unsigned int)inp_len >> (1 + n4x);
    last = (unsigned int)inp_len + frag - (frag << (1 + n4x));
    if (last > frag && ((last + 13 + 9) % 64) < (x4 - 1)) {
        frag++;
        last -= x4 - 1;
    }

    packlen = 5 + 16 + ((frag + 20 + 16) & -16);

    /* populate descriptors with pointers and IVs */
    hash_d[0].ptr = inp;
    ciph_d[0].inp = inp;
    /* 5+16 is place for header and explicit IV */
    ciph_d[0].out = out + 5 + 16;
    memcpy(ciph_d[0].out - 16, IVs, 16);
    memcpy(ciph_d[0].iv, IVs, 16);
    IVs += 16;

    for (i = 1; i < x4; i++) {
        ciph_d[i].inp = hash_d[i].ptr = hash_d[i - 1].ptr + frag;
        ciph_d[i].out = ciph_d[i - 1].out + packlen;
        memcpy(ciph_d[i].out - 16, IVs, 16);
        memcpy(ciph_d[i].iv, IVs, 16);
        IVs += 16;
    }

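    /*
     * Each fragment will be MACed as a TLS record of its own, so the
     * 13-byte pseudo-header (sequence number | type | version | length)
     * buffered in key->md.data is rebuilt per fragment below, with the
     * sequence number advanced by the fragment index i.
     */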
#   if defined(BSWAP8)
    memcpy(blocks[0].c, key->md.data, 8);
    seqnum = BSWAP8(blocks[0].q[0]);
#   endif
    for (i = 0; i < x4; i++) {
        unsigned int len = (i == (x4 - 1) ? last : frag);
#   if !defined(BSWAP8)
        unsigned int carry, j;
#   endif

        ctx->A[i] = key->md.h0;
        ctx->B[i] = key->md.h1;
        ctx->C[i] = key->md.h2;
        ctx->D[i] = key->md.h3;
        ctx->E[i] = key->md.h4;

        /* fix seqnum */
#   if defined(BSWAP8)
        blocks[i].q[0] = BSWAP8(seqnum + i);
#   else
        for (carry = i, j = 8; j--;) {
            blocks[i].c[j] = ((u8 *)key->md.data)[j] + carry;
            carry = (blocks[i].c[j] - carry) >> (sizeof(carry) * 8 - 1);
        }
#   endif
        blocks[i].c[8] = ((u8 *)key->md.data)[8];
        blocks[i].c[9] = ((u8 *)key->md.data)[9];
        blocks[i].c[10] = ((u8 *)key->md.data)[10];
        /* fix length */
        blocks[i].c[11] = (u8)(len >> 8);
        blocks[i].c[12] = (u8)(len);

        memcpy(blocks[i].c + 13, hash_d[i].ptr, 64 - 13);
        hash_d[i].ptr += 64 - 13;
        hash_d[i].blocks = (len - (64 - 13)) / 64;

        edges[i].ptr = blocks[i].c;
        edges[i].blocks = 1;
    }

    /* hash 13-byte headers and first 64-13 bytes of inputs */
    sha1_multi_block(ctx, edges, n4x);
    /* hash bulk inputs */
#   define MAXCHUNKSIZE 2048
#   if MAXCHUNKSIZE%64
#    error "MAXCHUNKSIZE is not divisible by 64"
#   elif MAXCHUNKSIZE
    /*
     * goal is to minimize pressure on L1 cache by moving in shorter steps,
     * so that hashed data is still in the cache by the time we encrypt it
     */
    minblocks = ((frag <= last ? frag : last) - (64 - 13)) / 64;
    if (minblocks > MAXCHUNKSIZE / 64) {
        for (i = 0; i < x4; i++) {
            edges[i].ptr = hash_d[i].ptr;
            edges[i].blocks = MAXCHUNKSIZE / 64;
            ciph_d[i].blocks = MAXCHUNKSIZE / 16;
        }
        do {
            sha1_multi_block(ctx, edges, n4x);
            aesni_multi_cbc_encrypt(ciph_d, &key->ks, n4x);

            for (i = 0; i < x4; i++) {
                edges[i].ptr = hash_d[i].ptr += MAXCHUNKSIZE;
                hash_d[i].blocks -= MAXCHUNKSIZE / 64;
                edges[i].blocks = MAXCHUNKSIZE / 64;
                ciph_d[i].inp += MAXCHUNKSIZE;
                ciph_d[i].out += MAXCHUNKSIZE;
                ciph_d[i].blocks = MAXCHUNKSIZE / 16;
                memcpy(ciph_d[i].iv, ciph_d[i].out - 16, 16);
            }
            processed += MAXCHUNKSIZE;
            minblocks -= MAXCHUNKSIZE / 64;
        } while (minblocks > MAXCHUNKSIZE / 64);
    }
#   endif
#   undef MAXCHUNKSIZE
    sha1_multi_block(ctx, hash_d, n4x);

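    /*
     * The remainder of each fragment is shorter than a SHA-1 block, so
     * the tails are laid out by hand with standard MD padding: a 0x80
     * byte right after the data and the 64-bit bit count in the last
     * word of the final block.
     */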
    memset(blocks, 0, sizeof(blocks));
    for (i = 0; i < x4; i++) {
        unsigned int len = (i == (x4 - 1) ? last : frag),
            off = hash_d[i].blocks * 64;
        const unsigned char *ptr = hash_d[i].ptr + off;

        off = (len - processed) - (64 - 13) - off; /* remainder actually */
        memcpy(blocks[i].c, ptr, off);
        blocks[i].c[off] = 0x80;
        len += 64 + 13;         /* 64 is HMAC header */
        len *= 8;               /* convert to bits */
        if (off < (64 - 8)) {
#   ifdef BSWAP4
            blocks[i].d[15] = BSWAP4(len);
#   else
            PUTU32(blocks[i].c + 60, len);
#   endif
            edges[i].blocks = 1;
        } else {
#   ifdef BSWAP4
            blocks[i].d[31] = BSWAP4(len);
#   else
            PUTU32(blocks[i].c + 124, len);
#   endif
            edges[i].blocks = 2;
        }
        edges[i].ptr = blocks[i].c;
    }

    /* hash input tails and finalize */
    sha1_multi_block(ctx, edges, n4x);

    memset(blocks, 0, sizeof(blocks));
    for (i = 0; i < x4; i++) {
#   ifdef BSWAP4
        blocks[i].d[0] = BSWAP4(ctx->A[i]);
        ctx->A[i] = key->tail.h0;
        blocks[i].d[1] = BSWAP4(ctx->B[i]);
        ctx->B[i] = key->tail.h1;
        blocks[i].d[2] = BSWAP4(ctx->C[i]);
        ctx->C[i] = key->tail.h2;
        blocks[i].d[3] = BSWAP4(ctx->D[i]);
        ctx->D[i] = key->tail.h3;
        blocks[i].d[4] = BSWAP4(ctx->E[i]);
        ctx->E[i] = key->tail.h4;
        blocks[i].c[20] = 0x80;
        blocks[i].d[15] = BSWAP4((64 + 20) * 8);
#   else
        PUTU32(blocks[i].c + 0, ctx->A[i]);
        ctx->A[i] = key->tail.h0;
        PUTU32(blocks[i].c + 4, ctx->B[i]);
        ctx->B[i] = key->tail.h1;
        PUTU32(blocks[i].c + 8, ctx->C[i]);
        ctx->C[i] = key->tail.h2;
        PUTU32(blocks[i].c + 12, ctx->D[i]);
        ctx->D[i] = key->tail.h3;
        PUTU32(blocks[i].c + 16, ctx->E[i]);
        ctx->E[i] = key->tail.h4;
        blocks[i].c[20] = 0x80;
        PUTU32(blocks[i].c + 60, (64 + 20) * 8);
#   endif
        edges[i].ptr = blocks[i].c;
        edges[i].blocks = 1;
    }

    /* finalize MACs */
    sha1_multi_block(ctx, edges, n4x);

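    /*
     * Assemble the final records: each one goes out as
     * 5-byte header | 16-byte explicit IV | payload | 20-byte MAC |
     * CBC padding, with the header's length field patched in last.
     */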
    for (i = 0; i < x4; i++) {
        unsigned int len = (i == (x4 - 1) ? last : frag), pad, j;
        unsigned char *out0 = out;

        memcpy(ciph_d[i].out, ciph_d[i].inp, len - processed);
        ciph_d[i].inp = ciph_d[i].out;

        out += 5 + 16 + len;

        /* write MAC */
        PUTU32(out + 0, ctx->A[i]);
        PUTU32(out + 4, ctx->B[i]);
        PUTU32(out + 8, ctx->C[i]);
        PUTU32(out + 12, ctx->D[i]);
        PUTU32(out + 16, ctx->E[i]);
        out += 20;
        len += 20;

        /* pad */
        pad = 15 - len % 16;
        for (j = 0; j <= pad; j++)
            *(out++) = pad;
        len += pad + 1;

        ciph_d[i].blocks = (len - processed) / 16;
        len += 16;              /* account for explicit iv */

        /* arrange header */
        out0[0] = ((u8 *)key->md.data)[8];
        out0[1] = ((u8 *)key->md.data)[9];
        out0[2] = ((u8 *)key->md.data)[10];
        out0[3] = (u8)(len >> 8);
        out0[4] = (u8)(len);

        ret += len + 5;
        inp += frag;
    }

    aesni_multi_cbc_encrypt(ciph_d, &key->ks, n4x);

    OPENSSL_cleanse(blocks, sizeof(blocks));
    OPENSSL_cleanse(ctx, sizeof(*ctx));

    return ret;
}
#  endif

static int aesni_cbc_hmac_sha1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                      const unsigned char *in, size_t len)
{
    EVP_AES_HMAC_SHA1 *key = data(ctx);
    unsigned int l;
    size_t plen = key->payload_length, iv = 0, /* explicit IV in TLS 1.1 and
                                                * later */
        sha_off = 0;
#  if defined(STITCHED_CALL)
    size_t aes_off = 0, blocks;

    sha_off = SHA_CBLOCK - key->md.num;
#  endif

    key->payload_length = NO_PAYLOAD_LENGTH;

    if (len % AES_BLOCK_SIZE)
        return 0;

    if (ctx->encrypt) {
        if (plen == NO_PAYLOAD_LENGTH)
            plen = len;
        else if (len !=
                 ((plen + SHA_DIGEST_LENGTH +
                   AES_BLOCK_SIZE) & -AES_BLOCK_SIZE))
            return 0;
        else if (key->aux.tls_ver >= TLS1_1_VERSION)
            iv = AES_BLOCK_SIZE;

#  if defined(STITCHED_CALL)
        if (plen > (sha_off + iv)
            && (blocks = (plen - (sha_off + iv)) / SHA_CBLOCK)) {
            SHA1_Update(&key->md, in + iv, sha_off);

            aesni_cbc_sha1_enc(in, out, blocks, &key->ks,
                               ctx->iv, &key->md, in + iv + sha_off);
            blocks *= SHA_CBLOCK;
            aes_off += blocks;
            sha_off += blocks;
            key->md.Nh += blocks >> 29;
            key->md.Nl += blocks <<= 3;
            if (key->md.Nl < (unsigned int)blocks)
                key->md.Nh++;
        } else {
            sha_off = 0;
        }
#  endif
        sha_off += iv;
        SHA1_Update(&key->md, in + sha_off, plen - sha_off);

        if (plen != len) {      /* "TLS" mode of operation */
            if (in != out)
                memcpy(out + aes_off, in + aes_off, plen - aes_off);

            /* calculate HMAC and append it to payload */
            SHA1_Final(out + plen, &key->md);
            key->md = key->tail;
            SHA1_Update(&key->md, out + plen, SHA_DIGEST_LENGTH);
            SHA1_Final(out + plen, &key->md);

            /* pad the payload|hmac */
            plen += SHA_DIGEST_LENGTH;
            for (l = len - plen - 1; plen < len; plen++)
                out[plen] = l;
            /* encrypt HMAC|padding at once */
            aesni_cbc_encrypt(out + aes_off, out + aes_off, len - aes_off,
                              &key->ks, ctx->iv, 1);
        } else {
            aesni_cbc_encrypt(in + aes_off, out + aes_off, len - aes_off,
                              &key->ks, ctx->iv, 1);
        }
    } else {
        union {
            unsigned int u[SHA_DIGEST_LENGTH / sizeof(unsigned int)];
            unsigned char c[32 + SHA_DIGEST_LENGTH];
        } mac, *pmac;

        /* arrange cache line alignment */
        pmac = (void *)(((size_t)mac.c + 31) & ((size_t)0 - 32));

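        /*
         * The TLS decrypt path below must take time that is independent
         * of the (secret) padding length to avoid a Lucky Thirteen-style
         * timing oracle: the MAC is recomputed over an amount of data
         * that depends only on the record length, and the correct
         * intermediate hash states are selected with bit masks.
         */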
        if (plen != NO_PAYLOAD_LENGTH) { /* "TLS" mode of operation */
            size_t inp_len, mask, j, i;
            unsigned int res, maxpad, pad, bitlen;
            int ret = 1;
            union {
                unsigned int u[SHA_LBLOCK];
                unsigned char c[SHA_CBLOCK];
            } *data = (void *)key->md.data;
#  if defined(STITCHED_DECRYPT_CALL)
            unsigned char tail_iv[AES_BLOCK_SIZE];
            int stitch = 0;
#  endif

            if ((key->aux.tls_aad[plen - 4] << 8 | key->aux.tls_aad[plen - 3])
                >= TLS1_1_VERSION) {
                if (len < (AES_BLOCK_SIZE + SHA_DIGEST_LENGTH + 1))
                    return 0;

                /* omit explicit iv */
                memcpy(ctx->iv, in, AES_BLOCK_SIZE);
                in += AES_BLOCK_SIZE;
                out += AES_BLOCK_SIZE;
                len -= AES_BLOCK_SIZE;
            } else if (len < (SHA_DIGEST_LENGTH + 1))
                return 0;

#  if defined(STITCHED_DECRYPT_CALL)
            if (len >= 1024 && ctx->key_len == 32) {
                /* decrypt last block */
                memcpy(tail_iv, in + len - 2 * AES_BLOCK_SIZE,
                       AES_BLOCK_SIZE);
                aesni_cbc_encrypt(in + len - AES_BLOCK_SIZE,
                                  out + len - AES_BLOCK_SIZE, AES_BLOCK_SIZE,
                                  &key->ks, tail_iv, 0);
                stitch = 1;
            } else
#  endif
                /* decrypt HMAC|padding at once */
                aesni_cbc_encrypt(in, out, len, &key->ks, ctx->iv, 0);

            /* figure out payload length */
            pad = out[len - 1];
            maxpad = len - (SHA_DIGEST_LENGTH + 1);
            maxpad |= (255 - maxpad) >> (sizeof(maxpad) * 8 - 8);
            maxpad &= 255;

            mask = constant_time_ge(maxpad, pad);
            ret &= mask;
            /*
             * If pad is invalid then we will fail the above test but we must
             * continue anyway because we are in constant time code. However,
             * we'll use the maxpad value instead of the supplied pad to make
             * sure we perform well defined pointer arithmetic.
             */
            pad = constant_time_select(mask, pad, maxpad);

            inp_len = len - (SHA_DIGEST_LENGTH + pad + 1);

            key->aux.tls_aad[plen - 2] = inp_len >> 8;
            key->aux.tls_aad[plen - 1] = inp_len;

            /* calculate HMAC */
            key->md = key->head;
            SHA1_Update(&key->md, key->aux.tls_aad, plen);

#  if defined(STITCHED_DECRYPT_CALL)
            if (stitch) {
                blocks = (len - (256 + 32 + SHA_CBLOCK)) / SHA_CBLOCK;
                aes_off = len - AES_BLOCK_SIZE - blocks * SHA_CBLOCK;
                sha_off = SHA_CBLOCK - plen;

                aesni_cbc_encrypt(in, out, aes_off, &key->ks, ctx->iv, 0);

                SHA1_Update(&key->md, out, sha_off);
                aesni256_cbc_sha1_dec(in + aes_off,
                                      out + aes_off, blocks, &key->ks,
                                      ctx->iv, &key->md, out + sha_off);

                sha_off += blocks *= SHA_CBLOCK;
                out += sha_off;
                len -= sha_off;
                inp_len -= sha_off;

                key->md.Nl += (blocks << 3); /* at most 18 bits */
                memcpy(ctx->iv, tail_iv, AES_BLOCK_SIZE);
            }
#  endif

#  if 1
            len -= SHA_DIGEST_LENGTH; /* amend mac */
            if (len >= (256 + SHA_CBLOCK)) {
                j = (len - (256 + SHA_CBLOCK)) & (0 - SHA_CBLOCK);
                j += SHA_CBLOCK - key->md.num;
                SHA1_Update(&key->md, out, j);
                out += j;
                len -= j;
                inp_len -= j;
            }

            /* but pretend as if we hashed padded payload */
            bitlen = key->md.Nl + (inp_len << 3); /* at most 18 bits */
#   ifdef BSWAP4
            bitlen = BSWAP4(bitlen);
#   else
            mac.c[0] = 0;
            mac.c[1] = (unsigned char)(bitlen >> 16);
            mac.c[2] = (unsigned char)(bitlen >> 8);
            mac.c[3] = (unsigned char)bitlen;
            bitlen = mac.u[0];
#   endif

            pmac->u[0] = 0;
            pmac->u[1] = 0;
            pmac->u[2] = 0;
            pmac->u[3] = 0;
            pmac->u[4] = 0;

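            /*
             * Hash every remaining byte up to the largest possible
             * payload, but mask the per-block results so that only
             * blocks belonging to the actual payload (plus its MD
             * padding) contribute to pmac; running time therefore does
             * not depend on the padding value.
             */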
            for (res = key->md.num, j = 0; j < len; j++) {
                size_t c = out[j];
                mask = (j - inp_len) >> (sizeof(j) * 8 - 8);
                c &= mask;
                c |= 0x80 & ~mask & ~((inp_len - j) >> (sizeof(j) * 8 - 8));
                data->c[res++] = (unsigned char)c;

                if (res != SHA_CBLOCK)
                    continue;

                /* j is not incremented yet */
                mask = 0 - ((inp_len + 7 - j) >> (sizeof(j) * 8 - 1));
                data->u[SHA_LBLOCK - 1] |= bitlen & mask;
                sha1_block_data_order(&key->md, data, 1);
                mask &= 0 - ((j - inp_len - 72) >> (sizeof(j) * 8 - 1));
                pmac->u[0] |= key->md.h0 & mask;
                pmac->u[1] |= key->md.h1 & mask;
                pmac->u[2] |= key->md.h2 & mask;
                pmac->u[3] |= key->md.h3 & mask;
                pmac->u[4] |= key->md.h4 & mask;
                res = 0;
            }

            for (i = res; i < SHA_CBLOCK; i++, j++)
                data->c[i] = 0;

            if (res > SHA_CBLOCK - 8) {
                mask = 0 - ((inp_len + 8 - j) >> (sizeof(j) * 8 - 1));
                data->u[SHA_LBLOCK - 1] |= bitlen & mask;
                sha1_block_data_order(&key->md, data, 1);
                mask &= 0 - ((j - inp_len - 73) >> (sizeof(j) * 8 - 1));
                pmac->u[0] |= key->md.h0 & mask;
                pmac->u[1] |= key->md.h1 & mask;
                pmac->u[2] |= key->md.h2 & mask;
                pmac->u[3] |= key->md.h3 & mask;
                pmac->u[4] |= key->md.h4 & mask;

                memset(data, 0, SHA_CBLOCK);
                j += 64;
            }
            data->u[SHA_LBLOCK - 1] = bitlen;
            sha1_block_data_order(&key->md, data, 1);
            mask = 0 - ((j - inp_len - 73) >> (sizeof(j) * 8 - 1));
            pmac->u[0] |= key->md.h0 & mask;
            pmac->u[1] |= key->md.h1 & mask;
            pmac->u[2] |= key->md.h2 & mask;
            pmac->u[3] |= key->md.h3 & mask;
            pmac->u[4] |= key->md.h4 & mask;

#   ifdef BSWAP4
            pmac->u[0] = BSWAP4(pmac->u[0]);
            pmac->u[1] = BSWAP4(pmac->u[1]);
            pmac->u[2] = BSWAP4(pmac->u[2]);
            pmac->u[3] = BSWAP4(pmac->u[3]);
            pmac->u[4] = BSWAP4(pmac->u[4]);
#   else
            for (i = 0; i < 5; i++) {
                res = pmac->u[i];
                pmac->c[4 * i + 0] = (unsigned char)(res >> 24);
                pmac->c[4 * i + 1] = (unsigned char)(res >> 16);
                pmac->c[4 * i + 2] = (unsigned char)(res >> 8);
                pmac->c[4 * i + 3] = (unsigned char)res;
            }
#   endif
            len += SHA_DIGEST_LENGTH;
#  else
            SHA1_Update(&key->md, out, inp_len);
            res = key->md.num;
            SHA1_Final(pmac->c, &key->md);

            {
                unsigned int inp_blocks, pad_blocks;

                /* but pretend as if we hashed padded payload */
                inp_blocks =
                    1 + ((SHA_CBLOCK - 9 - res) >> (sizeof(res) * 8 - 1));
                res += (unsigned int)(len - inp_len);
                pad_blocks = res / SHA_CBLOCK;
                res %= SHA_CBLOCK;
                pad_blocks +=
                    1 + ((SHA_CBLOCK - 9 - res) >> (sizeof(res) * 8 - 1));
                for (; inp_blocks < pad_blocks; inp_blocks++)
                    sha1_block_data_order(&key->md, data, 1);
            }
#  endif
            key->md = key->tail;
            SHA1_Update(&key->md, pmac->c, SHA_DIGEST_LENGTH);
            SHA1_Final(pmac->c, &key->md);

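            /*
             * Likewise, the MAC/padding check below scans the entire
             * region that could hold them, folding mismatches for both
             * into res under masks, so neither the MAC position nor the
             * padding length shows up in the execution time.
             */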
            /* verify HMAC */
            out += inp_len;
            len -= inp_len;
#  if 1
            {
                unsigned char *p = out + len - 1 - maxpad - SHA_DIGEST_LENGTH;
                size_t off = out - p;
                unsigned int c, cmask;

                maxpad += SHA_DIGEST_LENGTH;
                for (res = 0, i = 0, j = 0; j < maxpad; j++) {
                    c = p[j];
                    cmask =
                        ((int)(j - off - SHA_DIGEST_LENGTH)) >> (sizeof(int) *
                                                                 8 - 1);
                    res |= (c ^ pad) & ~cmask; /* ... and padding */
                    cmask &= ((int)(off - 1 - j)) >> (sizeof(int) * 8 - 1);
                    res |= (c ^ pmac->c[i]) & cmask;
                    i += 1 & cmask;
                }
                maxpad -= SHA_DIGEST_LENGTH;

                res = 0 - ((0 - res) >> (sizeof(res) * 8 - 1));
                ret &= (int)~res;
            }
#  else
            for (res = 0, i = 0; i < SHA_DIGEST_LENGTH; i++)
                res |= out[i] ^ pmac->c[i];
            res = 0 - ((0 - res) >> (sizeof(res) * 8 - 1));
            ret &= (int)~res;

            /* verify padding */
            pad = (pad & ~res) | (maxpad & res);
            out = out + len - 1 - pad;
            for (res = 0, i = 0; i < pad; i++)
                res |= out[i] ^ pad;

            res = (0 - res) >> (sizeof(res) * 8 - 1);
            ret &= (int)~res;
#  endif
            return ret;
        } else {
#  if defined(STITCHED_DECRYPT_CALL)
            if (len >= 1024 && ctx->key_len == 32) {
                if (sha_off %= SHA_CBLOCK)
                    blocks = (len - 3 * SHA_CBLOCK) / SHA_CBLOCK;
                else
                    blocks = (len - 2 * SHA_CBLOCK) / SHA_CBLOCK;
                aes_off = len - blocks * SHA_CBLOCK;

                aesni_cbc_encrypt(in, out, aes_off, &key->ks, ctx->iv, 0);
                SHA1_Update(&key->md, out, sha_off);
                aesni256_cbc_sha1_dec(in + aes_off,
                                      out + aes_off, blocks, &key->ks,
                                      ctx->iv, &key->md, out + sha_off);

                sha_off += blocks *= SHA_CBLOCK;
                out += sha_off;
                len -= sha_off;

                key->md.Nh += blocks >> 29;
                key->md.Nl += blocks <<= 3;
                if (key->md.Nl < (unsigned int)blocks)
                    key->md.Nh++;
            } else
#  endif
                /* decrypt HMAC|padding at once */
                aesni_cbc_encrypt(in, out, len, &key->ks, ctx->iv, 0);

            SHA1_Update(&key->md, out, len);
        }
    }

    return 1;
}

static int aesni_cbc_hmac_sha1_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg,
                                    void *ptr)
{
    EVP_AES_HMAC_SHA1 *key = data(ctx);

    switch (type) {
    case EVP_CTRL_AEAD_SET_MAC_KEY:
        {
            unsigned int i;
            unsigned char hmac_key[64];

            memset(hmac_key, 0, sizeof(hmac_key));

            if (arg > (int)sizeof(hmac_key)) {
                SHA1_Init(&key->head);
                SHA1_Update(&key->head, ptr, arg);
                SHA1_Final(hmac_key, &key->head);
            } else {
                memcpy(hmac_key, ptr, arg);
            }

            for (i = 0; i < sizeof(hmac_key); i++)
                hmac_key[i] ^= 0x36; /* ipad */
            SHA1_Init(&key->head);
            SHA1_Update(&key->head, hmac_key, sizeof(hmac_key));

            for (i = 0; i < sizeof(hmac_key); i++)
                hmac_key[i] ^= 0x36 ^ 0x5c; /* opad */
            SHA1_Init(&key->tail);
            SHA1_Update(&key->tail, hmac_key, sizeof(hmac_key));

            OPENSSL_cleanse(hmac_key, sizeof(hmac_key));

            return 1;
        }
    case EVP_CTRL_AEAD_TLS1_AAD:
        {
            unsigned char *p = ptr;
            unsigned int len;

            if (arg != EVP_AEAD_TLS1_AAD_LEN)
                return -1;

            len = p[arg - 2] << 8 | p[arg - 1];

            if (ctx->encrypt) {
                key->payload_length = len;
                if ((key->aux.tls_ver =
                     p[arg - 4] << 8 | p[arg - 3]) >= TLS1_1_VERSION) {
                    if (len < AES_BLOCK_SIZE)
                        return 0;
                    len -= AES_BLOCK_SIZE;
                    p[arg - 2] = len >> 8;
                    p[arg - 1] = len;
                }
                key->md = key->head;
                SHA1_Update(&key->md, p, arg);

                return (int)(((len + SHA_DIGEST_LENGTH +
                               AES_BLOCK_SIZE) & -AES_BLOCK_SIZE)
                             - len);
            } else {
                memcpy(key->aux.tls_aad, ptr, arg);
                key->payload_length = arg;

                return SHA_DIGEST_LENGTH;
            }
        }
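    /*
     * The multiblock ctrls below are driven in three steps: the caller
     * first queries EVP_CTRL_TLS1_1_MULTIBLOCK_MAX_BUFSIZE for a
     * worst-case output size, then submits an AAD template via
     * EVP_CTRL_TLS1_1_MULTIBLOCK_AAD to size and split the payload, and
     * finally invokes EVP_CTRL_TLS1_1_MULTIBLOCK_ENCRYPT.
     */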
#  if !defined(OPENSSL_NO_MULTIBLOCK) && EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK
    case EVP_CTRL_TLS1_1_MULTIBLOCK_MAX_BUFSIZE:
        return (int)(5 + 16 + ((arg + 20 + 16) & -16));
    case EVP_CTRL_TLS1_1_MULTIBLOCK_AAD:
        {
            EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM *param =
                (EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM *) ptr;
            unsigned int n4x = 1, x4;
            unsigned int frag, last, packlen, inp_len;

            if (arg < (int)sizeof(EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM))
                return -1;

            inp_len = param->inp[11] << 8 | param->inp[12];

            if (ctx->encrypt) {
                if ((param->inp[9] << 8 | param->inp[10]) < TLS1_1_VERSION)
                    return -1;

                if (inp_len) {
                    if (inp_len < 4096)
                        return 0; /* too short */

                    if (inp_len >= 8192 && OPENSSL_ia32cap_P[2] & (1 << 5))
                        n4x = 2; /* AVX2 */
                } else if ((n4x = param->interleave / 4) && n4x <= 2)
                    inp_len = param->len;
                else
                    return -1;

                key->md = key->head;
                SHA1_Update(&key->md, param->inp, 13);

                x4 = 4 * n4x;
                n4x += 1;

                frag = inp_len >> n4x;
                last = inp_len + frag - (frag << n4x);
                if (last > frag && ((last + 13 + 9) % 64 < (x4 - 1))) {
                    frag++;
                    last -= x4 - 1;
                }

                packlen = 5 + 16 + ((frag + 20 + 16) & -16);
                packlen = (packlen << n4x) - packlen;
                packlen += 5 + 16 + ((last + 20 + 16) & -16);

                param->interleave = x4;

                return (int)packlen;
            } else
                return -1;      /* not yet */
        }
    case EVP_CTRL_TLS1_1_MULTIBLOCK_ENCRYPT:
        {
            EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM *param =
                (EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM *) ptr;

            return (int)tls1_1_multi_block_encrypt(key, param->out,
                                                   param->inp, param->len,
                                                   param->interleave / 4);
        }
    case EVP_CTRL_TLS1_1_MULTIBLOCK_DECRYPT:
#  endif
    default:
        return -1;
    }
}

static EVP_CIPHER aesni_128_cbc_hmac_sha1_cipher = {
#  ifdef NID_aes_128_cbc_hmac_sha1
    NID_aes_128_cbc_hmac_sha1,
#  else
    NID_undef,
#  endif
    16, 16, 16,
    EVP_CIPH_CBC_MODE | EVP_CIPH_FLAG_DEFAULT_ASN1 |
        EVP_CIPH_FLAG_AEAD_CIPHER | EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK,
    aesni_cbc_hmac_sha1_init_key,
    aesni_cbc_hmac_sha1_cipher,
    NULL,
    sizeof(EVP_AES_HMAC_SHA1),
    EVP_CIPH_FLAG_DEFAULT_ASN1 ? NULL : EVP_CIPHER_set_asn1_iv,
    EVP_CIPH_FLAG_DEFAULT_ASN1 ? NULL : EVP_CIPHER_get_asn1_iv,
    aesni_cbc_hmac_sha1_ctrl,
    NULL
};

static EVP_CIPHER aesni_256_cbc_hmac_sha1_cipher = {
#  ifdef NID_aes_256_cbc_hmac_sha1
    NID_aes_256_cbc_hmac_sha1,
#  else
    NID_undef,
#  endif
    16, 32, 16,
    EVP_CIPH_CBC_MODE | EVP_CIPH_FLAG_DEFAULT_ASN1 |
        EVP_CIPH_FLAG_AEAD_CIPHER | EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK,
    aesni_cbc_hmac_sha1_init_key,
    aesni_cbc_hmac_sha1_cipher,
    NULL,
    sizeof(EVP_AES_HMAC_SHA1),
    EVP_CIPH_FLAG_DEFAULT_ASN1 ? NULL : EVP_CIPHER_set_asn1_iv,
    EVP_CIPH_FLAG_DEFAULT_ASN1 ? NULL : EVP_CIPHER_get_asn1_iv,
    aesni_cbc_hmac_sha1_ctrl,
    NULL
};

const EVP_CIPHER *EVP_aes_128_cbc_hmac_sha1(void)
{
    return (OPENSSL_ia32cap_P[1] & AESNI_CAPABLE ?
            &aesni_128_cbc_hmac_sha1_cipher : NULL);
}

const EVP_CIPHER *EVP_aes_256_cbc_hmac_sha1(void)
{
    return (OPENSSL_ia32cap_P[1] & AESNI_CAPABLE ?
            &aesni_256_cbc_hmac_sha1_cipher : NULL);
}
# else
const EVP_CIPHER *EVP_aes_128_cbc_hmac_sha1(void)
{
    return NULL;
}

const EVP_CIPHER *EVP_aes_256_cbc_hmac_sha1(void)
{
    return NULL;
}
# endif
#endif
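
/*-
 * Illustrative usage sketch (not part of the original file): a TLS stack
 * drives this cipher roughly as follows.  Variable names are
 * hypothetical; the EVP calls and ctrl constants are the real ones used
 * above.
 *
 *      EVP_CIPHER_CTX c;
 *      unsigned char aad[EVP_AEAD_TLS1_AAD_LEN]; // seq(8)|type(1)|ver(2)|len(2)
 *      int pad;
 *
 *      EVP_CIPHER_CTX_init(&c);
 *      EVP_EncryptInit_ex(&c, EVP_aes_128_cbc_hmac_sha1(), NULL, enc_key, iv);
 *      EVP_CIPHER_CTX_ctrl(&c, EVP_CTRL_AEAD_SET_MAC_KEY, 20, mac_key);
 *      // fill aad[], then learn how many bytes HMAC and CBC padding add
 *      pad = EVP_CIPHER_CTX_ctrl(&c, EVP_CTRL_AEAD_TLS1_AAD,
 *                                EVP_AEAD_TLS1_AAD_LEN, aad);
 *      EVP_Cipher(&c, record, record, payload_len + pad);
 */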