/*
 * Copyright 2013-2021 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/*
 * AES low level APIs are deprecated for public use, but still ok for internal
 * use where we're using them to implement the higher level EVP interface, as is
 * the case here.
 */
#include "internal/deprecated.h"

#include <stdio.h>
#include <string.h>
#include <openssl/opensslconf.h>
#include <openssl/evp.h>
#include <openssl/objects.h>
#include <openssl/aes.h>
#include <openssl/sha.h>
#include <openssl/rand.h>
#include "internal/cryptlib.h"
#include "crypto/modes.h"
#include "internal/constant_time.h"
#include "crypto/evp.h"
#include "evp_local.h"

typedef struct {
    AES_KEY ks;
    SHA256_CTX head, tail, md;
    size_t payload_length;      /* AAD length in decrypt case */
    union {
        unsigned int tls_ver;
        unsigned char tls_aad[16]; /* 13 used */
    } aux;
} EVP_AES_HMAC_SHA256;

# define NO_PAYLOAD_LENGTH ((size_t)-1)

#if defined(AES_ASM) && ( \
        defined(__x86_64) || defined(__x86_64__) || \
        defined(_M_AMD64) || defined(_M_X64) )

# define AESNI_CAPABLE (1<<(57-32))

int aesni_set_encrypt_key(const unsigned char *userKey, int bits,
                          AES_KEY *key);
int aesni_set_decrypt_key(const unsigned char *userKey, int bits,
                          AES_KEY *key);

void aesni_cbc_encrypt(const unsigned char *in,
                       unsigned char *out,
                       size_t length,
                       const AES_KEY *key, unsigned char *ivec, int enc);

int aesni_cbc_sha256_enc(const void *inp, void *out, size_t blocks,
                         const AES_KEY *key, unsigned char iv[16],
                         SHA256_CTX *ctx, const void *in0);

# define data(ctx) ((EVP_AES_HMAC_SHA256 *)EVP_CIPHER_CTX_get_cipher_data(ctx))

static int aesni_cbc_hmac_sha256_init_key(EVP_CIPHER_CTX *ctx,
                                          const unsigned char *inkey,
                                          const unsigned char *iv, int enc)
{
    EVP_AES_HMAC_SHA256 *key = data(ctx);
    int ret;

    if (enc)
        ret = aesni_set_encrypt_key(inkey,
                                    EVP_CIPHER_CTX_get_key_length(ctx) * 8,
                                    &key->ks);
    else
        ret = aesni_set_decrypt_key(inkey,
                                    EVP_CIPHER_CTX_get_key_length(ctx) * 8,
                                    &key->ks);

    SHA256_Init(&key->head);    /* handy when benchmarking */
    key->tail = key->head;
    key->md = key->head;

    key->payload_length = NO_PAYLOAD_LENGTH;

    return ret < 0 ? 0 : 1;
}
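/*
 * A rough usage sketch (names like mac_key, aad and rec are placeholders,
 * and error checks are omitted): this cipher is normally driven through
 * the generic EVP interface, with the HMAC key supplied via
 * EVP_CTRL_AEAD_SET_MAC_KEY before any record is processed:
 *
 *     EVP_CIPHER_CTX *c = EVP_CIPHER_CTX_new();
 *     EVP_EncryptInit_ex(c, EVP_aes_128_cbc_hmac_sha256(), NULL, key, iv);
 *     EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_SET_MAC_KEY, 32, mac_key);
 *     // per record: pass the 13-byte TLS AAD, which returns the number
 *     // of bytes the record grows by (MAC plus padding), then cipher
 *     // the whole record buffer in place
 *     int pad = EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_TLS1_AAD,
 *                                   EVP_AEAD_TLS1_AAD_LEN, aad);
 *     EVP_Cipher(c, rec, rec, rec_len + pad);
 */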
# define STITCHED_CALL

# if !defined(STITCHED_CALL)
#  define aes_off 0
# endif

void sha256_block_data_order(void *c, const void *p, size_t len);

static void sha256_update(SHA256_CTX *c, const void *data, size_t len)
{
    const unsigned char *ptr = data;
    size_t res;

    if ((res = c->num)) {
        res = SHA256_CBLOCK - res;
        if (len < res)
            res = len;
        SHA256_Update(c, ptr, res);
        ptr += res;
        len -= res;
    }

    res = len % SHA256_CBLOCK;
    len -= res;

    if (len) {
        sha256_block_data_order(c, ptr, len / SHA256_CBLOCK);

        ptr += len;
        c->Nh += len >> 29;
        c->Nl += len <<= 3;
        if (c->Nl < (unsigned int)len)
            c->Nh++;
    }

    if (res)
        SHA256_Update(c, ptr, res);
}

# ifdef SHA256_Update
#  undef SHA256_Update
# endif
# define SHA256_Update sha256_update

# if !defined(OPENSSL_NO_MULTIBLOCK)

typedef struct {
    unsigned int A[8], B[8], C[8], D[8], E[8], F[8], G[8], H[8];
} SHA256_MB_CTX;
typedef struct {
    const unsigned char *ptr;
    int blocks;
} HASH_DESC;

void sha256_multi_block(SHA256_MB_CTX *, const HASH_DESC *, int);

typedef struct {
    const unsigned char *inp;
    unsigned char *out;
    int blocks;
    u64 iv[2];
} CIPH_DESC;

void aesni_multi_cbc_encrypt(CIPH_DESC *, void *, int);

static size_t tls1_1_multi_block_encrypt(EVP_AES_HMAC_SHA256 *key,
                                         unsigned char *out,
                                         const unsigned char *inp,
                                         size_t inp_len, int n4x)
{                               /* n4x is 1 or 2 */
    HASH_DESC hash_d[8], edges[8];
    CIPH_DESC ciph_d[8];
    unsigned char storage[sizeof(SHA256_MB_CTX) + 32];
    union {
        u64 q[16];
        u32 d[32];
        u8 c[128];
    } blocks[8];
    SHA256_MB_CTX *ctx;
    unsigned int frag, last, packlen, i, x4 = 4 * n4x, minblocks, processed =
        0;
    size_t ret = 0;
    u8 *IVs;
#  if defined(BSWAP8)
    u64 seqnum;
#  endif

    /* ask for IVs in bulk */
    if (RAND_bytes((IVs = blocks[0].c), 16 * x4) <= 0)
        return 0;

    /* align */
    ctx = (SHA256_MB_CTX *) (storage + 32 - ((size_t)storage % 32));

    frag = (unsigned int)inp_len >> (1 + n4x);
    last = (unsigned int)inp_len + frag - (frag << (1 + n4x));
    if (last > frag && ((last + 13 + 9) % 64) < (x4 - 1)) {
        frag++;
        last -= x4 - 1;
    }

    packlen = 5 + 16 + ((frag + 32 + 16) & -16);

    /* populate descriptors with pointers and IVs */
    hash_d[0].ptr = inp;
    ciph_d[0].inp = inp;
    /* 5+16 is place for header and explicit IV */
    ciph_d[0].out = out + 5 + 16;
    memcpy(ciph_d[0].out - 16, IVs, 16);
    memcpy(ciph_d[0].iv, IVs, 16);
    IVs += 16;

    for (i = 1; i < x4; i++) {
        ciph_d[i].inp = hash_d[i].ptr = hash_d[i - 1].ptr + frag;
        ciph_d[i].out = ciph_d[i - 1].out + packlen;
        memcpy(ciph_d[i].out - 16, IVs, 16);
        memcpy(ciph_d[i].iv, IVs, 16);
        IVs += 16;
    }
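    /*
     * Each of the x4 output packets is laid out as 5-byte TLS header |
     * 16-byte explicit IV | payload fragment | 32-byte HMAC | CBC padding;
     * packlen above is that size, with fragment|MAC|padding rounded up to
     * a whole number of AES blocks.
     */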
#  if defined(BSWAP8)
    memcpy(blocks[0].c, key->md.data, 8);
    seqnum = BSWAP8(blocks[0].q[0]);
#  endif
    for (i = 0; i < x4; i++) {
        unsigned int len = (i == (x4 - 1) ? last : frag);
#  if !defined(BSWAP8)
        unsigned int carry, j;
#  endif

        ctx->A[i] = key->md.h[0];
        ctx->B[i] = key->md.h[1];
        ctx->C[i] = key->md.h[2];
        ctx->D[i] = key->md.h[3];
        ctx->E[i] = key->md.h[4];
        ctx->F[i] = key->md.h[5];
        ctx->G[i] = key->md.h[6];
        ctx->H[i] = key->md.h[7];

        /* fix seqnum */
#  if defined(BSWAP8)
        blocks[i].q[0] = BSWAP8(seqnum + i);
#  else
        for (carry = i, j = 8; j--;) {
            blocks[i].c[j] = ((u8 *)key->md.data)[j] + carry;
            carry = (blocks[i].c[j] - carry) >> (sizeof(carry) * 8 - 1);
        }
#  endif
        blocks[i].c[8] = ((u8 *)key->md.data)[8];
        blocks[i].c[9] = ((u8 *)key->md.data)[9];
        blocks[i].c[10] = ((u8 *)key->md.data)[10];
        /* fix length */
        blocks[i].c[11] = (u8)(len >> 8);
        blocks[i].c[12] = (u8)(len);

        memcpy(blocks[i].c + 13, hash_d[i].ptr, 64 - 13);
        hash_d[i].ptr += 64 - 13;
        hash_d[i].blocks = (len - (64 - 13)) / 64;

        edges[i].ptr = blocks[i].c;
        edges[i].blocks = 1;
    }

    /* hash 13-byte headers and first 64-13 bytes of inputs */
    sha256_multi_block(ctx, edges, n4x);
    /* hash bulk inputs */
#  define MAXCHUNKSIZE 2048
#  if MAXCHUNKSIZE%64
#   error "MAXCHUNKSIZE is not divisible by 64"
#  elif MAXCHUNKSIZE
    /*
     * goal is to minimize pressure on L1 cache by moving in shorter steps,
     * so that hashed data is still in the cache by the time we encrypt it
     */
    minblocks = ((frag <= last ? frag : last) - (64 - 13)) / 64;
    if (minblocks > MAXCHUNKSIZE / 64) {
        for (i = 0; i < x4; i++) {
            edges[i].ptr = hash_d[i].ptr;
            edges[i].blocks = MAXCHUNKSIZE / 64;
            ciph_d[i].blocks = MAXCHUNKSIZE / 16;
        }
        do {
            sha256_multi_block(ctx, edges, n4x);
            aesni_multi_cbc_encrypt(ciph_d, &key->ks, n4x);

            for (i = 0; i < x4; i++) {
                edges[i].ptr = hash_d[i].ptr += MAXCHUNKSIZE;
                hash_d[i].blocks -= MAXCHUNKSIZE / 64;
                edges[i].blocks = MAXCHUNKSIZE / 64;
                ciph_d[i].inp += MAXCHUNKSIZE;
                ciph_d[i].out += MAXCHUNKSIZE;
                ciph_d[i].blocks = MAXCHUNKSIZE / 16;
                memcpy(ciph_d[i].iv, ciph_d[i].out - 16, 16);
            }
            processed += MAXCHUNKSIZE;
            minblocks -= MAXCHUNKSIZE / 64;
        } while (minblocks > MAXCHUNKSIZE / 64);
    }
#  endif
#  undef MAXCHUNKSIZE
    sha256_multi_block(ctx, hash_d, n4x);

    memset(blocks, 0, sizeof(blocks));
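    /*
     * The loop below builds SHA-256 padding by hand for each lane: copy
     * the remaining input bytes, append the 0x80 terminator and store the
     * message bit count in the final length field, spilling into a second
     * block when the remainder leaves no room for the 8-byte length.
     */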
    for (i = 0; i < x4; i++) {
        unsigned int len = (i == (x4 - 1) ? last : frag),
            off = hash_d[i].blocks * 64;
        const unsigned char *ptr = hash_d[i].ptr + off;

        off = (len - processed) - (64 - 13) - off; /* remainder actually */
        memcpy(blocks[i].c, ptr, off);
        blocks[i].c[off] = 0x80;
        len += 64 + 13;         /* 64 is HMAC header */
        len *= 8;               /* convert to bits */
        if (off < (64 - 8)) {
#  ifdef BSWAP4
            blocks[i].d[15] = BSWAP4(len);
#  else
            PUTU32(blocks[i].c + 60, len);
#  endif
            edges[i].blocks = 1;
        } else {
#  ifdef BSWAP4
            blocks[i].d[31] = BSWAP4(len);
#  else
            PUTU32(blocks[i].c + 124, len);
#  endif
            edges[i].blocks = 2;
        }
        edges[i].ptr = blocks[i].c;
    }

    /* hash input tails and finalize */
    sha256_multi_block(ctx, edges, n4x);

    memset(blocks, 0, sizeof(blocks));
    for (i = 0; i < x4; i++) {
#  ifdef BSWAP4
        blocks[i].d[0] = BSWAP4(ctx->A[i]);
        ctx->A[i] = key->tail.h[0];
        blocks[i].d[1] = BSWAP4(ctx->B[i]);
        ctx->B[i] = key->tail.h[1];
        blocks[i].d[2] = BSWAP4(ctx->C[i]);
        ctx->C[i] = key->tail.h[2];
        blocks[i].d[3] = BSWAP4(ctx->D[i]);
        ctx->D[i] = key->tail.h[3];
        blocks[i].d[4] = BSWAP4(ctx->E[i]);
        ctx->E[i] = key->tail.h[4];
        blocks[i].d[5] = BSWAP4(ctx->F[i]);
        ctx->F[i] = key->tail.h[5];
        blocks[i].d[6] = BSWAP4(ctx->G[i]);
        ctx->G[i] = key->tail.h[6];
        blocks[i].d[7] = BSWAP4(ctx->H[i]);
        ctx->H[i] = key->tail.h[7];
        blocks[i].c[32] = 0x80;
        blocks[i].d[15] = BSWAP4((64 + 32) * 8);
#  else
        PUTU32(blocks[i].c + 0, ctx->A[i]);
        ctx->A[i] = key->tail.h[0];
        PUTU32(blocks[i].c + 4, ctx->B[i]);
        ctx->B[i] = key->tail.h[1];
        PUTU32(blocks[i].c + 8, ctx->C[i]);
        ctx->C[i] = key->tail.h[2];
        PUTU32(blocks[i].c + 12, ctx->D[i]);
        ctx->D[i] = key->tail.h[3];
        PUTU32(blocks[i].c + 16, ctx->E[i]);
        ctx->E[i] = key->tail.h[4];
        PUTU32(blocks[i].c + 20, ctx->F[i]);
        ctx->F[i] = key->tail.h[5];
        PUTU32(blocks[i].c + 24, ctx->G[i]);
        ctx->G[i] = key->tail.h[6];
        PUTU32(blocks[i].c + 28, ctx->H[i]);
        ctx->H[i] = key->tail.h[7];
        blocks[i].c[32] = 0x80;
        PUTU32(blocks[i].c + 60, (64 + 32) * 8);
#  endif
        edges[i].ptr = blocks[i].c;
        edges[i].blocks = 1;
    }
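    /*
     * Each lane's A..H held the inner hash; blocks[] now carries that
     * 32-byte digest pre-padded as one 64-byte block (length field
     * (64 + 32) * 8 bits to account for the hashed ipad block), and A..H
     * are reloaded with the opad-primed "tail" state, so one more
     * multi-block pass completes the HMAC computation.
     */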
    /* finalize MACs */
    sha256_multi_block(ctx, edges, n4x);

    for (i = 0; i < x4; i++) {
        unsigned int len = (i == (x4 - 1) ? last : frag), pad, j;
        unsigned char *out0 = out;

        memcpy(ciph_d[i].out, ciph_d[i].inp, len - processed);
        ciph_d[i].inp = ciph_d[i].out;

        out += 5 + 16 + len;

        /* write MAC */
        PUTU32(out + 0, ctx->A[i]);
        PUTU32(out + 4, ctx->B[i]);
        PUTU32(out + 8, ctx->C[i]);
        PUTU32(out + 12, ctx->D[i]);
        PUTU32(out + 16, ctx->E[i]);
        PUTU32(out + 20, ctx->F[i]);
        PUTU32(out + 24, ctx->G[i]);
        PUTU32(out + 28, ctx->H[i]);
        out += 32;
        len += 32;

        /* pad */
        pad = 15 - len % 16;
        for (j = 0; j <= pad; j++)
            *(out++) = pad;
        len += pad + 1;

        ciph_d[i].blocks = (len - processed) / 16;
        len += 16;              /* account for explicit iv */

        /* arrange header */
        out0[0] = ((u8 *)key->md.data)[8];
        out0[1] = ((u8 *)key->md.data)[9];
        out0[2] = ((u8 *)key->md.data)[10];
        out0[3] = (u8)(len >> 8);
        out0[4] = (u8)(len);

        ret += len + 5;
        inp += frag;
    }

    aesni_multi_cbc_encrypt(ciph_d, &key->ks, n4x);

    OPENSSL_cleanse(blocks, sizeof(blocks));
    OPENSSL_cleanse(ctx, sizeof(*ctx));

    return ret;
}
# endif

static int aesni_cbc_hmac_sha256_cipher(EVP_CIPHER_CTX *ctx,
                                        unsigned char *out,
                                        const unsigned char *in, size_t len)
{
    EVP_AES_HMAC_SHA256 *key = data(ctx);
    unsigned int l;
    size_t plen = key->payload_length, iv = 0, /* explicit IV in TLS 1.1 and
                                                * later */
        sha_off = 0;
# if defined(STITCHED_CALL)
    size_t aes_off = 0, blocks;

    sha_off = SHA256_CBLOCK - key->md.num;
# endif

    key->payload_length = NO_PAYLOAD_LENGTH;

    if (len % AES_BLOCK_SIZE)
        return 0;

    if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
        if (plen == NO_PAYLOAD_LENGTH)
            plen = len;
        else if (len !=
                 ((plen + SHA256_DIGEST_LENGTH +
                   AES_BLOCK_SIZE) & -AES_BLOCK_SIZE))
            return 0;
        else if (key->aux.tls_ver >= TLS1_1_VERSION)
            iv = AES_BLOCK_SIZE;
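        /*
         * Worked example of the length check above: for a 100-byte
         * payload, (100 + 32 + 16) & -16 = 144, i.e. payload plus 32-byte
         * MAC plus 12 bytes of padding (pad value 11) gives a whole
         * number of AES blocks.
         */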
# if defined(STITCHED_CALL)
        /*
         * The assembly stitch handles AVX-capable processors, but its
         * performance on AMD Jaguar is ~40% worse, for unknown reasons.
         * That processor supports AVX but not the AMD-specific XOP
         * extension, which can therefore be used to identify it and avoid
         * the stitch. So once we establish that the current CPU supports
         * AVX, we also require that it is either XOP-capable (i.e.
         * Bulldozer-based) or a GenuineIntel one. SHAEXT-capable
         * processors take the stitched path regardless.
         */
        if (((OPENSSL_ia32cap_P[2] & (1 << 29)) ||         /* SHAEXT? */
             ((OPENSSL_ia32cap_P[1] & (1 << (60 - 32))) && /* AVX? */
              ((OPENSSL_ia32cap_P[1] & (1 << (43 - 32)))   /* XOP? */
               | (OPENSSL_ia32cap_P[0] & (1 << 30))))) &&  /* "Intel CPU"? */
            plen > (sha_off + iv) &&
            (blocks = (plen - (sha_off + iv)) / SHA256_CBLOCK)) {
            SHA256_Update(&key->md, in + iv, sha_off);

            (void)aesni_cbc_sha256_enc(in, out, blocks, &key->ks,
                                       ctx->iv, &key->md, in + iv + sha_off);
            blocks *= SHA256_CBLOCK;
            aes_off += blocks;
            sha_off += blocks;
            key->md.Nh += blocks >> 29;
            key->md.Nl += blocks <<= 3;
            if (key->md.Nl < (unsigned int)blocks)
                key->md.Nh++;
        } else {
            sha_off = 0;
        }
# endif
        sha_off += iv;
        SHA256_Update(&key->md, in + sha_off, plen - sha_off);

        if (plen != len) {      /* "TLS" mode of operation */
            if (in != out)
                memcpy(out + aes_off, in + aes_off, plen - aes_off);

            /* calculate HMAC and append it to payload */
            SHA256_Final(out + plen, &key->md);
            key->md = key->tail;
            SHA256_Update(&key->md, out + plen, SHA256_DIGEST_LENGTH);
            SHA256_Final(out + plen, &key->md);

            /* pad the payload|hmac */
            plen += SHA256_DIGEST_LENGTH;
            for (l = len - plen - 1; plen < len; plen++)
                out[plen] = l;
            /* encrypt HMAC|padding at once */
            aesni_cbc_encrypt(out + aes_off, out + aes_off, len - aes_off,
                              &key->ks, ctx->iv, 1);
        } else {
            aesni_cbc_encrypt(in + aes_off, out + aes_off, len - aes_off,
                              &key->ks, ctx->iv, 1);
        }
    } else {
        union {
            unsigned int u[SHA256_DIGEST_LENGTH / sizeof(unsigned int)];
            unsigned char c[64 + SHA256_DIGEST_LENGTH];
        } mac, *pmac;

        /* arrange cache line alignment */
        pmac = (void *)(((size_t)mac.c + 63) & ((size_t)0 - 64));

        /* decrypt HMAC|padding at once */
        aesni_cbc_encrypt(in, out, len, &key->ks,
                          ctx->iv, 0);

        if (plen != NO_PAYLOAD_LENGTH) { /* "TLS" mode of operation */
            size_t inp_len, mask, j, i;
            unsigned int res, maxpad, pad, bitlen;
            int ret = 1;
            union {
                unsigned int u[SHA_LBLOCK];
                unsigned char c[SHA256_CBLOCK];
            } *data = (void *)key->md.data;

            if ((key->aux.tls_aad[plen - 4] << 8 | key->aux.tls_aad[plen - 3])
                >= TLS1_1_VERSION)
                iv = AES_BLOCK_SIZE;

            if (len < (iv + SHA256_DIGEST_LENGTH + 1))
                return 0;

            /* omit explicit iv */
            out += iv;
            len -= iv;

            /* figure out payload length */
            pad = out[len - 1];
            maxpad = len - (SHA256_DIGEST_LENGTH + 1);
            maxpad |= (255 - maxpad) >> (sizeof(maxpad) * 8 - 8);
            maxpad &= 255;

            mask = constant_time_ge(maxpad, pad);
            ret &= mask;
            /*
             * If pad is invalid then we will fail the above test but we must
             * continue anyway because we are in constant time code. However,
             * we'll use the maxpad value instead of the supplied pad to make
             * sure we perform well defined pointer arithmetic.
             */
            pad = constant_time_select(mask, pad, maxpad);
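            /*
             * What follows is a constant-time Lucky-13 mitigation: every
             * ciphertext byte up to the maximal padding is processed, and
             * all masks are derived branchlessly from comparisons with
             * inp_len, so the memory access and hashing pattern does not
             * depend on the secret padding length.
             */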
            inp_len = len - (SHA256_DIGEST_LENGTH + pad + 1);

            key->aux.tls_aad[plen - 2] = inp_len >> 8;
            key->aux.tls_aad[plen - 1] = inp_len;

            /* calculate HMAC */
            key->md = key->head;
            SHA256_Update(&key->md, key->aux.tls_aad, plen);

# if 1      /* see original reference version in #else */
            len -= SHA256_DIGEST_LENGTH; /* amend mac */
            if (len >= (256 + SHA256_CBLOCK)) {
                j = (len - (256 + SHA256_CBLOCK)) & (0 - SHA256_CBLOCK);
                j += SHA256_CBLOCK - key->md.num;
                SHA256_Update(&key->md, out, j);
                out += j;
                len -= j;
                inp_len -= j;
            }

            /* but pretend as if we hashed padded payload */
            bitlen = key->md.Nl + (inp_len << 3); /* at most 18 bits */
#  ifdef BSWAP4
            bitlen = BSWAP4(bitlen);
#  else
            mac.c[0] = 0;
            mac.c[1] = (unsigned char)(bitlen >> 16);
            mac.c[2] = (unsigned char)(bitlen >> 8);
            mac.c[3] = (unsigned char)bitlen;
            bitlen = mac.u[0];
#  endif

            pmac->u[0] = 0;
            pmac->u[1] = 0;
            pmac->u[2] = 0;
            pmac->u[3] = 0;
            pmac->u[4] = 0;
            pmac->u[5] = 0;
            pmac->u[6] = 0;
            pmac->u[7] = 0;

            for (res = key->md.num, j = 0; j < len; j++) {
                size_t c = out[j];
                mask = (j - inp_len) >> (sizeof(j) * 8 - 8);
                c &= mask;
                c |= 0x80 & ~mask & ~((inp_len - j) >> (sizeof(j) * 8 - 8));
                data->c[res++] = (unsigned char)c;

                if (res != SHA256_CBLOCK)
                    continue;

                /* j is not incremented yet */
                mask = 0 - ((inp_len + 7 - j) >> (sizeof(j) * 8 - 1));
                data->u[SHA_LBLOCK - 1] |= bitlen & mask;
                sha256_block_data_order(&key->md, data, 1);
                mask &= 0 - ((j - inp_len - 72) >> (sizeof(j) * 8 - 1));
                pmac->u[0] |= key->md.h[0] & mask;
                pmac->u[1] |= key->md.h[1] & mask;
                pmac->u[2] |= key->md.h[2] & mask;
                pmac->u[3] |= key->md.h[3] & mask;
                pmac->u[4] |= key->md.h[4] & mask;
                pmac->u[5] |= key->md.h[5] & mask;
                pmac->u[6] |= key->md.h[6] & mask;
                pmac->u[7] |= key->md.h[7] & mask;
                res = 0;
            }

            for (i = res; i < SHA256_CBLOCK; i++, j++)
                data->c[i] = 0;

            if (res > SHA256_CBLOCK - 8) {
                mask = 0 - ((inp_len + 8 - j) >> (sizeof(j) * 8 - 1));
                data->u[SHA_LBLOCK - 1] |= bitlen & mask;
                sha256_block_data_order(&key->md, data, 1);
                mask &= 0 - ((j - inp_len - 73) >> (sizeof(j) * 8 - 1));
                pmac->u[0] |= key->md.h[0] & mask;
                pmac->u[1] |= key->md.h[1] & mask;
                pmac->u[2] |= key->md.h[2] & mask;
                pmac->u[3] |= key->md.h[3] & mask;
                pmac->u[4] |= key->md.h[4] & mask;
                pmac->u[5] |= key->md.h[5] & mask;
                pmac->u[6] |= key->md.h[6] & mask;
                pmac->u[7] |= key->md.h[7] & mask;

                memset(data, 0, SHA256_CBLOCK);
                j += 64;
            }
            data->u[SHA_LBLOCK - 1] = bitlen;
            sha256_block_data_order(&key->md, data, 1);
            mask = 0 - ((j - inp_len - 73) >> (sizeof(j) * 8 - 1));
            pmac->u[0] |= key->md.h[0] & mask;
            pmac->u[1] |= key->md.h[1] & mask;
            pmac->u[2] |= key->md.h[2] & mask;
            pmac->u[3] |= key->md.h[3] & mask;
            pmac->u[4] |= key->md.h[4] & mask;
            pmac->u[5] |= key->md.h[5] & mask;
            pmac->u[6] |= key->md.h[6] & mask;
            pmac->u[7] |= key->md.h[7] & mask;
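            /*
             * pmac now holds the inner-hash digest, accumulated in
             * constant time from whichever block turned out to be the
             * final one; convert it from host order to the big-endian
             * order in which SHA-256 emits its digest.
             */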
#  ifdef BSWAP4
            pmac->u[0] = BSWAP4(pmac->u[0]);
            pmac->u[1] = BSWAP4(pmac->u[1]);
            pmac->u[2] = BSWAP4(pmac->u[2]);
            pmac->u[3] = BSWAP4(pmac->u[3]);
            pmac->u[4] = BSWAP4(pmac->u[4]);
            pmac->u[5] = BSWAP4(pmac->u[5]);
            pmac->u[6] = BSWAP4(pmac->u[6]);
            pmac->u[7] = BSWAP4(pmac->u[7]);
#  else
            for (i = 0; i < 8; i++) {
                res = pmac->u[i];
                pmac->c[4 * i + 0] = (unsigned char)(res >> 24);
                pmac->c[4 * i + 1] = (unsigned char)(res >> 16);
                pmac->c[4 * i + 2] = (unsigned char)(res >> 8);
                pmac->c[4 * i + 3] = (unsigned char)res;
            }
#  endif
            len += SHA256_DIGEST_LENGTH;
# else
            SHA256_Update(&key->md, out, inp_len);
            res = key->md.num;
            SHA256_Final(pmac->c, &key->md);

            {
                unsigned int inp_blocks, pad_blocks;

                /* but pretend as if we hashed padded payload */
                inp_blocks =
                    1 + ((SHA256_CBLOCK - 9 - res) >> (sizeof(res) * 8 - 1));
                res += (unsigned int)(len - inp_len);
                pad_blocks = res / SHA256_CBLOCK;
                res %= SHA256_CBLOCK;
                pad_blocks +=
                    1 + ((SHA256_CBLOCK - 9 - res) >> (sizeof(res) * 8 - 1));
                for (; inp_blocks < pad_blocks; inp_blocks++)
                    sha256_block_data_order(&key->md, data, 1);
            }
# endif     /* pre-lucky-13 reference version of above */
            key->md = key->tail;
            SHA256_Update(&key->md, pmac->c, SHA256_DIGEST_LENGTH);
            SHA256_Final(pmac->c, &key->md);

            /* verify HMAC */
            out += inp_len;
            len -= inp_len;
# if 1      /* see original reference version in #else */
            {
                unsigned char *p =
                    out + len - 1 - maxpad - SHA256_DIGEST_LENGTH;
                size_t off = out - p;
                unsigned int c, cmask;

                for (res = 0, i = 0, j = 0; j < maxpad + SHA256_DIGEST_LENGTH;
                     j++) {
                    c = p[j];
                    cmask =
                        ((int)(j - off - SHA256_DIGEST_LENGTH)) >>
                        (sizeof(int) * 8 - 1);
                    res |= (c ^ pad) & ~cmask; /* ... and padding */
                    cmask &= ((int)(off - 1 - j)) >> (sizeof(int) * 8 - 1);
                    res |= (c ^ pmac->c[i]) & cmask;
                    i += 1 & cmask;
                }

                res = 0 - ((0 - res) >> (sizeof(res) * 8 - 1));
                ret &= (int)~res;
            }
# else      /* pre-lucky-13 reference version of above */
            for (res = 0, i = 0; i < SHA256_DIGEST_LENGTH; i++)
                res |= out[i] ^ pmac->c[i];
            res = 0 - ((0 - res) >> (sizeof(res) * 8 - 1));
            ret &= (int)~res;

            /* verify padding */
            pad = (pad & ~res) | (maxpad & res);
            out = out + len - 1 - pad;
            for (res = 0, i = 0; i < pad; i++)
                res |= out[i] ^ pad;

            res = (0 - res) >> (sizeof(res) * 8 - 1);
            ret &= (int)~res;
# endif
            return ret;
        } else {
            SHA256_Update(&key->md, out, len);
        }
    }

    return 1;
}

static int aesni_cbc_hmac_sha256_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg,
                                      void *ptr)
{
    EVP_AES_HMAC_SHA256 *key = data(ctx);
    unsigned int u_arg = (unsigned int)arg;

    switch (type) {
    case EVP_CTRL_AEAD_SET_MAC_KEY:
        {
            unsigned int i;
            unsigned char hmac_key[64];

            memset(hmac_key, 0, sizeof(hmac_key));

            if (arg < 0)
                return -1;

            if (u_arg > sizeof(hmac_key)) {
                SHA256_Init(&key->head);
                SHA256_Update(&key->head, ptr, arg);
                SHA256_Final(hmac_key, &key->head);
            } else {
                memcpy(hmac_key, ptr, arg);
            }

            for (i = 0; i < sizeof(hmac_key); i++)
                hmac_key[i] ^= 0x36; /* ipad */
            SHA256_Init(&key->head);
            SHA256_Update(&key->head, hmac_key, sizeof(hmac_key));

            for (i = 0; i < sizeof(hmac_key); i++)
                hmac_key[i] ^= 0x36 ^ 0x5c; /* opad */
            SHA256_Init(&key->tail);
            SHA256_Update(&key->tail, hmac_key, sizeof(hmac_key));

            OPENSSL_cleanse(hmac_key, sizeof(hmac_key));

            return 1;
        }
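    /*
     * For EVP_CTRL_AEAD_TLS1_AAD the caller passes the 13-byte TLS record
     * header. On encrypt the return value is the number of bytes the
     * record will grow by (MAC plus padding, with the explicit IV already
     * deducted from the length for TLS 1.1+); on decrypt it is
     * SHA256_DIGEST_LENGTH.
     */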
    case EVP_CTRL_AEAD_TLS1_AAD:
        {
            unsigned char *p = ptr;
            unsigned int len;

            if (arg != EVP_AEAD_TLS1_AAD_LEN)
                return -1;

            len = p[arg - 2] << 8 | p[arg - 1];

            if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
                key->payload_length = len;
                if ((key->aux.tls_ver =
                     p[arg - 4] << 8 | p[arg - 3]) >= TLS1_1_VERSION) {
                    if (len < AES_BLOCK_SIZE)
                        return 0;
                    len -= AES_BLOCK_SIZE;
                    p[arg - 2] = len >> 8;
                    p[arg - 1] = len;
                }
                key->md = key->head;
                SHA256_Update(&key->md, p, arg);

                return (int)(((len + SHA256_DIGEST_LENGTH +
                               AES_BLOCK_SIZE) & -AES_BLOCK_SIZE)
                             - len);
            } else {
                memcpy(key->aux.tls_aad, ptr, arg);
                key->payload_length = arg;

                return SHA256_DIGEST_LENGTH;
            }
        }
# if !defined(OPENSSL_NO_MULTIBLOCK)
    case EVP_CTRL_TLS1_1_MULTIBLOCK_MAX_BUFSIZE:
        return (int)(5 + 16 + ((arg + 32 + 16) & -16));
    case EVP_CTRL_TLS1_1_MULTIBLOCK_AAD:
        {
            EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM *param =
                (EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM *) ptr;
            unsigned int n4x = 1, x4;
            unsigned int frag, last, packlen, inp_len;

            if (arg < 0)
                return -1;

            if (u_arg < sizeof(EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM))
                return -1;

            inp_len = param->inp[11] << 8 | param->inp[12];

            if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
                if ((param->inp[9] << 8 | param->inp[10]) < TLS1_1_VERSION)
                    return -1;

                if (inp_len) {
                    if (inp_len < 4096)
                        return 0; /* too short */

                    if (inp_len >= 8192 && OPENSSL_ia32cap_P[2] & (1 << 5))
                        n4x = 2; /* AVX2 */
                } else if ((n4x = param->interleave / 4) && n4x <= 2)
                    inp_len = param->len;
                else
                    return -1;

                key->md = key->head;
                SHA256_Update(&key->md, param->inp, 13);

                x4 = 4 * n4x;
                n4x += 1;

                frag = inp_len >> n4x;
                last = inp_len + frag - (frag << n4x);
                if (last > frag && ((last + 13 + 9) % 64 < (x4 - 1))) {
                    frag++;
                    last -= x4 - 1;
                }

                packlen = 5 + 16 + ((frag + 32 + 16) & -16);
                packlen = (packlen << n4x) - packlen;
                packlen += 5 + 16 + ((last + 32 + 16) & -16);

                param->interleave = x4;

                return (int)packlen;
            } else
                return -1;      /* not yet */
        }
    case EVP_CTRL_TLS1_1_MULTIBLOCK_ENCRYPT:
        {
            EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM *param =
                (EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM *) ptr;

            return (int)tls1_1_multi_block_encrypt(key, param->out,
                                                   param->inp, param->len,
                                                   param->interleave / 4);
        }
    case EVP_CTRL_TLS1_1_MULTIBLOCK_DECRYPT:
# endif
    default:
        return -1;
    }
}

static EVP_CIPHER aesni_128_cbc_hmac_sha256_cipher = {
# ifdef NID_aes_128_cbc_hmac_sha256
    NID_aes_128_cbc_hmac_sha256,
# else
    NID_undef,
# endif
    AES_BLOCK_SIZE, 16, AES_BLOCK_SIZE,
    EVP_CIPH_CBC_MODE | EVP_CIPH_FLAG_DEFAULT_ASN1 |
        EVP_CIPH_FLAG_AEAD_CIPHER | EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK,
    EVP_ORIG_GLOBAL,
    aesni_cbc_hmac_sha256_init_key,
    aesni_cbc_hmac_sha256_cipher,
    NULL,
    sizeof(EVP_AES_HMAC_SHA256),
    EVP_CIPH_FLAG_DEFAULT_ASN1 ? NULL : EVP_CIPHER_set_asn1_iv,
    EVP_CIPH_FLAG_DEFAULT_ASN1 ? NULL : EVP_CIPHER_get_asn1_iv,
    aesni_cbc_hmac_sha256_ctrl,
    NULL
};
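/*
 * The 256-bit variant below differs only in the NID and the 32-byte key
 * length.
 */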
static EVP_CIPHER aesni_256_cbc_hmac_sha256_cipher = {
# ifdef NID_aes_256_cbc_hmac_sha256
    NID_aes_256_cbc_hmac_sha256,
# else
    NID_undef,
# endif
    AES_BLOCK_SIZE, 32, AES_BLOCK_SIZE,
    EVP_CIPH_CBC_MODE | EVP_CIPH_FLAG_DEFAULT_ASN1 |
        EVP_CIPH_FLAG_AEAD_CIPHER | EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK,
    EVP_ORIG_GLOBAL,
    aesni_cbc_hmac_sha256_init_key,
    aesni_cbc_hmac_sha256_cipher,
    NULL,
    sizeof(EVP_AES_HMAC_SHA256),
    EVP_CIPH_FLAG_DEFAULT_ASN1 ? NULL : EVP_CIPHER_set_asn1_iv,
    EVP_CIPH_FLAG_DEFAULT_ASN1 ? NULL : EVP_CIPHER_get_asn1_iv,
    aesni_cbc_hmac_sha256_ctrl,
    NULL
};

const EVP_CIPHER *EVP_aes_128_cbc_hmac_sha256(void)
{
    return ((OPENSSL_ia32cap_P[1] & AESNI_CAPABLE) &&
            aesni_cbc_sha256_enc(NULL, NULL, 0, NULL, NULL, NULL, NULL) ?
            &aesni_128_cbc_hmac_sha256_cipher : NULL);
}

const EVP_CIPHER *EVP_aes_256_cbc_hmac_sha256(void)
{
    return ((OPENSSL_ia32cap_P[1] & AESNI_CAPABLE) &&
            aesni_cbc_sha256_enc(NULL, NULL, 0, NULL, NULL, NULL, NULL) ?
            &aesni_256_cbc_hmac_sha256_cipher : NULL);
}
#else
const EVP_CIPHER *EVP_aes_128_cbc_hmac_sha256(void)
{
    return NULL;
}

const EVP_CIPHER *EVP_aes_256_cbc_hmac_sha256(void)
{
    return NULL;
}
#endif