/**
 * Constant-time functions
 *
 * Copyright The Mbed TLS Contributors
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * The following functions are implemented without using comparison operators, as those
 * might be translated to branches by some compilers on some platforms.
 */

#include "common.h"
#include "constant_time_internal.h"
#include "mbedtls/constant_time.h"
#include "mbedtls/error.h"
#include "mbedtls/platform_util.h"

#if defined(MBEDTLS_BIGNUM_C)
#include "mbedtls/bignum.h"
#include "bignum_core.h"
#endif

#if defined(MBEDTLS_SSL_TLS_C)
#include "ssl_misc.h"
#endif

#if defined(MBEDTLS_RSA_C)
#include "mbedtls/rsa.h"
#endif

#if defined(MBEDTLS_BASE64_C)
#include "constant_time_invasive.h"
#endif

#include <string.h>
#if defined(MBEDTLS_USE_PSA_CRYPTO)
#define PSA_TO_MBEDTLS_ERR(status) PSA_TO_MBEDTLS_ERR_LIST(status,            \
                                                           psa_to_ssl_errors, \
                                                           psa_generic_status_to_mbedtls)
#endif

/*
 * Define MBEDTLS_EFFICIENT_UNALIGNED_VOLATILE_ACCESS where assembly is present to
 * perform fast unaligned access to volatile data.
 *
 * This is needed because mbedtls_get_unaligned_uintXX etc don't support volatile
 * memory accesses.
 *
 * Some of these definitions could be moved into alignment.h but for now they are
 * only used here.
 */
#if defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS) && defined(MBEDTLS_HAVE_ASM)
#if defined(__arm__) || defined(__thumb__) || defined(__thumb2__) || defined(__aarch64__)
#define MBEDTLS_EFFICIENT_UNALIGNED_VOLATILE_ACCESS
#endif
#endif

#if defined(MBEDTLS_EFFICIENT_UNALIGNED_VOLATILE_ACCESS)
static inline uint32_t mbedtls_get_unaligned_volatile_uint32(volatile const unsigned char *p)
{
    /* This is UB, even where it's safe:
     *    return *((volatile uint32_t*)p);
     * so instead the same thing is expressed in assembly below.
     */
    uint32_t r;
#if defined(__arm__) || defined(__thumb__) || defined(__thumb2__)
    asm volatile ("ldr %0, [%1]" : "=r" (r) : "r" (p) :);
#elif defined(__aarch64__)
    asm volatile ("ldr %w0, [%1]" : "=r" (r) : "r" (p) :);
#endif
    return r;
}
#endif /* MBEDTLS_EFFICIENT_UNALIGNED_VOLATILE_ACCESS */

int mbedtls_ct_memcmp(const void *a,
                      const void *b,
                      size_t n)
{
    size_t i = 0;
    /*
     * `A` and `B` are cast to volatile to ensure that the compiler
     * generates code that always fully reads both buffers.
     * Otherwise it could generate a test to exit early if `diff` has all
     * bits set early in the loop.
     */
    volatile const unsigned char *A = (volatile const unsigned char *) a;
    volatile const unsigned char *B = (volatile const unsigned char *) b;
    uint32_t diff = 0;

#if defined(MBEDTLS_EFFICIENT_UNALIGNED_VOLATILE_ACCESS)
    for (; (i + 4) <= n; i += 4) {
        uint32_t x = mbedtls_get_unaligned_volatile_uint32(A + i);
        uint32_t y = mbedtls_get_unaligned_volatile_uint32(B + i);
        diff |= x ^ y;
    }
#endif

    for (; i < n; i++) {
        /* Read volatile data in order before computing diff.
         * This avoids IAR compiler warning:
         * 'the order of volatile accesses is undefined ..' */
        unsigned char x = A[i], y = B[i];
        diff |= x ^ y;
    }

    return (int) diff;
}
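
/* Illustrative usage of mbedtls_ct_memcmp() (not part of this file):
 * comparing a computed MAC with a received MAC without leaking, through
 * timing, the position of the first differing byte:
 *
 *     if (mbedtls_ct_memcmp(computed_mac, received_mac, mac_len) != 0) {
 *         return MBEDTLS_ERR_SSL_INVALID_MAC;
 *     }
 *
 * The return value is 0 if and only if the buffers are equal; any nonzero
 * value only means "not equal" and does not reveal where they differ.
 */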

unsigned mbedtls_ct_uint_mask(unsigned value)
{
    /* MSVC has a warning about unary minus on unsigned, but this is
     * well-defined and precisely what we want to do here */
#if defined(_MSC_VER)
#pragma warning( push )
#pragma warning( disable : 4146 )
#endif
    return -((value | -value) >> (sizeof(value) * 8 - 1));
#if defined(_MSC_VER)
#pragma warning( pop )
#endif
}

#if defined(MBEDTLS_SSL_SOME_SUITES_USE_MAC)

size_t mbedtls_ct_size_mask(size_t value)
{
    /* MSVC has a warning about unary minus on unsigned integer types,
     * but this is well-defined and precisely what we want to do here. */
#if defined(_MSC_VER)
#pragma warning( push )
#pragma warning( disable : 4146 )
#endif
    return -((value | -value) >> (sizeof(value) * 8 - 1));
#if defined(_MSC_VER)
#pragma warning( pop )
#endif
}

#endif /* MBEDTLS_SSL_SOME_SUITES_USE_MAC */

#if defined(MBEDTLS_BIGNUM_C)

mbedtls_mpi_uint mbedtls_ct_mpi_uint_mask(mbedtls_mpi_uint value)
{
    /* MSVC has a warning about unary minus on unsigned, but this is
     * well-defined and precisely what we want to do here */
#if defined(_MSC_VER)
#pragma warning( push )
#pragma warning( disable : 4146 )
#endif
    return -((value | -value) >> (sizeof(value) * 8 - 1));
#if defined(_MSC_VER)
#pragma warning( pop )
#endif
}

#endif /* MBEDTLS_BIGNUM_C */

#if defined(MBEDTLS_SSL_SOME_SUITES_USE_TLS_CBC)

/** Constant-flow mask generation for "less than" comparison:
 * - if \p x < \p y, return all-bits 1, that is (size_t) -1
 * - otherwise, return all bits 0, that is 0
 *
 * This function can be used to write constant-time code by replacing branches
 * with bit operations using masks.
 *
 * \param x     The first value to analyze.
 * \param y     The second value to analyze.
 *
 * \return      All-bits-one if \p x is less than \p y, otherwise zero.
 */
static size_t mbedtls_ct_size_mask_lt(size_t x,
                                      size_t y)
{
    /* This has the most significant bit set if and only if x < y */
    const size_t sub = x - y;

    /* sub1 = (x < y) ? 1 : 0 */
    const size_t sub1 = sub >> (sizeof(sub) * 8 - 1);

    /* mask = (x < y) ? 0xff... : 0x00... */
    const size_t mask = mbedtls_ct_size_mask(sub1);

    return mask;
}

size_t mbedtls_ct_size_mask_ge(size_t x,
                               size_t y)
{
    return ~mbedtls_ct_size_mask_lt(x, y);
}

#endif /* MBEDTLS_SSL_SOME_SUITES_USE_TLS_CBC */
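
/* Worked example for mbedtls_ct_size_mask_lt()/_ge() above, assuming a 32-bit
 * size_t for brevity: with x = 5 and y = 7, x - y wraps to 0xFFFFFFFE, whose
 * most significant bit is 1, so mbedtls_ct_size_mask_lt() returns 0xFFFFFFFF
 * and mbedtls_ct_size_mask_ge() returns 0. With x = 7 and y = 5, x - y = 2,
 * the most significant bit is 0, and the results are 0 and 0xFFFFFFFF
 * respectively.
 */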

#if defined(MBEDTLS_BASE64_C)

/* Return 0xff if low <= c <= high, 0 otherwise.
 *
 * Constant flow with respect to c.
 */
MBEDTLS_STATIC_TESTABLE
unsigned char mbedtls_ct_uchar_mask_of_range(unsigned char low,
                                             unsigned char high,
                                             unsigned char c)
{
    /* low_mask is: 0 if low <= c, 0x...ff if low > c */
    unsigned low_mask = ((unsigned) c - low) >> 8;
    /* high_mask is: 0 if c <= high, 0x...ff if c > high */
    unsigned high_mask = ((unsigned) high - c) >> 8;
    return ~(low_mask | high_mask) & 0xff;
}

#endif /* MBEDTLS_BASE64_C */

unsigned mbedtls_ct_size_bool_eq(size_t x,
                                 size_t y)
{
    /* diff = 0 if x == y, non-zero otherwise */
    const size_t diff = x ^ y;

    /* MSVC has a warning about unary minus on unsigned integer types,
     * but this is well-defined and precisely what we want to do here. */
#if defined(_MSC_VER)
#pragma warning( push )
#pragma warning( disable : 4146 )
#endif

    /* diff_msb's most significant bit is equal to x != y */
    const size_t diff_msb = (diff | (size_t) -diff);

#if defined(_MSC_VER)
#pragma warning( pop )
#endif

    /* diff1 = (x != y) ? 1 : 0 */
    const unsigned diff1 = diff_msb >> (sizeof(diff_msb) * 8 - 1);

    return 1 ^ diff1;
}

#if defined(MBEDTLS_PKCS1_V15) && defined(MBEDTLS_RSA_C) && !defined(MBEDTLS_RSA_ALT)

/** Constant-flow "greater than" comparison:
 * return x > y
 *
 * This is equivalent to \p x > \p y, but is likely to be compiled
 * to code using bitwise operation rather than a branch.
 *
 * \param x     The first value to analyze.
 * \param y     The second value to analyze.
 *
 * \return      1 if \p x greater than \p y, otherwise 0.
 */
static unsigned mbedtls_ct_size_gt(size_t x,
                                   size_t y)
{
    /* Return the sign bit (1 for negative) of (y - x). */
    return (y - x) >> (sizeof(size_t) * 8 - 1);
}

#endif /* MBEDTLS_PKCS1_V15 && MBEDTLS_RSA_C && ! MBEDTLS_RSA_ALT */

#if defined(MBEDTLS_BIGNUM_C)

unsigned mbedtls_ct_mpi_uint_lt(const mbedtls_mpi_uint x,
                                const mbedtls_mpi_uint y)
{
    mbedtls_mpi_uint ret;
    mbedtls_mpi_uint cond;

    /*
     * Check if the most significant bits (MSB) of the operands are different.
     */
    cond = (x ^ y);
    /*
     * If the MSB are the same then the difference x-y will be negative (and
     * have its MSB set to 1 during conversion to unsigned) if and only if x<y.
     */
    ret = (x - y) & ~cond;
    /*
     * If the MSB are different, then the operand with the MSB of 1 is the
     * bigger. (That is if y has MSB of 1, then x<y is true and it is false if
     * the MSB of y is 0.)
     */
    ret |= y & cond;

    ret = ret >> (sizeof(mbedtls_mpi_uint) * 8 - 1);

    return (unsigned) ret;
}

#endif /* MBEDTLS_BIGNUM_C */

unsigned mbedtls_ct_uint_if(unsigned condition,
                            unsigned if1,
                            unsigned if0)
{
    unsigned mask = mbedtls_ct_uint_mask(condition);
    return (mask & if1) | (~mask & if0);
}
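
/* Worked example for mbedtls_ct_uint_if() above: the mask of the condition is
 * all-ones for any nonzero condition and all-zeros otherwise, so the result is
 * if1 when condition != 0 and if0 when condition == 0, without a
 * data-dependent branch. For instance, mbedtls_ct_uint_if(1, 0xAA, 0x55)
 * yields 0xAA and mbedtls_ct_uint_if(0, 0xAA, 0x55) yields 0x55.
 */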

#if defined(MBEDTLS_BIGNUM_C)

/** Select between two sign values without branches.
 *
 * This is functionally equivalent to `condition ? if1 : if0` but uses only bit
 * operations in order to avoid branches.
 *
 * \note if1 and if0 must be either 1 or -1, otherwise the result
 *       is undefined.
 *
 * \param condition     Condition to test; must be either 0 or 1.
 * \param if1           The first sign; must be either +1 or -1.
 * \param if0           The second sign; must be either +1 or -1.
 *
 * \return  \c if1 if \p condition is nonzero, otherwise \c if0.
 */
static int mbedtls_ct_cond_select_sign(unsigned char condition,
                                       int if1,
                                       int if0)
{
    /* In order to avoid questions about what we can reasonably assume about
     * the representations of signed integers, move everything to unsigned
     * by taking advantage of the fact that if1 and if0 are either +1 or -1. */
    unsigned uif1 = if1 + 1;
    unsigned uif0 = if0 + 1;

    /* condition was 0 or 1, mask is 0 or 2 as are uif1 and uif0 */
    const unsigned mask = condition << 1;

    /* select uif1 or uif0 */
    unsigned ur = (uif0 & ~mask) | (uif1 & mask);

    /* ur is now 0 or 2, convert back to -1 or +1 */
    return (int) ur - 1;
}

void mbedtls_ct_mpi_uint_cond_assign(size_t n,
                                     mbedtls_mpi_uint *dest,
                                     const mbedtls_mpi_uint *src,
                                     unsigned char condition)
{
    size_t i;

    /* MSVC has a warning about unary minus on unsigned integer types,
     * but this is well-defined and precisely what we want to do here. */
#if defined(_MSC_VER)
#pragma warning( push )
#pragma warning( disable : 4146 )
#endif

    /* all-bits 1 if condition is 1, all-bits 0 if condition is 0 */
    const mbedtls_mpi_uint mask = -condition;

#if defined(_MSC_VER)
#pragma warning( pop )
#endif

    for (i = 0; i < n; i++) {
        dest[i] = (src[i] & mask) | (dest[i] & ~mask);
    }
}

#endif /* MBEDTLS_BIGNUM_C */

#if defined(MBEDTLS_BASE64_C)

unsigned char mbedtls_ct_base64_enc_char(unsigned char value)
{
    unsigned char digit = 0;
    /* For each range of values, if value is in that range, mask digit with
     * the corresponding value. Since value can only be in a single range,
     * only at most one masking will change digit. */
    digit |= mbedtls_ct_uchar_mask_of_range(0, 25, value) & ('A' + value);
    digit |= mbedtls_ct_uchar_mask_of_range(26, 51, value) & ('a' + value - 26);
    digit |= mbedtls_ct_uchar_mask_of_range(52, 61, value) & ('0' + value - 52);
    digit |= mbedtls_ct_uchar_mask_of_range(62, 62, value) & '+';
    digit |= mbedtls_ct_uchar_mask_of_range(63, 63, value) & '/';
    return digit;
}

signed char mbedtls_ct_base64_dec_value(unsigned char c)
{
    unsigned char val = 0;
    /* For each range of digits, if c is in that range, mask val with
     * the corresponding value. Since c can only be in a single range,
     * only at most one masking will change val. Set val to one plus
     * the desired value so that it stays 0 if c is in none of the ranges. */
    val |= mbedtls_ct_uchar_mask_of_range('A', 'Z', c) & (c - 'A' +  0 + 1);
    val |= mbedtls_ct_uchar_mask_of_range('a', 'z', c) & (c - 'a' + 26 + 1);
    val |= mbedtls_ct_uchar_mask_of_range('0', '9', c) & (c - '0' + 52 + 1);
    val |= mbedtls_ct_uchar_mask_of_range('+', '+', c) & (c - '+' + 62 + 1);
    val |= mbedtls_ct_uchar_mask_of_range('/', '/', c) & (c - '/' + 63 + 1);
    /* At this point, val is 0 if c is an invalid digit and v+1 if c is
     * a digit with the value v. */
    return val - 1;
}

#endif /* MBEDTLS_BASE64_C */
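
/* Worked examples for the Base64 helpers above:
 * mbedtls_ct_base64_enc_char(0) == 'A', (26) == 'a', (52) == '0',
 * (62) == '+' and (63) == '/';
 * mbedtls_ct_base64_dec_value('A') == 0, ('/') == 63, and any byte outside
 * the Base64 alphabet (for example '$') yields -1.
 */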

#if defined(MBEDTLS_PKCS1_V15) && defined(MBEDTLS_RSA_C) && !defined(MBEDTLS_RSA_ALT)

/** Shift some data towards the left inside a buffer.
 *
 * `mbedtls_ct_mem_move_to_left(start, total, offset)` is functionally
 * equivalent to
 * ```
 * memmove(start, start + offset, total - offset);
 * memset(start + offset, 0, total - offset);
 * ```
 * but it strives to use a memory access pattern (and thus total timing)
 * that does not depend on \p offset. This timing independence comes at
 * the expense of performance.
 *
 * \param start     Pointer to the start of the buffer.
 * \param total     Total size of the buffer.
 * \param offset    Offset from which to copy \p total - \p offset bytes.
 */
static void mbedtls_ct_mem_move_to_left(void *start,
                                        size_t total,
                                        size_t offset)
{
    volatile unsigned char *buf = start;
    size_t i, n;
    if (total == 0) {
        return;
    }
    for (i = 0; i < total; i++) {
        unsigned no_op = mbedtls_ct_size_gt(total - offset, i);
        /* The first `total - offset` passes are a no-op. The last
         * `offset` passes shift the data one byte to the left and
         * zero out the last byte. */
        for (n = 0; n < total - 1; n++) {
            unsigned char current = buf[n];
            unsigned char next = buf[n+1];
            buf[n] = mbedtls_ct_uint_if(no_op, current, next);
        }
        buf[total-1] = mbedtls_ct_uint_if(no_op, buf[total-1], 0);
    }
}

#endif /* MBEDTLS_PKCS1_V15 && MBEDTLS_RSA_C && ! MBEDTLS_RSA_ALT */

#if defined(MBEDTLS_SSL_SOME_SUITES_USE_MAC)

void mbedtls_ct_memcpy_if_eq(unsigned char *dest,
                             const unsigned char *src,
                             size_t len,
                             size_t c1,
                             size_t c2)
{
    /* mask = c1 == c2 ? 0xff : 0x00 */
    const size_t equal = mbedtls_ct_size_bool_eq(c1, c2);

    /* dest[i] = c1 == c2 ? src[i] : dest[i] */
    size_t i = 0;
#if defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS)
    const uint32_t mask32 = (uint32_t) mbedtls_ct_size_mask(equal);
    const unsigned char mask = (unsigned char) mask32 & 0xff;

    for (; (i + 4) <= len; i += 4) {
        uint32_t a = mbedtls_get_unaligned_uint32(src + i) & mask32;
        uint32_t b = mbedtls_get_unaligned_uint32(dest + i) & ~mask32;
        mbedtls_put_unaligned_uint32(dest + i, a | b);
    }
#else
    const unsigned char mask = (unsigned char) mbedtls_ct_size_mask(equal);
#endif /* MBEDTLS_EFFICIENT_UNALIGNED_ACCESS */
    for (; i < len; i++) {
        dest[i] = (src[i] & mask) | (dest[i] & ~mask);
    }
}

void mbedtls_ct_memcpy_offset(unsigned char *dest,
                              const unsigned char *src,
                              size_t offset,
                              size_t offset_min,
                              size_t offset_max,
                              size_t len)
{
    size_t offsetval;

    for (offsetval = offset_min; offsetval <= offset_max; offsetval++) {
        mbedtls_ct_memcpy_if_eq(dest, src + offsetval, len,
                                offsetval, offset);
    }
}
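
/* Illustrative use of mbedtls_ct_memcpy_offset() (not from this file): in the
 * TLS 1.2 CBC MAC check it extracts the MAC that sits at a secret offset
 * inside the record buffer. Every admissible offset in [offset_min,
 * offset_max] is scanned and the copy only takes effect when the candidate
 * offset equals the secret one, so the memory access pattern depends only on
 * the public bounds.
 */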

#if defined(MBEDTLS_USE_PSA_CRYPTO)

#if defined(PSA_WANT_ALG_SHA_384)
#define MAX_HASH_BLOCK_LENGTH PSA_HASH_BLOCK_LENGTH(PSA_ALG_SHA_384)
#elif defined(PSA_WANT_ALG_SHA_256)
#define MAX_HASH_BLOCK_LENGTH PSA_HASH_BLOCK_LENGTH(PSA_ALG_SHA_256)
#else /* See check_config.h */
#define MAX_HASH_BLOCK_LENGTH PSA_HASH_BLOCK_LENGTH(PSA_ALG_SHA_1)
#endif

int mbedtls_ct_hmac(mbedtls_svc_key_id_t key,
                    psa_algorithm_t mac_alg,
                    const unsigned char *add_data,
                    size_t add_data_len,
                    const unsigned char *data,
                    size_t data_len_secret,
                    size_t min_data_len,
                    size_t max_data_len,
                    unsigned char *output)
{
    /*
     * This function breaks the HMAC abstraction and uses the psa_hash_clone()
     * extension in order to get constant-flow behaviour.
     *
     * HMAC(msg) is defined as HASH(okey + HASH(ikey + msg)) where + means
     * concatenation, and okey/ikey are the XOR of the key with some fixed bit
     * patterns (see RFC 2104, sec. 2).
     *
     * We'll first compute ikey/okey, then inner_hash = HASH(ikey + msg) by
     * hashing up to minlen, then cloning the context, and for each byte up
     * to maxlen finishing up the hash computation, keeping only the
     * correct result.
     *
     * Then we only need to compute HASH(okey + inner_hash) and we're done.
     */
    psa_algorithm_t hash_alg = PSA_ALG_HMAC_GET_HASH(mac_alg);
    const size_t block_size = PSA_HASH_BLOCK_LENGTH(hash_alg);
    unsigned char key_buf[MAX_HASH_BLOCK_LENGTH];
    const size_t hash_size = PSA_HASH_LENGTH(hash_alg);
    psa_hash_operation_t operation = PSA_HASH_OPERATION_INIT;
    size_t hash_length;

    unsigned char aux_out[PSA_HASH_MAX_SIZE];
    psa_hash_operation_t aux_operation = PSA_HASH_OPERATION_INIT;
    size_t offset;
    psa_status_t status = PSA_ERROR_CORRUPTION_DETECTED;

    size_t mac_key_length;
    size_t i;

#define PSA_CHK(func_call)             \
    do {                               \
        status = (func_call);          \
        if (status != PSA_SUCCESS)     \
            goto cleanup;              \
    } while (0)

    /* Export MAC key.
     * We assume the key length is always exactly the output size,
     * which is never more than the block size, thus we use block_size
     * as the key buffer size.
     */
    PSA_CHK(psa_export_key(key, key_buf, block_size, &mac_key_length));

    /* Calculate ikey */
    for (i = 0; i < mac_key_length; i++) {
        key_buf[i] = (unsigned char) (key_buf[i] ^ 0x36);
    }
    for (; i < block_size; ++i) {
        key_buf[i] = 0x36;
    }
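
    /* At this point key_buf holds ikey = key ^ ipad, where ipad is the
     * all-0x36 block from RFC 2104. okey = key ^ opad (opad being all 0x5C
     * bytes) is not stored separately: it is recovered further down by
     * computing (key_buf[i] ^ 0x36) ^ 0x5C == key[i] ^ 0x5C. */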

    PSA_CHK(psa_hash_setup(&operation, hash_alg));

    /* Now compute inner_hash = HASH(ikey + msg) */
    PSA_CHK(psa_hash_update(&operation, key_buf, block_size));
    PSA_CHK(psa_hash_update(&operation, add_data, add_data_len));
    PSA_CHK(psa_hash_update(&operation, data, min_data_len));

    /* Fill the hash buffer in advance with something that is
     * not a valid hash (barring an attack on the hash and
     * deliberately-crafted input), in case the caller doesn't
     * check the return status properly. */
    memset(output, '!', hash_size);

    /* For each possible length, compute the hash up to that point */
    for (offset = min_data_len; offset <= max_data_len; offset++) {
        PSA_CHK(psa_hash_clone(&operation, &aux_operation));
        PSA_CHK(psa_hash_finish(&aux_operation, aux_out,
                                PSA_HASH_MAX_SIZE, &hash_length));
        /* Keep only the correct inner_hash in the output buffer */
        mbedtls_ct_memcpy_if_eq(output, aux_out, hash_size,
                                offset, data_len_secret);

        if (offset < max_data_len) {
            PSA_CHK(psa_hash_update(&operation, data + offset, 1));
        }
    }

    /* Abort current operation to prepare for final operation */
    PSA_CHK(psa_hash_abort(&operation));

    /* Calculate okey */
    for (i = 0; i < mac_key_length; i++) {
        key_buf[i] = (unsigned char) ((key_buf[i] ^ 0x36) ^ 0x5C);
    }
    for (; i < block_size; ++i) {
        key_buf[i] = 0x5C;
    }

    /* Now compute HASH(okey + inner_hash) */
    PSA_CHK(psa_hash_setup(&operation, hash_alg));
    PSA_CHK(psa_hash_update(&operation, key_buf, block_size));
    PSA_CHK(psa_hash_update(&operation, output, hash_size));
    PSA_CHK(psa_hash_finish(&operation, output, hash_size, &hash_length));

#undef PSA_CHK

cleanup:
    mbedtls_platform_zeroize(key_buf, MAX_HASH_BLOCK_LENGTH);
    mbedtls_platform_zeroize(aux_out, PSA_HASH_MAX_SIZE);

    psa_hash_abort(&operation);
    psa_hash_abort(&aux_operation);
    return PSA_TO_MBEDTLS_ERR(status);
}
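
/* Illustrative call of mbedtls_ct_hmac() (not from this file; the variable
 * names are placeholders), assuming a caller that checks the MAC of a
 * TLS 1.2 CBC record with HMAC-SHA-256:
 *
 *     ret = mbedtls_ct_hmac(mac_key, PSA_ALG_HMAC(PSA_ALG_SHA_256),
 *                           add_data, add_data_len,
 *                           record, secret_plaintext_len,
 *                           min_len, max_len, computed_mac);
 *
 * Only min_len and max_len may be derived from public information; the loop
 * over all possible lengths above is what keeps the timing independent of
 * secret_plaintext_len.
 */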

#undef MAX_HASH_BLOCK_LENGTH

#else
int mbedtls_ct_hmac(mbedtls_md_context_t *ctx,
                    const unsigned char *add_data,
                    size_t add_data_len,
                    const unsigned char *data,
                    size_t data_len_secret,
                    size_t min_data_len,
                    size_t max_data_len,
                    unsigned char *output)
{
    /*
     * This function breaks the HMAC abstraction and uses the md_clone()
     * extension to the MD API in order to get constant-flow behaviour.
     *
     * HMAC(msg) is defined as HASH(okey + HASH(ikey + msg)) where + means
     * concatenation, and okey/ikey are the XOR of the key with some fixed bit
     * patterns (see RFC 2104, sec. 2), which are stored in ctx->hmac_ctx.
     *
     * We'll first compute inner_hash = HASH(ikey + msg) by hashing up to
     * minlen, then cloning the context, and for each byte up to maxlen
     * finishing up the hash computation, keeping only the correct result.
     *
     * Then we only need to compute HASH(okey + inner_hash) and we're done.
     */
    const mbedtls_md_type_t md_alg = mbedtls_md_get_type(ctx->md_info);
    /* TLS 1.2 only supports SHA-384, SHA-256, SHA-1 and MD5,
     * all of which have the same block size except SHA-384. */
    const size_t block_size = md_alg == MBEDTLS_MD_SHA384 ? 128 : 64;
    const unsigned char * const ikey = ctx->hmac_ctx;
    const unsigned char * const okey = ikey + block_size;
    const size_t hash_size = mbedtls_md_get_size(ctx->md_info);

    unsigned char aux_out[MBEDTLS_MD_MAX_SIZE];
    mbedtls_md_context_t aux;
    size_t offset;
    int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;

    mbedtls_md_init(&aux);

#define MD_CHK(func_call)       \
    do {                        \
        ret = (func_call);      \
        if (ret != 0)           \
            goto cleanup;       \
    } while (0)

    MD_CHK(mbedtls_md_setup(&aux, ctx->md_info, 0));

    /* After hmac_starts() or hmac_reset(), ikey has already been hashed,
     * so we can start directly with the message */
    MD_CHK(mbedtls_md_update(ctx, add_data, add_data_len));
    MD_CHK(mbedtls_md_update(ctx, data, min_data_len));

    /* Fill the hash buffer in advance with something that is
     * not a valid hash (barring an attack on the hash and
     * deliberately-crafted input), in case the caller doesn't
     * check the return status properly. */
    memset(output, '!', hash_size);

    /* For each possible length, compute the hash up to that point */
    for (offset = min_data_len; offset <= max_data_len; offset++) {
        MD_CHK(mbedtls_md_clone(&aux, ctx));
        MD_CHK(mbedtls_md_finish(&aux, aux_out));
        /* Keep only the correct inner_hash in the output buffer */
        mbedtls_ct_memcpy_if_eq(output, aux_out, hash_size,
                                offset, data_len_secret);

        if (offset < max_data_len) {
            MD_CHK(mbedtls_md_update(ctx, data + offset, 1));
        }
    }

    /* The context needs to finish() before it starts() again */
    MD_CHK(mbedtls_md_finish(ctx, aux_out));

    /* Now compute HASH(okey + inner_hash) */
    MD_CHK(mbedtls_md_starts(ctx));
    MD_CHK(mbedtls_md_update(ctx, okey, block_size));
    MD_CHK(mbedtls_md_update(ctx, output, hash_size));
    MD_CHK(mbedtls_md_finish(ctx, output));

    /* Done, get ready for next time */
    MD_CHK(mbedtls_md_hmac_reset(ctx));

#undef MD_CHK

cleanup:
    mbedtls_md_free(&aux);
    return ret;
}
#endif /* MBEDTLS_USE_PSA_CRYPTO */

#endif /* MBEDTLS_SSL_SOME_SUITES_USE_MAC */

#if defined(MBEDTLS_BIGNUM_C)

#define MPI_VALIDATE_RET(cond)                                       \
    MBEDTLS_INTERNAL_VALIDATE_RET(cond, MBEDTLS_ERR_MPI_BAD_INPUT_DATA)

/*
 * Conditionally assign X = Y, without leaking information
 * about whether the assignment was made or not.
 * (Leaking information about the respective sizes of X and Y is ok however.)
 */
#if defined(_MSC_VER) && defined(_M_ARM64) && (_MSC_FULL_VER < 193131103)
/*
 * MSVC miscompiles this function if it's inlined prior to Visual Studio 2022 version 17.1. See:
 * https://developercommunity.visualstudio.com/t/c-compiler-miscompiles-part-of-mbedtls-library-on/1646989
 */
__declspec(noinline)
#endif
int mbedtls_mpi_safe_cond_assign(mbedtls_mpi *X,
                                 const mbedtls_mpi *Y,
                                 unsigned char assign)
{
    int ret = 0;
    MPI_VALIDATE_RET(X != NULL);
    MPI_VALIDATE_RET(Y != NULL);

    /* all-bits 1 if assign is 1, all-bits 0 if assign is 0 */
    mbedtls_mpi_uint limb_mask = mbedtls_ct_mpi_uint_mask(assign);

    MBEDTLS_MPI_CHK(mbedtls_mpi_grow(X, Y->n));

    X->s = mbedtls_ct_cond_select_sign(assign, Y->s, X->s);

    mbedtls_mpi_core_cond_assign(X->p, Y->p, Y->n, assign);

    for (size_t i = Y->n; i < X->n; i++) {
        X->p[i] &= ~limb_mask;
    }

cleanup:
    return ret;
}

/*
 * Conditionally swap X and Y, without leaking information
 * about whether the swap was made or not.
 * Here it is not ok to simply swap the pointers, which would lead to
 * different memory access patterns when X and Y are used afterwards.
 */
int mbedtls_mpi_safe_cond_swap(mbedtls_mpi *X,
                               mbedtls_mpi *Y,
                               unsigned char swap)
{
    int ret = 0;
    int s;
    MPI_VALIDATE_RET(X != NULL);
    MPI_VALIDATE_RET(Y != NULL);

    if (X == Y) {
        return 0;
    }

    MBEDTLS_MPI_CHK(mbedtls_mpi_grow(X, Y->n));
    MBEDTLS_MPI_CHK(mbedtls_mpi_grow(Y, X->n));

    s = X->s;
    X->s = mbedtls_ct_cond_select_sign(swap, Y->s, X->s);
    Y->s = mbedtls_ct_cond_select_sign(swap, s, Y->s);

    mbedtls_mpi_core_cond_swap(X->p, Y->p, X->n, swap);

cleanup:
    return ret;
}

/*
 * Compare unsigned values in constant time
 */
unsigned mbedtls_mpi_core_lt_ct(const mbedtls_mpi_uint *A,
                                const mbedtls_mpi_uint *B,
                                size_t limbs)
{
    unsigned ret, cond, done;

    /* The value of any of these variables is either 0 or 1 for the rest of
     * their scope. */
    ret = cond = done = 0;

    for (size_t i = limbs; i > 0; i--) {
        /*
         * If B[i - 1] < A[i - 1] then A < B is false and the result must
         * remain 0.
         *
         * Even if we can make a decision, we just mark the result and
         * the fact that we are done, and continue looping.
         */
        cond = mbedtls_ct_mpi_uint_lt(B[i - 1], A[i - 1]);
        done |= cond;

        /*
         * If A[i - 1] < B[i - 1] then A < B is true.
         *
         * Again even if we can make a decision, we just mark the result and
         * the fact that we are done and continue looping.
         */
        cond = mbedtls_ct_mpi_uint_lt(A[i - 1], B[i - 1]);
        ret |= cond & (1 - done);
        done |= cond;
    }

    /*
     * If all the limbs were equal, then the numbers are equal, A < B is false
     * and leaving the result 0 is correct.
     */

    return ret;
}
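
/* Worked example for mbedtls_mpi_core_lt_ct() above, with 2 limbs stored
 * least significant first: A = { 9, 1 } and B = { 2, 3 } represent
 * 1 * 2^biL + 9 and 3 * 2^biL + 2 (biL = bits per limb). The loop starts at
 * the most significant limbs (1 vs 3): A's limb is smaller, so ret and done
 * both become 1. The less significant limbs (9 vs 2) are still compared, but
 * can no longer change ret, and the function returns 1 (meaning A < B).
 */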

/*
 * Compare signed values in constant time
 */
int mbedtls_mpi_lt_mpi_ct(const mbedtls_mpi *X,
                          const mbedtls_mpi *Y,
                          unsigned *ret)
{
    size_t i;
    /* The value of any of these variables is either 0 or 1 at all times. */
    unsigned cond, done, X_is_negative, Y_is_negative;

    MPI_VALIDATE_RET(X != NULL);
    MPI_VALIDATE_RET(Y != NULL);
    MPI_VALIDATE_RET(ret != NULL);

    if (X->n != Y->n) {
        return MBEDTLS_ERR_MPI_BAD_INPUT_DATA;
    }

    /*
     * Set N_is_negative to 1 if N < 0 and to 0 otherwise.
     * We know that N->s == 1 if N >= 0 and N->s == -1 if N < 0.
     */
    X_is_negative = (X->s & 2) >> 1;
    Y_is_negative = (Y->s & 2) >> 1;

    /*
     * If the signs are different, then the positive operand is the bigger.
     * That is if X is negative (X_is_negative == 1), then X < Y is true and it
     * is false if X is positive (X_is_negative == 0).
     */
    cond = (X_is_negative ^ Y_is_negative);
    *ret = cond & X_is_negative;

    /*
     * This is a constant-time function. We might have the result, but we still
     * need to go through the loop. Record if we have the result already.
     */
    done = cond;

    for (i = X->n; i > 0; i--) {
        /*
         * If Y->p[i - 1] < X->p[i - 1] then X < Y is true if and only if both
         * X and Y are negative.
         *
         * Again even if we can make a decision, we just mark the result and
         * the fact that we are done and continue looping.
         */
        cond = mbedtls_ct_mpi_uint_lt(Y->p[i - 1], X->p[i - 1]);
        *ret |= cond & (1 - done) & X_is_negative;
        done |= cond;

        /*
         * If X->p[i - 1] < Y->p[i - 1] then X < Y is true if and only if both
         * X and Y are positive.
         *
         * Again even if we can make a decision, we just mark the result and
         * the fact that we are done and continue looping.
         */
        cond = mbedtls_ct_mpi_uint_lt(X->p[i - 1], Y->p[i - 1]);
        *ret |= cond & (1 - done) & (1 - X_is_negative);
        done |= cond;
    }

    return 0;
}

#endif /* MBEDTLS_BIGNUM_C */
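
/* Worked example of the EME-PKCS1-v1_5 layout handled below, for a 128-byte
 * (1024-bit) RSA decryption result carrying a 20-byte message:
 *
 *     00 02 <PS: 105 nonzero bytes> 00 <20 message bytes>
 *
 * Here pad_count = 105 and plaintext_size = ilen - pad_count - 3
 * = 128 - 105 - 3 = 20.
 */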

#if defined(MBEDTLS_PKCS1_V15) && defined(MBEDTLS_RSA_C) && !defined(MBEDTLS_RSA_ALT)

int mbedtls_ct_rsaes_pkcs1_v15_unpadding(unsigned char *input,
                                         size_t ilen,
                                         unsigned char *output,
                                         size_t output_max_len,
                                         size_t *olen)
{
    int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
    size_t i, plaintext_max_size;

    /* The following variables take sensitive values: their value must
     * not leak into the observable behavior of the function other than
     * the designated outputs (output, olen, return value). Otherwise
     * this would open the execution of the function to
     * side-channel-based variants of the Bleichenbacher padding oracle
     * attack. Potential side channels include overall timing, memory
     * access patterns (especially visible to an adversary who has access
     * to a shared memory cache), and branches (especially visible to
     * an adversary who has access to a shared code cache or to a shared
     * branch predictor). */
    size_t pad_count = 0;
    unsigned bad = 0;
    unsigned char pad_done = 0;
    size_t plaintext_size = 0;
    unsigned output_too_large;

    plaintext_max_size = (output_max_len > ilen - 11) ? ilen - 11
                                                      : output_max_len;

    /* Check and get padding length in constant time and constant
     * memory trace. The first byte must be 0. */
    bad |= input[0];

    /* Decode EME-PKCS1-v1_5 padding: 0x00 || 0x02 || PS || 0x00
     * where PS must be at least 8 nonzero bytes. */
    bad |= input[1] ^ MBEDTLS_RSA_CRYPT;

    /* Read the whole buffer. Set pad_done to nonzero if we find
     * the 0x00 byte and remember the padding length in pad_count. */
    for (i = 2; i < ilen; i++) {
        pad_done  |= ((input[i] | (unsigned char) -input[i]) >> 7) ^ 1;
        pad_count += ((pad_done | (unsigned char) -pad_done) >> 7) ^ 1;
    }

    /* If pad_done is still zero, there's no data, only unfinished padding. */
    bad |= mbedtls_ct_uint_if(pad_done, 0, 1);

    /* There must be at least 8 bytes of padding. */
    bad |= mbedtls_ct_size_gt(8, pad_count);

    /* If the padding is valid, set plaintext_size to the number of
     * remaining bytes after stripping the padding. If the padding
     * is invalid, avoid leaking this fact through the size of the
     * output: use the maximum message size that fits in the output
     * buffer. Do it without branches to avoid leaking the padding
     * validity through timing. RSA keys are small enough that all the
     * size_t values involved fit in unsigned int. */
    plaintext_size = mbedtls_ct_uint_if(
        bad, (unsigned) plaintext_max_size,
        (unsigned) (ilen - pad_count - 3));

    /* Set output_too_large to 0 if the plaintext fits in the output
     * buffer and to 1 otherwise. */
    output_too_large = mbedtls_ct_size_gt(plaintext_size,
                                          plaintext_max_size);

    /* Set ret without branches to avoid timing attacks. Return:
     * - INVALID_PADDING if the padding is bad (bad != 0).
     * - OUTPUT_TOO_LARGE if the padding is good but the decrypted
     *   plaintext does not fit in the output buffer.
     * - 0 if the padding is correct. */
    ret = -(int) mbedtls_ct_uint_if(
        bad, -MBEDTLS_ERR_RSA_INVALID_PADDING,
        mbedtls_ct_uint_if(output_too_large,
                           -MBEDTLS_ERR_RSA_OUTPUT_TOO_LARGE,
                           0));

    /* If the padding is bad or the plaintext is too large, zero the
     * data that we're about to copy to the output buffer.
     * We need to copy the same amount of data
     * from the same buffer whether the padding is good or not to
     * avoid leaking the padding validity through overall timing or
     * through memory or cache access patterns. */
    bad = mbedtls_ct_uint_mask(bad | output_too_large);
    for (i = 11; i < ilen; i++) {
        input[i] &= ~bad;
    }

    /* If the plaintext is too large, truncate it to the buffer size.
     * Copy anyway to avoid revealing the length through timing, because
     * revealing the length is as bad as revealing the padding validity
     * for a Bleichenbacher attack. */
    plaintext_size = mbedtls_ct_uint_if(output_too_large,
                                        (unsigned) plaintext_max_size,
                                        (unsigned) plaintext_size);

    /* Move the plaintext to the leftmost position where it can start in
     * the working buffer, i.e. make it start plaintext_max_size from
     * the end of the buffer. Do this with a memory access trace that
     * does not depend on the plaintext size. After this move, the
     * starting location of the plaintext is no longer sensitive
     * information. */
    mbedtls_ct_mem_move_to_left(input + ilen - plaintext_max_size,
                                plaintext_max_size,
                                plaintext_max_size - plaintext_size);

    /* Finally copy the decrypted plaintext plus trailing zeros into the output
     * buffer. If output_max_len is 0, then output may be an invalid pointer
     * and the result of memcpy() would be undefined; prevent undefined
     * behavior making sure to depend only on output_max_len (the size of the
     * user-provided output buffer), which is independent from plaintext
     * length, validity of padding, success of the decryption, and other
     * secrets. */
    if (output_max_len != 0) {
        memcpy(output, input + ilen - plaintext_max_size, plaintext_max_size);
    }

    /* Report the amount of data we copied to the output buffer. In case
     * of errors (bad padding or output too large), the value of *olen
     * when this function returns is not specified. Making it equivalent
     * to the good case limits the risks of leaking the padding validity. */
    *olen = plaintext_size;

    return ret;
}

#endif /* MBEDTLS_PKCS1_V15 && MBEDTLS_RSA_C && ! MBEDTLS_RSA_ALT */