File openssl-Bleichenbachers_CAT.patch of Package openssl-1_1.13606 (project SUSE:SLE-15:Update)
https://github.com/openssl/openssl/issues/7739 Squash of the following commits: PR #6915: commit a6d8fe9, db09fdc, c5c6915 and 73bebc1 (part of 1.1.0j release) PR #7735, commit 0ba39c8, b58c44a, 43d53fa, 437b7f0, 6d405b6 and e29dcc3 (will be part of 1.1.0k) Index: openssl-1.1.0i/crypto/bn/bn_blind.c =================================================================== --- openssl-1.1.0i.orig/crypto/bn/bn_blind.c 2018-08-14 14:45:06.000000000 +0200 +++ openssl-1.1.0i/crypto/bn/bn_blind.c 2019-01-18 15:15:11.346099889 +0100 @@ -109,10 +109,15 @@ int BN_BLINDING_update(BN_BLINDING *b, B if (!BN_BLINDING_create_param(b, NULL, NULL, ctx, NULL, NULL)) goto err; } else if (!(b->flags & BN_BLINDING_NO_UPDATE)) { - if (!BN_mod_mul(b->A, b->A, b->A, b->mod, ctx)) - goto err; - if (!BN_mod_mul(b->Ai, b->Ai, b->Ai, b->mod, ctx)) - goto err; + if (b->m_ctx != NULL) { + if (!bn_mul_mont_fixed_top(b->Ai, b->Ai, b->Ai, b->m_ctx, ctx) + || !bn_mul_mont_fixed_top(b->A, b->A, b->A, b->m_ctx, ctx)) + goto err; + } else { + if (!BN_mod_mul(b->Ai, b->Ai, b->Ai, b->mod, ctx) + || !BN_mod_mul(b->A, b->A, b->A, b->mod, ctx)) + goto err; + } } ret = 1; @@ -144,13 +149,13 @@ int BN_BLINDING_convert_ex(BIGNUM *n, BI else if (!BN_BLINDING_update(b, ctx)) return (0); - if (r != NULL) { - if (!BN_copy(r, b->Ai)) - ret = 0; - } + if (r != NULL && (BN_copy(r, b->Ai) == NULL)) + return 0; - if (!BN_mod_mul(n, n, b->A, b->mod, ctx)) - ret = 0; + if (b->m_ctx != NULL) + ret = BN_mod_mul_montgomery(n, n, b->A, b->m_ctx, ctx); + else + ret = BN_mod_mul(n, n, b->A, b->mod, ctx); return ret; } @@ -167,14 +172,29 @@ int BN_BLINDING_invert_ex(BIGNUM *n, con bn_check_top(n); - if (r != NULL) - ret = BN_mod_mul(n, n, r, b->mod, ctx); - else { - if (b->Ai == NULL) { - BNerr(BN_F_BN_BLINDING_INVERT_EX, BN_R_NOT_INITIALIZED); - return (0); + if (r == NULL && (r = b->Ai) == NULL) { + BNerr(BN_F_BN_BLINDING_INVERT_EX, BN_R_NOT_INITIALIZED); + return 0; + } + + if (b->m_ctx != NULL) { + /* ensure that BN_mod_mul_montgomery takes pre-defined path */ + if (n->dmax >= r->top) { + size_t i, rtop = r->top, ntop = n->top; + BN_ULONG mask; + + for (i = 0; i < rtop; i++) { + mask = (BN_ULONG)0 - ((i - ntop) >> (8 * sizeof(i) - 1)); + n->d[i] &= mask; + } + mask = (BN_ULONG)0 - ((rtop - ntop) >> (8 * sizeof(ntop) - 1)); + /* always true, if (rtop >= ntop) n->top = r->top; */ + n->top = (int)(rtop & ~mask) | (ntop & mask); + n->flags |= (BN_FLG_FIXED_TOP & ~mask); } - ret = BN_mod_mul(n, n, b->Ai, b->mod, ctx); + ret = BN_mod_mul_montgomery(n, n, r, b->m_ctx, ctx); + } else { + ret = BN_mod_mul(n, n, r, b->mod, ctx); } bn_check_top(n); @@ -253,31 +273,35 @@ BN_BLINDING *BN_BLINDING_create_param(BN int rv; if (!BN_rand_range(ret->A, ret->mod)) goto err; - if (!int_bn_mod_inverse(ret->Ai, ret->A, ret->mod, ctx, &rv)) { - /* - * this should almost never happen for good RSA keys - */ - if (rv) { - if (retry_counter-- == 0) { - BNerr(BN_F_BN_BLINDING_CREATE_PARAM, - BN_R_TOO_MANY_ITERATIONS); - goto err; - } - } else - goto err; - } else + if (int_bn_mod_inverse(ret->Ai, ret->A, ret->mod, ctx, &rv)) break; + + /* + * this should almost never happen for good RSA keys + */ + if (!rv) + goto err; + + if (retry_counter-- == 0) { + BNerr(BN_F_BN_BLINDING_CREATE_PARAM, BN_R_TOO_MANY_ITERATIONS); + goto err; + } } while (1); if (ret->bn_mod_exp != NULL && ret->m_ctx != NULL) { - if (!ret->bn_mod_exp - (ret->A, ret->A, ret->e, ret->mod, ctx, ret->m_ctx)) + if (!ret->bn_mod_exp(ret->A, ret->A, ret->e, ret->mod, ctx, ret->m_ctx)) goto err; } else { if 
(!BN_mod_exp(ret->A, ret->A, ret->e, ret->mod, ctx)) goto err; } + if (ret->m_ctx != NULL) { + if (!bn_to_mont_fixed_top(ret->Ai, ret->Ai, ret->m_ctx, ctx) + || !bn_to_mont_fixed_top(ret->A, ret->A, ret->m_ctx, ctx)) + goto err; + } + return ret; err: if (b == NULL) { Index: openssl-1.1.0i/crypto/bn/bn_lib.c =================================================================== --- openssl-1.1.0i.orig/crypto/bn/bn_lib.c 2018-08-14 14:45:06.000000000 +0200 +++ openssl-1.1.0i/crypto/bn/bn_lib.c 2019-01-18 15:15:11.346099889 +0100 @@ -503,26 +503,40 @@ BIGNUM *BN_bin2bn(const unsigned char *s static int bn2binpad(const BIGNUM *a, unsigned char *to, int tolen) { int n; - size_t i, inc, lasti, j; + size_t i, lasti, j, atop, mask; BN_ULONG l; + /* + * In case |a| is fixed-top, BN_num_bytes can return bogus length, + * but it's assumed that fixed-top inputs ought to be "nominated" + * even for padded output, so it works out... + */ n = BN_num_bytes(a); - if (tolen == -1) + if (tolen == -1) { tolen = n; - else if (tolen < n) - return -1; + } else if (tolen < n) { /* uncommon/unlike case */ + BIGNUM temp = *a; - if (n == 0) { + bn_correct_top(&temp); + n = BN_num_bytes(&temp); + if (tolen < n) + return -1; + } + + /* Swipe through whole available data and don't give away padded zero. */ + atop = a->dmax * BN_BYTES; + if (atop == 0) { OPENSSL_cleanse(to, tolen); return tolen; } - lasti = n - 1; - for (i = 0, inc = 1, j = tolen; j > 0;) { + lasti = atop - 1; + atop = a->top * BN_BYTES; + for (i = 0, j = 0, to += tolen; j < (size_t)tolen; j++) { l = a->d[i / BN_BYTES]; - to[--j] = (unsigned char)(l >> (8 * (i % BN_BYTES)) & (0 - inc)); - inc = (i - lasti) >> (8 * sizeof(i) - 1); - i += inc; /* stay on top limb */ + mask = 0 - ((j - atop) >> (8 * sizeof(i) - 1)); + *--to = (unsigned char)(l >> (8 * (i % BN_BYTES)) & mask); + i += (i - lasti) >> (8 * sizeof(i) - 1); /* stay on last limb */ } return tolen; Index: openssl-1.1.0i/crypto/bn/bn_mod.c =================================================================== --- openssl-1.1.0i.orig/crypto/bn/bn_mod.c 2018-08-14 14:45:06.000000000 +0200 +++ openssl-1.1.0i/crypto/bn/bn_mod.c 2019-01-18 15:15:11.346099889 +0100 @@ -58,7 +58,7 @@ int bn_mod_add_fixed_top(BIGNUM *r, cons if (mtop > sizeof(storage) / sizeof(storage[0]) && (tp = OPENSSL_malloc(mtop * sizeof(BN_ULONG))) == NULL) - return 0; + return 0; ap = a->d != NULL ? a->d : tp; bp = b->d != NULL ? b->d : tp; @@ -83,6 +83,7 @@ int bn_mod_add_fixed_top(BIGNUM *r, cons ((volatile BN_ULONG *)tp)[i] = 0; } r->top = mtop; + r->flags |= BN_FLG_FIXED_TOP; r->neg = 0; if (tp != storage) @@ -111,6 +112,70 @@ int BN_mod_sub(BIGNUM *r, const BIGNUM * } /* + * BN_mod_sub variant that may be used if both a and b are non-negative, + * a is less than m, while b is of same bit width as m. It's implemented + * as subtraction followed by two conditional additions. + * + * 0 <= a < m + * 0 <= b < 2^w < 2*m + * + * after subtraction + * + * -2*m < r = a - b < m + * + * Thus it takes up to two conditional additions to make |r| positive. + */ +int bn_mod_sub_fixed_top(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, + const BIGNUM *m) +{ + size_t i, ai, bi, mtop = m->top; + BN_ULONG borrow, carry, ta, tb, mask, *rp; + const BN_ULONG *ap, *bp; + + if (bn_wexpand(r, mtop) == NULL) + return 0; + + rp = r->d; + ap = a->d != NULL ? a->d : rp; + bp = b->d != NULL ? 
b->d : rp; + + for (i = 0, ai = 0, bi = 0, borrow = 0; i < mtop;) { + mask = (BN_ULONG)0 - ((i - a->top) >> (8 * sizeof(i) - 1)); + ta = ap[ai] & mask; + + mask = (BN_ULONG)0 - ((i - b->top) >> (8 * sizeof(i) - 1)); + tb = bp[bi] & mask; + rp[i] = ta - tb - borrow; + if (ta != tb) + borrow = (ta < tb); + + i++; + ai += (i - a->dmax) >> (8 * sizeof(i) - 1); + bi += (i - b->dmax) >> (8 * sizeof(i) - 1); + } + ap = m->d; + for (i = 0, mask = 0 - borrow, carry = 0; i < mtop; i++) { + ta = ((ap[i] & mask) + carry) & BN_MASK2; + carry = (ta < carry); + rp[i] = (rp[i] + ta) & BN_MASK2; + carry += (rp[i] < ta); + } + borrow -= carry; + for (i = 0, mask = 0 - borrow, carry = 0; i < mtop; i++) { + ta = ((ap[i] & mask) + carry) & BN_MASK2; + carry = (ta < carry); + rp[i] = (rp[i] + ta) & BN_MASK2; + carry += (rp[i] < ta); + } + + r->top = mtop; + r->flags |= BN_FLG_FIXED_TOP; + r->neg = 0; + + return 1; +} + +/* * BN_mod_sub variant that may be used if both a and b are non-negative and * less than m */ Index: openssl-1.1.0i/crypto/bn/bn_mont.c =================================================================== --- openssl-1.1.0i.orig/crypto/bn/bn_mont.c 2018-08-14 14:45:06.000000000 +0200 +++ openssl-1.1.0i/crypto/bn/bn_mont.c 2019-01-18 15:15:11.346099889 +0100 @@ -64,10 +64,10 @@ int bn_mul_mont_fixed_top(BIGNUM *r, con bn_check_top(tmp); if (a == b) { - if (!BN_sqr(tmp, a, ctx)) + if (!bn_sqr_fixed_top(tmp, a, ctx)) goto err; } else { - if (!BN_mul(tmp, a, b, ctx)) + if (!bn_mul_fixed_top(tmp, a, b, ctx)) goto err; } /* reduce from aRR to aR */ @@ -90,6 +90,7 @@ static int bn_from_montgomery_word(BIGNU BIGNUM *n; BN_ULONG *ap, *np, *rp, n0, v, carry; int nl, max, i; + unsigned int rtop; n = &(mont->N); nl = n->top; @@ -106,10 +107,10 @@ static int bn_from_montgomery_word(BIGNU np = n->d; rp = r->d; - /* clear the top words of T */ - i = max - r->top; - if (i) - memset(&rp[r->top], 0, sizeof(*rp) * i); + for (rtop = r->top, i = 0; i < max; i++) { + v = (BN_ULONG)0 - ((i - rtop) >> (8 * sizeof(rtop) - 1)); + rp[i] &= v; + } r->top = max; r->flags |= BN_FLG_FIXED_TOP; @@ -160,6 +161,18 @@ static int bn_from_montgomery_word(BIGNU int BN_from_montgomery(BIGNUM *ret, const BIGNUM *a, BN_MONT_CTX *mont, BN_CTX *ctx) { + int retn; + + retn = bn_from_mont_fixed_top(ret, a, mont, ctx); + bn_correct_top(ret); + bn_check_top(ret); + + return retn; +} + +int bn_from_mont_fixed_top(BIGNUM *ret, const BIGNUM *a, BN_MONT_CTX *mont, + BN_CTX *ctx) +{ int retn = 0; #ifdef MONT_WORD BIGNUM *t; @@ -167,8 +180,6 @@ int BN_from_montgomery(BIGNUM *ret, cons BN_CTX_start(ctx); if ((t = BN_CTX_get(ctx)) && BN_copy(t, a)) { retn = bn_from_montgomery_word(ret, t, mont); - bn_correct_top(ret); - bn_check_top(ret); } BN_CTX_end(ctx); #else /* !MONT_WORD */ Index: openssl-1.1.0i/crypto/bn/bn_mul.c =================================================================== --- openssl-1.1.0i.orig/crypto/bn/bn_mul.c 2018-08-14 14:45:06.000000000 +0200 +++ openssl-1.1.0i/crypto/bn/bn_mul.c 2019-01-18 15:15:11.346099889 +0100 @@ -833,6 +833,16 @@ void bn_mul_high(BN_ULONG *r, BN_ULONG * int BN_mul(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx) { + int ret = bn_mul_fixed_top(r, a, b, ctx); + + bn_correct_top(r); + bn_check_top(r); + + return ret; +} + +int bn_mul_fixed_top(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx) +{ int ret = 0; int top, al, bl; BIGNUM *rr; @@ -935,7 +945,7 @@ int BN_mul(BIGNUM *r, const BIGNUM *a, c end: #endif rr->neg = a->neg ^ b->neg; - bn_correct_top(rr); + rr->flags |= BN_FLG_FIXED_TOP; if 
(r != rr && BN_copy(r, rr) == NULL) goto err; Index: openssl-1.1.0i/crypto/bn/bn_sqr.c =================================================================== --- openssl-1.1.0i.orig/crypto/bn/bn_sqr.c 2018-08-14 14:45:06.000000000 +0200 +++ openssl-1.1.0i/crypto/bn/bn_sqr.c 2019-01-18 15:15:11.346099889 +0100 @@ -16,6 +16,16 @@ */ int BN_sqr(BIGNUM *r, const BIGNUM *a, BN_CTX *ctx) { + int ret = bn_sqr_fixed_top(r, a, ctx); + + bn_correct_top(r); + bn_check_top(r); + + return ret; +} + +int bn_sqr_fixed_top(BIGNUM *r, const BIGNUM *a, BN_CTX *ctx) +{ int max, al; int ret = 0; BIGNUM *tmp, *rr; @@ -83,7 +93,7 @@ int BN_sqr(BIGNUM *r, const BIGNUM *a, B rr->neg = 0; rr->top = max; - bn_correct_top(rr); + rr->flags |= BN_FLG_FIXED_TOP; if (r != rr && BN_copy(r, rr) == NULL) goto err; Index: openssl-1.1.0i/crypto/err/err.c =================================================================== --- openssl-1.1.0i.orig/crypto/err/err.c 2018-08-14 14:45:07.000000000 +0200 +++ openssl-1.1.0i/crypto/err/err.c 2019-01-18 15:15:11.346099889 +0100 @@ -19,6 +19,7 @@ #include <openssl/bio.h> #include <openssl/opensslconf.h> #include <internal/thread_once.h> +#include "internal/constant_time_locl.h" static void err_load_strings(int lib, ERR_STRING_DATA *str); @@ -822,3 +823,42 @@ int ERR_pop_to_mark(void) es->err_flags[es->top] &= ~ERR_FLAG_MARK; return 1; } + +#ifdef UINTPTR_T +# undef UINTPTR_T +#endif +/* + * uintptr_t is the answer, but unfortunately C89, current "least common + * denominator" doesn't define it. Most legacy platforms typedef it anyway, + * so that attempt to fill the gaps means that one would have to identify + * that track these gaps, which would be undesirable. Macro it is... + */ +#if defined(__VMS) && __INITIAL_POINTER_SIZE==64 +/* + * But we can't use size_t on VMS, because it adheres to sizeof(size_t)==4 + * even in 64-bit builds, which means that it won't work as mask. 
+ */ +# define UINTPTR_T unsigned long long +#else +# define UINTPTR_T size_t +#endif + +void err_clear_last_constant_time(int clear) +{ + ERR_STATE *es; + int top; + + es = ERR_get_state(); + if (es == NULL) + return; + + top = es->top; + + es->err_flags[top] &= ~(0 - clear); + es->err_buffer[top] &= ~(0UL - clear); + es->err_file[top] = (const char *)((UINTPTR_T)es->err_file[top] & + ~((UINTPTR_T)0 - clear)); + es->err_line[top] |= 0 - clear; + + es->top = (top + ERR_NUM_ERRORS - clear) % ERR_NUM_ERRORS; +} Index: openssl-1.1.0i/crypto/include/internal/bn_int.h =================================================================== --- openssl-1.1.0i.orig/crypto/include/internal/bn_int.h 2018-08-14 14:45:07.000000000 +0200 +++ openssl-1.1.0i/crypto/include/internal/bn_int.h 2019-01-18 15:15:11.346099889 +0100 @@ -85,8 +85,14 @@ int bn_mul_mont_fixed_top(BIGNUM *r, con BN_MONT_CTX *mont, BN_CTX *ctx); int bn_to_mont_fixed_top(BIGNUM *r, const BIGNUM *a, BN_MONT_CTX *mont, BN_CTX *ctx); +int bn_from_mont_fixed_top(BIGNUM *r, const BIGNUM *a, BN_MONT_CTX *mont, + BN_CTX *ctx); int bn_mod_add_fixed_top(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m); +int bn_mod_sub_fixed_top(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, + const BIGNUM *m); +int bn_mul_fixed_top(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx); +int bn_sqr_fixed_top(BIGNUM *r, const BIGNUM *a, BN_CTX *ctx); #ifdef __cplusplus } Index: openssl-1.1.0i/crypto/rsa/rsa_oaep.c =================================================================== --- openssl-1.1.0i.orig/crypto/rsa/rsa_oaep.c 2018-08-14 14:45:07.000000000 +0200 +++ openssl-1.1.0i/crypto/rsa/rsa_oaep.c 2019-01-18 15:15:11.346099889 +0100 @@ -123,7 +123,7 @@ int RSA_padding_check_PKCS1_OAEP_mgf1(un const EVP_MD *mgf1md) { int i, dblen = 0, mlen = -1, one_index = 0, msg_index; - unsigned int good, found_one_byte; + unsigned int good = 0, found_one_byte, mask; const unsigned char *maskedseed, *maskeddb; /* * |em| is the encoded message, zero-padded to exactly |num| bytes: em = @@ -150,8 +150,11 @@ int RSA_padding_check_PKCS1_OAEP_mgf1(un * the ciphertext, see PKCS #1 v2.2, section 7.1.2. * This does not leak any side-channel information. */ - if (num < flen || num < 2 * mdlen + 2) - goto decoding_err; + if (num < flen || num < 2 * mdlen + 2) { + RSAerr(RSA_F_RSA_PADDING_CHECK_PKCS1_OAEP_MGF1, + RSA_R_OAEP_DECODING_ERROR); + return -1; + } dblen = num - mdlen - 1; db = OPENSSL_malloc(dblen); @@ -160,25 +163,26 @@ int RSA_padding_check_PKCS1_OAEP_mgf1(un goto cleanup; } - if (flen != num) { - em = OPENSSL_zalloc(num); - if (em == NULL) { - RSAerr(RSA_F_RSA_PADDING_CHECK_PKCS1_OAEP_MGF1, - ERR_R_MALLOC_FAILURE); - goto cleanup; - } + em = OPENSSL_malloc(num); + if (em == NULL) { + RSAerr(RSA_F_RSA_PADDING_CHECK_PKCS1_OAEP_MGF1, + ERR_R_MALLOC_FAILURE); + goto cleanup; + } - /* - * Caller is encouraged to pass zero-padded message created with - * BN_bn2binpad, but if it doesn't, we do this zero-padding copy - * to avoid leaking that information. The copy still leaks some - * side-channel information, but it's impossible to have a fixed - * memory access pattern since we can't read out of the bounds of - * |from|. - */ - memcpy(em + num - flen, from, flen); - from = em; + /* + * Caller is encouraged to pass zero-padded message created with + * BN_bn2binpad. Trouble is that since we can't read out of |from|'s + * bounds, it's impossible to have an invariant memory access pattern + * in case |from| was not zero-padded in advance. 
+ */ + for (from += flen, em += num, i = 0; i < num; i++) { + mask = ~constant_time_is_zero(flen); + flen -= 1 & mask; + from -= 1 & mask; + *--em = *from & mask; } + from = em; /* * The first byte must be zero, however we must not leak if this is @@ -225,31 +229,47 @@ int RSA_padding_check_PKCS1_OAEP_mgf1(un * so plaintext-awareness ensures timing side-channels are no longer a * concern. */ - if (!good) - goto decoding_err; - msg_index = one_index + 1; mlen = dblen - msg_index; - if (tlen < mlen) { - RSAerr(RSA_F_RSA_PADDING_CHECK_PKCS1_OAEP_MGF1, RSA_R_DATA_TOO_LARGE); - mlen = -1; - } else { - memcpy(to, db + msg_index, mlen); - goto cleanup; + /* + * For good measure, do this check in constant tine as well. + */ + good &= constant_time_ge(tlen, mlen); + + /* + * Even though we can't fake result's length, we can pretend copying + * |tlen| bytes where |mlen| bytes would be real. Last |tlen| of |dblen| + * bytes are viewed as circular buffer with start at |tlen|-|mlen'|, + * where |mlen'| is "saturated" |mlen| value. Deducing information + * about failure or |mlen| would take attacker's ability to observe + * memory access pattern with byte granularity *as it occurs*. It + * should be noted that failure is indistinguishable from normal + * operation if |tlen| is fixed by protocol. + */ + tlen = constant_time_select_int(constant_time_lt(dblen, tlen), dblen, tlen); + msg_index = constant_time_select_int(good, msg_index, dblen - tlen); + mlen = dblen - msg_index; + for (from = db + msg_index, mask = good, i = 0; i < tlen; i++) { + unsigned int equals = constant_time_eq(i, mlen); + + from -= dblen & equals; /* if (i == dblen) rewind */ + mask &= mask ^ equals; /* if (i == dblen) mask = 0 */ + to[i] = constant_time_select_8(mask, from[i], to[i]); } - decoding_err: /* * To avoid chosen ciphertext attacks, the error message should not * reveal which kind of decoding error happened. 
*/ RSAerr(RSA_F_RSA_PADDING_CHECK_PKCS1_OAEP_MGF1, RSA_R_OAEP_DECODING_ERROR); + err_clear_last_constant_time(1 & good); cleanup: OPENSSL_clear_free(db, dblen); OPENSSL_clear_free(em, num); - return mlen; + + return constant_time_select_int(good, mlen, -1); } int PKCS1_MGF1(unsigned char *mask, long len, Index: openssl-1.1.0i/crypto/rsa/rsa_ossl.c =================================================================== --- openssl-1.1.0i.orig/crypto/rsa/rsa_ossl.c 2019-01-18 15:15:10.870097140 +0100 +++ openssl-1.1.0i/crypto/rsa/rsa_ossl.c 2019-01-18 15:15:11.346099889 +0100 @@ -10,6 +10,7 @@ #include "internal/cryptlib.h" #include "internal/bn_int.h" #include "rsa_locl.h" +#include "internal/constant_time_locl.h" #ifdef OPENSSL_FIPS # include <openssl/fips.h> @@ -153,8 +154,8 @@ static int rsa_ossl_public_encrypt(int f } if (rsa->flags & RSA_FLAG_CACHE_PUBLIC) - if (!BN_MONT_CTX_set_locked - (&rsa->_method_mod_n, rsa->lock, rsa->n, ctx)) + if (!BN_MONT_CTX_set_locked(&rsa->_method_mod_n, rsa->lock, + rsa->n, ctx)) goto err; if (!rsa->meth->bn_mod_exp(ret, f, rsa->e, rsa->n, ctx, @@ -354,8 +355,8 @@ static int rsa_ossl_private_encrypt(int BN_with_flags(d, rsa->d, BN_FLG_CONSTTIME); if (rsa->flags & RSA_FLAG_CACHE_PUBLIC) - if (!BN_MONT_CTX_set_locked - (&rsa->_method_mod_n, rsa->lock, rsa->n, ctx)) { + if (!BN_MONT_CTX_set_locked(&rsa->_method_mod_n, rsa->lock, + rsa->n, ctx)) { BN_free(d); goto err; } @@ -493,8 +494,8 @@ static int rsa_ossl_private_decrypt(int BN_with_flags(d, rsa->d, BN_FLG_CONSTTIME); if (rsa->flags & RSA_FLAG_CACHE_PUBLIC) - if (!BN_MONT_CTX_set_locked - (&rsa->_method_mod_n, rsa->lock, rsa->n, ctx)) { + if (!BN_MONT_CTX_set_locked(&rsa->_method_mod_n, rsa->lock, + rsa->n, ctx)) { BN_free(d); goto err; } @@ -530,8 +531,8 @@ static int rsa_ossl_private_decrypt(int RSAerr(RSA_F_RSA_OSSL_PRIVATE_DECRYPT, RSA_R_UNKNOWN_PADDING_TYPE); goto err; } - if (r < 0) - RSAerr(RSA_F_RSA_OSSL_PRIVATE_DECRYPT, RSA_R_PADDING_CHECK_FAILED); + RSAerr(RSA_F_RSA_OSSL_PRIVATE_DECRYPT, RSA_R_PADDING_CHECK_FAILED); + err_clear_last_constant_time(r >= 0); err: if (ctx != NULL) @@ -615,8 +616,8 @@ static int rsa_ossl_public_decrypt(int f } if (rsa->flags & RSA_FLAG_CACHE_PUBLIC) - if (!BN_MONT_CTX_set_locked - (&rsa->_method_mod_n, rsa->lock, rsa->n, ctx)) + if (!BN_MONT_CTX_set_locked(&rsa->_method_mod_n, rsa->lock, + rsa->n, ctx)) goto err; if (!rsa->meth->bn_mod_exp(ret, f, rsa->e, rsa->n, ctx, @@ -657,7 +658,7 @@ static int rsa_ossl_public_decrypt(int f static int rsa_ossl_mod_exp(BIGNUM *r0, const BIGNUM *I, RSA *rsa, BN_CTX *ctx) { BIGNUM *r1, *m1, *vrfy; - int ret = 0; + int ret = 0, smooth = 0; BN_CTX_start(ctx); @@ -667,43 +668,79 @@ static int rsa_ossl_mod_exp(BIGNUM *r0, if (vrfy == NULL) goto err; - { - BIGNUM *p = BN_new(), *q = BN_new(); + if (rsa->flags & RSA_FLAG_CACHE_PRIVATE) { + BIGNUM *factor = BN_new(); + + if (factor == NULL) + goto err; /* * Make sure BN_mod_inverse in Montgomery initialization uses the * BN_FLG_CONSTTIME flag */ - if (p == NULL || q == NULL) { - BN_free(p); - BN_free(q); + if (!(BN_with_flags(factor, rsa->p, BN_FLG_CONSTTIME), + BN_MONT_CTX_set_locked(&rsa->_method_mod_p, rsa->lock, + factor, ctx)) + || !(BN_with_flags(factor, rsa->q, BN_FLG_CONSTTIME), + BN_MONT_CTX_set_locked(&rsa->_method_mod_q, rsa->lock, + factor, ctx))) { + BN_free(factor); goto err; } - BN_with_flags(p, rsa->p, BN_FLG_CONSTTIME); - BN_with_flags(q, rsa->q, BN_FLG_CONSTTIME); - - if (rsa->flags & RSA_FLAG_CACHE_PRIVATE) { - if (!BN_MONT_CTX_set_locked - (&rsa->_method_mod_p, rsa->lock, 
p, ctx) - || !BN_MONT_CTX_set_locked(&rsa->_method_mod_q, - rsa->lock, q, ctx)) { - BN_free(p); - BN_free(q); - goto err; - } - } /* - * We MUST free p and q before any further use of rsa->p and rsa->q + * We MUST free |factor| before any further use of the prime factors */ - BN_free(p); - BN_free(q); + BN_free(factor); + + smooth = (rsa->meth->bn_mod_exp == BN_mod_exp_mont) + && (BN_num_bits(rsa->q) == BN_num_bits(rsa->p)); } if (rsa->flags & RSA_FLAG_CACHE_PUBLIC) - if (!BN_MONT_CTX_set_locked - (&rsa->_method_mod_n, rsa->lock, rsa->n, ctx)) + if (!BN_MONT_CTX_set_locked(&rsa->_method_mod_n, rsa->lock, + rsa->n, ctx)) + goto err; + + if (smooth) { + /* + * Conversion from Montgomery domain, a.k.a. Montgomery reduction, + * accepts values in [0-m*2^w) range. w is m's bit width rounded up + * to limb width. So that at the very least if |I| is fully reduced, + * i.e. less than p*q, we can count on from-to round to perform + * below modulo operations on |I|. Unlike BN_mod it's constant time. + */ + if (/* m1 = I moq q */ + !bn_from_mont_fixed_top(m1, I, rsa->_method_mod_q, ctx) + || !bn_to_mont_fixed_top(m1, m1, rsa->_method_mod_q, ctx) + /* m1 = m1^dmq1 mod q */ + || !BN_mod_exp_mont_consttime(m1, m1, rsa->dmq1, rsa->q, ctx, + rsa->_method_mod_q) + /* r1 = I mod p */ + || !bn_from_mont_fixed_top(r1, I, rsa->_method_mod_p, ctx) + || !bn_to_mont_fixed_top(r1, r1, rsa->_method_mod_p, ctx) + /* r1 = r1^dmp1 mod p */ + || !BN_mod_exp_mont_consttime(r1, r1, rsa->dmp1, rsa->p, ctx, + rsa->_method_mod_p) + /* r1 = (r1 - m1) mod p */ + /* + * bn_mod_sub_fixed_top is not regular modular subtraction, + * it can tolerate subtrahend to be larger than modulus, but + * not bit-wise wider. This makes up for uncommon q>p case, + * when |m1| can be larger than |rsa->p|. + */ + || !bn_mod_sub_fixed_top(r1, r1, m1, rsa->p) + + /* r0 = r0 * iqmp mod p */ + || !bn_to_mont_fixed_top(r1, r1, rsa->_method_mod_p, ctx) + || !bn_mul_mont_fixed_top(r1, r1, rsa->iqmp, rsa->_method_mod_p, + ctx) + || !bn_mul_fixed_top(r0, r1, rsa->q, ctx) + || !bn_mod_add_fixed_top(r0, r0, m1, rsa->n)) goto err; + goto tail; + } + /* compute I mod q */ { BIGNUM *c = BN_new(); @@ -726,7 +763,7 @@ static int rsa_ossl_mod_exp(BIGNUM *r0, /* compute r1^dmq1 mod q */ if (!rsa->meth->bn_mod_exp(m1, r1, dmq1, rsa->q, ctx, - rsa->_method_mod_q)) { + rsa->_method_mod_q)) { BN_free(c); BN_free(dmq1); goto err; @@ -802,10 +839,18 @@ static int rsa_ossl_mod_exp(BIGNUM *r0, if (!BN_add(r0, r1, m1)) goto err; + tail: if (rsa->e && rsa->n) { - if (!rsa->meth->bn_mod_exp(vrfy, r0, rsa->e, rsa->n, ctx, - rsa->_method_mod_n)) - goto err; + if (rsa->meth->bn_mod_exp == BN_mod_exp_mont) { + if (!BN_mod_exp_mont(vrfy, r0, rsa->e, rsa->n, ctx, + rsa->_method_mod_n)) + goto err; + } else { + bn_correct_top(r0); + if (!rsa->meth->bn_mod_exp(vrfy, r0, rsa->e, rsa->n, ctx, + rsa->_method_mod_n)) + goto err; + } /* * If 'I' was greater than (or equal to) rsa->n, the operation will * be equivalent to using 'I mod n'. However, the result of the @@ -814,6 +859,11 @@ static int rsa_ossl_mod_exp(BIGNUM *r0, */ if (!BN_sub(vrfy, vrfy, I)) goto err; + if (BN_is_zero(vrfy)) { + bn_correct_top(r0); + ret = 1; + goto err; /* not actually error */ + } if (!BN_mod(vrfy, vrfy, rsa->n, ctx)) goto err; if (BN_is_negative(vrfy)) @@ -840,6 +890,15 @@ static int rsa_ossl_mod_exp(BIGNUM *r0, BN_free(d); } } + /* + * It's unfortunate that we have to bn_correct_top(r0). 
What hopefully + * saves the day is that correction is highly unlike, and private key + * operations are customarily performed on blinded message. Which means + * that attacker won't observe correlation with chosen plaintext. + * Secondly, remaining code would still handle it in same computational + * time and even conceal memory access pattern around corrected top. + */ + bn_correct_top(r0); ret = 1; err: BN_CTX_end(ctx); Index: openssl-1.1.0i/crypto/rsa/rsa_pk1.c =================================================================== --- openssl-1.1.0i.orig/crypto/rsa/rsa_pk1.c 2018-08-14 14:45:07.000000000 +0200 +++ openssl-1.1.0i/crypto/rsa/rsa_pk1.c 2019-01-18 15:15:11.346099889 +0100 @@ -158,7 +158,7 @@ int RSA_padding_check_PKCS1_type_2(unsig int i; /* |em| is the encoded message, zero-padded to exactly |num| bytes */ unsigned char *em = NULL; - unsigned int good, found_zero_byte; + unsigned int good, found_zero_byte, mask; int zero_index = 0, msg_index, mlen = -1; if (tlen < 0 || flen < 0) @@ -169,39 +169,41 @@ int RSA_padding_check_PKCS1_type_2(unsig * section 7.2.2. */ - if (flen > num) - goto err; - - if (num < 11) - goto err; + if (flen > num || num < 11) { + RSAerr(RSA_F_RSA_PADDING_CHECK_PKCS1_TYPE_2, + RSA_R_PKCS_DECODING_ERROR); + return -1; + } - if (flen != num) { - em = OPENSSL_zalloc(num); - if (em == NULL) { - RSAerr(RSA_F_RSA_PADDING_CHECK_PKCS1_TYPE_2, ERR_R_MALLOC_FAILURE); - return -1; - } - /* - * Caller is encouraged to pass zero-padded message created with - * BN_bn2binpad, but if it doesn't, we do this zero-padding copy - * to avoid leaking that information. The copy still leaks some - * side-channel information, but it's impossible to have a fixed - * memory access pattern since we can't read out of the bounds of - * |from|. - */ - memcpy(em + num - flen, from, flen); - from = em; + em = OPENSSL_malloc(num); + if (em == NULL) { + RSAerr(RSA_F_RSA_PADDING_CHECK_PKCS1_TYPE_2, ERR_R_MALLOC_FAILURE); + return -1; + } + /* + * Caller is encouraged to pass zero-padded message created with + * BN_bn2binpad. Trouble is that since we can't read out of |from|'s + * bounds, it's impossible to have an invariant memory access pattern + * in case |from| was not zero-padded in advance. + */ + for (from += flen, em += num, i = 0; i < num; i++) { + mask = ~constant_time_is_zero(flen); + flen -= 1 & mask; + from -= 1 & mask; + *--em = *from & mask; } + from = em; good = constant_time_is_zero(from[0]); good &= constant_time_eq(from[1], 2); + /* scan over padding data */ found_zero_byte = 0; for (i = 2; i < num; i++) { unsigned int equals0 = constant_time_is_zero(from[i]); - zero_index = - constant_time_select_int(~found_zero_byte & equals0, i, - zero_index); + + zero_index = constant_time_select_int(~found_zero_byte & equals0, + i, zero_index); found_zero_byte |= equals0; } @@ -210,7 +212,7 @@ int RSA_padding_check_PKCS1_type_2(unsig * If we never found a 0-byte, then |zero_index| is 0 and the check * also fails. */ - good &= constant_time_ge((unsigned int)(zero_index), 2 + 8); + good &= constant_time_ge(zero_index, 2 + 8); /* * Skip the zero byte. This is incorrect if we never found a zero-byte @@ -220,27 +222,34 @@ int RSA_padding_check_PKCS1_type_2(unsig mlen = num - msg_index; /* - * For good measure, do this check in constant time as well; it could - * leak something if |tlen| was assuming valid padding. + * For good measure, do this check in constant time as well. 
*/ - good &= constant_time_ge((unsigned int)(tlen), (unsigned int)(mlen)); + good &= constant_time_ge(tlen, mlen); /* - * We can't continue in constant-time because we need to copy the result - * and we cannot fake its length. This unavoidably leaks timing - * information at the API boundary. + * Even though we can't fake result's length, we can pretend copying + * |tlen| bytes where |mlen| bytes would be real. Last |tlen| of |num| + * bytes are viewed as circular buffer with start at |tlen|-|mlen'|, + * where |mlen'| is "saturated" |mlen| value. Deducing information + * about failure or |mlen| would take attacker's ability to observe + * memory access pattern with byte granularity *as it occurs*. It + * should be noted that failure is indistinguishable from normal + * operation if |tlen| is fixed by protocol. */ - if (!good) { - mlen = -1; - goto err; - } + tlen = constant_time_select_int(constant_time_lt(num, tlen), num, tlen); + msg_index = constant_time_select_int(good, msg_index, num - tlen); + mlen = num - msg_index; + for (from += msg_index, mask = good, i = 0; i < tlen; i++) { + unsigned int equals = constant_time_eq(i, mlen); - memcpy(to, from + msg_index, mlen); + from -= tlen & equals; /* if (i == mlen) rewind */ + mask &= mask ^ equals; /* if (i == mlen) mask = 0 */ + to[i] = constant_time_select_8(mask, from[i], to[i]); + } - err: OPENSSL_clear_free(em, num); - if (mlen == -1) - RSAerr(RSA_F_RSA_PADDING_CHECK_PKCS1_TYPE_2, - RSA_R_PKCS_DECODING_ERROR); - return mlen; + RSAerr(RSA_F_RSA_PADDING_CHECK_PKCS1_TYPE_2, RSA_R_PKCS_DECODING_ERROR); + err_clear_last_constant_time(1 & good); + + return constant_time_select_int(good, mlen, -1); } Index: openssl-1.1.0i/crypto/rsa/rsa_ssl.c =================================================================== --- openssl-1.1.0i.orig/crypto/rsa/rsa_ssl.c 2018-08-14 14:45:07.000000000 +0200 +++ openssl-1.1.0i/crypto/rsa/rsa_ssl.c 2019-01-18 15:15:11.346099889 +0100 @@ -12,6 +12,7 @@ #include <openssl/bn.h> #include <openssl/rsa.h> #include <openssl/rand.h> +#include "internal/constant_time_locl.h" int RSA_padding_add_SSLv23(unsigned char *to, int tlen, const unsigned char *from, int flen) @@ -52,57 +53,115 @@ int RSA_padding_add_SSLv23(unsigned char return (1); } +/* + * Copy of RSA_padding_check_PKCS1_type_2 with a twist that rejects padding + * if nul delimiter is preceded by 8 consecutive 0x03 bytes. It also + * preserves error code reporting for backward compatibility. 
+ */ int RSA_padding_check_SSLv23(unsigned char *to, int tlen, const unsigned char *from, int flen, int num) { - int i, j, k; - const unsigned char *p; + int i; + /* |em| is the encoded message, zero-padded to exactly |num| bytes */ + unsigned char *em = NULL; + unsigned int good, found_zero_byte, mask, threes_in_row; + int zero_index = 0, msg_index, mlen = -1, err; - p = from; if (flen < 10) { RSAerr(RSA_F_RSA_PADDING_CHECK_SSLV23, RSA_R_DATA_TOO_SMALL); return (-1); } - /* Accept even zero-padded input */ - if (flen == num) { - if (*(p++) != 0) { - RSAerr(RSA_F_RSA_PADDING_CHECK_SSLV23, RSA_R_BLOCK_TYPE_IS_NOT_02); - return -1; - } - flen--; - } - if ((num != (flen + 1)) || (*(p++) != 02)) { - RSAerr(RSA_F_RSA_PADDING_CHECK_SSLV23, RSA_R_BLOCK_TYPE_IS_NOT_02); - return (-1); - } - /* scan over padding data */ - j = flen - 1; /* one for type */ - for (i = 0; i < j; i++) - if (*(p++) == 0) - break; - - if ((i == j) || (i < 8)) { - RSAerr(RSA_F_RSA_PADDING_CHECK_SSLV23, - RSA_R_NULL_BEFORE_BLOCK_MISSING); - return (-1); - } - for (k = -9; k < -1; k++) { - if (p[k] != 0x03) - break; - } - if (k == -1) { - RSAerr(RSA_F_RSA_PADDING_CHECK_SSLV23, RSA_R_SSLV3_ROLLBACK_ATTACK); - return (-1); - } + em = OPENSSL_malloc(num); + if (em == NULL) { + RSAerr(RSA_F_RSA_PADDING_CHECK_SSLV23, ERR_R_MALLOC_FAILURE); + return -1; + } + /* + * Caller is encouraged to pass zero-padded message created with + * BN_bn2binpad. Trouble is that since we can't read out of |from|'s + * bounds, it's impossible to have an invariant memory access pattern + * in case |from| was not zero-padded in advance. + */ + for (from += flen, em += num, i = 0; i < num; i++) { + mask = ~constant_time_is_zero(flen); + flen -= 1 & mask; + from -= 1 & mask; + *--em = *from & mask; + } + from = em; + + good = constant_time_is_zero(from[0]); + good &= constant_time_eq(from[1], 2); + err = constant_time_select_int(good, 0, RSA_R_BLOCK_TYPE_IS_NOT_02); + mask = ~good; - i++; /* Skip over the '\0' */ - j -= i; - if (j > tlen) { - RSAerr(RSA_F_RSA_PADDING_CHECK_SSLV23, RSA_R_DATA_TOO_LARGE); - return (-1); - } - memcpy(to, p, (unsigned int)j); + /* scan over padding data */ + found_zero_byte = 0; + threes_in_row = 0; + for (i = 2; i < num; i++) { + unsigned int equals0 = constant_time_is_zero(from[i]); + + zero_index = constant_time_select_int(~found_zero_byte & equals0, + i, zero_index); + found_zero_byte |= equals0; + + threes_in_row += 1 & ~found_zero_byte; + threes_in_row &= found_zero_byte | constant_time_eq(from[i], 3); + } + + /* + * PS must be at least 8 bytes long, and it starts two bytes into |from|. + * If we never found a 0-byte, then |zero_index| is 0 and the check + * also fails. + */ + good &= constant_time_ge(zero_index, 2 + 8); + err = constant_time_select_int(mask | good, err, + RSA_R_NULL_BEFORE_BLOCK_MISSING); + mask = ~good; + + good &= constant_time_lt(threes_in_row, 8); + err = constant_time_select_int(mask | good, err, + RSA_R_SSLV3_ROLLBACK_ATTACK); + mask = ~good; + + /* + * Skip the zero byte. This is incorrect if we never found a zero-byte + * but in this case we also do not copy the message out. + */ + msg_index = zero_index + 1; + mlen = num - msg_index; + + /* + * For good measure, do this check in constant time as well. + */ + good &= constant_time_ge(tlen, mlen); + err = constant_time_select_int(mask | good, err, RSA_R_DATA_TOO_LARGE); + + /* + * Even though we can't fake result's length, we can pretend copying + * |tlen| bytes where |mlen| bytes would be real. 
Last |tlen| of |num| + * bytes are viewed as circular buffer with start at |tlen|-|mlen'|, + * where |mlen'| is "saturated" |mlen| value. Deducing information + * about failure or |mlen| would take attacker's ability to observe + * memory access pattern with byte granularity *as it occurs*. It + * should be noted that failure is indistinguishable from normal + * operation if |tlen| is fixed by protocol. + */ + tlen = constant_time_select_int(constant_time_lt(num, tlen), num, tlen); + msg_index = constant_time_select_int(good, msg_index, num - tlen); + mlen = num - msg_index; + for (from += msg_index, mask = good, i = 0; i < tlen; i++) { + unsigned int equals = constant_time_eq(i, mlen); + + from -= tlen & equals; /* if (i == mlen) rewind */ + mask &= mask ^ equals; /* if (i == mlen) mask = 0 */ + to[i] = constant_time_select_8(mask, from[i], to[i]); + } + + OPENSSL_clear_free(em, num); + RSAerr(RSA_F_RSA_PADDING_CHECK_SSLV23, err); + err_clear_last_constant_time(1 & good); - return (j); + return constant_time_select_int(good, mlen, -1); } Index: openssl-1.1.0i/doc/crypto/RSA_padding_add_PKCS1_type_1.pod =================================================================== --- openssl-1.1.0i.orig/doc/crypto/RSA_padding_add_PKCS1_type_1.pod 2018-08-14 14:45:08.000000000 +0200 +++ openssl-1.1.0i/doc/crypto/RSA_padding_add_PKCS1_type_1.pod 2019-01-18 15:15:11.346099889 +0100 @@ -109,7 +109,12 @@ L<ERR_get_error(3)>. The RSA_padding_check_PKCS1_type_2() padding check leaks timing information which can potentially be used to mount a Bleichenbacher padding oracle attack. This is an inherent weakness in the PKCS #1 -v1.5 padding design. Prefer PKCS1_OAEP padding. +v1.5 padding design. Prefer PKCS1_OAEP padding. Otherwise it can +be recommended to pass zero-padded B<f>, so that B<fl> equals to +B<rsa_len>, and if fixed by protocol, B<tlen> being set to the +expected length. In such case leakage would be minimal, it would +take attacker's ability to observe memory access pattern with byte +granilarity as it occurs, post-factum timing analysis won't do. =head1 SEE ALSO Index: openssl-1.1.0i/include/internal/constant_time_locl.h =================================================================== --- openssl-1.1.0i.orig/include/internal/constant_time_locl.h 2018-08-14 14:45:09.000000000 +0200 +++ openssl-1.1.0i/include/internal/constant_time_locl.h 2019-01-18 15:15:11.346099889 +0100 @@ -178,6 +178,12 @@ static ossl_inline int constant_time_sel return (int)(constant_time_select(mask, (unsigned)(a), (unsigned)(b))); } +/* + * Expected usage pattern is to unconditionally set error and then + * wipe it if there was no actual error. |clear| is 1 or 0. + */ +void err_clear_last_constant_time(int clear); + #ifdef __cplusplus } #endif