gh-135239: simpler use of mutexes in cryptographic modules (#135267)

This commit is contained in:
Bénédikt Tran 2025-06-22 16:59:57 +02:00 committed by GitHub
parent ac9d37c60b
commit e7295a89b8
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
9 changed files with 323 additions and 582 deletions

View file

@ -1043,49 +1043,67 @@ class HashLibTestCase(unittest.TestCase):
def test_sha256_gil(self):
gil_minsize = hashlib_helper.find_gil_minsize(['_sha2', '_hashlib'])
data = b'1' + b'#' * gil_minsize + b'1'
expected = hashlib.sha256(data).hexdigest()
m = hashlib.sha256()
m.update(b'1')
m.update(b'#' * gil_minsize)
m.update(b'1')
self.assertEqual(
m.hexdigest(),
'1cfceca95989f51f658e3f3ffe7f1cd43726c9e088c13ee10b46f57cef135b94'
)
m = hashlib.sha256(b'1' + b'#' * gil_minsize + b'1')
self.assertEqual(
m.hexdigest(),
'1cfceca95989f51f658e3f3ffe7f1cd43726c9e088c13ee10b46f57cef135b94'
)
self.assertEqual(m.hexdigest(), expected)
@threading_helper.reap_threads
@threading_helper.requires_working_threading()
def test_threaded_hashing(self):
def test_threaded_hashing_fast(self):
# Same as test_threaded_hashing_slow() but only tests some functions
# since otherwise test_hashlib.py becomes too slow during development.
for name in ['md5', 'sha1', 'sha256', 'sha3_256', 'blake2s']:
if constructor := getattr(hashlib, name, None):
with self.subTest(name):
self.do_test_threaded_hashing(constructor, is_shake=False)
if shake_128 := getattr(hashlib, 'shake_128', None):
self.do_test_threaded_hashing(shake_128, is_shake=True)
@requires_resource('cpu')
@threading_helper.reap_threads
@threading_helper.requires_working_threading()
def test_threaded_hashing_slow(self):
for algorithm, constructors in self.constructors_to_test.items():
is_shake = algorithm in self.shakes
for constructor in constructors:
with self.subTest(constructor.__name__, is_shake=is_shake):
self.do_test_threaded_hashing(constructor, is_shake)
def do_test_threaded_hashing(self, constructor, is_shake):
# Updating the same hash object from several threads at once
# using data chunk sizes containing the same byte sequences.
#
# If the internal locks are working to prevent multiple
# updates on the same object from running at once, the resulting
# hash will be the same as doing it single threaded upfront.
hasher = hashlib.sha1()
num_threads = 5
smallest_data = b'swineflu'
data = smallest_data * 200000
expected_hash = hashlib.sha1(data*num_threads).hexdigest()
def hash_in_chunks(chunk_size):
index = 0
while index < len(data):
hasher.update(data[index:index + chunk_size])
index += chunk_size
# The data to hash has length s|M|q^N and the chunk size for the i-th
# thread is s|M|q^(N-i), where N is the number of threads, M is a fixed
# message of small length, and s >= 1 and q >= 2 are small integers.
smallest_size, num_threads, s, q = 8, 5, 2, 10
smallest_data = os.urandom(smallest_size)
data = s * smallest_data * (q ** num_threads)
h1 = constructor(usedforsecurity=False)
h2 = constructor(data * num_threads, usedforsecurity=False)
def update(chunk_size):
for index in range(0, len(data), chunk_size):
h1.update(data[index:index + chunk_size])
threads = []
for threadnum in range(num_threads):
chunk_size = len(data) // (10 ** threadnum)
for thread_num in range(num_threads):
# chunk_size = len(data) // (q ** thread_num)
chunk_size = s * smallest_size * q ** (num_threads - thread_num)
self.assertGreater(chunk_size, 0)
self.assertEqual(chunk_size % len(smallest_data), 0)
thread = threading.Thread(target=hash_in_chunks,
args=(chunk_size,))
self.assertEqual(chunk_size % smallest_size, 0)
thread = threading.Thread(target=update, args=(chunk_size,))
threads.append(thread)
for thread in threads:
@ -1093,7 +1111,10 @@ class HashLibTestCase(unittest.TestCase):
for thread in threads:
thread.join()
self.assertEqual(expected_hash, hasher.hexdigest())
if is_shake:
self.assertEqual(h1.hexdigest(16), h2.hexdigest(16))
else:
self.assertEqual(h1.hexdigest(), h2.hexdigest())
def test_get_fips_mode(self):
fips_mode = self.is_fips_mode

View file

@ -278,21 +278,15 @@ get_hashlib_state(PyObject *module)
}
typedef struct {
PyObject_HEAD
HASHLIB_OBJECT_HEAD
EVP_MD_CTX *ctx; /* OpenSSL message digest context */
// Prevents undefined behavior via multiple threads entering the C API.
bool use_mutex;
PyMutex mutex; /* OpenSSL context lock */
} HASHobject;
#define HASHobject_CAST(op) ((HASHobject *)(op))
typedef struct {
PyObject_HEAD
HASHLIB_OBJECT_HEAD
HMAC_CTX *ctx; /* OpenSSL hmac context */
// Prevents undefined behavior via multiple threads entering the C API.
bool use_mutex;
PyMutex mutex; /* HMAC context lock */
} HMACobject;
#define HMACobject_CAST(op) ((HMACobject *)(op))
@ -700,9 +694,9 @@ static int
_hashlib_HASH_copy_locked(HASHobject *self, EVP_MD_CTX *new_ctx_p)
{
int result;
ENTER_HASHLIB(self);
HASHLIB_ACQUIRE_LOCK(self);
result = EVP_MD_CTX_copy(new_ctx_p, self->ctx);
LEAVE_HASHLIB(self);
HASHLIB_RELEASE_LOCK(self);
if (result == 0) {
notify_smart_ssl_error_occurred_in(Py_STRINGIFY(EVP_MD_CTX_copy));
return -1;
@ -802,27 +796,13 @@ _hashlib_HASH_update_impl(HASHobject *self, PyObject *obj)
{
int result;
Py_buffer view;
GET_BUFFER_VIEW_OR_ERROUT(obj, &view);
if (!self->use_mutex && view.len >= HASHLIB_GIL_MINSIZE) {
self->use_mutex = true;
}
if (self->use_mutex) {
Py_BEGIN_ALLOW_THREADS
PyMutex_Lock(&self->mutex);
result = _hashlib_HASH_hash(self, view.buf, view.len);
PyMutex_Unlock(&self->mutex);
Py_END_ALLOW_THREADS
} else {
result = _hashlib_HASH_hash(self, view.buf, view.len);
}
HASHLIB_EXTERNAL_INSTRUCTIONS_LOCKED(
self, view.len,
result = _hashlib_HASH_hash(self, view.buf, view.len)
);
PyBuffer_Release(&view);
if (result == -1)
return NULL;
Py_RETURN_NONE;
return result < 0 ? NULL : Py_None;
}
static PyMethodDef HASH_methods[] = {
@ -1144,15 +1124,12 @@ _hashlib_HASH(PyObject *module, const char *digestname, PyObject *data_obj,
}
if (view.buf && view.len) {
if (view.len >= HASHLIB_GIL_MINSIZE) {
/* We do not initialize self->lock here as this is the constructor
* where it is not yet possible to have concurrent access. */
Py_BEGIN_ALLOW_THREADS
result = _hashlib_HASH_hash(self, view.buf, view.len);
Py_END_ALLOW_THREADS
} else {
result = _hashlib_HASH_hash(self, view.buf, view.len);
}
/* Do not use self->mutex here as this is the constructor
* where it is not yet possible to have concurrent access. */
HASHLIB_EXTERNAL_INSTRUCTIONS_UNLOCKED(
view.len,
result = _hashlib_HASH_hash(self, view.buf, view.len)
);
if (result == -1) {
assert(PyErr_Occurred());
Py_CLEAR(self);
@ -1813,9 +1790,9 @@ static int
locked_HMAC_CTX_copy(HMAC_CTX *new_ctx_p, HMACobject *self)
{
int result;
ENTER_HASHLIB(self);
HASHLIB_ACQUIRE_LOCK(self);
result = HMAC_CTX_copy(new_ctx_p, self->ctx);
LEAVE_HASHLIB(self);
HASHLIB_RELEASE_LOCK(self);
if (result == 0) {
notify_smart_ssl_error_occurred_in(Py_STRINGIFY(HMAC_CTX_copy));
return -1;
@ -1846,24 +1823,12 @@ _hmac_update(HMACobject *self, PyObject *obj)
Py_buffer view = {0};
GET_BUFFER_VIEW_OR_ERROR(obj, &view, return 0);
if (!self->use_mutex && view.len >= HASHLIB_GIL_MINSIZE) {
self->use_mutex = true;
}
if (self->use_mutex) {
Py_BEGIN_ALLOW_THREADS
PyMutex_Lock(&self->mutex);
r = HMAC_Update(self->ctx,
(const unsigned char *)view.buf,
(size_t)view.len);
PyMutex_Unlock(&self->mutex);
Py_END_ALLOW_THREADS
} else {
r = HMAC_Update(self->ctx,
(const unsigned char *)view.buf,
(size_t)view.len);
}
HASHLIB_EXTERNAL_INSTRUCTIONS_LOCKED(
self, view.len,
r = HMAC_Update(
self->ctx, (const unsigned char *)view.buf, (size_t)view.len
)
);
PyBuffer_Release(&view);
if (r == 0) {

View file

@ -352,7 +352,7 @@ type_to_impl(PyTypeObject *type)
}
typedef struct {
PyObject_HEAD
HASHLIB_OBJECT_HEAD
union {
Hacl_Hash_Blake2s_state_t *blake2s_state;
Hacl_Hash_Blake2b_state_t *blake2b_state;
@ -364,8 +364,6 @@ typedef struct {
#endif
};
blake2_impl impl;
bool use_mutex;
PyMutex mutex;
} Blake2Object;
#define _Blake2Object_CAST(op) ((Blake2Object *)(op))
@ -422,7 +420,7 @@ new_Blake2Object(PyTypeObject *type)
} while (0)
static void
update(Blake2Object *self, uint8_t *buf, Py_ssize_t len)
blake2_update_unlocked(Blake2Object *self, uint8_t *buf, Py_ssize_t len)
{
switch (self->impl) {
// blake2b_256_state and blake2s_128_state must be if'd since
@ -646,14 +644,12 @@ py_blake2_new(PyTypeObject *type, PyObject *data, int digest_size,
if (data != NULL) {
Py_buffer buf;
GET_BUFFER_VIEW_OR_ERROR(data, &buf, goto error);
if (buf.len >= HASHLIB_GIL_MINSIZE) {
Py_BEGIN_ALLOW_THREADS
update(self, buf.buf, buf.len);
Py_END_ALLOW_THREADS
}
else {
update(self, buf.buf, buf.len);
}
/* Do not use self->mutex here as this is the constructor
* where it is not yet possible to have concurrent access. */
HASHLIB_EXTERNAL_INSTRUCTIONS_UNLOCKED(
buf.len,
blake2_update_unlocked(self, buf.buf, buf.len)
);
PyBuffer_Release(&buf);
}
@ -744,7 +740,7 @@ py_blake2s_new_impl(PyTypeObject *type, PyObject *data_obj, int digest_size,
}
static int
blake2_blake2b_copy_locked(Blake2Object *self, Blake2Object *cpy)
blake2_blake2b_copy_unlocked(Blake2Object *self, Blake2Object *cpy)
{
assert(cpy != NULL);
#define BLAKE2_COPY(TYPE, STATE_ATTR) \
@ -801,9 +797,9 @@ _blake2_blake2b_copy_impl(Blake2Object *self)
return NULL;
}
ENTER_HASHLIB(self);
rc = blake2_blake2b_copy_locked(self, cpy);
LEAVE_HASHLIB(self);
HASHLIB_ACQUIRE_LOCK(self);
rc = blake2_blake2b_copy_unlocked(self, cpy);
HASHLIB_RELEASE_LOCK(self);
if (rc < 0) {
Py_DECREF(cpy);
return NULL;
@ -825,25 +821,12 @@ _blake2_blake2b_update_impl(Blake2Object *self, PyObject *data)
/*[clinic end generated code: output=99330230068e8c99 input=ffc4aa6a6a225d31]*/
{
Py_buffer buf;
GET_BUFFER_VIEW_OR_ERROUT(data, &buf);
if (!self->use_mutex && buf.len >= HASHLIB_GIL_MINSIZE) {
self->use_mutex = true;
}
if (self->use_mutex) {
Py_BEGIN_ALLOW_THREADS
PyMutex_Lock(&self->mutex);
update(self, buf.buf, buf.len);
PyMutex_Unlock(&self->mutex);
Py_END_ALLOW_THREADS
}
else {
update(self, buf.buf, buf.len);
}
HASHLIB_EXTERNAL_INSTRUCTIONS_LOCKED(
self, buf.len,
blake2_update_unlocked(self, buf.buf, buf.len)
);
PyBuffer_Release(&buf);
Py_RETURN_NONE;
}
@ -881,9 +864,9 @@ _blake2_blake2b_digest_impl(Blake2Object *self)
/*[clinic end generated code: output=31ab8ad477f4a2f7 input=7d21659e9c5fff02]*/
{
uint8_t digest_length = 0, digest[HACL_HASH_BLAKE2B_OUT_BYTES];
ENTER_HASHLIB(self);
HASHLIB_ACQUIRE_LOCK(self);
digest_length = blake2_blake2b_compute_digest(self, digest);
LEAVE_HASHLIB(self);
HASHLIB_RELEASE_LOCK(self);
return PyBytes_FromStringAndSize((const char *)digest, digest_length);
}
@ -898,9 +881,9 @@ _blake2_blake2b_hexdigest_impl(Blake2Object *self)
/*[clinic end generated code: output=5ef54b138db6610a input=76930f6946351f56]*/
{
uint8_t digest_length = 0, digest[HACL_HASH_BLAKE2B_OUT_BYTES];
ENTER_HASHLIB(self);
HASHLIB_ACQUIRE_LOCK(self);
digest_length = blake2_blake2b_compute_digest(self, digest);
LEAVE_HASHLIB(self);
HASHLIB_RELEASE_LOCK(self);
return _Py_strhex((const char *)digest, digest_length);
}

View file

@ -34,47 +34,78 @@
/*
* Helper code to synchronize access to the hash object when the GIL is
* released around a CPU consuming hashlib operation. All code paths that
* access a mutable part of obj must be enclosed in an ENTER_HASHLIB /
* LEAVE_HASHLIB block or explicitly acquire and release the lock inside
* a PY_BEGIN / END_ALLOW_THREADS block if they wish to release the GIL for
* an operation.
* released around a CPU consuming hashlib operation.
*
* These only drop the GIL if the lock acquisition itself is likely to
* block. Thus the non-blocking acquire gating the GIL release for a
* blocking lock acquisition. The intent of these macros is to surround
* the assumed always "fast" operations that you aren't releasing the
* GIL around. Otherwise use code similar to what you see in hash
* function update() methods.
 * Code accessing a mutable part of the hash object must be enclosed in
 * a HASHLIB_{ACQUIRE,RELEASE}_LOCK block or must explicitly acquire and
 * release the mutex inside a Py_BEGIN_ALLOW_THREADS -- Py_END_ALLOW_THREADS
 * block if it wishes to release the GIL for an operation.
*/
#include "pythread.h"
#define ENTER_HASHLIB(obj) \
if ((obj)->use_mutex) { \
PyMutex_Lock(&(obj)->mutex); \
}
#define LEAVE_HASHLIB(obj) \
if ((obj)->use_mutex) { \
PyMutex_Unlock(&(obj)->mutex); \
}
#define HASHLIB_OBJECT_HEAD \
PyObject_HEAD \
/* Guard against race conditions during incremental update(). */ \
PyMutex mutex;
#ifdef Py_GIL_DISABLED
#define HASHLIB_INIT_MUTEX(obj) \
do { \
(obj)->mutex = (PyMutex){0}; \
(obj)->use_mutex = true; \
#define HASHLIB_INIT_MUTEX(OBJ) \
do { \
(OBJ)->mutex = (PyMutex){0}; \
} while (0)
#else
#define HASHLIB_INIT_MUTEX(obj) \
do { \
(obj)->mutex = (PyMutex){0}; \
(obj)->use_mutex = false; \
} while (0)
#endif
/* TODO(gpshead): We should make this a module or class attribute
* to allow the user to optimize based on the platform they're using. */
#define HASHLIB_GIL_MINSIZE 2048
#define HASHLIB_ACQUIRE_LOCK(OBJ) PyMutex_Lock(&(OBJ)->mutex)
#define HASHLIB_RELEASE_LOCK(OBJ) PyMutex_Unlock(&(OBJ)->mutex)
/*
* Message length above which the GIL is to be released
* when performing hashing operations.
*/
#define HASHLIB_GIL_MINSIZE 2048
// Macros for executing code while conditionally holding the GIL.
//
// These only drop the GIL if the lock acquisition itself is likely to
// block. Thus the non-blocking acquire gating the GIL release for a
// blocking lock acquisition. The intent of these macros is to surround
// the assumed always "fast" operations that you aren't releasing the
// GIL around.
/*
* Execute a suite of C statements 'STATEMENTS'.
*
 * The GIL is held if 'SIZE' does not exceed the HASHLIB_GIL_MINSIZE threshold.
*/
#define HASHLIB_EXTERNAL_INSTRUCTIONS_UNLOCKED(SIZE, STATEMENTS) \
do { \
if ((SIZE) > HASHLIB_GIL_MINSIZE) { \
Py_BEGIN_ALLOW_THREADS \
STATEMENTS; \
Py_END_ALLOW_THREADS \
} \
else { \
STATEMENTS; \
} \
} while (0)
/*
* Lock 'OBJ' and execute a suite of C statements 'STATEMENTS'.
*
 * The GIL is held if 'SIZE' does not exceed the HASHLIB_GIL_MINSIZE threshold.
*/
#define HASHLIB_EXTERNAL_INSTRUCTIONS_LOCKED(OBJ, SIZE, STATEMENTS) \
do { \
if ((SIZE) > HASHLIB_GIL_MINSIZE) { \
Py_BEGIN_ALLOW_THREADS \
HASHLIB_ACQUIRE_LOCK(OBJ); \
STATEMENTS; \
HASHLIB_RELEASE_LOCK(OBJ); \
Py_END_ALLOW_THREADS \
} \
else { \
HASHLIB_ACQUIRE_LOCK(OBJ); \
STATEMENTS; \
HASHLIB_RELEASE_LOCK(OBJ); \
} \
} while (0)
static inline int
_Py_hashlib_data_argument(PyObject **res, PyObject *data, PyObject *string)

View file

@ -215,105 +215,6 @@ typedef struct py_hmac_hacl_api {
#define Py_CHECK_HACL_UINT32_T_LENGTH(LEN)
#endif
/*
* Call the HACL* HMAC-HASH update function on the given data.
*
* The magnitude of 'LEN' is not checked and thus 'LEN' must be
* safely convertible to a uint32_t value.
*/
#define Py_HMAC_HACL_UPDATE_CALL(HACL_STATE, BUF, LEN) \
Hacl_Streaming_HMAC_update(HACL_STATE, BUF, (uint32_t)(LEN))
/*
* Call the HACL* HMAC-HASH update function on the given data.
*
* On DEBUG builds, the 'ERRACTION' statements are executed if
* the update() call returned a non-successful HACL* exit code.
*
* The buffer 'BUF' and its length 'LEN' are left untouched.
*
* The formal signature of this macro is:
*
* (HACL_HMAC_state *, uint8_t *, uint32_t, (C statements))
*/
#ifndef NDEBUG
#define Py_HMAC_HACL_UPDATE_ONCE( \
HACL_STATE, BUF, LEN, \
ERRACTION \
) \
do { \
Py_CHECK_HACL_UINT32_T_LENGTH(LEN); \
hacl_errno_t code = Py_HMAC_HACL_UPDATE_CALL(HACL_STATE, BUF, LEN); \
if (_hacl_convert_errno(code) < 0) { \
ERRACTION; \
} \
} while (0)
#else
#define Py_HMAC_HACL_UPDATE_ONCE( \
HACL_STATE, BUF, LEN, \
_ERRACTION \
) \
do { \
(void)Py_HMAC_HACL_UPDATE_CALL(HACL_STATE, BUF, (LEN)); \
} while (0)
#endif
/*
 * Repeatedly call the HACL* HMAC-HASH update function on the given
* data until the buffer length 'LEN' is strictly less than UINT32_MAX.
*
* On builds with PY_SSIZE_T_MAX <= UINT32_MAX, this is a no-op.
*
* The buffer 'BUF' (resp. 'LEN') is advanced (resp. decremented)
* by UINT32_MAX after each update. On DEBUG builds, each update()
* call is verified and the 'ERRACTION' statements are executed if
* a non-successful HACL* exit code is being returned.
*
* In particular, 'BUF' and 'LEN' must be variable names and not
* expressions on their own.
*
* The formal signature of this macro is:
*
* (HACL_HMAC_state *, uint8_t *, C integer, (C statements))
*/
#ifdef Py_HMAC_SSIZE_LARGER_THAN_UINT32
#define Py_HMAC_HACL_UPDATE_LOOP( \
HACL_STATE, BUF, LEN, \
ERRACTION \
) \
do { \
while ((Py_ssize_t)LEN > UINT32_MAX_AS_SSIZE_T) { \
Py_HMAC_HACL_UPDATE_ONCE(HACL_STATE, BUF, UINT32_MAX, \
ERRACTION); \
BUF += UINT32_MAX; \
LEN -= UINT32_MAX; \
} \
} while (0)
#else
#define Py_HMAC_HACL_UPDATE_LOOP( \
HACL_STATE, BUF, LEN, \
_ERRACTION \
)
#endif
/*
* Perform the HMAC-HASH update() operation in a streaming fashion.
*
* The formal signature of this macro is:
*
* (HACL_HMAC_state *, uint8_t *, C integer, (C statements))
*/
#define Py_HMAC_HACL_UPDATE( \
HACL_STATE, BUF, LEN, \
ERRACTION \
) \
do { \
Py_HMAC_HACL_UPDATE_LOOP(HACL_STATE, BUF, LEN, \
ERRACTION); \
Py_HMAC_HACL_UPDATE_ONCE(HACL_STATE, BUF, LEN, \
ERRACTION); \
} while (0)
/*
* HMAC underlying hash function static information.
*/
@ -382,11 +283,7 @@ get_hmacmodule_state_by_cls(PyTypeObject *cls)
typedef Hacl_Streaming_HMAC_agile_state HACL_HMAC_state;
typedef struct HMACObject {
PyObject_HEAD
bool use_mutex;
PyMutex mutex;
HASHLIB_OBJECT_HEAD
// Hash function information
PyObject *name; // rendered name (exact unicode object)
HMAC_Hash_Kind kind; // can be used for runtime dispatch (must be known)
@ -556,6 +453,51 @@ _hacl_hmac_state_free(HACL_HMAC_state *state)
}
}
/*
* Call the HACL* HMAC-HASH update function on the given data.
*
* On DEBUG builds, the update() call is verified.
*
* Return 0 on success; otherwise, set an exception and return -1 on failure.
*/
static int
_hacl_hmac_state_update_once(HACL_HMAC_state *state,
uint8_t *buf, uint32_t len)
{
#ifndef NDEBUG
hacl_errno_t code = Hacl_Streaming_HMAC_update(state, buf, len);
return _hacl_convert_errno(code);
#else
(void)Hacl_Streaming_HMAC_update(state, buf, len);
return 0;
#endif
}
/*
* Perform the HMAC-HASH update() operation in a streaming fashion.
*
* On DEBUG builds, each update() call is verified.
*
* Return 0 on success; otherwise, set an exception and return -1 on failure.
*/
static int
_hacl_hmac_state_update(HACL_HMAC_state *state, uint8_t *buf, Py_ssize_t len)
{
assert(len >= 0);
#ifdef Py_HMAC_SSIZE_LARGER_THAN_UINT32
while (len > UINT32_MAX_AS_SSIZE_T) {
if (_hacl_hmac_state_update_once(state, buf, UINT32_MAX) < 0) {
assert(PyErr_Occurred());
return -1;
}
buf += UINT32_MAX;
len -= UINT32_MAX;
}
#endif
Py_CHECK_HACL_UINT32_T_LENGTH(len);
return _hacl_hmac_state_update_once(state, buf, (uint32_t)len);
}
/* Static information used to construct the hash table. */
static const py_hmac_hinfo py_hmac_static_hinfo[] = {
#define Py_HMAC_HINFO_HACL_API(HACL_HID) \
@ -786,45 +728,6 @@ hmac_new_initial_state(HMACObject *self, uint8_t *key, Py_ssize_t len)
return self->state == NULL ? -1 : 0;
}
/*
* Feed initial data.
*
* This function MUST only be called by the HMAC object constructor
* and after hmac_set_hinfo() and hmac_new_initial_state() have been
* called, lest the behaviour is undefined.
*
* Return 0 on success; otherwise, set an exception and return -1 on failure.
*/
static int
hmac_feed_initial_data(HMACObject *self, uint8_t *msg, Py_ssize_t len)
{
assert(self->name != NULL);
assert(self->state != NULL);
if (len == 0) {
// do nothing if the buffer is empty
return 0;
}
if (len < HASHLIB_GIL_MINSIZE) {
Py_HMAC_HACL_UPDATE(self->state, msg, len, return -1);
return 0;
}
int res = 0;
Py_BEGIN_ALLOW_THREADS
Py_HMAC_HACL_UPDATE(self->state, msg, len, goto error);
goto done;
#ifndef NDEBUG
error:
res = -1;
#else
Py_UNREACHABLE();
#endif
done:
Py_END_ALLOW_THREADS
return res;
}
/*[clinic input]
_hmac.new
@ -871,7 +774,12 @@ _hmac_new_impl(PyObject *module, PyObject *keyobj, PyObject *msgobj,
if (msgobj != NULL && msgobj != Py_None) {
Py_buffer msg;
GET_BUFFER_VIEW_OR_ERROR(msgobj, &msg, goto error);
rc = hmac_feed_initial_data(self, msg.buf, msg.len);
/* Do not use self->mutex here as this is the constructor
* where it is not yet possible to have concurrent access. */
HASHLIB_EXTERNAL_INSTRUCTIONS_UNLOCKED(
msg.len,
rc = _hacl_hmac_state_update(self->state, msg.buf, msg.len)
);
PyBuffer_Release(&msg);
#ifndef NDEBUG
if (rc < 0) {
@ -948,12 +856,12 @@ _hmac_HMAC_copy_impl(HMACObject *self, PyTypeObject *cls)
return NULL;
}
ENTER_HASHLIB(self);
HASHLIB_ACQUIRE_LOCK(self);
/* copy hash information */
hmac_copy_hinfo(copy, self);
/* copy internal state */
int rc = hmac_copy_state(copy, self);
LEAVE_HASHLIB(self);
HASHLIB_RELEASE_LOCK(self);
if (rc < 0) {
Py_DECREF(copy);
@ -965,78 +873,6 @@ _hmac_HMAC_copy_impl(HMACObject *self, PyTypeObject *cls)
return (PyObject *)copy;
}
/*
* Update the HMAC object with the given buffer.
*
* This unconditionally acquires the lock on the HMAC object.
*
* On DEBUG builds, each update() call is verified.
*
* Return 0 on success; otherwise, set an exception and return -1 on failure.
*/
static int
hmac_update_state_with_lock(HMACObject *self, uint8_t *buf, Py_ssize_t len)
{
int res = 0;
Py_BEGIN_ALLOW_THREADS
PyMutex_Lock(&self->mutex); // unconditionally acquire a lock
Py_HMAC_HACL_UPDATE(self->state, buf, len, goto error);
goto done;
#ifndef NDEBUG
error:
res = -1;
#else
Py_UNREACHABLE();
#endif
done:
PyMutex_Unlock(&self->mutex);
Py_END_ALLOW_THREADS
return res;
}
/*
* Update the HMAC object with the given buffer.
*
* This conditionally acquires the lock on the HMAC object.
*
* On DEBUG builds, each update() call is verified.
*
* Return 0 on success; otherwise, set an exception and return -1 on failure.
*/
static int
hmac_update_state_cond_lock(HMACObject *self, uint8_t *buf, Py_ssize_t len)
{
ENTER_HASHLIB(self); // conditionally acquire a lock
Py_HMAC_HACL_UPDATE(self->state, buf, len, goto error);
LEAVE_HASHLIB(self);
return 0;
#ifndef NDEBUG
error:
LEAVE_HASHLIB(self);
return -1;
#else
Py_UNREACHABLE();
#endif
}
/*
* Update the internal HMAC state with the given buffer.
*
* Return 0 on success; otherwise, set an exception and return -1 on failure.
*/
static inline int
hmac_update_state(HMACObject *self, uint8_t *buf, Py_ssize_t len)
{
assert(buf != 0);
assert(len >= 0);
return len == 0
? 0 /* nothing to do */
: len < HASHLIB_GIL_MINSIZE
? hmac_update_state_cond_lock(self, buf, len)
: hmac_update_state_with_lock(self, buf, len);
}
/*[clinic input]
_hmac.HMAC.update
@ -1049,9 +885,13 @@ static PyObject *
_hmac_HMAC_update_impl(HMACObject *self, PyObject *msgobj)
/*[clinic end generated code: output=962134ada5e55985 input=7c0ea830efb03367]*/
{
int rc = 0;
Py_buffer msg;
GET_BUFFER_VIEW_OR_ERROUT(msgobj, &msg);
int rc = hmac_update_state(self, msg.buf, msg.len);
HASHLIB_EXTERNAL_INSTRUCTIONS_LOCKED(
self, msg.len,
rc = _hacl_hmac_state_update(self->state, msg.buf, msg.len)
);
PyBuffer_Release(&msg);
return rc < 0 ? NULL : Py_None;
}
@ -1067,13 +907,13 @@ _hmac_HMAC_update_impl(HMACObject *self, PyObject *msgobj)
* Note: this function may raise a MemoryError.
*/
static int
hmac_digest_compute_cond_lock(HMACObject *self, uint8_t *digest)
hmac_digest_compute_locked(HMACObject *self, uint8_t *digest)
{
assert(digest != NULL);
hacl_errno_t rc;
ENTER_HASHLIB(self); // conditionally acquire a lock
HASHLIB_ACQUIRE_LOCK(self);
rc = Hacl_Streaming_HMAC_digest(self->state, digest, self->digest_size);
LEAVE_HASHLIB(self);
HASHLIB_RELEASE_LOCK(self);
assert(
rc == Hacl_Streaming_Types_Success ||
rc == Hacl_Streaming_Types_OutOfMemory
@ -1095,7 +935,7 @@ _hmac_HMAC_digest_impl(HMACObject *self)
{
assert(self->digest_size <= Py_hmac_hash_max_digest_size);
uint8_t digest[Py_hmac_hash_max_digest_size];
if (hmac_digest_compute_cond_lock(self, digest) < 0) {
if (hmac_digest_compute_locked(self, digest) < 0) {
return NULL;
}
return PyBytes_FromStringAndSize((const char *)digest, self->digest_size);
@ -1118,7 +958,7 @@ _hmac_HMAC_hexdigest_impl(HMACObject *self)
{
assert(self->digest_size <= Py_hmac_hash_max_digest_size);
uint8_t digest[Py_hmac_hash_max_digest_size];
if (hmac_digest_compute_cond_lock(self, digest) < 0) {
if (hmac_digest_compute_locked(self, digest) < 0) {
return NULL;
}
return _Py_strhex((const char *)digest, self->digest_size);

View file

@ -38,12 +38,8 @@ class MD5Type "MD5object *" "&PyType_Type"
#include "_hacl/Hacl_Hash_MD5.h"
typedef struct {
PyObject_HEAD
// Prevents undefined behavior via multiple threads entering the C API.
bool use_mutex;
PyMutex mutex;
HASHLIB_OBJECT_HEAD
Hacl_Hash_MD5_state_t *hash_state;
} MD5object;
@ -118,9 +114,9 @@ MD5Type_copy_impl(MD5object *self, PyTypeObject *cls)
return NULL;
}
ENTER_HASHLIB(self);
HASHLIB_ACQUIRE_LOCK(self);
newobj->hash_state = Hacl_Hash_MD5_copy(self->hash_state);
LEAVE_HASHLIB(self);
HASHLIB_RELEASE_LOCK(self);
if (newobj->hash_state == NULL) {
Py_DECREF(newobj);
return PyErr_NoMemory();
@ -139,9 +135,9 @@ MD5Type_digest_impl(MD5object *self)
/*[clinic end generated code: output=eb691dc4190a07ec input=bc0c4397c2994be6]*/
{
uint8_t digest[MD5_DIGESTSIZE];
ENTER_HASHLIB(self);
HASHLIB_ACQUIRE_LOCK(self);
Hacl_Hash_MD5_digest(self->hash_state, digest);
LEAVE_HASHLIB(self);
HASHLIB_RELEASE_LOCK(self);
return PyBytes_FromStringAndSize((const char *)digest, MD5_DIGESTSIZE);
}
@ -156,9 +152,9 @@ MD5Type_hexdigest_impl(MD5object *self)
/*[clinic end generated code: output=17badced1f3ac932 input=b60b19de644798dd]*/
{
uint8_t digest[MD5_DIGESTSIZE];
ENTER_HASHLIB(self);
HASHLIB_ACQUIRE_LOCK(self);
Hacl_Hash_MD5_digest(self->hash_state, digest);
LEAVE_HASHLIB(self);
HASHLIB_RELEASE_LOCK(self);
return _Py_strhex((const char *)digest, MD5_DIGESTSIZE);
}
@ -170,6 +166,7 @@ update(Hacl_Hash_MD5_state_t *state, uint8_t *buf, Py_ssize_t len)
* take more than 1 billion years to overflow the maximum admissible length
* for MD5 (2^61 - 1).
*/
assert(len >= 0);
#if PY_SSIZE_T_MAX > UINT32_MAX
while (len > UINT32_MAX) {
(void)Hacl_Hash_MD5_update(state, buf, UINT32_MAX);
@ -195,22 +192,11 @@ MD5Type_update_impl(MD5object *self, PyObject *obj)
/*[clinic end generated code: output=b0fed9a7ce7ad253 input=6e1efcd9ecf17032]*/
{
Py_buffer buf;
GET_BUFFER_VIEW_OR_ERROUT(obj, &buf);
if (!self->use_mutex && buf.len >= HASHLIB_GIL_MINSIZE) {
self->use_mutex = true;
}
if (self->use_mutex) {
Py_BEGIN_ALLOW_THREADS
PyMutex_Lock(&self->mutex);
update(self->hash_state, buf.buf, buf.len);
PyMutex_Unlock(&self->mutex);
Py_END_ALLOW_THREADS
} else {
update(self->hash_state, buf.buf, buf.len);
}
HASHLIB_EXTERNAL_INSTRUCTIONS_LOCKED(
self, buf.len,
update(self->hash_state, buf.buf, buf.len)
);
PyBuffer_Release(&buf);
Py_RETURN_NONE;
}
@ -312,16 +298,12 @@ _md5_md5_impl(PyObject *module, PyObject *data, int usedforsecurity,
}
if (string) {
if (buf.len >= HASHLIB_GIL_MINSIZE) {
/* We do not initialize self->lock here as this is the constructor
* where it is not yet possible to have concurrent access. */
Py_BEGIN_ALLOW_THREADS
update(new->hash_state, buf.buf, buf.len);
Py_END_ALLOW_THREADS
}
else {
update(new->hash_state, buf.buf, buf.len);
}
/* Do not use self->mutex here as this is the constructor
* where it is not yet possible to have concurrent access. */
HASHLIB_EXTERNAL_INSTRUCTIONS_UNLOCKED(
buf.len,
update(new->hash_state, buf.buf, buf.len)
);
PyBuffer_Release(&buf);
}

View file

@ -38,11 +38,7 @@ class SHA1Type "SHA1object *" "&PyType_Type"
#include "_hacl/Hacl_Hash_SHA1.h"
typedef struct {
PyObject_HEAD
// Prevents undefined behavior via multiple threads entering the C API.
bool use_mutex;
PyMutex mutex;
PyThread_type_lock lock;
HASHLIB_OBJECT_HEAD
Hacl_Hash_SHA1_state_t *hash_state;
} SHA1object;
@ -121,9 +117,9 @@ SHA1Type_copy_impl(SHA1object *self, PyTypeObject *cls)
return NULL;
}
ENTER_HASHLIB(self);
HASHLIB_ACQUIRE_LOCK(self);
newobj->hash_state = Hacl_Hash_SHA1_copy(self->hash_state);
LEAVE_HASHLIB(self);
HASHLIB_RELEASE_LOCK(self);
if (newobj->hash_state == NULL) {
Py_DECREF(newobj);
return PyErr_NoMemory();
@ -142,9 +138,9 @@ SHA1Type_digest_impl(SHA1object *self)
/*[clinic end generated code: output=2f05302a7aa2b5cb input=13824b35407444bd]*/
{
unsigned char digest[SHA1_DIGESTSIZE];
ENTER_HASHLIB(self);
HASHLIB_ACQUIRE_LOCK(self);
Hacl_Hash_SHA1_digest(self->hash_state, digest);
LEAVE_HASHLIB(self);
HASHLIB_RELEASE_LOCK(self);
return PyBytes_FromStringAndSize((const char *)digest, SHA1_DIGESTSIZE);
}
@ -159,9 +155,9 @@ SHA1Type_hexdigest_impl(SHA1object *self)
/*[clinic end generated code: output=4161fd71e68c6659 input=97691055c0c74ab0]*/
{
unsigned char digest[SHA1_DIGESTSIZE];
ENTER_HASHLIB(self);
HASHLIB_ACQUIRE_LOCK(self);
Hacl_Hash_SHA1_digest(self->hash_state, digest);
LEAVE_HASHLIB(self);
HASHLIB_RELEASE_LOCK(self);
return _Py_strhex((const char *)digest, SHA1_DIGESTSIZE);
}
@ -198,22 +194,11 @@ SHA1Type_update_impl(SHA1object *self, PyObject *obj)
/*[clinic end generated code: output=cdc8e0e106dbec5f input=aad8e07812edbba3]*/
{
Py_buffer buf;
GET_BUFFER_VIEW_OR_ERROUT(obj, &buf);
if (!self->use_mutex && buf.len >= HASHLIB_GIL_MINSIZE) {
self->use_mutex = true;
}
if (self->use_mutex) {
Py_BEGIN_ALLOW_THREADS
PyMutex_Lock(&self->mutex);
update(self->hash_state, buf.buf, buf.len);
PyMutex_Unlock(&self->mutex);
Py_END_ALLOW_THREADS
} else {
update(self->hash_state, buf.buf, buf.len);
}
HASHLIB_EXTERNAL_INSTRUCTIONS_LOCKED(
self, buf.len,
update(self->hash_state, buf.buf, buf.len)
);
PyBuffer_Release(&buf);
Py_RETURN_NONE;
}
@ -314,16 +299,12 @@ _sha1_sha1_impl(PyObject *module, PyObject *data, int usedforsecurity,
return PyErr_NoMemory();
}
if (string) {
if (buf.len >= HASHLIB_GIL_MINSIZE) {
/* We do not initialize self->lock here as this is the constructor
* where it is not yet possible to have concurrent access. */
Py_BEGIN_ALLOW_THREADS
update(new->hash_state, buf.buf, buf.len);
Py_END_ALLOW_THREADS
}
else {
update(new->hash_state, buf.buf, buf.len);
}
/* Do not use self->mutex here as this is the constructor
* where it is not yet possible to have concurrent access. */
HASHLIB_EXTERNAL_INSTRUCTIONS_UNLOCKED(
buf.len,
update(new->hash_state, buf.buf, buf.len)
);
PyBuffer_Release(&buf);
}

View file

@ -50,20 +50,14 @@ class SHA512Type "SHA512object *" "&PyType_Type"
// TODO: Get rid of int digestsize in favor of Hacl state info?
typedef struct {
PyObject_HEAD
HASHLIB_OBJECT_HEAD
int digestsize;
// Prevents undefined behavior via multiple threads entering the C API.
bool use_mutex;
PyMutex mutex;
Hacl_Hash_SHA2_state_t_256 *state;
} SHA256object;
typedef struct {
PyObject_HEAD
HASHLIB_OBJECT_HEAD
int digestsize;
// Prevents undefined behavior via multiple threads entering the C API.
bool use_mutex;
PyMutex mutex;
Hacl_Hash_SHA2_state_t_512 *state;
} SHA512object;
@ -272,9 +266,9 @@ SHA256Type_copy_impl(SHA256object *self, PyTypeObject *cls)
}
}
ENTER_HASHLIB(self);
HASHLIB_ACQUIRE_LOCK(self);
rc = SHA256copy(self, newobj);
LEAVE_HASHLIB(self);
HASHLIB_RELEASE_LOCK(self);
if (rc < 0) {
Py_DECREF(newobj);
return NULL;
@ -309,9 +303,9 @@ SHA512Type_copy_impl(SHA512object *self, PyTypeObject *cls)
}
}
ENTER_HASHLIB(self);
HASHLIB_ACQUIRE_LOCK(self);
rc = SHA512copy(self, newobj);
LEAVE_HASHLIB(self);
HASHLIB_RELEASE_LOCK(self);
if (rc < 0) {
Py_DECREF(newobj);
return NULL;
@ -331,11 +325,11 @@ SHA256Type_digest_impl(SHA256object *self)
{
uint8_t digest[SHA256_DIGESTSIZE];
assert(self->digestsize <= SHA256_DIGESTSIZE);
ENTER_HASHLIB(self);
HASHLIB_ACQUIRE_LOCK(self);
// HACL* performs copies under the hood so that self->state remains valid
// after this call.
Hacl_Hash_SHA2_digest_256(self->state, digest);
LEAVE_HASHLIB(self);
HASHLIB_RELEASE_LOCK(self);
return PyBytes_FromStringAndSize((const char *)digest, self->digestsize);
}
@ -351,11 +345,11 @@ SHA512Type_digest_impl(SHA512object *self)
{
uint8_t digest[SHA512_DIGESTSIZE];
assert(self->digestsize <= SHA512_DIGESTSIZE);
ENTER_HASHLIB(self);
HASHLIB_ACQUIRE_LOCK(self);
// HACL* performs copies under the hood so that self->state remains valid
// after this call.
Hacl_Hash_SHA2_digest_512(self->state, digest);
LEAVE_HASHLIB(self);
HASHLIB_RELEASE_LOCK(self);
return PyBytes_FromStringAndSize((const char *)digest, self->digestsize);
}
@ -371,9 +365,9 @@ SHA256Type_hexdigest_impl(SHA256object *self)
{
uint8_t digest[SHA256_DIGESTSIZE];
assert(self->digestsize <= SHA256_DIGESTSIZE);
ENTER_HASHLIB(self);
HASHLIB_ACQUIRE_LOCK(self);
Hacl_Hash_SHA2_digest_256(self->state, digest);
LEAVE_HASHLIB(self);
HASHLIB_RELEASE_LOCK(self);
return _Py_strhex((const char *)digest, self->digestsize);
}
@ -389,9 +383,9 @@ SHA512Type_hexdigest_impl(SHA512object *self)
{
uint8_t digest[SHA512_DIGESTSIZE];
assert(self->digestsize <= SHA512_DIGESTSIZE);
ENTER_HASHLIB(self);
HASHLIB_ACQUIRE_LOCK(self);
Hacl_Hash_SHA2_digest_512(self->state, digest);
LEAVE_HASHLIB(self);
HASHLIB_RELEASE_LOCK(self);
return _Py_strhex((const char *)digest, self->digestsize);
}
@ -409,22 +403,11 @@ SHA256Type_update_impl(SHA256object *self, PyObject *obj)
/*[clinic end generated code: output=dc58a580cf8905a5 input=b2d449d5b30f0f5a]*/
{
Py_buffer buf;
GET_BUFFER_VIEW_OR_ERROUT(obj, &buf);
if (!self->use_mutex && buf.len >= HASHLIB_GIL_MINSIZE) {
self->use_mutex = true;
}
if (self->use_mutex) {
Py_BEGIN_ALLOW_THREADS
PyMutex_Lock(&self->mutex);
update_256(self->state, buf.buf, buf.len);
PyMutex_Unlock(&self->mutex);
Py_END_ALLOW_THREADS
} else {
update_256(self->state, buf.buf, buf.len);
}
HASHLIB_EXTERNAL_INSTRUCTIONS_LOCKED(
self, buf.len,
update_256(self->state, buf.buf, buf.len)
);
PyBuffer_Release(&buf);
Py_RETURN_NONE;
}
@ -443,22 +426,11 @@ SHA512Type_update_impl(SHA512object *self, PyObject *obj)
/*[clinic end generated code: output=9af211766c0b7365 input=ded2b46656566283]*/
{
Py_buffer buf;
GET_BUFFER_VIEW_OR_ERROUT(obj, &buf);
if (!self->use_mutex && buf.len >= HASHLIB_GIL_MINSIZE) {
self->use_mutex = true;
}
if (self->use_mutex) {
Py_BEGIN_ALLOW_THREADS
PyMutex_Lock(&self->mutex);
update_512(self->state, buf.buf, buf.len);
PyMutex_Unlock(&self->mutex);
Py_END_ALLOW_THREADS
} else {
update_512(self->state, buf.buf, buf.len);
}
HASHLIB_EXTERNAL_INSTRUCTIONS_LOCKED(
self, buf.len,
update_512(self->state, buf.buf, buf.len)
);
PyBuffer_Release(&buf);
Py_RETURN_NONE;
}
@ -638,16 +610,12 @@ _sha2_sha256_impl(PyObject *module, PyObject *data, int usedforsecurity,
return PyErr_NoMemory();
}
if (string) {
if (buf.len >= HASHLIB_GIL_MINSIZE) {
/* We do not initialize self->lock here as this is the constructor
* where it is not yet possible to have concurrent access. */
Py_BEGIN_ALLOW_THREADS
update_256(new->state, buf.buf, buf.len);
Py_END_ALLOW_THREADS
}
else {
update_256(new->state, buf.buf, buf.len);
}
/* Do not use self->mutex here as this is the constructor
* where it is not yet possible to have concurrent access. */
HASHLIB_EXTERNAL_INSTRUCTIONS_UNLOCKED(
buf.len,
update_256(new->state, buf.buf, buf.len)
);
PyBuffer_Release(&buf);
}
@ -700,16 +668,12 @@ _sha2_sha224_impl(PyObject *module, PyObject *data, int usedforsecurity,
return PyErr_NoMemory();
}
if (string) {
if (buf.len >= HASHLIB_GIL_MINSIZE) {
/* We do not initialize self->lock here as this is the constructor
* where it is not yet possible to have concurrent access. */
Py_BEGIN_ALLOW_THREADS
update_256(new->state, buf.buf, buf.len);
Py_END_ALLOW_THREADS
}
else {
update_256(new->state, buf.buf, buf.len);
}
/* Do not use self->mutex here as this is the constructor
* where it is not yet possible to have concurrent access. */
HASHLIB_EXTERNAL_INSTRUCTIONS_UNLOCKED(
buf.len,
update_256(new->state, buf.buf, buf.len)
);
PyBuffer_Release(&buf);
}
@ -763,16 +727,12 @@ _sha2_sha512_impl(PyObject *module, PyObject *data, int usedforsecurity,
return PyErr_NoMemory();
}
if (string) {
if (buf.len >= HASHLIB_GIL_MINSIZE) {
/* We do not initialize self->lock here as this is the constructor
* where it is not yet possible to have concurrent access. */
Py_BEGIN_ALLOW_THREADS
update_512(new->state, buf.buf, buf.len);
Py_END_ALLOW_THREADS
}
else {
update_512(new->state, buf.buf, buf.len);
}
/* Do not use self->mutex here as this is the constructor
* where it is not yet possible to have concurrent access. */
HASHLIB_EXTERNAL_INSTRUCTIONS_UNLOCKED(
buf.len,
update_512(new->state, buf.buf, buf.len)
);
PyBuffer_Release(&buf);
}
@ -826,16 +786,12 @@ _sha2_sha384_impl(PyObject *module, PyObject *data, int usedforsecurity,
return PyErr_NoMemory();
}
if (string) {
if (buf.len >= HASHLIB_GIL_MINSIZE) {
/* We do not initialize self->lock here as this is the constructor
* where it is not yet possible to have concurrent access. */
Py_BEGIN_ALLOW_THREADS
update_512(new->state, buf.buf, buf.len);
Py_END_ALLOW_THREADS
}
else {
update_512(new->state, buf.buf, buf.len);
}
/* Do not use self->mutex here as this is the constructor
* where it is not yet possible to have concurrent access. */
HASHLIB_EXTERNAL_INSTRUCTIONS_UNLOCKED(
buf.len,
update_512(new->state, buf.buf, buf.len)
);
PyBuffer_Release(&buf);
}

View file

@ -70,10 +70,7 @@ class _sha3.shake_256 "SHA3object *" "&SHAKE256type"
#include "_hacl/Hacl_Hash_SHA3.h"
typedef struct {
PyObject_HEAD
// Prevents undefined behavior via multiple threads entering the C API.
bool use_mutex;
PyMutex mutex;
HASHLIB_OBJECT_HEAD
Hacl_Hash_SHA3_state_t *hash_state;
} SHA3object;
@ -174,16 +171,12 @@ py_sha3_new_impl(PyTypeObject *type, PyObject *data_obj, int usedforsecurity,
if (data) {
GET_BUFFER_VIEW_OR_ERROR(data, &buf, goto error);
if (buf.len >= HASHLIB_GIL_MINSIZE) {
/* We do not initialize self->lock here as this is the constructor
* where it is not yet possible to have concurrent access. */
Py_BEGIN_ALLOW_THREADS
sha3_update(self->hash_state, buf.buf, buf.len);
Py_END_ALLOW_THREADS
}
else {
sha3_update(self->hash_state, buf.buf, buf.len);
}
/* Do not use self->mutex here as this is the constructor
* where it is not yet possible to have concurrent access. */
HASHLIB_EXTERNAL_INSTRUCTIONS_UNLOCKED(
buf.len,
sha3_update(self->hash_state, buf.buf, buf.len)
);
}
PyBuffer_Release(&buf);
@ -249,9 +242,9 @@ _sha3_sha3_224_copy_impl(SHA3object *self)
if ((newobj = newSHA3object(Py_TYPE(self))) == NULL) {
return NULL;
}
ENTER_HASHLIB(self);
HASHLIB_ACQUIRE_LOCK(self);
newobj->hash_state = Hacl_Hash_SHA3_copy(self->hash_state);
LEAVE_HASHLIB(self);
HASHLIB_RELEASE_LOCK(self);
if (newobj->hash_state == NULL) {
Py_DECREF(newobj);
return PyErr_NoMemory();
@ -273,9 +266,9 @@ _sha3_sha3_224_digest_impl(SHA3object *self)
unsigned char digest[SHA3_MAX_DIGESTSIZE];
// This function errors out if the algorithm is SHAKE. Here, we know this
// not to be the case, and therefore do not perform error checking.
ENTER_HASHLIB(self);
HASHLIB_ACQUIRE_LOCK(self);
(void)Hacl_Hash_SHA3_digest(self->hash_state, digest);
LEAVE_HASHLIB(self);
HASHLIB_RELEASE_LOCK(self);
return PyBytes_FromStringAndSize((const char *)digest,
Hacl_Hash_SHA3_hash_len(self->hash_state));
}
@ -292,9 +285,9 @@ _sha3_sha3_224_hexdigest_impl(SHA3object *self)
/*[clinic end generated code: output=75ad03257906918d input=2d91bb6e0d114ee3]*/
{
unsigned char digest[SHA3_MAX_DIGESTSIZE];
ENTER_HASHLIB(self);
HASHLIB_ACQUIRE_LOCK(self);
(void)Hacl_Hash_SHA3_digest(self->hash_state, digest);
LEAVE_HASHLIB(self);
HASHLIB_RELEASE_LOCK(self);
return _Py_strhex((const char *)digest,
Hacl_Hash_SHA3_hash_len(self->hash_state));
}
@ -314,22 +307,11 @@ _sha3_sha3_224_update_impl(SHA3object *self, PyObject *data)
/*[clinic end generated code: output=390b7abf7c9795a5 input=a887f54dcc4ae227]*/
{
Py_buffer buf;
GET_BUFFER_VIEW_OR_ERROUT(data, &buf);
if (!self->use_mutex && buf.len >= HASHLIB_GIL_MINSIZE) {
self->use_mutex = true;
}
if (self->use_mutex) {
Py_BEGIN_ALLOW_THREADS
PyMutex_Lock(&self->mutex);
sha3_update(self->hash_state, buf.buf, buf.len);
PyMutex_Unlock(&self->mutex);
Py_END_ALLOW_THREADS
} else {
sha3_update(self->hash_state, buf.buf, buf.len);
}
HASHLIB_EXTERNAL_INSTRUCTIONS_LOCKED(
self, buf.len,
sha3_update(self->hash_state, buf.buf, buf.len)
);
PyBuffer_Release(&buf);
Py_RETURN_NONE;
}
@ -531,9 +513,9 @@ _sha3_shake_128_digest_impl(SHA3object *self, Py_ssize_t length)
CHECK_HACL_UINT32_T_LENGTH(length);
PyObject *digest = PyBytes_FromStringAndSize(NULL, length);
uint8_t *buffer = (uint8_t *)PyBytes_AS_STRING(digest);
ENTER_HASHLIB(self);
HASHLIB_ACQUIRE_LOCK(self);
(void)Hacl_Hash_SHA3_squeeze(self->hash_state, buffer, (uint32_t)length);
LEAVE_HASHLIB(self);
HASHLIB_RELEASE_LOCK(self);
return digest;
}
@ -565,9 +547,9 @@ _sha3_shake_128_hexdigest_impl(SHA3object *self, Py_ssize_t length)
return PyErr_NoMemory();
}
ENTER_HASHLIB(self);
HASHLIB_ACQUIRE_LOCK(self);
(void)Hacl_Hash_SHA3_squeeze(self->hash_state, buffer, (uint32_t)length);
LEAVE_HASHLIB(self);
HASHLIB_RELEASE_LOCK(self);
PyObject *digest = _Py_strhex((const char *)buffer, length);
PyMem_Free(buffer);
return digest;