mirror of https://github.com/python/cpython.git
synced 2025-10-09 16:34:44 +00:00
bpo-43179: Generalise alignment for optimised string routines (GH-24624)
* Remove m68k-specific hack from ascii_decode

  On m68k, the alignment of primitive types is more relaxed: 4-byte and 8-byte
  types only require 2-byte alignment, so using sizeof(size_t) as the alignment
  does not work. Instead, use the portable alternative. Note that this is a
  minimal fix that only relaxes the assertion; the condition for when to use
  the optimised version remains overly strict. Such issues will be fixed
  tree-wide in the next commit.

  NB: In C11 we could use _Alignof(size_t) instead, but for compatibility we
  use autoconf.

* Optimise string routines for architectures with non-natural alignment

  C only requires that sizeof(x) is a multiple of alignof(x), not that the two
  are equal. Thus anywhere we optimise based on alignment we should be using
  alignof(x), not sizeof(x). This would be easier in C11, where we could just
  use _Alignof(x) (or alignof(x) in C++11), but since we still require only
  C99 we must plumb the information all the way from autoconf through the
  various typedefs and defines.
This commit is contained in:
parent cfa176685a
commit dec0757549
8 changed files with 96 additions and 31 deletions
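The key claim in the second bullet, that a type's alignment can be strictly smaller than its size, can be demonstrated with the classic C99 offsetof probe, which is also the style of check a configure script can run to export values such as the ALIGNOF_SIZE_T and ALIGNOF_LONG macros used in the hunks below. The snippet is an illustrative sketch, not the actual autoconf test.

```c
#include <stddef.h>
#include <stdio.h>

/* Illustrative sketch: measure a type's alignment requirement in plain C99,
   without C11 _Alignof.  The padding the compiler inserts before 'member'
   equals the required alignment of that member's type. */
struct probe_size_t { char pad; size_t member; };
struct probe_long   { char pad; long   member; };

int main(void)
{
    printf("sizeof(size_t)=%zu  measured alignof(size_t)=%zu\n",
           sizeof(size_t), offsetof(struct probe_size_t, member));
    printf("sizeof(long)  =%zu  measured alignof(long)  =%zu\n",
           sizeof(long),   offsetof(struct probe_long, member));
    /* On m68k the measured alignment can be 2 while the size is 4 (or 8),
       which is why SIZEOF_* is the wrong constant for alignment tests. */
    return 0;
}
```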
@@ -26,7 +26,6 @@ STRINGLIB(utf8_decode)(const char **inptr, const char *end,
 {
     Py_UCS4 ch;
     const char *s = *inptr;
-    const char *aligned_end = (const char *) _Py_ALIGN_DOWN(end, SIZEOF_SIZE_T);
     STRINGLIB_CHAR *p = dest + *outpos;
 
     while (s < end) {
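For context, the two helpers appearing in these hunks, _Py_ALIGN_DOWN and _Py_IS_ALIGNED, are plain mask arithmetic on the pointer value. The definitions below are a paraphrase that assumes the alignment is a power of two, not a verbatim copy of CPython's Include/pymacro.h.

```c
#include <stdint.h>
#include <stdio.h>

/* Paraphrased alignment helpers; both assume 'a' is a power of two.
   The real macros live in Include/pymacro.h and may differ in casts
   and naming details. */
#define ALIGN_DOWN(p, a)  ((void *)((uintptr_t)(p) & ~(uintptr_t)((a) - 1)))
#define IS_ALIGNED(p, a)  (((uintptr_t)(p) & (uintptr_t)((a) - 1)) == 0)

int main(void)
{
    char buf[32];
    const char *end = buf + 27;
    /* Round 'end' down to an 8-byte boundary and test alignment of buf+1;
       the exact numbers depend on where the compiler places buf. */
    printf("aligned_end offset: %td\n", (char *)ALIGN_DOWN(end, 8) - buf);
    printf("buf+1 8-byte aligned? %d\n", IS_ALIGNED(buf + 1, 8));
    return 0;
}
```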
@@ -40,11 +39,11 @@ STRINGLIB(utf8_decode)(const char **inptr, const char *end,
                First, check if we can do an aligned read, as most CPUs have
                a penalty for unaligned reads.
             */
-            if (_Py_IS_ALIGNED(s, SIZEOF_SIZE_T)) {
+            if (_Py_IS_ALIGNED(s, ALIGNOF_SIZE_T)) {
                 /* Help register allocation */
                 const char *_s = s;
                 STRINGLIB_CHAR *_p = p;
-                while (_s < aligned_end) {
+                while (_s + SIZEOF_SIZE_T <= end) {
                     /* Read a whole size_t at a time (either 4 or 8 bytes),
                        and do a fast unrolled copy if it only contains ASCII
                        characters. */
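Below is a minimal, self-contained sketch of the fast path this hunk adjusts. Only the shape of the loop is modelled: test the pointer against the type's alignment, bound the block loop by "enough bytes left" rather than a pre-rounded aligned_end, and scan size_t-sized blocks for ASCII. The names, the C11 _Alignof shortcut, and the memcpy load are illustrative choices, not CPython's code.

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Count leading ASCII bytes, reading size_t-sized blocks when possible. */
static size_t count_leading_ascii(const char *s, const char *end)
{
    const char *start = s;
    /* High bit of every byte: any non-ASCII byte makes (block & mask) != 0. */
    const size_t mask = (size_t)-1 / 0xFF * 0x80;

    /* C11 _Alignof is used here only for brevity; the commit instead plumbs
       ALIGNOF_SIZE_T through from autoconf because CPython targets C99. */
    if (((uintptr_t)s % _Alignof(size_t)) == 0) {
        while (s + sizeof(size_t) <= end) {         /* the corrected bound */
            size_t block;
            memcpy(&block, s, sizeof block);        /* aligned block load */
            if (block & mask)
                break;                              /* non-ASCII byte inside */
            s += sizeof(size_t);
        }
    }
    while (s < end && (unsigned char)*s < 0x80)     /* finish byte by byte */
        s++;
    return (size_t)(s - start);
}

int main(void)
{
    const char text[] = "hello, world \xc3\xa9!";
    printf("%zu leading ASCII bytes\n",
           count_leading_ascii(text, text + sizeof text - 1));
    return 0;
}
```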
@@ -496,8 +495,6 @@ STRINGLIB(utf16_decode)(const unsigned char **inptr, const unsigned char *e,
                         int native_ordering)
 {
     Py_UCS4 ch;
-    const unsigned char *aligned_end =
-        (const unsigned char *) _Py_ALIGN_DOWN(e, SIZEOF_LONG);
     const unsigned char *q = *inptr;
     STRINGLIB_CHAR *p = dest + *outpos;
     /* Offsets from q for retrieving byte pairs in the right order. */
@@ -512,10 +509,10 @@ STRINGLIB(utf16_decode)(const unsigned char **inptr, const unsigned char *e,
         Py_UCS4 ch2;
         /* First check for possible aligned read of a C 'long'. Unaligned
            reads are more expensive, better to defer to another iteration. */
-        if (_Py_IS_ALIGNED(q, SIZEOF_LONG)) {
+        if (_Py_IS_ALIGNED(q, ALIGNOF_LONG)) {
            /* Fast path for runs of in-range non-surrogate chars. */
            const unsigned char *_q = q;
-            while (_q < aligned_end) {
+            while (_q + SIZEOF_LONG <= e) {
                unsigned long block = * (const unsigned long *) _q;
                if (native_ordering) {
                    /* Can use buffer directly */
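One point the hunks above leave implicit: once the pointer is only guaranteed to be aligned to alignof(long) rather than sizeof(long), the old "_q < aligned_end" bound no longer guarantees that a full long's worth of bytes remains before e, which is why the explicit "_q + SIZEOF_LONG <= e" bound replaces it. The toy arithmetic below, with made-up addresses and an m68k-style 2-byte alignment, shows the two conditions diverging.

```c
#include <stdint.h>
#include <stdio.h>

/* Toy demonstration (made-up addresses): with only 2-byte alignment, a
   pointer can pass the old "q < aligned_end" test yet not have a full
   sizeof(long) bytes left before e, so the explicit bound is required. */
#define ALIGN_DOWN(x, a)  ((x) & ~(uintptr_t)((a) - 1))

int main(void)
{
    const uintptr_t size_of_long = 8;                            /* 64-bit long */
    const uintptr_t e = 0x1001;                                  /* end of input */
    const uintptr_t aligned_end = ALIGN_DOWN(e, size_of_long);   /* 0x1000 */
    const uintptr_t q = 0x0ffe;                  /* 2-byte aligned, not 8-byte */

    printf("old bound, q < aligned_end:       %d\n", q < aligned_end);
    printf("new bound, q + sizeof(long) <= e: %d\n", q + size_of_long <= e);
    /* Prints 1 then 0: the old test would allow an 8-byte read at 0x0ffe,
       which extends past e; the new test correctly refuses it. */
    return 0;
}
```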