From: Noah Goldstein via Libc-alpha <libc-alpha@sourceware.org>
To: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Cc: GNU C Library <libc-alpha@sourceware.org>
Subject: Re: [PATCH v3 3/7] stdlib: Optimization qsort{_r} swap implementation (BZ #19305)
Date: Tue, 12 Oct 2021 23:39:50 -0400
Message-ID: <CAFUsyfKUh3Ai+udk8atdwhv+HBMKXRRuPCPD4znTY3KVSK8XOw@mail.gmail.com>
In-Reply-To: <CAFUsyfL-Ee0=6HmnA8Q4udtWHrxbSbHLY+ceWLFPcyvpS-yXUA@mail.gmail.com>

On Tue, Oct 12, 2021 at 11:29 PM Noah Goldstein <goldstein.w.n@gmail.com>
wrote:

>
>
> On Fri, Sep 3, 2021 at 1:14 PM Adhemerval Zanella via Libc-alpha <
> libc-alpha@sourceware.org> wrote:
>
>> The optimization takes into consideration both that the most common
>> element sizes are either 32 or 64 bits [1] and that inputs are aligned
>> to the word boundary.  This is similar to the optimization done on
>> lib/sort.c from Linux.
>>
>> This patch adds an optimized swap operation to qsort based on the
>> previous msort one.  Instead of a byte-wise operation, three variants
>> are provided:
>>
>>   1. Using uint32_t loads and stores.
>>   2. Using uint64_t loads and stores.
>>   3. Generic one with a temporary buffer and memcpy/mempcpy.
>>
>> Options 1. and 2. are selected only if the architecture defines
>> _STRING_ARCH_unaligned or if the base pointer is aligned to the
>> required type.
>>
>> It also fixes BZ#19305 by checking the number of elements against 1
>> in addition to 0.
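
A concrete illustration of the selection rule described above (a minimal
sketch, not the patch's code; the patch encodes the choice in the
swap_func_t values below):

  #include <stdint.h>
  #include <stddef.h>
  #include <stdbool.h>

  /* True if SIZE is a multiple of ALIGN and, on strict-alignment targets,
     BASE is too; both conditions collapse into a single mask test.  */
  static bool
  aligned_ok (const void *base, size_t size, size_t align)
  {
    return ((size | (uintptr_t) base) & (align - 1)) == 0;
  }

  /* E.g. an 8-byte-aligned base with size 24 selects the 64-bit swap;
     size 20 falls back to the 32-bit swap; size 17 to the byte swap.  */
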
>>
>> Checked on x86_64-linux-gnu.
>>
>> [1] https://sourceware.org/pipermail/libc-alpha/2018-August/096984.html
>> ---
>>  stdlib/qsort.c | 109 +++++++++++++++++++++++++++++++++++++++++--------
>>  1 file changed, 91 insertions(+), 18 deletions(-)
>>
>> diff --git a/stdlib/qsort.c b/stdlib/qsort.c
>> index 23f2d28314..59458d151b 100644
>> --- a/stdlib/qsort.c
>> +++ b/stdlib/qsort.c
>> @@ -24,20 +24,85 @@
>>  #include <limits.h>
>>  #include <stdlib.h>
>>  #include <string.h>
>> +#include <stdbool.h>
>>
>> -/* Byte-wise swap two items of size SIZE. */
>> -#define SWAP(a, b, size)                                                  \
>> -  do                                                                      \
>> -    {                                                                     \
>> -      size_t __size = (size);                                             \
>> -      char *__a = (a), *__b = (b);                                        \
>> -      do                                                                  \
>> -       {                                                                  \
>> -         char __tmp = *__a;                                               \
>> -         *__a++ = *__b;                                                   \
>> -         *__b++ = __tmp;                                                  \
>> -       } while (--__size > 0);                                            \
>> -    } while (0)
>> +/* Swap SIZE bytes between addresses A and B.  These helpers are provided
>> +   along with the generic one as an optimization.  */
>> +
>> +typedef void (*swap_func_t)(void * restrict, void * restrict, size_t);
>> +
>> +/* Return true if elements can be copied using word loads and stores.
>> +   The size must be a multiple of the alignment, and the base address
>> +   must be aligned as well.  */
>> +static inline bool
>> +is_aligned_to_copy (const void *base, size_t size, size_t align)
>> +{
>> +  unsigned char lsbits = size;
>> +#if !_STRING_ARCH_unaligned
>> +  lsbits |= (unsigned char)(uintptr_t) base;
>> +#endif
>> +  return (lsbits & (align - 1)) == 0;
>> +}
>> +
>> +#define SWAP_WORDS_64 (swap_func_t)0
>> +#define SWAP_WORDS_32 (swap_func_t)1
>> +#define SWAP_BYTES    (swap_func_t)2
>> +
>> +static void
>> +swap_words_64 (void * restrict a, void * restrict b, size_t n)
>> +{
>> +  do
>> +   {
>> +     n -= 8;
>> +     uint64_t t = *(uint64_t *)(a + n);
>> +     *(uint64_t *)(a + n) = *(uint64_t *)(b + n);
>> +     *(uint64_t *)(b + n) = t;
>> +   } while (n);
>> +}
>> +
>> +static void
>> +swap_words_32 (void * restrict a, void * restrict b, size_t n)
>> +{
>> +  do
>> +   {
>> +     n -= 4;
>> +     uint32_t t = *(uint32_t *)(a + n);
>> +     *(uint32_t *)(a + n) = *(uint32_t *)(b + n);
>> +     *(uint32_t *)(b + n) = t;
>> +   } while (n);
>> +}
>>
>
I'm not certain swap_words_32 / swap_words_64 will be optimal for larger
key sizes. Looking at GCC's implementation of swap_generic on modern
x86_64: https://godbolt.org/z/638h3Y9va
It's able to optimize the temporary buffer out of the loop and use xmm
registers, which will likely win out for larger sizes.
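
For reference, a minimal sketch of that shape (assuming fixed 32-byte
chunks and a compiler that inlines constant-size memcpy; this mirrors the
godbolt example above, not the patch's exact code):

  #include <string.h>
  #include <stddef.h>

  static void
  swap_generic (void *restrict a, void *restrict b, size_t n)
  {
    unsigned char tmp[32];
    while (n >= sizeof tmp)
      {
        /* Constant-size copies let the compiler keep TMP in registers
           (xmm on x86_64) rather than on the stack.  */
        memcpy (tmp, a, sizeof tmp);
        memcpy (a, b, sizeof tmp);
        memcpy (b, tmp, sizeof tmp);
        a = (unsigned char *) a + sizeof tmp;
        b = (unsigned char *) b + sizeof tmp;
        n -= sizeof tmp;
      }
    /* Remaining n < 32 bytes.  */
    memcpy (tmp, a, n);
    memcpy (a, b, n);
    memcpy (b, tmp, n);
  }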

>> +
>> +static void
>> +swap_bytes (void * restrict a, void * restrict b, size_t n)
>> +{
>> +  /* Use multiple small memcpys with constant size to enable inlining
>> +     on most targets.  */
>> +  enum { SWAP_GENERIC_SIZE = 32 };
>> +  unsigned char tmp[SWAP_GENERIC_SIZE];
>> +  while (n > SWAP_GENERIC_SIZE)
>> +    {
>> +      memcpy (tmp, a, SWAP_GENERIC_SIZE);
>> +      a = memcpy (a, b, SWAP_GENERIC_SIZE) + SWAP_GENERIC_SIZE;
>> +      b = memcpy (b, tmp, SWAP_GENERIC_SIZE) + SWAP_GENERIC_SIZE;
>> +      n -= SWAP_GENERIC_SIZE;
>> +    }
>> +  memcpy (tmp, a, n);
>> +  memcpy (a, b, n);
>> +  memcpy (b, tmp, n);
>> +}
>> +
>> +/* Replace the indirect call with a series of if statements.  It should
>> +   help the branch predictor.  */
>>
>
> 1) Really? On Intel at least, an indirect call that always goes to the
> same place is certainly going to be predicted as well as, if not better
> than, 2/3 branches + a direct call.
>
> 2) If you're going to just test which swap function to use, why bother
> initializing swap_func? Why not just use an int?
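
Something along these lines is what I mean by using an int (hypothetical
names; it reuses the patch's swap_words_64 / swap_words_32 / swap_bytes):

  enum swap_kind { SWAP_KIND_WORDS_64, SWAP_KIND_WORDS_32, SWAP_KIND_BYTES };

  static void
  do_swap (void *restrict a, void *restrict b, size_t size,
           enum swap_kind kind)
  {
    /* Plain integer compares; no casts to a function-pointer type.  */
    if (kind == SWAP_KIND_WORDS_64)
      swap_words_64 (a, b, size);
    else if (kind == SWAP_KIND_WORDS_32)
      swap_words_32 (a, b, size);
    else
      swap_bytes (a, b, size);
  }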
>
>
>
>> +static void
>> +do_swap (void * restrict a, void * restrict b, size_t size,
>> +        swap_func_t swap_func)
>> +{
>> +  if (swap_func == SWAP_WORDS_64)
>> +    swap_words_64 (a, b, size);
>> +  else if (swap_func == SWAP_WORDS_32)
>> +    swap_words_32 (a, b, size);
>> +  else
>> +    swap_bytes (a, b, size);
>> +}
>>
>>  /* Discontinue quicksort algorithm when partition gets below this size.
>>     This particular magic number was chosen to work best on a Sun 4/260.  */
>> @@ -97,6 +162,14 @@ _quicksort (void *const pbase, size_t total_elems, size_t size,
>>      /* Avoid lossage with unsigned arithmetic below.  */
>>      return;
>>
>> +  swap_func_t swap_func;
>> +  if (is_aligned_to_copy (pbase, size, 8))
>> +    swap_func = SWAP_WORDS_64;
>> +  else if (is_aligned_to_copy (pbase, size, 4))
>>
>
For many modern architectures that support fast unaligned loads/stores
(for example x86_64 SnB and newer) I don't think this check really makes
sense.
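
On such targets the word swaps could go through memcpy instead of pointer
casts, so only the size check would remain; a sketch under that assumption
(hypothetical helper; N must be a nonzero multiple of 8):

  #include <stdint.h>
  #include <string.h>

  static void
  swap_words_64_any (void *restrict a, void *restrict b, size_t n)
  {
    do
      {
        uint64_t t, u;
        n -= 8;
        /* Fixed-size memcpys compile to plain (possibly unaligned)
           loads and stores on these targets.  */
        memcpy (&t, (char *) a + n, 8);
        memcpy (&u, (char *) b + n, 8);
        memcpy ((char *) a + n, &u, 8);
        memcpy ((char *) b + n, &t, 8);
      }
    while (n != 0);
  }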


>> +    swap_func = SWAP_WORDS_32;
>> +  else
>> +    swap_func = SWAP_BYTES;
>> +
>>    if (total_elems > MAX_THRESH)
>>      {
>>        char *lo = base_ptr;
>> @@ -120,13 +193,13 @@ _quicksort (void *const pbase, size_t total_elems, size_t size,
>>           char *mid = lo + size * ((hi - lo) / size >> 1);
>>
>>           if ((*cmp) ((void *) mid, (void *) lo, arg) < 0)
>> -           SWAP (mid, lo, size);
>> +           do_swap (mid, lo, size, swap_func);
>>           if ((*cmp) ((void *) hi, (void *) mid, arg) < 0)
>> -           SWAP (mid, hi, size);
>> +           do_swap (mid, hi, size, swap_func);
>>           else
>>             goto jump_over;
>>           if ((*cmp) ((void *) mid, (void *) lo, arg) < 0)
>> -           SWAP (mid, lo, size);
>> +           do_swap (mid, lo, size, swap_func);
>>         jump_over:;
>>
>>           left_ptr  = lo + size;
>> @@ -145,7 +218,7 @@ _quicksort (void *const pbase, size_t total_elems, size_t size,
>>
>>               if (left_ptr < right_ptr)
>>                 {
>> -                 SWAP (left_ptr, right_ptr, size);
>> +                 do_swap (left_ptr, right_ptr, size, swap_func);
>>                   if (mid == left_ptr)
>>                     mid = right_ptr;
>>                   else if (mid == right_ptr)
>> @@ -217,7 +290,7 @@ _quicksort (void *const pbase, size_t total_elems, size_t size,
>>          tmp_ptr = run_ptr;
>>
>>      if (tmp_ptr != base_ptr)
>> -      SWAP (tmp_ptr, base_ptr, size);
>> +      do_swap (tmp_ptr, base_ptr, size, swap_func);
>>
>>      /* Insertion sort, running from left-hand-side up to right-hand-side.  */
>>
>> --
>> 2.30.2
>>
>>


Thread overview: 41+ messages
2021-09-03 17:11 [PATCH v3 0/7] Use introsort for qsort Adhemerval Zanella via Libc-alpha
2021-09-03 17:11 ` [PATCH v3 1/7] benchtests: Add bench-qsort Adhemerval Zanella via Libc-alpha
2021-09-04  9:09   ` Alexander Monakov via Libc-alpha
2021-09-06 18:30     ` Adhemerval Zanella via Libc-alpha
2021-10-13  3:19   ` Noah Goldstein via Libc-alpha
2021-10-15 12:52     ` Adhemerval Zanella via Libc-alpha
2021-10-15 16:39       ` Noah Goldstein via Libc-alpha
2021-10-15 17:19         ` Adhemerval Zanella via Libc-alpha
2021-09-03 17:11 ` [PATCH v3 2/7] support: Fix getopt_long with CMDLINE_OPTIONS Adhemerval Zanella via Libc-alpha
2021-09-03 17:11 ` [PATCH v3 3/7] stdlib: Optimization qsort{_r} swap implementation (BZ #19305) Adhemerval Zanella via Libc-alpha
2021-10-13  3:29   ` Noah Goldstein via Libc-alpha
2021-10-13  3:39     ` Noah Goldstein via Libc-alpha [this message]
2021-10-15 13:29       ` Adhemerval Zanella via Libc-alpha
2021-10-15 17:17         ` Noah Goldstein via Libc-alpha
2021-10-15 17:34           ` Adhemerval Zanella via Libc-alpha
2021-10-15 17:45             ` Noah Goldstein via Libc-alpha
2021-10-15 17:56               ` Adhemerval Zanella via Libc-alpha
2021-10-15 13:12     ` Adhemerval Zanella via Libc-alpha
2021-10-15 16:45       ` Noah Goldstein via Libc-alpha
2021-10-15 17:21         ` Adhemerval Zanella via Libc-alpha
2021-09-03 17:11 ` [PATCH v3 4/7] stdlib: Move insertion sort out qsort Adhemerval Zanella via Libc-alpha
2021-09-06 20:35   ` Fangrui Song via Libc-alpha
2021-09-06 20:48     ` Fangrui Song via Libc-alpha
2021-09-03 17:11 ` [PATCH v3 5/7] stdlib: qsort: Move some macros to inline function Adhemerval Zanella via Libc-alpha
2021-09-03 17:11 ` [PATCH v3 6/7] stdlib: Implement introsort with qsort Adhemerval Zanella via Libc-alpha
2021-09-04  9:17   ` Alexander Monakov via Libc-alpha
2021-09-06 18:43     ` Adhemerval Zanella via Libc-alpha
2021-09-06 20:23   ` Fangrui Song via Libc-alpha
2021-10-13  3:53   ` Noah Goldstein via Libc-alpha
2021-09-03 17:11 ` [PATCH v3 7/7] stdlib: Remove use of mergesort on qsort (BZ #21719) Adhemerval Zanella via Libc-alpha
2021-09-03 19:18 ` [PATCH v3 0/7] Use introsort for qsort Paul Eggert
2021-09-06 14:13   ` Carlos O'Donell via Libc-alpha
2021-09-06 17:03     ` Zack Weinberg via Libc-alpha
2021-09-06 18:19       ` Adhemerval Zanella via Libc-alpha
2021-09-07  0:14     ` Paul Eggert
2021-09-07 14:32       ` Adhemerval Zanella via Libc-alpha
2021-09-07 17:39         ` Paul Eggert
2021-09-07 18:07           ` Adhemerval Zanella via Libc-alpha
2021-09-07 19:28             ` Paul Eggert
2021-09-08 11:56               ` Adhemerval Zanella via Libc-alpha
2021-09-09  0:39                 ` Paul Eggert
