git@vger.kernel.org list mirror (unofficial, one of many)
 help / color / Atom feed
From: Duy Nguyen <pclouds@gmail.com>
To: Ben Peart <Ben.Peart@microsoft.com>
Cc: Git Mailing List <git@vger.kernel.org>,
	Junio C Hamano <gitster@pobox.com>
Subject: Re: [PATCH v2 1/3] read-cache: speed up index load through parallelization
Date: Mon, 3 Sep 2018 21:16:35 +0200
Message-ID: <CACsJy8BK5a1DgY4o5Xxrdjyg1QYxW-kCPNmAXkdudCD78O7dTg@mail.gmail.com> (raw)
In-Reply-To: <20180829152500.46640-2-benpeart@microsoft.com>

On Wed, Aug 29, 2018 at 5:25 PM Ben Peart <Ben.Peart@microsoft.com> wrote:
> diff --git a/read-cache.c b/read-cache.c
> index 7b1354d759..c30346388a 100644
> --- a/read-cache.c
> +++ b/read-cache.c
> @@ -1889,16 +1889,229 @@ static size_t estimate_cache_size(size_t ondisk_size, unsigned int entries)
>         return ondisk_size + entries * per_entry;
>  }
>
> +/*
> + * A helper function that will load the specified range of cache entries
> + * from the memory mapped file and add them to the given index.
> + */
> +static unsigned long load_cache_entry_block(struct index_state *istate,
> +                       struct mem_pool *ce_mem_pool, int offset, int nr, void *mmap,
> +                       unsigned long start_offset, struct strbuf *previous_name)
> +{
> +       int i;
> +       unsigned long src_offset = start_offset;
> +
> +       for (i = offset; i < offset + nr; i++) {

It may be micro optimization, but since we're looping a lot and can't
trust the compiler to optimize this, maybe just calculate this upper
limit and store in a local variable to make it clear the upper limit
is known, no point of recalculating it at every iteration.

> +               struct ondisk_cache_entry *disk_ce;
> +               struct cache_entry *ce;
> +               unsigned long consumed;
> +
> +               disk_ce = (struct ondisk_cache_entry *)((char *)mmap + src_offset);
> +               ce = create_from_disk(ce_mem_pool, disk_ce, &consumed, previous_name);
> +               set_index_entry(istate, i, ce);
> +
> +               src_offset += consumed;
> +       }
> +       return src_offset - start_offset;
> +}
> +
> +static unsigned long load_all_cache_entries(struct index_state *istate,
> +                       void *mmap, size_t mmap_size, unsigned long src_offset)
> +{
> +       struct strbuf previous_name_buf = STRBUF_INIT, *previous_name;
> +       unsigned long consumed;
> +
> +       if (istate->version == 4) {
> +               previous_name = &previous_name_buf;
> +               mem_pool_init(&istate->ce_mem_pool,
> +                             estimate_cache_size_from_compressed(istate->cache_nr));
> +       } else {
> +               previous_name = NULL;
> +               mem_pool_init(&istate->ce_mem_pool,
> +                             estimate_cache_size(mmap_size, istate->cache_nr));
> +       }
> +
> +       consumed = load_cache_entry_block(istate, istate->ce_mem_pool,
> +                                       0, istate->cache_nr, mmap, src_offset, previous_name);
> +       strbuf_release(&previous_name_buf);
> +       return consumed;
> +}
> +
> +#ifdef NO_PTHREADS
> +
> +#define load_cache_entries load_all_cache_entries
> +
> +#else
> +
> +#include "thread-utils.h"

Don't include files in a middle of a file.

> +
> +/*
> +* Mostly randomly chosen maximum thread counts: we
> +* cap the parallelism to online_cpus() threads, and we want
> +* to have at least 7500 cache entries per thread for it to
> +* be worth starting a thread.
> +*/
> +#define THREAD_COST            (7500)

Isn't 7500 a bit too low? I'm still basing on webkit.git, and 7500
entries take about 1.2ms on average. 100k files would take about 16ms
and may be more reasonable (still too low in my opinion).

> +
> +struct load_cache_entries_thread_data
> +{
> +       pthread_t pthread;
> +       struct index_state *istate;
> +       struct mem_pool *ce_mem_pool;
> +       int offset, nr;
> +       void *mmap;
> +       unsigned long start_offset;
> +       struct strbuf previous_name_buf;
> +       struct strbuf *previous_name;
> +       unsigned long consumed; /* return # of bytes in index file processed */
> +};
> +
> +/*
> +* A thread proc to run the load_cache_entries() computation
> +* across multiple background threads.
> +*/
> +static void *load_cache_entries_thread(void *_data)
> +{
> +       struct load_cache_entries_thread_data *p = _data;
> +
> +       p->consumed += load_cache_entry_block(p->istate, p->ce_mem_pool,
> +               p->offset, p->nr, p->mmap, p->start_offset, p->previous_name);
> +       return NULL;
> +}
> +
> +static unsigned long load_cache_entries(struct index_state *istate,
> +                       void *mmap, size_t mmap_size, unsigned long src_offset)
> +{
> +       struct strbuf previous_name_buf = STRBUF_INIT, *previous_name;
> +       struct load_cache_entries_thread_data *data;
> +       int nr_threads, cpus, ce_per_thread;
> +       unsigned long consumed;
> +       int i, thread;
> +
> +       nr_threads = git_config_get_index_threads();
> +       if (!nr_threads) {
> +               cpus = online_cpus();
> +               nr_threads = istate->cache_nr / THREAD_COST;
> +               if (nr_threads > cpus)
> +                       nr_threads = cpus;
> +       }
> +
> +       /* enable testing with fewer than default minimum of entries */
> +       if ((istate->cache_nr > 1) && (nr_threads < 2) && git_env_bool("GIT_INDEX_THREADS_TEST", 0))
> +               nr_threads = 2;

Please don't add more '()' than necessary. It's just harder to read.
Maybe break that "if" into two lines since it's getting long.

> +
> +       if (nr_threads < 2)
> +               return load_all_cache_entries(istate, mmap, mmap_size, src_offset);
> +
> +       /* a little sanity checking */
> +       if (istate->name_hash_initialized)
> +               die("the name hash isn't thread safe");
> +
> +       mem_pool_init(&istate->ce_mem_pool, 0);
> +       if (istate->version == 4)
> +               previous_name = &previous_name_buf;
> +       else
> +               previous_name = NULL;
> +
> +       ce_per_thread = DIV_ROUND_UP(istate->cache_nr, nr_threads);
> +       data = xcalloc(nr_threads, sizeof(struct load_cache_entries_thread_data));
> +
> +       /*
> +        * Loop through index entries starting a thread for every ce_per_thread
> +        * entries. Exit the loop when we've created the final thread (no need
> +        * to parse the remaining entries.
> +        */
> +       consumed = thread = 0;
> +       for (i = 0; ; i++) {
> +               struct ondisk_cache_entry *ondisk;
> +               const char *name;
> +               unsigned int flags;
> +
> +               /*
> +                * we've reached the beginning of a block of cache entries,
> +                * kick off a thread to process them
> +                */
> +               if (0 == i % ce_per_thread) {

I don't get why people keep putting constants in reversed order like
this. Perhaps in the old days, it helps catch "a = 0" mistakes, but
compilers nowadays are smart enough to complain about that and this is
just hard to read.

> +                       struct load_cache_entries_thread_data *p = &data[thread];
> +
> +                       p->istate = istate;
> +                       p->offset = i;
> +                       p->nr = ce_per_thread < istate->cache_nr - i ? ce_per_thread : istate->cache_nr - i;
> +
> +                       /* create a mem_pool for each thread */
> +                       if (istate->version == 4)
> +                               mem_pool_init(&p->ce_mem_pool,
> +                                                 estimate_cache_size_from_compressed(p->nr));
> +                       else
> +                               mem_pool_init(&p->ce_mem_pool,
> +                                                 estimate_cache_size(mmap_size, p->nr));
> +
> +                       p->mmap = mmap;
> +                       p->start_offset = src_offset;
> +                       if (previous_name) {
> +                               strbuf_addbuf(&p->previous_name_buf, previous_name);
> +                               p->previous_name = &p->previous_name_buf;
> +                       }
> +
> +                       if (pthread_create(&p->pthread, NULL, load_cache_entries_thread, p))
> +                               die("unable to create load_cache_entries_thread");
> +
> +                       /* exit the loop when we've created the last thread */
> +                       if (++thread == nr_threads)
> +                               break;

I still think it's better to have an extension to avoid looping
through like this. How much time does this "for (i = 0; ; i++)" loop
cost? The first thread can't start until you've scanned to the second
block, when you have zillion of entries and about 4 cores, that could
be significant delay. Unless you break smaller blocks and have one
thread handles multiple blocks, but then you pay the cost for
synchronization. Other threads may overlap a bit, but starting all
threads at the same time would benefit more. You also can't start
loading the extensions until you've scanned through all this.

> +               }
> +
> +               ondisk = (struct ondisk_cache_entry *)((char *)mmap + src_offset);
> +
> +               /* On-disk flags are just 16 bits */
> +               flags = get_be16(&ondisk->flags);
> +
> +               if (flags & CE_EXTENDED) {
> +                       struct ondisk_cache_entry_extended *ondisk2;
> +                       ondisk2 = (struct ondisk_cache_entry_extended *)ondisk;
> +                       name = ondisk2->name;
> +               } else
> +                       name = ondisk->name;
> +
> +               if (!previous_name) {
> +                       size_t len;
> +
> +                       /* v3 and earlier */
> +                       len = flags & CE_NAMEMASK;
> +                       if (len == CE_NAMEMASK)
> +                               len = strlen(name);
> +                       src_offset += (flags & CE_EXTENDED) ?
> +                               ondisk_cache_entry_extended_size(len) :
> +                               ondisk_cache_entry_size(len);
> +               } else
> +                       src_offset += (name - ((char *)ondisk)) + expand_name_field(previous_name, name);
> +       }
> +
> +       for (i = 0; i < nr_threads; i++) {
> +               struct load_cache_entries_thread_data *p = data + i;
> +               if (pthread_join(p->pthread, NULL))
> +                       die("unable to join load_cache_entries_thread");

_()

> +               mem_pool_combine(istate->ce_mem_pool, p->ce_mem_pool);
> +               strbuf_release(&p->previous_name_buf);
> +               consumed += p->consumed;
> +       }
> +
> +       free(data);
> +       strbuf_release(&previous_name_buf);
> +
> +       return consumed;
> +}
> +
> +#endif
> +
-- 
Duy

  parent reply index

Thread overview: 199+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2018-08-23 15:41 [PATCH v1] " Ben Peart
2018-08-23 17:31 ` Stefan Beller
2018-08-23 19:44   ` Ben Peart
2018-08-24 18:40   ` Duy Nguyen
2018-08-28 14:53     ` Ben Peart
2018-08-23 18:06 ` Junio C Hamano
2018-08-23 20:33   ` Ben Peart
2018-08-24 15:37     ` Duy Nguyen
2018-08-24 15:57       ` Duy Nguyen
2018-08-24 17:28         ` Ben Peart
2018-08-25  6:44         ` [PATCH] read-cache.c: optimize reading index format v4 Nguyễn Thái Ngọc Duy
2018-08-27 19:36           ` Junio C Hamano
2018-08-28 19:25             ` Duy Nguyen
2018-08-28 23:54               ` Ben Peart
2018-08-29 17:14               ` Junio C Hamano
2018-09-04 16:08             ` Duy Nguyen
2018-09-02 13:19           ` [PATCH v2 0/1] " Nguyễn Thái Ngọc Duy
2018-09-02 13:19             ` [PATCH v2 1/1] read-cache.c: " Nguyễn Thái Ngọc Duy
2018-09-04 18:58               ` Junio C Hamano
2018-09-04 19:31               ` Junio C Hamano
2018-08-24 18:20       ` [PATCH v1] read-cache: speed up index load through parallelization Duy Nguyen
2018-08-24 18:40         ` Ben Peart
2018-08-24 19:00           ` Duy Nguyen
2018-08-24 19:57             ` Ben Peart
2018-08-29 15:25 ` [PATCH v2 0/3] " Ben Peart
2018-08-29 15:25   ` [PATCH v2 1/3] " Ben Peart
2018-08-29 17:14     ` Junio C Hamano
2018-08-29 21:35       ` Ben Peart
2018-09-03 19:16     ` Duy Nguyen [this message]
2018-08-29 15:25   ` [PATCH v2 2/3] read-cache: load cache extensions on worker thread Ben Peart
2018-08-29 17:12     ` Junio C Hamano
2018-08-29 21:42       ` Ben Peart
2018-08-29 22:19         ` Junio C Hamano
2018-09-03 19:21     ` Duy Nguyen
2018-09-03 19:27       ` Duy Nguyen
2018-08-29 15:25   ` [PATCH v2 3/3] read-cache: micro-optimize expand_name_field() to speed up V4 index parsing Ben Peart
2018-09-06 21:03 ` [PATCH v3 0/4] read-cache: speed up index load through parallelization Ben Peart
2018-09-06 21:03   ` [PATCH v3 1/4] read-cache: optimize expand_name_field() to speed up V4 index parsing Ben Peart
2018-09-06 21:03   ` [PATCH v3 2/4] eoie: add End of Index Entry (EOIE) extension Ben Peart
2018-09-07 17:55     ` Junio C Hamano
2018-09-07 20:23       ` Ben Peart
2018-09-08  6:29         ` Martin Ågren
2018-09-08 14:03           ` Ben Peart
2018-09-08 17:08             ` Martin Ågren
2018-09-06 21:03   ` [PATCH v3 3/4] read-cache: load cache extensions on a worker thread Ben Peart
2018-09-07 21:10     ` Junio C Hamano
2018-09-08 14:56       ` Ben Peart
2018-09-06 21:03   ` [PATCH v3 4/4] read-cache: speed up index load through parallelization Ben Peart
2018-09-07  4:16     ` Torsten Bögershausen
2018-09-07 13:43       ` Ben Peart
2018-09-07 17:21   ` [PATCH v3 0/4] " Junio C Hamano
2018-09-07 18:31     ` Ben Peart
2018-09-08 13:18     ` Duy Nguyen
2018-09-11 23:26 ` [PATCH v4 0/5] " Ben Peart
2018-09-11 23:26   ` [PATCH v4 1/5] eoie: add End of Index Entry (EOIE) extension Ben Peart
2018-09-11 23:26   ` [PATCH v4 2/5] read-cache: load cache extensions on a worker thread Ben Peart
2018-09-11 23:26   ` [PATCH v4 3/5] read-cache: speed up index load through parallelization Ben Peart
2018-09-11 23:26   ` [PATCH v4 4/5] read-cache.c: optimize reading index format v4 Ben Peart
2018-09-11 23:26   ` [PATCH v4 5/5] read-cache: clean up casting and byte decoding Ben Peart
2018-09-12 14:34   ` [PATCH v4 0/5] read-cache: speed up index load through parallelization Ben Peart
2018-09-12 16:18 ` [PATCH v5 " Ben Peart
2018-09-12 16:18   ` [PATCH v5 1/5] eoie: add End of Index Entry (EOIE) extension Ben Peart
2018-09-13 22:44     ` Junio C Hamano
2018-09-15 10:02     ` Duy Nguyen
2018-09-17 14:54       ` Ben Peart
2018-09-17 16:05         ` Duy Nguyen
2018-09-17 17:31           ` Junio C Hamano
2018-09-17 17:38             ` Duy Nguyen
2018-09-17 19:08               ` Junio C Hamano
2018-09-12 16:18   ` [PATCH v5 2/5] read-cache: load cache extensions on a worker thread Ben Peart
2018-09-15 10:22     ` Duy Nguyen
2018-09-15 10:24       ` Duy Nguyen
2018-09-17 16:38         ` Ben Peart
2018-09-15 16:23       ` Duy Nguyen
2018-09-17 17:19         ` Junio C Hamano
2018-09-17 16:26       ` Ben Peart
2018-09-17 16:45         ` Duy Nguyen
2018-09-17 21:32       ` Junio C Hamano
2018-09-12 16:18   ` [PATCH v5 3/5] read-cache: load cache entries on worker threads Ben Peart
2018-09-15 10:31     ` Duy Nguyen
2018-09-17 17:25       ` Ben Peart
2018-09-15 11:07     ` Duy Nguyen
2018-09-15 11:09       ` Duy Nguyen
2018-09-17 18:52         ` Ben Peart
2018-09-15 11:29     ` Duy Nguyen
2018-09-12 16:18   ` [PATCH v5 4/5] read-cache.c: optimize reading index format v4 Ben Peart
2018-09-12 16:18   ` [PATCH v5 5/5] read-cache: clean up casting and byte decoding Ben Peart
2018-09-26 19:54 ` [PATCH v6 0/7] speed up index load through parallelization Ben Peart
2018-09-26 19:54   ` [PATCH v6 1/7] read-cache.c: optimize reading index format v4 Ben Peart
2018-09-26 19:54   ` [PATCH v6 2/7] read-cache: clean up casting and byte decoding Ben Peart
2018-09-26 19:54   ` [PATCH v6 3/7] eoie: add End of Index Entry (EOIE) extension Ben Peart
2018-09-28  0:19     ` SZEDER Gábor
2018-09-28 18:38       ` Ben Peart
2018-09-29  0:51     ` SZEDER Gábor
2018-09-29  5:45     ` Duy Nguyen
2018-09-29 18:24       ` Junio C Hamano
2018-09-26 19:54   ` [PATCH v6 4/7] config: add new index.threads config setting Ben Peart
2018-09-28  0:26     ` SZEDER Gábor
2018-09-28 13:39       ` Ben Peart
2018-09-28 17:07         ` Junio C Hamano
2018-09-28 19:41           ` Ben Peart
2018-09-28 20:30             ` Ramsay Jones
2018-09-28 22:15               ` Junio C Hamano
2018-10-01 13:17                 ` Ben Peart
2018-10-01 15:06                   ` SZEDER Gábor
2018-09-26 19:54   ` [PATCH v6 5/7] read-cache: load cache extensions on a worker thread Ben Peart
2018-09-26 19:54   ` [PATCH v6 6/7] ieot: add Index Entry Offset Table (IEOT) extension Ben Peart
2018-09-26 19:54   ` [PATCH v6 7/7] read-cache: load cache entries on worker threads Ben Peart
2018-09-26 22:06   ` [PATCH v6 0/7] speed up index load through parallelization Junio C Hamano
2018-09-27 17:13   ` Duy Nguyen
2018-10-01 13:45 ` [PATCH v7 " Ben Peart
2018-10-01 13:45   ` [PATCH v7 1/7] read-cache.c: optimize reading index format v4 Ben Peart
2018-10-01 13:45   ` [PATCH v7 2/7] read-cache: clean up casting and byte decoding Ben Peart
2018-10-01 15:10     ` Duy Nguyen
2018-10-01 13:45   ` [PATCH v7 3/7] eoie: add End of Index Entry (EOIE) extension Ben Peart
2018-10-01 15:17     ` SZEDER Gábor
2018-10-02 14:34       ` Ben Peart
2018-10-01 15:30     ` Duy Nguyen
2018-10-02 15:13       ` Ben Peart
2018-10-01 13:45   ` [PATCH v7 4/7] config: add new index.threads config setting Ben Peart
2018-10-01 13:45   ` [PATCH v7 5/7] read-cache: load cache extensions on a worker thread Ben Peart
2018-10-01 15:50     ` Duy Nguyen
2018-10-02 15:00       ` Ben Peart
2018-10-01 13:45   ` [PATCH v7 6/7] ieot: add Index Entry Offset Table (IEOT) extension Ben Peart
2018-10-01 16:27     ` Duy Nguyen
2018-10-02 16:34       ` Ben Peart
2018-10-02 17:02         ` Duy Nguyen
2018-10-01 13:45   ` [PATCH v7 7/7] read-cache: load cache entries on worker threads Ben Peart
2018-10-01 17:09     ` Duy Nguyen
2018-10-02 19:09       ` Ben Peart
2018-10-10 15:59 ` [PATCH v8 0/7] speed up index load through parallelization Ben Peart
2018-10-10 15:59   ` [PATCH v8 1/7] read-cache.c: optimize reading index format v4 Ben Peart
2018-10-10 15:59   ` [PATCH v8 2/7] read-cache: clean up casting and byte decoding Ben Peart
2018-10-10 15:59   ` [PATCH v8 3/7] eoie: add End of Index Entry (EOIE) extension Ben Peart
2018-10-10 15:59   ` [PATCH v8 4/7] config: add new index.threads config setting Ben Peart
2018-10-10 15:59   ` [PATCH v8 5/7] read-cache: load cache extensions on a worker thread Ben Peart
2018-10-10 15:59   ` [PATCH v8 6/7] ieot: add Index Entry Offset Table (IEOT) extension Ben Peart
2018-10-10 15:59   ` [PATCH v8 7/7] read-cache: load cache entries on worker threads Ben Peart
2018-10-19 16:11     ` Jeff King
2018-10-22  2:14       ` Junio C Hamano
2018-10-22 14:40         ` Ben Peart
2018-10-12  3:18   ` [PATCH v8 0/7] speed up index load through parallelization Junio C Hamano
2018-10-14 12:28   ` Duy Nguyen
2018-10-15 17:33     ` Ben Peart
2018-11-13  0:38   ` [PATCH 0/3] Avoid confusing messages from new index extensions (Re: [PATCH v8 0/7] speed up index load through parallelization) Jonathan Nieder
2018-11-13  0:39     ` [PATCH 1/3] eoie: default to not writing EOIE section Jonathan Nieder
2018-11-13  1:05       ` Junio C Hamano
2018-11-13 15:14         ` Ben Peart
2018-11-13 18:25           ` Jonathan Nieder
2018-11-14  1:36             ` Junio C Hamano
2018-11-15  0:19               ` Jonathan Nieder
2018-11-13  0:39     ` [PATCH 2/3] ieot: default to not writing IEOT section Jonathan Nieder
2018-11-13  0:58       ` Jonathan Tan
2018-11-13  1:09       ` Junio C Hamano
2018-11-13  1:12         ` Jonathan Nieder
2018-11-13 15:37           ` Duy Nguyen
2018-11-13 18:09         ` Jonathan Nieder
2018-11-13 15:22       ` Ben Peart
2018-11-13 18:18         ` Jonathan Nieder
2018-11-13 19:15           ` Ben Peart
2018-11-13 21:08             ` Jonathan Nieder
2018-11-14 18:09               ` Ben Peart
2018-11-15  0:05                 ` Jonathan Nieder
2018-11-14  3:05         ` Junio C Hamano
2018-11-20  6:09           ` [PATCH v2 0/5] Avoid confusing messages from new index extensions Jonathan Nieder
2018-11-20  6:11             ` [PATCH 1/5] eoie: default to not writing EOIE section Jonathan Nieder
2018-11-20 13:06               ` Ben Peart
2018-11-20 13:21                 ` SZEDER Gábor
2018-11-21 16:46                   ` Jeff King
2018-11-22  0:47                     ` Junio C Hamano
2018-11-20 15:01               ` Ben Peart
2018-11-20  6:12             ` [PATCH 2/5] ieot: default to not writing IEOT section Jonathan Nieder
2018-11-20 13:07               ` Ben Peart
2018-11-26 19:59                 ` Stefan Beller
2018-11-26 21:47                   ` Ben Peart
2018-11-26 22:02                     ` Stefan Beller
2018-11-27  0:50                   ` Junio C Hamano
2018-11-20  6:12             ` [PATCH 3/5] index: do not warn about unrecognized extensions Jonathan Nieder
2018-11-20  6:14             ` [PATCH 4/5] index: make index.threads=true enable ieot and eoie Jonathan Nieder
2018-11-20 13:24               ` Ben Peart
2018-11-20  6:15             ` [PATCH 5/5] index: offer advice for unknown index extensions Jonathan Nieder
2018-11-20  9:26               ` Ævar Arnfjörð Bjarmason
2018-11-20 13:30                 ` Ben Peart
2018-11-21  0:22                   ` Junio C Hamano
2018-11-21  0:39                     ` Jonathan Nieder
2018-11-21  0:44                       ` Jonathan Nieder
2018-11-21  5:01                       ` Junio C Hamano
2018-11-21  5:04                         ` Jonathan Nieder
2018-11-21  5:15                         ` Junio C Hamano
2018-11-21  5:31                           ` Junio C Hamano
2018-11-21  1:03                     ` Jonathan Nieder
2018-11-21  4:23                       ` Junio C Hamano
2018-11-21  4:57                         ` Jonathan Nieder
2018-11-21  9:30                       ` Ævar Arnfjörð Bjarmason
2018-11-13  0:40     ` [PATCH 3/3] index: do not warn about unrecognized extensions Jonathan Nieder
2018-11-13  1:10       ` Junio C Hamano
2018-11-13 15:25       ` Ben Peart
2018-11-14  3:24       ` Junio C Hamano
2018-11-14 18:19         ` Ben Peart

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

  List information: http://vger.kernel.org/majordomo-info.html

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=CACsJy8BK5a1DgY4o5Xxrdjyg1QYxW-kCPNmAXkdudCD78O7dTg@mail.gmail.com \
    --to=pclouds@gmail.com \
    --cc=Ben.Peart@microsoft.com \
    --cc=git@vger.kernel.org \
    --cc=gitster@pobox.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link

git@vger.kernel.org list mirror (unofficial, one of many)

Archives are clonable:
	git clone --mirror https://public-inbox.org/git
	git clone --mirror http://ou63pmih66umazou.onion/git
	git clone --mirror http://czquwvybam4bgbro.onion/git
	git clone --mirror http://hjrcffqmbrq6wope.onion/git

Newsgroups are available over NNTP:
	nntp://news.public-inbox.org/inbox.comp.version-control.git
	nntp://ou63pmih66umazou.onion/inbox.comp.version-control.git
	nntp://czquwvybam4bgbro.onion/inbox.comp.version-control.git
	nntp://hjrcffqmbrq6wope.onion/inbox.comp.version-control.git
	nntp://news.gmane.org/gmane.comp.version-control.git

 note: .onion URLs require Tor: https://www.torproject.org/

AGPL code for this site: git clone https://public-inbox.org/ public-inbox