Diffstat (limited to 'lib/PublicInbox/V2Writable.pm')
-rw-r--r--  lib/PublicInbox/V2Writable.pm | 1434
1 file changed, 692 insertions(+), 742 deletions(-)
diff --git a/lib/PublicInbox/V2Writable.pm b/lib/PublicInbox/V2Writable.pm
index fc2f33f9..43f37f60 100644
--- a/lib/PublicInbox/V2Writable.pm
+++ b/lib/PublicInbox/V2Writable.pm
@@ -1,90 +1,44 @@
-# Copyright (C) 2018-2020 all contributors <meta@public-inbox.org>
+# Copyright (C) all contributors <meta@public-inbox.org>
 # License: AGPL-3.0+ <https://www.gnu.org/licenses/agpl-3.0.txt>
 
 # This interface wraps and mimics PublicInbox::Import
 # Used to write to V2 inboxes (see L<public-inbox-v2-format(5)>).
 package PublicInbox::V2Writable;
 use strict;
-use warnings;
-use base qw(PublicInbox::Lock);
-use 5.010_001;
+use v5.10.1;
+use parent qw(PublicInbox::Lock PublicInbox::IPC);
 use PublicInbox::SearchIdxShard;
-use PublicInbox::MIME;
+use PublicInbox::IPC qw(nproc_shards);
+use PublicInbox::Eml;
 use PublicInbox::Git;
 use PublicInbox::Import;
+use PublicInbox::MultiGit;
 use PublicInbox::MID qw(mids references);
-use PublicInbox::ContentId qw(content_id content_digest);
-use PublicInbox::Inbox;
+use PublicInbox::ContentHash qw(content_hash content_digest git_sha);
+use PublicInbox::InboxWritable;
 use PublicInbox::OverIdx;
 use PublicInbox::Msgmap;
-use PublicInbox::Spawn qw(spawn popen_rd);
-use PublicInbox::SearchIdx;
+use PublicInbox::Spawn qw(spawn popen_rd run_die);
+use PublicInbox::Search;
+use PublicInbox::SearchIdx qw(log2stack is_ancestor check_size is_bad_blob);
 use IO::Handle; # ->autoflush
-use File::Temp qw(tempfile);
+use POSIX ();
 
+my $OID = qr/[a-f0-9]{40,}/;
 # an estimate of the post-packed size to the raw uncompressed size
-my $PACKING_FACTOR = 0.4;
-
-# SATA storage lags behind what CPUs are capable of, so relying on
-# nproc(1) can be misleading and having extra Xapian shards is a
-# waste of FDs and space.  It can also lead to excessive IO latency
-# and slow things down.  Users on NVME or other fast storage can
-# use the NPROC env or switches in our script/public-inbox-* programs
-# to increase Xapian shards
-our $NPROC_MAX_DEFAULT = 4;
-
-sub detect_nproc () {
-        for my $nproc (qw(nproc gnproc)) { # GNU coreutils nproc
-                `$nproc 2>/dev/null` =~ /^(\d+)$/ and return $1;
-        }
-
-        # getconf(1) is POSIX, but *NPROCESSORS* vars are not
-        for (qw(_NPROCESSORS_ONLN NPROCESSORS_ONLN)) {
-                `getconf $_ 2>/dev/null` =~ /^(\d+)$/ and return $1;
-        }
-
-        # should we bother with `sysctl hw.ncpu`?  Those only give
-        # us total processor count, not online processor count.
-        undef
-}
-
-sub nproc_shards ($) {
-        my ($creat_opt) = @_;
-        my $n = $creat_opt->{nproc} if ref($creat_opt) eq 'HASH';
-        $n //= $ENV{NPROC};
-        if (!$n) {
-                # assume 2 cores if not detectable or zero
-                state $NPROC_DETECTED = detect_nproc() || 2;
-                $n = $NPROC_DETECTED;
-                $n = $NPROC_MAX_DEFAULT if $n > $NPROC_MAX_DEFAULT;
-        }
-
-        # subtract for the main process and git-fast-import
-        $n -= 1;
-        $n < 1 ? 1 : $n;
-}
+our $PACKING_FACTOR = 0.4;
 
 sub count_shards ($) {
         my ($self) = @_;
-        my $n = 0;
-        my $xpfx = $self->{xpfx};
-
         # always load existing shards in case core count changes:
         # Also, shard count may change while -watch is running
-        # due to "xcpdb --reshard"
-        if (-d $xpfx) {
-                require PublicInbox::Search;
-                PublicInbox::Search::load_xapian();
-                my $XapianDatabase = $PublicInbox::Search::X{Database};
-                foreach my $shard (<$xpfx/*>) {
-                        -d $shard && $shard =~ m!/[0-9]+\z! or next;
-                        eval {
-                                $XapianDatabase->new($shard)->close;
-                                $n++;
-                        };
-                }
+        if (my $ibx = $self->{ibx}) {
+                my $srch = $ibx->search or return 0;
+                delete $ibx->{search};
+                $srch->{nshard} // 0
+        } else { # ExtSearchIdx
+                $self->{nshard} = scalar($self->xdb_shards_flat);
         }
-        $n;
 }
 
 sub new {
@@ -94,75 +48,72 @@ sub new {
         $v2ibx = PublicInbox::InboxWritable->new($v2ibx);
         my $dir = $v2ibx->assert_usable_dir;
         unless (-d $dir) {
-                if ($creat) {
-                        require File::Path;
-                        File::Path::mkpath($dir);
-                } else {
-                        die "$dir does not exist\n";
-                }
+                die "$dir does not exist\n" if !$creat;
+                require File::Path;
+                File::Path::mkpath($dir);
         }
-        $v2ibx->umask_prepare;
-
         my $xpfx = "$dir/xap" . PublicInbox::Search::SCHEMA_VERSION;
         my $self = {
-                -inbox => $v2ibx,
+                ibx => $v2ibx,
+                mg => PublicInbox::MultiGit->new($dir, 'all.git', 'git'),
                 im => undef, #  PublicInbox::Import
                 parallel => 1,
                 transact_bytes => 0,
+                total_bytes => 0,
                 current_info => '',
                 xpfx => $xpfx,
-                over => PublicInbox::OverIdx->new("$xpfx/over.sqlite3", 1),
+                oidx => PublicInbox::OverIdx->new("$xpfx/over.sqlite3"),
                 lock_path => "$dir/inbox.lock",
                 # limit each git repo (epoch) to 1GB or so
                 rotate_bytes => int((1024 * 1024 * 1024) / $PACKING_FACTOR),
-                last_commit => [], # git repo -> commit
+                last_commit => [], # git epoch -> commit
         };
+        $self->{oidx}->{-no_fsync} = 1 if $v2ibx->{-no_fsync};
         $self->{shards} = count_shards($self) || nproc_shards($creat);
         bless $self, $class;
 }
 
 # public (for now?)
 sub init_inbox {
-        my ($self, $shards, $skip_epoch) = @_;
+        my ($self, $shards, $skip_epoch, $skip_artnum) = @_;
         if (defined $shards) {
                 $self->{parallel} = 0 if $shards == 0;
                 $self->{shards} = $shards if $shards > 0;
         }
         $self->idx_init;
-        my $epoch_max = -1;
-        git_dir_latest($self, \$epoch_max);
-        if (defined $skip_epoch && $epoch_max == -1) {
-                $epoch_max = $skip_epoch;
-        }
-        $self->git_init($epoch_max >= 0 ? $epoch_max : 0);
+        $self->{mm}->skip_artnum($skip_artnum) if defined $skip_artnum;
+        my $max = $self->{ibx}->max_git_epoch;
+        $max = $skip_epoch if (defined($skip_epoch) && !defined($max));
+        $self->{mg}->add_epoch($max // 0);
         $self->done;
 }
 
-# returns undef on duplicate or spam
-# mimics Import::add and wraps it for v2
-sub add {
-        my ($self, $mime, $check_cb) = @_;
-        $self->{-inbox}->with_umask(sub {
-                _add($self, $mime, $check_cb)
-        });
+sub idx_shard ($$) {
+        my ($self, $num) = @_;
+        $self->{idx_shards}->[$num % scalar(@{$self->{idx_shards}})];
 }
 
 # indexes a message, returns true if checkpointing is needed
-sub do_idx ($$$$$$$) {
-        my ($self, $msgref, $mime, $len, $num, $oid, $mid0) = @_;
-        $self->{over}->add_overview($mime, $len, $num, $oid, $mid0);
-        my $idx = idx_shard($self, $num % $self->{shards});
-        $idx->index_raw($len, $msgref, $num, $oid, $mid0, $mime);
-        my $n = $self->{transact_bytes} += $len;
-        $n >= (PublicInbox::SearchIdx::BATCH_BYTES * $self->{shards});
+sub do_idx ($$$) {
+        my ($self, $eml, $smsg) = @_;
+        $self->{oidx}->add_overview($eml, $smsg);
+        if ($self->{-need_xapian}) {
+                my $idx = idx_shard($self, $smsg->{num});
+                $idx->index_eml($eml, $smsg);
+        }
+        my $n = $self->{transact_bytes} += $smsg->{bytes};
+        $n >= $self->{batch_bytes};
 }
 
-sub _add {
+# returns undef on duplicate or spam
+# mimics Import::add and wraps it for v2
+sub add {
         my ($self, $mime, $check_cb) = @_;
+        my $restore = $self->{ibx}->with_umask;
 
         # spam check:
         if ($check_cb) {
-                $mime = $check_cb->($mime) or return;
+                $mime = $check_cb->($mime, $self->{ibx}) or return;
         }
 
         # All pipes (> $^F) known to Perl 5.6+ have FD_CLOEXEC set,
@@ -174,23 +125,22 @@ sub _add {
 
         my ($num, $mid0) = v2_num_for($self, $mime);
         defined $num or return; # duplicate
-        defined $mid0 or die "BUG: $mid0 undefined\n";
+        defined $mid0 or die "BUG: \$mid0 undefined\n";
         my $im = $self->importer;
-        my $cmt = $im->add($mime);
+        my $smsg = bless { mid => $mid0, num => $num }, 'PublicInbox::Smsg';
+        my $cmt = $im->add($mime, undef, $smsg); # sets $smsg->{ds|ts|blob}
         $cmt = $im->get_mark($cmt);
         $self->{last_commit}->[$self->{epoch_max}] = $cmt;
 
-        my ($oid, $len, $msgref) = @{$im->{last_object}};
-        if (do_idx($self, $msgref, $mime, $len, $num, $oid, $mid0)) {
+        if (do_idx($self, $mime, $smsg)) {
                 $self->checkpoint;
         }
-
         $cmt;
 }
 
 sub v2_num_for {
         my ($self, $mime) = @_;
-        my $mids = mids($mime->header_obj);
+        my $mids = mids($mime);
         if (@$mids) {
                 my $mid = $mids->[0];
                 my $num = $self->{mm}->mid_insert($mid);
@@ -201,21 +151,20 @@ sub v2_num_for {
                 # crap, Message-ID is already known, hope somebody just resent:
                 foreach my $m (@$mids) {
                         # read-only lookup now safe to do after above barrier
-                        my $existing = lookup_content($self, $mime, $m);
                         # easy, don't store duplicates
                         # note: do not add more diagnostic info here since
                         # it gets noisy on public-inbox-watch restarts
-                        return () if $existing;
+                        return () if content_exists($self, $mime, $m);
                 }
 
                 # AltId may pre-populate article numbers (e.g. X-Mail-Count
                 # or NNTP article number), use that article number if it's
                 # not in Over.
-                my $altid = $self->{-inbox}->{altid};
+                my $altid = $self->{ibx}->{altid};
                 if ($altid && grep(/:file=msgmap\.sqlite3\z/, @$altid)) {
                         my $num = $self->{mm}->num_for($mid);
 
-                        if (defined $num && !$self->{over}->get_art($num)) {
+                        if (defined $num && !$self->{oidx}->get_art($num)) {
                                 return ($num, $mid);
                         }
                 }
@@ -238,88 +187,79 @@ sub v2_num_for {
 }
 
 sub v2_num_for_harder {
-        my ($self, $mime) = @_;
+        my ($self, $eml) = @_;
 
-        my $hdr = $mime->header_obj;
-        my $dig = content_digest($mime);
-        my $mid0 = PublicInbox::Import::digest2mid($dig, $hdr);
+        my $dig = content_digest($eml);
+        my $mid0 = PublicInbox::Import::digest2mid($dig, $eml);
         my $num = $self->{mm}->mid_insert($mid0);
         unless (defined $num) {
                 # it's hard to spoof the last Received: header
-                my @recvd = $hdr->header_raw('Received');
+                my @recvd = $eml->header_raw('Received');
                 $dig->add("Received: $_") foreach (@recvd);
-                $mid0 = PublicInbox::Import::digest2mid($dig, $hdr);
+                $mid0 = PublicInbox::Import::digest2mid($dig, $eml);
                 $num = $self->{mm}->mid_insert($mid0);
 
                 # fall back to a random Message-ID and give up determinism:
                 until (defined($num)) {
                         $dig->add(rand);
-                        $mid0 = PublicInbox::Import::digest2mid($dig, $hdr);
+                        $mid0 = PublicInbox::Import::digest2mid($dig, $eml);
                         warn "using random Message-ID <$mid0> as fallback\n";
                         $num = $self->{mm}->mid_insert($mid0);
                 }
         }
-        PublicInbox::Import::append_mid($hdr, $mid0);
+        PublicInbox::Import::append_mid($eml, $mid0);
         ($num, $mid0);
 }
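
The fallback above derives a deterministic Message-ID from the content digest, mixes in the Received: headers if that collides, and only then gives up determinism with rand. A toy sketch of the same escalation using Digest::SHA directly (digest2mid_toy and the %taken table are illustrative stand-ins, not public-inbox APIs):

    use strict;
    use v5.10.1;
    use Digest::SHA;

    my %taken = ('1234567890abcdef1234@z' => 1); # pretend msgmap contents

    sub digest2mid_toy { # shape only; real code uses PublicInbox::Import::digest2mid
            my ($dig) = @_;
            substr($dig->clone->hexdigest, 0, 20) . '@z';
    }

    my $dig = Digest::SHA->new(256);
    $dig->add("From: a\@example.com\nSubject: test\n\nbody\n");
    my $mid0 = digest2mid_toy($dig);
    $dig->add('Received: from example by localhost') if $taken{$mid0};
    $mid0 = digest2mid_toy($dig);
    until (!$taken{$mid0}) { # last resort: give up determinism
            $dig->add(rand);
            $mid0 = digest2mid_toy($dig);
    }
    say "fallback Message-ID: <$mid0>";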
 
-sub idx_shard {
-        my ($self, $shard_i) = @_;
-        $self->{idx_shards}->[$shard_i];
+sub _idx_init { # with_umask callback
+        my ($self, $opt) = @_;
+        $self->lock_acquire unless $opt && $opt->{-skip_lock};
+        $self->{oidx}->create;
+
+        # xcpdb can change shard count while -watch is idle
+        my $nshards = count_shards($self);
+        $self->{shards} = $nshards if $nshards && $nshards != $self->{shards};
+        $self->{batch_bytes} = $opt->{batch_size} //
+                                $PublicInbox::SearchIdx::BATCH_BYTES;
+
+        # need to create all shards before initializing msgmap FD
+        # idx_shards must be visible to all forked processes
+        my $max = $self->{shards} - 1;
+        my $idx = $self->{idx_shards} = [];
+        push @$idx, PublicInbox::SearchIdxShard->new($self, $_) for (0..$max);
+        $self->{-need_xapian} = $idx->[0]->need_xapian;
+
+        # SearchIdxShard may do their own flushing, so don't scale
+        # until after forking
+        $self->{batch_bytes} *= $self->{shards} if $self->{parallel};
+
+        my $ibx = $self->{ibx} or return; # ExtIdxSearch
+
+        # Now that all subprocesses are up, we can open the FDs
+        # for SQLite:
+        my $mm = $self->{mm} = PublicInbox::Msgmap->new_file($ibx, 1);
+        $mm->{dbh}->begin_work;
+}
+
+sub parallel_init ($$) {
+        my ($self, $indexlevel) = @_;
+        $self->{parallel} = 0 if ($indexlevel // 'full') eq 'basic';
 }
 
 # idempotent
 sub idx_init {
         my ($self, $opt) = @_;
         return if $self->{idx_shards};
-        my $ibx = $self->{-inbox};
+        my $ibx = $self->{ibx};
 
         # do not leak read-only FDs to child processes, we only have these
         # FDs for duplicate detection so they should not be
         # frequently activated.
-        delete $ibx->{$_} foreach (qw(git mm search));
+        delete @$ibx{qw(mm search)};
+        $ibx->git->cleanup;
 
-        my $indexlevel = $ibx->{indexlevel};
-        if ($indexlevel && $indexlevel eq 'basic') {
-                $self->{parallel} = 0;
-        }
-
-        if ($self->{parallel}) {
-                pipe(my ($r, $w)) or die "pipe failed: $!";
-                # pipe for barrier notifications doesn't need to be big,
-                # 1031: F_SETPIPE_SZ
-                fcntl($w, 1031, 4096) if $^O eq 'linux';
-                $self->{bnote} = [ $r, $w ];
-                $w->autoflush(1);
-        }
-
-        my $over = $self->{over};
-        $ibx->umask_prepare;
-        $ibx->with_umask(sub {
-                $self->lock_acquire unless ($opt && $opt->{-skip_lock});
-                $over->create;
-
-                # xcpdb can change shard count while -watch is idle
-                my $nshards = count_shards($self);
-                if ($nshards && $nshards != $self->{shards}) {
-                        $self->{shards} = $nshards;
-                }
-
-                # need to create all shards before initializing msgmap FD
-                my $max = $self->{shards} - 1;
-
-                # idx_shards must be visible to all forked processes
-                my $idx = $self->{idx_shards} = [];
-                for my $i (0..$max) {
-                        push @$idx, PublicInbox::SearchIdxShard->new($self, $i);
-                }
-
-                # Now that all subprocesses are up, we can open the FDs
-                # for SQLite:
-                my $mm = $self->{mm} = PublicInbox::Msgmap->new_file(
-                        "$self->{-inbox}->{inboxdir}/msgmap.sqlite3", 1);
-                $mm->{dbh}->begin_work;
-        });
+        parallel_init($self, $ibx->{indexlevel});
+        $ibx->with_umask(\&_idx_init, $self, $opt);
 }
 
 # returns an array mapping [ epoch => latest_commit ]
@@ -328,14 +268,10 @@ sub idx_init {
 sub _replace_oids ($$$) {
         my ($self, $mime, $replace_map) = @_;
         $self->done;
-        my $pfx = "$self->{-inbox}->{inboxdir}/git";
+        my $ibx = $self->{ibx};
+        my $pfx = "$ibx->{inboxdir}/git";
         my $rewrites = []; # epoch => commit
-        my $max = $self->{epoch_max};
-
-        unless (defined($max)) {
-                defined(my $latest = git_dir_latest($self, \$max)) or return;
-                $self->{epoch_max} = $max;
-        }
+        my $max = $self->{epoch_max} //= $ibx->max_git_epoch // return;
 
         foreach my $i (0..$max) {
                 my $git_dir = "$pfx/$i.git";
@@ -348,62 +284,62 @@ sub _replace_oids ($$$) {
         $rewrites;
 }
 
-sub content_ids ($) {
+sub content_hashes ($) {
         my ($mime) = @_;
-        my @cids = ( content_id($mime) );
+        my @chashes = ( content_hash($mime) );
 
+        # We still support Email::MIME, here, and
         # Email::MIME->as_string doesn't always round-trip, so we may
-        # use a second content_id
-        my $rt = content_id(PublicInbox::MIME->new(\($mime->as_string)));
-        push @cids, $rt if $cids[0] ne $rt;
-        \@cids;
+        # use a second content_hash
+        my $rt = content_hash(PublicInbox::Eml->new(\($mime->as_string)));
+        push @chashes, $rt if $chashes[0] ne $rt;
+        \@chashes;
 }
 
 sub content_matches ($$) {
-        my ($cids, $existing) = @_;
-        my $cid = content_id($existing);
-        foreach (@$cids) {
-                return 1 if $_ eq $cid
+        my ($chashes, $existing) = @_;
+        my $chash = content_hash($existing);
+        foreach (@$chashes) {
+                return 1 if $_ eq $chash
         }
         0
 }
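
content_hashes() guards against the Email::MIME round-trip problem by hashing both the message as given and its re-serialized form; content_matches() then accepts a stored message if it matches either hash. A short sketch of that check using the real helpers, assuming public-inbox's lib/ directory is on @INC (the sample message is made up):

    use strict;
    use v5.10.1;
    use PublicInbox::Eml;
    use PublicInbox::ContentHash qw(content_hash);

    my $raw = <<'EOM';
    From: a@example.com
    Message-ID: <dup@example.com>
    Subject: test

    hello
    EOM
    my $incoming = PublicInbox::Eml->new($raw);

    # what content_hashes() builds: the original hash plus the round-trip hash
    my @chashes = ( content_hash($incoming) );
    my $rt = content_hash(PublicInbox::Eml->new(\($incoming->as_string)));
    push @chashes, $rt if $chashes[0] ne $rt;

    # what content_matches() answers for a previously stored copy:
    my $existing = PublicInbox::Eml->new($raw);
    say((grep { $_ eq content_hash($existing) } @chashes) ? 'duplicate' : 'new');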
 
 # used for removing or replacing (purging)
 sub rewrite_internal ($$;$$$) {
-        my ($self, $old_mime, $cmt_msg, $new_mime, $sref) = @_;
+        my ($self, $old_eml, $cmt_msg, $new_eml, $sref) = @_;
         $self->idx_init;
         my ($im, $need_reindex, $replace_map);
         if ($sref) {
                 $replace_map = {}; # oid => sref
-                $need_reindex = [] if $new_mime;
+                $need_reindex = [] if $new_eml;
         } else {
                 $im = $self->importer;
         }
-        my $over = $self->{over};
-        my $cids = content_ids($old_mime);
-        my $removed;
-        my $mids = mids($old_mime->header_obj);
+        my $oidx = $self->{oidx};
+        my $chashes = content_hashes($old_eml);
+        my $removed = [];
+        my $mids = mids($old_eml);
 
         # We avoid introducing new blobs into git since the raw content
         # can be slightly different, so we do not need the user-supplied
-        # message now that we have the mids and content_id
-        $old_mime = undef;
+        # message now that we have the mids and content_hash
+        $old_eml = undef;
         my $mark;
 
         foreach my $mid (@$mids) {
-                my %gone; # num => [ smsg, raw ]
+                my %gone; # num => [ smsg, $mime, raw ]
                 my ($id, $prev);
-                while (my $smsg = $over->next_by_mid($mid, \$id, \$prev)) {
+                while (my $smsg = $oidx->next_by_mid($mid, \$id, \$prev)) {
                         my $msg = get_blob($self, $smsg);
                         if (!defined($msg)) {
                                 warn "broken smsg for $mid\n";
                                 next; # continue
                         }
                         my $orig = $$msg;
-                        my $cur = PublicInbox::MIME->new($msg);
-                        if (content_matches($cids, $cur)) {
-                                $smsg->{mime} = $cur;
-                                $gone{$smsg->{num}} = [ $smsg, \$orig ];
+                        my $cur = PublicInbox::Eml->new($msg);
+                        if (content_matches($chashes, $cur)) {
+                                $gone{$smsg->{num}} = [ $smsg, $cur, \$orig ];
                         }
                 }
                 my $n = scalar keys %gone;
@@ -413,15 +349,16 @@ sub rewrite_internal ($$;$$$) {
                                 join(',', sort keys %gone), "\n";
                 }
                 foreach my $num (keys %gone) {
-                        my ($smsg, $orig) = @{$gone{$num}};
+                        my ($smsg, $mime, $orig) = @{$gone{$num}};
                         # $removed should only be set once assuming
                         # no bugs in our deduplication code:
-                        $removed = $smsg;
+                        $removed = [ undef, $mime, $smsg ];
                         my $oid = $smsg->{blob};
                         if ($replace_map) {
                                 $replace_map->{$oid} = $sref;
                         } else {
                                 ($mark, undef) = $im->remove($orig, $cmt_msg);
+                                $removed->[0] = $mark;
                         }
                         $orig = undef;
                         if ($need_reindex) { # ->replace
@@ -429,7 +366,7 @@ sub rewrite_internal ($$;$$$) {
                         } else { # ->purge or ->remove
                                 $self->{mm}->num_delete($num);
                         }
-                        unindex_oid_remote($self, $oid, $mid);
+                        unindex_oid_aux($self, $oid, $mid);
                 }
         }
 
@@ -438,26 +375,26 @@ sub rewrite_internal ($$;$$$) {
                 $self->{last_commit}->[$self->{epoch_max}] = $cmt;
         }
         if ($replace_map && scalar keys %$replace_map) {
-                my $rewrites = _replace_oids($self, $new_mime, $replace_map);
+                my $rewrites = _replace_oids($self, $new_eml, $replace_map);
                 return { rewrites => $rewrites, need_reindex => $need_reindex };
         }
-        $removed;
+        defined($mark) ? $removed : undef;
 }
 
-# public
+# public (see PublicInbox::Import->remove), but note the 3rd element
+# (retval[2]) is not part of the stable API shared with Import->remove
 sub remove {
-        my ($self, $mime, $cmt_msg) = @_;
-        $self->{-inbox}->with_umask(sub {
-                rewrite_internal($self, $mime, $cmt_msg);
-        });
+        my ($self, $eml, $cmt_msg) = @_;
+        my $restore = $self->{ibx}->with_umask;
+        my $r = rewrite_internal($self, $eml, $cmt_msg);
+        defined($r) && defined($r->[0]) ? @$r: undef;
 }
 
 sub _replace ($$;$$) {
-        my ($self, $old_mime, $new_mime, $sref) = @_;
-        my $rewritten = $self->{-inbox}->with_umask(sub {
-                rewrite_internal($self, $old_mime, undef, $new_mime, $sref);
-        }) or return;
-
+        my ($self, $old_eml, $new_eml, $sref) = @_;
+        my $restore = $self->{ibx}->with_umask;
+        my $rewritten = rewrite_internal($self, $old_eml, undef,
+                                        $new_eml, $sref) or return;
         my $rewrites = $rewritten->{rewrites};
         # ->done is called if there are rewrites since we gc+prune from git
         $self->idx_init if @$rewrites;
@@ -476,25 +413,6 @@ sub purge {
         $rewritten->{rewrites}
 }
 
-# returns the git object_id of $fh, does not write the object to FS
-sub git_hash_raw ($$) {
-        my ($self, $raw) = @_;
-        # grab the expected OID we have to reindex:
-        open my $tmp_fh, '+>', undef or die "failed to open tmp: $!";
-        $tmp_fh->autoflush(1);
-        print $tmp_fh $$raw or die "print \$tmp_fh: $!";
-        sysseek($tmp_fh, 0, 0) or die "seek failed: $!";
-
-        my $git_dir = $self->{-inbox}->git->{git_dir};
-        my $cmd = ['git', "--git-dir=$git_dir", qw(hash-object --stdin)];
-        my $r = popen_rd($cmd, undef, { 0 => $tmp_fh });
-        local $/ = "\n";
-        chomp(my $oid = <$r>);
-        close $r or die "git hash-object failed: $?";
-        $oid =~ /\A[a-f0-9]{40}\z/ or die "OID not expected: $oid";
-        $oid;
-}
-
 sub _check_mids_match ($$$) {
         my ($old_list, $new_list, $hdrs) = @_;
         my %old_mids = map { $_ => 1 } @$old_list;
@@ -513,9 +431,7 @@ sub _check_mids_match ($$$) {
 # Message-IDs are pretty complex and rethreading hasn't been fully
 # implemented, yet.
 sub check_mids_match ($$) {
-        my ($old_mime, $new_mime) = @_;
-        my $old = $old_mime->header_obj;
-        my $new = $new_mime->header_obj;
+        my ($old, $new) = @_;
         _check_mids_match(mids($old), mids($new), 'Message-ID(s)');
         _check_mids_match(references($old), references($new),
                         'References/In-Reply-To');
@@ -531,7 +447,7 @@ sub replace ($$$) {
         PublicInbox::Import::drop_unwanted_headers($new_mime);
 
         my $raw = $new_mime->as_string;
-        my $expect_oid = git_hash_raw($self, \$raw);
+        my $expect_oid = git_sha(1, \$raw)->hexdigest;
         my $rewritten = _replace($self, $old_mime, $new_mime, \$raw) or return;
         my $need_reindex = $rewritten->{need_reindex};
 
@@ -549,17 +465,23 @@ W: $list
         }
 
         # make sure we really got the OID:
-        my ($oid, $type, $len) = $self->{-inbox}->git->check($expect_oid);
-        $oid eq $expect_oid or die "BUG: $expect_oid not found after replace";
+        my ($blob, $type, $bytes) = $self->git->check($expect_oid);
+        $blob eq $expect_oid or die "BUG: $expect_oid not found after replace";
 
         # don't leak FDs to Xapian:
-        $self->{-inbox}->git->cleanup;
+        $self->git->cleanup;
 
         # reindex modified messages:
         for my $smsg (@$need_reindex) {
-                my $num = $smsg->{num};
-                my $mid0 = $smsg->{mid};
-                do_idx($self, \$raw, $new_mime, $len, $num, $oid, $mid0);
+                my $new_smsg = bless {
+                        blob => $blob,
+                        num => $smsg->{num},
+                        mid => $smsg->{mid},
+                }, 'PublicInbox::Smsg';
+                my $sync = { autime => $smsg->{ds}, cotime => $smsg->{ts} };
+                $new_smsg->populate($new_mime, $sync);
+                $new_smsg->set_bytes($raw, $bytes);
+                do_idx($self, $new_mime, $new_smsg);
         }
         $rewritten->{rewrites};
 }
@@ -570,7 +492,7 @@ sub last_epoch_commit ($$;$) {
         $self->{mm}->last_commit_xap($v, $i, $cmt);
 }
 
-sub set_last_commits ($) {
+sub set_last_commits ($) { # this is NOT for ExtSearchIdx
         my ($self) = @_;
         defined(my $epoch_max = $self->{epoch_max}) or return;
         my $last_commit = $self->{last_commit};
@@ -581,66 +503,59 @@ sub set_last_commits ($) {
         }
 }
 
-sub barrier_init {
-        my ($self, $n) = @_;
-        $self->{bnote} or return;
-        --$n;
-        my $barrier = { map { $_ => 1 } (0..$n) };
-}
-
-sub barrier_wait {
-        my ($self, $barrier) = @_;
-        my $bnote = $self->{bnote} or return;
-        my $r = $bnote->[0];
-        while (scalar keys %$barrier) {
-                defined(my $l = $r->getline) or die "EOF on barrier_wait: $!";
-                $l =~ /\Abarrier (\d+)/ or die "bad line on barrier_wait: $l";
-                delete $barrier->{$1} or die "bad shard[$1] on barrier wait";
-        }
-}
-
 # public
 sub checkpoint ($;$) {
         my ($self, $wait) = @_;
 
-        if (my $im = $self->{im}) {
-                if ($wait) {
-                        $im->barrier;
-                } else {
-                        $im->checkpoint;
-                }
-        }
+        $self->{im}->barrier if $self->{im};
         my $shards = $self->{idx_shards};
         if ($shards) {
-                my $dbh = $self->{mm}->{dbh};
+                my $dbh = $self->{mm}->{dbh} if $self->{mm};
 
                 # SQLite msgmap data is second in importance
-                $dbh->commit;
+                $dbh->commit if $dbh;
+                eval { $dbh->do('PRAGMA optimize') };
 
                 # SQLite overview is third
-                $self->{over}->commit_lazy;
+                $self->{oidx}->commit_lazy;
 
                 # Now deal with Xapian
-                if ($wait) {
-                        my $barrier = $self->barrier_init(scalar @$shards);
 
-                        # each shard needs to issue a barrier command
-                        $_->remote_barrier for @$shards;
+                # start commit_txn_lazy asynchronously on all parallel shards
+                # (non-parallel waits here)
+                $_->ipc_do('commit_txn_lazy') for @$shards;
+
+                # transactions started on parallel shards,
+                # wait for them by issuing an echo command (echo can only
+                # run after commit_txn_lazy is done)
+                if ($wait && $self->{parallel}) {
+                        my $i = 0;
+                        for my $shard (@$shards) {
+                                my $echo = $shard->ipc_do('echo', $i);
+                                $echo == $i or die <<"";
+shard[$i] bad echo:$echo != $i waiting for txn commit
+
+                                ++$i;
+                        }
+                }
 
-                        # wait for each Xapian shard
-                        $self->barrier_wait($barrier);
-                } else {
-                        $_->remote_commit for @$shards;
+                my $midx = $self->{midx}; # misc index
+                if ($midx) {
+                        $midx->commit_txn;
+                        $PublicInbox::Search::X{CLOEXEC_UNSET} and
+                                $self->git->cleanup;
                 }
 
                 # last_commit is special, don't commit these until
-                # remote shards are done:
-                $dbh->begin_work;
+                # Xapian shards are done:
+                $dbh->begin_work if $dbh;
                 set_last_commits($self);
-                $dbh->commit;
-
-                $dbh->begin_work;
+                if ($dbh) {
+                        $dbh->commit;
+                        $dbh->begin_work;
+                }
         }
+        $self->{total_bytes} += $self->{transact_bytes};
         $self->{transact_bytes} = 0;
 }
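
The echo round-trip above is the synchronization trick: each shard worker processes IPC requests in order, so a reply to 'echo' proves the commit_txn_lazy queued before it has already completed. A self-contained toy of the same pattern over a socketpair (the wire protocol here is made up; public-inbox uses PublicInbox::IPC for this):

    use strict;
    use v5.10.1;
    use IO::Handle; # ->autoflush
    use Socket qw(AF_UNIX SOCK_STREAM PF_UNSPEC);

    socketpair(my $w, my $shard_end, AF_UNIX, SOCK_STREAM, PF_UNSPEC)
            or die "socketpair: $!";
    my $pid = fork // die "fork: $!";
    if ($pid == 0) { # worker process ("shard")
            close $w;
            $shard_end->autoflush(1);
            while (defined(my $req = <$shard_end>)) {
                    chomp $req;
                    if ($req =~ /\Aecho (\d+)\z/) { print $shard_end "$1\n" }
                    else { sleep 1 } # pretend to be a slow commit_txn_lazy
            }
            exit 0;
    }
    close $shard_end;
    $w->autoflush(1);
    print $w "commit_txn_lazy\n"; # queued, runs asynchronously in the worker
    print $w "echo 0\n"; # barrier: the reply only arrives after the commit ran
    chomp(my $echo = <$w>);
    $echo == 0 or die "shard[0] bad echo: $echo";
    say 'shard[0] committed';
    close $w;
    waitpid($pid, 0);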
 
@@ -649,100 +564,46 @@ sub checkpoint ($;$) {
 # public
 sub barrier { checkpoint($_[0], 1) };
 
+# true if locked and active
+sub active { !!$_[0]->{im} }
+
 # public
 sub done {
         my ($self) = @_;
-        my $im = delete $self->{im};
-        $im->done if $im; # PublicInbox::Import::done
-        checkpoint($self);
-        my $mm = delete $self->{mm};
-        $mm->{dbh}->commit if $mm;
-        my $shards = delete $self->{idx_shards};
-        if ($shards) {
-                $_->remote_close for @$shards;
+        my $err = '';
+        if (my $im = delete $self->{im}) {
+                eval { $im->done }; # PublicInbox::Import::done
+                $err .= "import done: $@\n" if $@;
         }
-        $self->{over}->disconnect;
-        delete $self->{bnote};
-        $self->{transact_bytes} = 0;
-        $self->lock_release if $shards;
-        $self->{-inbox}->git->cleanup;
-}
-
-sub fill_alternates ($$) {
-        my ($self, $epoch) = @_;
-
-        my $pfx = "$self->{-inbox}->{inboxdir}/git";
-        my $all = "$self->{-inbox}->{inboxdir}/all.git";
-
-        unless (-d $all) {
-                PublicInbox::Import::init_bare($all);
-        }
-        my $info_dir = "$all/objects/info";
-        my $alt = "$info_dir/alternates";
-        my (%alt, $new);
-        my $mode = 0644;
-        if (-e $alt) {
-                open(my $fh, '<', $alt) or die "open < $alt: $!\n";
-                $mode = (stat($fh))[2] & 07777;
-
-                # we assign a sort score to every alternate and favor
-                # the newest (highest numbered) one when we
-                my $score;
-                my $other = 0; # in case admin adds non-epoch repos
-                %alt = map {;
-                        if (m!\A\Q../../\E([0-9]+)\.git/objects\z!) {
-                                $score = $1 + 0;
-                        } else {
-                                $score = --$other;
-                        }
-                        $_ => $score;
-                } split(/\n+/, do { local $/; <$fh> });
+        if (!$err) {
+                eval { checkpoint($self) };
+                $err .= "checkpoint: $@\n" if $@;
         }
-
-        foreach my $i (0..$epoch) {
-                my $dir = "../../git/$i.git/objects";
-                if (!exists($alt{$dir}) && -d "$pfx/$i.git") {
-                        $alt{$dir} = $i;
-                        $new = 1;
-                }
+        if (my $mm = delete $self->{mm}) {
+                my $m = $err ? 'rollback' : 'commit';
+                eval { $mm->{dbh}->$m };
+                $err .= "msgmap $m: $@\n" if $@;
+        }
+        if ($self->{oidx} && $self->{oidx}->{dbh} && $err) {
+                eval { $self->{oidx}->rollback_lazy };
+                $err .= "overview rollback: $@\n" if $@;
         }
-        return unless $new;
-
-        my ($fh, $tmp) = tempfile('alt-XXXXXXXX', DIR => $info_dir);
-        print $fh join("\n", sort { $alt{$b} <=> $alt{$a} } keys %alt), "\n"
-                or die "print $tmp: $!\n";
-        chmod($mode, $fh) or die "fchmod $tmp: $!\n";
-        close $fh or die "close $tmp $!\n";
-        rename($tmp, $alt) or die "rename $tmp => $alt: $!\n";
-}
-
-sub git_init {
-        my ($self, $epoch) = @_;
-        my $git_dir = "$self->{-inbox}->{inboxdir}/git/$epoch.git";
-        my @cmd = (qw(git init --bare -q), $git_dir);
-        PublicInbox::Import::run_die(\@cmd);
-        @cmd = (qw/git config/, "--file=$git_dir/config",
-                        'include.path', '../../all.git/config');
-        PublicInbox::Import::run_die(\@cmd);
-        fill_alternates($self, $epoch);
-        $git_dir
-}
 
-sub git_dir_latest {
-        my ($self, $max) = @_;
-        $$max = -1;
-        my $pfx = "$self->{-inbox}->{inboxdir}/git";
-        return unless -d $pfx;
-        my $latest;
-        opendir my $dh, $pfx or die "opendir $pfx: $!\n";
-        while (defined(my $git_dir = readdir($dh))) {
-                $git_dir =~ m!\A([0-9]+)\.git\z! or next;
-                if ($1 > $$max) {
-                        $$max = $1;
-                        $latest = "$pfx/$git_dir";
+        my $shards = delete $self->{idx_shards};
+        if ($shards) {
+                for (@$shards) {
+                        eval { $_->shard_close };
+                        $err .= "shard close: $@\n" if $@;
                 }
         }
-        $latest;
+        eval { $self->{oidx}->dbh_close };
+        $err .= "over close: $@\n" if $@;
+        delete $self->{midx};
+        my $nbytes = $self->{total_bytes};
+        $self->{total_bytes} = 0;
+        $self->lock_release(!!$nbytes) if $shards;
+        $self->git->cleanup;
+        die $err if $err;
 }
 
 sub importer {
@@ -756,14 +617,14 @@ sub importer {
                         $im->done;
                         $im = undef;
                         $self->checkpoint;
-                        my $git_dir = $self->git_init(++$self->{epoch_max});
-                        my $git = PublicInbox::Git->new($git_dir);
+                        my $dir = $self->{mg}->add_epoch(++$self->{epoch_max});
+                        my $git = PublicInbox::Git->new($dir);
                         return $self->import_init($git, 0);
                 }
         }
         my $epoch = 0;
         my $max;
-        my $latest = git_dir_latest($self, \$max);
+        my $latest = $self->{ibx}->git_dir_latest(\$max);
         if (defined $latest) {
                 my $git = PublicInbox::Git->new($latest);
                 my $packed_bytes = $git->packed_bytes;
@@ -777,39 +638,20 @@ sub importer {
                 }
         }
         $self->{epoch_max} = $epoch;
-        $latest = $self->git_init($epoch);
-        $self->import_init(PublicInbox::Git->new($latest), 0);
+        my $dir = $self->{mg}->add_epoch($epoch);
+        $self->import_init(PublicInbox::Git->new($dir), 0);
 }
 
 sub import_init {
         my ($self, $git, $packed_bytes, $tmp) = @_;
-        my $im = PublicInbox::Import->new($git, undef, undef, $self->{-inbox});
+        my $im = PublicInbox::Import->new($git, undef, undef, $self->{ibx});
         $im->{bytes_added} = int($packed_bytes / $PACKING_FACTOR);
-        $im->{want_object_info} = 1;
         $im->{lock_path} = undef;
         $im->{path_type} = 'v2';
         $self->{im} = $im unless $tmp;
         $im;
 }
 
-# XXX experimental
-sub diff ($$$) {
-        my ($mid, $cur, $new) = @_;
-
-        my ($ah, $an) = tempfile('email-cur-XXXXXXXX', TMPDIR => 1);
-        print $ah $cur->as_string or die "print: $!";
-        close $ah or die "close: $!";
-        my ($bh, $bn) = tempfile('email-new-XXXXXXXX', TMPDIR => 1);
-        PublicInbox::Import::drop_unwanted_headers($new);
-        print $bh $new->as_string or die "print: $!";
-        close $bh or die "close: $!";
-        my $cmd = [ qw(diff -u), $an, $bn ];
-        print STDERR "# MID conflict <$mid>\n";
-        my $pid = spawn($cmd, undef, { 1 => 2 });
-        waitpid($pid, 0) == $pid or die "diff did not finish";
-        unlink($an, $bn);
-}
-
 sub get_blob ($$) {
         my ($self, $smsg) = @_;
         if (my $im = $self->{im}) {
@@ -817,273 +659,199 @@ sub get_blob ($$) {
                 return $msg if $msg;
         }
         # older message, should be in alternates
-        my $ibx = $self->{-inbox};
-        $ibx->msg_by_smsg($smsg);
+        $self->{ibx}->msg_by_smsg($smsg);
 }
 
-sub lookup_content ($$$) {
+sub content_exists ($$$) {
         my ($self, $mime, $mid) = @_;
-        my $over = $self->{over};
-        my $cids = content_ids($mime);
+        my $oidx = $self->{oidx};
+        my $chashes = content_hashes($mime);
         my ($id, $prev);
-        while (my $smsg = $over->next_by_mid($mid, \$id, \$prev)) {
+        while (my $smsg = $oidx->next_by_mid($mid, \$id, \$prev)) {
                 my $msg = get_blob($self, $smsg);
                 if (!defined($msg)) {
                         warn "broken smsg for $mid\n";
                         next;
                 }
-                my $cur = PublicInbox::MIME->new($msg);
-                if (content_matches($cids, $cur)) {
-                        $smsg->{mime} = $cur;
-                        return $smsg;
-                }
-
-
-                # XXX DEBUG_DIFF is experimental and may be removed
-                diff($mid, $cur, $mime) if $ENV{DEBUG_DIFF};
+                my $cur = PublicInbox::Eml->new($msg);
+                return 1 if content_matches($chashes, $cur);
         }
         undef;
 }
 
 sub atfork_child {
         my ($self) = @_;
-        my $fh = delete $self->{reindex_pipe};
-        close $fh if $fh;
-        if (my $shards = $self->{idx_shards}) {
-                $_->atfork_child foreach @$shards;
+        if (my $older_siblings = $self->{idx_shards}) {
+                $_->ipc_sibling_atfork_child for @$older_siblings;
         }
         if (my $im = $self->{im}) {
                 $im->atfork_child;
         }
-        die "unexpected mm" if $self->{mm};
-        close $self->{bnote}->[0] or die "close bnote[0]: $!\n";
-        $self->{bnote}->[1];
+        die "BUG: unexpected mm" if $self->{mm};
 }
 
-sub mark_deleted ($$$$) {
-        my ($self, $sync, $git, $oid) = @_;
-        my $msgref = $git->cat_file($oid);
-        my $mime = PublicInbox::MIME->new($$msgref);
-        my $mids = mids($mime->header_obj);
-        my $cid = content_id($mime);
-        foreach my $mid (@$mids) {
-                $sync->{D}->{"$mid\0$cid"} = $oid;
+sub reindex_checkpoint ($$) {
+        my ($self, $sync) = @_;
+
+        $self->git->async_wait_all;
+        $self->update_last_commit($sync);
+        ${$sync->{need_checkpoint}} = 0;
+        my $mm_tmp = $sync->{mm_tmp};
+        $mm_tmp->atfork_prepare if $mm_tmp;
+        die 'BUG: {im} during reindex' if $self->{im};
+        if ($self->{ibx_map} && !$sync->{checkpoint_unlocks}) {
+                checkpoint($self, 1); # no need to release lock on pure index
+        } else {
+                $self->done; # release lock
         }
-}
 
-sub reindex_checkpoint ($$$) {
-        my ($self, $sync, $git) = @_;
-
-        $git->cleanup;
-        $sync->{mm_tmp}->atfork_prepare;
-        $self->done; # release lock
-
-        if (my $pr = $sync->{-opt}->{-progress}) {
-                my ($bn) = (split('/', $git->{git_dir}))[-1];
-                $pr->("$bn ".sprintf($sync->{-regen_fmt}, $sync->{nr}));
+        if (my $pr = $sync->{-regen_fmt} ? $sync->{-opt}->{-progress} : undef) {
+                $pr->(sprintf($sync->{-regen_fmt}, ${$sync->{nr}}));
         }
 
         # allow -watch or -mda to write...
-        $self->idx_init; # reacquire lock
-        $sync->{mm_tmp}->atfork_parent;
-}
-
-# only for a few odd messages with multiple Message-IDs
-sub reindex_oid_m ($$$$;$) {
-        my ($self, $sync, $git, $oid, $regen_num) = @_;
-        $self->{current_info} = "multi_mid $oid";
-        my ($num, $mid0, $len);
-        my $msgref = $git->cat_file($oid, \$len);
-        my $mime = PublicInbox::MIME->new($$msgref);
-        my $mids = mids($mime->header_obj);
-        my $cid = content_id($mime);
-        die "BUG: reindex_oid_m called for <=1 mids" if scalar(@$mids) <= 1;
-
-        for my $mid (reverse @$mids) {
-                delete($sync->{D}->{"$mid\0$cid"}) and
-                        die "BUG: reindex_oid should handle <$mid> delete";
-        }
-        my $over = $self->{over};
-        for my $mid (reverse @$mids) {
-                ($num, $mid0) = $over->num_mid0_for_oid($oid, $mid);
-                next unless defined $num;
-                if (defined($regen_num) && $regen_num != $num) {
-                        die "BUG: regen(#$regen_num) != over(#$num)";
-                }
-        }
-        unless (defined($num)) {
-                for my $mid (reverse @$mids) {
-                        # is this a number we got before?
-                        my $n = $sync->{mm_tmp}->num_for($mid);
-                        next unless defined $n;
-                        next if defined($regen_num) && $regen_num != $n;
-                        ($num, $mid0) = ($n, $mid);
-                        last;
-                }
-        }
-        if (defined($num)) {
-                $sync->{mm_tmp}->num_delete($num);
-        } elsif (defined $regen_num) {
-                $num = $regen_num;
-                for my $mid (reverse @$mids) {
-                        $self->{mm}->mid_set($num, $mid) == 1 or next;
-                        $mid0 = $mid;
-                        last;
-                }
-                unless (defined $mid0) {
-                        warn "E: cannot regen #$num\n";
-                        return;
-                }
-        } else { # fixup bugs in old mirrors on reindex
-                for my $mid (reverse @$mids) {
-                        $num = $self->{mm}->mid_insert($mid);
-                        next unless defined $num;
-                        $mid0 = $mid;
-                        last;
-                }
-                if (defined $mid0) {
-                        if ($sync->{reindex}) {
-                                warn "reindex added #$num <$mid0>\n";
-                        }
-                } else {
-                        warn "E: cannot find article #\n";
-                        return;
-                }
-        }
-        $sync->{nr}++;
-        if (do_idx($self, $msgref, $mime, $len, $num, $oid, $mid0)) {
-                reindex_checkpoint($self, $sync, $git);
+        $self->idx_init($sync->{-opt}); # reacquire lock
+        if (my $intvl = $sync->{check_intvl}) { # eidx
+                $sync->{next_check} = PublicInbox::DS::now() + $intvl;
         }
+        $mm_tmp->atfork_parent if $mm_tmp;
 }
 
-sub check_unindexed ($$$) {
-        my ($self, $num, $mid0) = @_;
-        my $unindexed = $self->{unindexed} // {};
-        my $n = delete($unindexed->{$mid0});
-        defined $n or return;
-        if ($n != $num) {
-                die "BUG: unindexed $n != $num <$mid0>\n";
-        } else {
-                $self->{mm}->mid_set($num, $mid0);
-        }
+sub index_finalize ($$) {
+        my ($arg, $index) = @_;
+        ++$arg->{self}->{nidx};
+        if (defined(my $cur = $arg->{cur_cmt})) {
+                ${$arg->{latest_cmt}} = $cur;
+        } elsif ($index) {
+                die 'BUG: {cur_cmt} missing';
+        } # else { unindexing @leftovers doesn't set {cur_cmt}
 }
 
-# reuse Msgmap to store num => oid mapping (rather than num => mid)
-sub multi_mid_q_new () {
-        my ($fh, $fn) = tempfile('multi_mid-XXXXXXX', EXLOCK => 0, TMPDIR => 1);
-        my $multi_mid = PublicInbox::Msgmap->new_file($fn, 1);
-        $multi_mid->{dbh}->do('PRAGMA synchronous = OFF');
-        # for Msgmap->DESTROY:
-        $multi_mid->{tmp_name} = $fn;
-        $multi_mid->{pid} = $$;
-        close $fh or die "failed to close $fn: $!";
-        $multi_mid
-}
-
-sub multi_mid_q_push ($$) {
-        my ($sync, $oid) = @_;
-        my $multi_mid = $sync->{multi_mid} //= multi_mid_q_new();
-        if ($sync->{reindex}) { # no regen on reindex
-                $multi_mid->mid_insert($oid);
-        } else {
-                my $num = $sync->{regen}--;
-                die "BUG: ran out of article numbers" if $num <= 0;
-                $multi_mid->mid_set($num, $oid);
-        }
-}
-
-sub reindex_oid ($$$$) {
-        my ($self, $sync, $git, $oid) = @_;
-        my ($num, $mid0, $len);
-        my $msgref = $git->cat_file($oid, \$len);
-        return if $len == 0; # purged
-        my $mime = PublicInbox::MIME->new($$msgref);
-        my $mids = mids($mime->header_obj);
-        my $cid = content_id($mime);
+sub index_oid { # cat_async callback
+        my ($bref, $oid, $type, $size, $arg) = @_;
+        is_bad_blob($oid, $type, $size, $arg->{oid}) and
+                return index_finalize($arg, 1); # size == 0 purged returns here
+        my $self = $arg->{self};
+        local $self->{current_info} = "$self->{current_info} $oid";
+        my ($num, $mid0);
+        my $eml = PublicInbox::Eml->new($$bref);
+        my $mids = mids($eml);
+        my $chash = content_hash($eml);
 
         if (scalar(@$mids) == 0) {
                 warn "E: $oid has no Message-ID, skipping\n";
                 return;
-        } elsif (scalar(@$mids) == 1) {
-                my $mid = $mids->[0];
+        }
 
-                # was the file previously marked as deleted?, skip if so
-                if (delete($sync->{D}->{"$mid\0$cid"})) {
-                        if (!$sync->{reindex}) {
-                                $num = $sync->{regen}--;
-                                $self->{mm}->num_highwater($num);
+        # {unindexed} is unlikely
+        if (my $unindexed = $arg->{unindexed}) {
+                my $oidbin = pack('H*', $oid);
+                my $u = $unindexed->{$oidbin};
+                ($num, $mid0) = splice(@$u, 0, 2) if $u;
+                if (defined $num) {
+                        $self->{mm}->mid_set($num, $mid0);
+                        if (scalar(@$u) == 0) { # done with current OID
+                                delete $unindexed->{$oidbin};
+                                delete($arg->{unindexed}) if !keys(%$unindexed);
                         }
-                        return;
                 }
+        }
+        my $oidx = $self->{oidx};
+        if (!defined($num)) { # reuse if reindexing (or duplicates)
+                for my $mid (@$mids) {
+                        ($num, $mid0) = $oidx->num_mid0_for_oid($oid, $mid);
+                        last if defined $num;
+                }
+        }
+        $mid0 //= do { # is this a number we got before?
+                $num = $arg->{mm_tmp}->num_for($mids->[0]);
 
-                # is this a number we got before?
-                $num = $sync->{mm_tmp}->num_for($mid);
-                if (defined $num) {
-                        $mid0 = $mid;
-                        check_unindexed($self, $num, $mid0);
-                } else {
-                        $num = $sync->{regen}--;
-                        die "BUG: ran out of article numbers" if $num <= 0;
-                        if ($self->{mm}->mid_set($num, $mid) != 1) {
-                                warn "E: unable to assign $num => <$mid>\n";
-                                return;
-                        }
-                        $mid0 = $mid;
+                # don't clobber existing if Message-ID is reused:
+                if (my $x = defined($num) ? $oidx->get_art($num) : undef) {
+                        undef($num) if $x->{blob} ne $oid;
                 }
-        } else { # multiple MIDs are a weird case:
-                my $del = 0;
-                for (@$mids) {
-                        $del += delete($sync->{D}->{"$_\0$cid"}) // 0;
+                defined($num) ? $mids->[0] : undef;
+        };
+        if (!defined($num)) {
+                for (my $i = $#$mids; $i >= 1; $i--) {
+                        $num = $arg->{mm_tmp}->num_for($mids->[$i]);
+                        if (defined($num)) {
+                                $mid0 = $mids->[$i];
+                                last;
+                        }
                 }
-                if ($del) {
-                        unindex_oid_remote($self, $oid, $_) for @$mids;
-                        # do not delete from {mm_tmp}, since another
-                        # single-MID message may use it.
-                } else { # handle them at the end:
-                        multi_mid_q_push($sync, $oid);
+        }
+        if (defined($num)) {
+                $arg->{mm_tmp}->num_delete($num);
+        } else { # never seen
+                $num = $self->{mm}->mid_insert($mids->[0]);
+                if (defined($num)) {
+                        $mid0 = $mids->[0];
+                } else { # rare, try the rest of them, backwards
+                        for (my $i = $#$mids; $i >= 1; $i--) {
+                                $num = $self->{mm}->mid_insert($mids->[$i]);
+                                if (defined($num)) {
+                                        $mid0 = $mids->[$i];
+                                        last;
+                                }
+                        }
                 }
-                return;
         }
-        $sync->{mm_tmp}->mid_delete($mid0) or
-                die "failed to delete <$mid0> for article #$num\n";
-        $sync->{nr}++;
-        if (do_idx($self, $msgref, $mime, $len, $num, $oid, $mid0)) {
-                reindex_checkpoint($self, $sync, $git);
+        if (!defined($num)) {
+                warn "E: $oid <", join('> <', @$mids), "> is a duplicate\n";
+                return;
         }
+        ++${$arg->{nr}};
+        my $smsg = bless {
+                num => $num,
+                blob => $oid,
+                mid => $mid0,
+        }, 'PublicInbox::Smsg';
+        $smsg->populate($eml, $arg);
+        $smsg->set_bytes($$bref, $size);
+        if (do_idx($self, $eml, $smsg)) {
+                ${$arg->{need_checkpoint}} = 1;
+        }
+        index_finalize($arg, 1);
 }
 
 # only update last_commit for $i on reindex iff newer than current
-sub update_last_commit ($$$$) {
-        my ($self, $git, $i, $cmt) = @_;
-        my $last = last_epoch_commit($self, $i);
-        if (defined $last && is_ancestor($git, $last, $cmt)) {
-                my @cmd = (qw(rev-list --count), "$last..$cmt");
-                chomp(my $n = $git->qx(@cmd));
+sub update_last_commit {
+        my ($self, $sync, $stk) = @_;
+        my $unit = $sync->{unit} // return;
+        my $latest_cmt = $stk ? $stk->{latest_cmt} : ${$sync->{latest_cmt}};
+        defined($latest_cmt) or return;
+        my $last = last_epoch_commit($self, $unit->{epoch});
+        if (defined $last && is_ancestor($self->git, $last, $latest_cmt)) {
+                my @cmd = (qw(rev-list --count), "$last..$latest_cmt");
+                chomp(my $n = $unit->{git}->qx(@cmd));
                 return if $n ne '' && $n == 0;
         }
-        last_epoch_commit($self, $i, $cmt);
-}
+        # don't rewind if --{since,until,before,after} are in use
+        return if (defined($last) &&
+                        grep(defined, @{$sync->{-opt}}{qw(since until)}) &&
+                        is_ancestor($self->git, $latest_cmt, $last));
 
-sub git_dir_n ($$) { "$_[0]->{-inbox}->{inboxdir}/git/$_[1].git" }
+        last_epoch_commit($self, $unit->{epoch}, $latest_cmt);
+}
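
update_last_commit() only advances the stored head when the old head is an ancestor of the new one, i.e. the epoch moved forward by a fast-forward rather than a rewrite. A standalone sketch of that test with plain git commands, in the spirit of is_ancestor() (the refs and --git-dir value are placeholders):

    use strict;
    use v5.10.1;

    my $git_dir = shift // '.git'; # placeholder; any repository works
    my ($old, $new) = ('HEAD~1', 'HEAD'); # placeholders for last/latest commits

    # fast-forward iff $old is an ancestor of $new (merge-base exits 0)
    my $ff = system('git', "--git-dir=$git_dir", 'merge-base',
                    '--is-ancestor', $old, $new) == 0;
    if ($ff) {
            chomp(my $n = qx(git --git-dir=$git_dir rev-list --count $old..$new));
            say "fast-forward by $n commit(s): safe to advance last_commit";
    } else {
            say 'non-fast-forward: history was rewritten, reindex needed';
    }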
 
-sub last_commits ($$) {
-        my ($self, $epoch_max) = @_;
+sub last_commits {
+        my ($self, $sync) = @_;
         my $heads = [];
-        for (my $i = $epoch_max; $i >= 0; $i--) {
+        for (my $i = $sync->{epoch_max}; $i >= 0; $i--) {
                 $heads->[$i] = last_epoch_commit($self, $i);
         }
         $heads;
 }
 
-*is_ancestor = *PublicInbox::SearchIdx::is_ancestor;
-
 # returns a revision range for git-log(1)
-sub log_range ($$$$$) {
-        my ($self, $sync, $git, $i, $tip) = @_;
+sub log_range ($$$) {
+        my ($sync, $unit, $tip) = @_;
         my $opt = $sync->{-opt};
         my $pr = $opt->{-progress} if (($opt->{verbose} || 0) > 1);
+        my $i = $unit->{epoch};
         my $cur = $sync->{ranges}->[$i] or do {
-                $pr->("$i.git indexing all of $tip") if $pr;
+                $pr->("$i.git indexing all of $tip\n") if $pr;
                 return $tip; # all of it
         };
 
@@ -1095,7 +863,8 @@ sub log_range ($$$$$) {
 
         my $range = "$cur..$tip";
         $pr->("$i.git checking contiguity... ") if $pr;
-        if (is_ancestor($git, $cur, $tip)) { # common case
+        my $git = $unit->{git};
+        if (is_ancestor($sync->{self}->git, $cur, $tip)) { # common case
                 $pr->("OK\n") if $pr;
                 my $n = $git->qx(qw(rev-list --count), $range);
                 chomp($n);
@@ -1120,129 +889,196 @@ Rewritten history? (in $git->{git_dir})
                         warn "discarding history at $cur\n";
                 }
                 warn <<"";
-reindexing $git->{git_dir} starting at
-$range
-
-                $sync->{unindex_range}->{$i} = "$base..$cur";
+reindexing $git->{git_dir}
+starting at $range
+
+                # $cur^0 may no longer exist if pruned by git
+                if ($git->qx(qw(rev-parse -q --verify), "$cur^0")) {
+                        $unit->{unindex_range} = "$base..$cur";
+                } elsif ($base && $git->qx(qw(rev-parse -q --verify), $base)) {
+                        $unit->{unindex_range} = "$base..";
+                } else {
+                        warn "W: unable to unindex before $range\n";
+                }
         }
         $range;
 }
 
-sub sync_prepare ($$$) {
-        my ($self, $sync, $epoch_max) = @_;
+# overridden by ExtSearchIdx
+sub artnum_max { $_[0]->{mm}->num_highwater }
+
+sub sync_prepare ($$) {
+        my ($self, $sync) = @_;
+        $sync->{ranges} = sync_ranges($self, $sync);
         my $pr = $sync->{-opt}->{-progress};
         my $regen_max = 0;
-        my $head = $self->{-inbox}->{ref_head} || 'refs/heads/master';
-
-        # reindex stops at the current heads and we later rerun index_sync
-        # without {reindex}
-        my $reindex_heads = last_commits($self, $epoch_max) if $sync->{reindex};
-
-        for (my $i = $epoch_max; $i >= 0; $i--) {
-                die 'BUG: already indexing!' if $self->{reindex_pipe};
-                my $git_dir = git_dir_n($self, $i);
+        my $head = $sync->{ibx}->{ref_head} || 'HEAD';
+        my $pfx;
+        if ($pr) {
+                ($pfx) = ($sync->{ibx}->{inboxdir} =~ m!([^/]+)\z!g);
+                $pfx //= $sync->{ibx}->{inboxdir};
+        }
+
+        my $reindex_heads;
+        if ($self->{ibx_map}) {
+                # ExtSearchIdx won't index messages unless they're in
+                # over.sqlite3 for a given inbox, so don't read beyond
+                # what's in the per-inbox index.
+                $reindex_heads = [];
+                my $v = PublicInbox::Search::SCHEMA_VERSION;
+                my $mm = $sync->{ibx}->mm;
+                for my $i (0..$sync->{epoch_max}) {
+                        $reindex_heads->[$i] = $mm->last_commit_xap($v, $i);
+                }
+        } elsif ($sync->{reindex}) { # V2 inbox
+                # reindex stops at the current heads and we later
+                # rerun index_sync without {reindex}
+                $reindex_heads = $self->last_commits($sync);
+        }
+        if ($sync->{max_size} = $sync->{-opt}->{max_size}) {
+                $sync->{index_oid} = $self->can('index_oid');
+        }
+        my $git_pfx = "$sync->{ibx}->{inboxdir}/git";
+        for (my $i = $sync->{epoch_max}; $i >= 0; $i--) {
+                my $git_dir = "$git_pfx/$i.git";
                 -d $git_dir or next; # missing epochs are fine
                 my $git = PublicInbox::Git->new($git_dir);
+                my $unit = { git => $git, epoch => $i };
+                my $tip;
                 if ($reindex_heads) {
-                        $head = $reindex_heads->[$i] or next;
+                        $tip = $head = $reindex_heads->[$i] or next;
+                } else {
+                        $tip = $git->qx(qw(rev-parse -q --verify), $head);
+                        next if $?; # new repo
+                        chomp $tip;
                 }
-                chomp(my $tip = $git->qx(qw(rev-parse -q --verify), $head));
-
-                next if $?; # new repo
-                my $range = log_range($self, $sync, $git, $i, $tip) or next;
-                $sync->{ranges}->[$i] = $range;
-
+                my $range = log_range($sync, $unit, $tip) or next;
                 # can't use 'rev-list --count' if we use --diff-filter
-                $pr->("$i.git counting $range ... ") if $pr;
-                my $n = 0;
-                my $fh = $git->popen(qw(log --pretty=tformat:%H
-                                --no-notes --no-color --no-renames
-                                --diff-filter=AM), $range, '--', 'm');
-                ++$n while <$fh>;
-                close $fh or die "git log failed: \$?=$?";
-                $pr->("$n\n") if $pr;
-                $regen_max += $n;
+                $pr->("$pfx $i.git counting $range ... ") if $pr;
+                # Don't bump num_highwater on --reindex by using {D}.
+                # We intentionally do NOT use {D} in the non-reindex case
+                # because we want NNTP article number gaps from unindexed
+                # messages to show up in mirrors, too.
+                $sync->{D} //= $sync->{reindex} ? {} : undef; # OID_BIN => NR
+                my $stk = log2stack($sync, $git, $range);
+                return 0 if $sync->{quit};
+                my $nr = $stk ? $stk->num_records : 0;
+                $pr->("$nr\n") if $pr;
+                $unit->{stack} = $stk; # may be undef
+                unshift @{$sync->{todo}}, $unit;
+                $regen_max += $nr;
+        }
+        return 0 if $sync->{quit};
+
+        # XXX this should not happen unless somebody bypasses checks in
+        # our code and blindly injects "d" file history into git repos
+        if (my @leftovers = keys %{delete($sync->{D}) // {}}) {
+                warn('W: unindexing '.scalar(@leftovers)." leftovers\n");
+                local $self->{current_info} = 'leftover ';
+                my $unindex_oid = $self->can('unindex_oid');
+                for my $oid (@leftovers) {
+                        last if $sync->{quit};
+                        $oid = unpack('H*', $oid);
+                        my $req = { %$sync, oid => $oid };
+                        $self->git->cat_async($oid, $unindex_oid, $req);
+                }
+                $self->git->async_wait_all;
+        }
+        return 0 if $sync->{quit};
+        if (!$regen_max) {
+                $sync->{-regen_fmt} = "%u/?\n";
+                return 0;
         }
-
-        return 0 if (!$regen_max && !keys(%{$self->{unindex_range}}));
 
         # reindex should NOT see new commits anymore, if we do,
         # it's a problem and we need to notice it via die()
         my $pad = length($regen_max) + 1;
         $sync->{-regen_fmt} = "% ${pad}u/$regen_max\n";
-        $sync->{nr} = 0;
+        $sync->{nr} = \(my $nr = 0);
         return -1 if $sync->{reindex};
-        $regen_max + $self->{mm}->num_highwater() || 0;
+        $regen_max + $self->artnum_max || 0;
 }
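
A minimal sketch of the padded counter format sync_prepare() builds above: the
field width is derived from $regen_max so progress output stays column-aligned
(the numbers below are made up).

use strict;
use v5.10.1;

my $regen_max = 12345;
my $pad = length($regen_max) + 1;
my $fmt = "% ${pad}u/$regen_max\n"; # e.g. "% 6u/12345\n"
printf($fmt, $_) for (1, 99, 12345);
# prints:
#      1/12345
#     99/12345
#  12345/12345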
 
-sub unindex_oid_remote ($$$) {
+sub unindex_oid_aux ($$$) {
         my ($self, $oid, $mid) = @_;
-        $_->remote_remove($oid, $mid) foreach @{$self->{idx_shards}};
-        $self->{over}->remove_oid($oid, $mid);
+        my @removed = $self->{oidx}->remove_oid($oid, $mid);
+        return unless $self->{-need_xapian};
+        for my $num (@removed) {
+                idx_shard($self, $num)->ipc_do('xdb_remove', $num);
+        }
 }
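
unindex_oid_aux() removes rows from over.sqlite3 first and only then drops the
returned article numbers from each Xapian shard.  A rough sketch of that shape
under stated assumptions: DBD::SQLite with a made-up 'over' table; neither
remove_oid() below nor the schema is the real OverIdx API.

use strict;
use v5.10.1;
use DBI;

my $dbh = DBI->connect('dbi:SQLite:dbname=:memory:', '', '',
                        { RaiseError => 1, AutoCommit => 1 });
$dbh->do('CREATE TABLE over (num INTEGER PRIMARY KEY, oid TEXT, mid TEXT)');
$dbh->do(q{INSERT INTO over VALUES (1, 'abc123', '<a@example>')});

sub remove_oid { # returns the article numbers it removed
        my ($dbh, $oid, $mid) = @_;
        my $nums = $dbh->selectcol_arrayref(
                'SELECT num FROM over WHERE oid = ? AND mid = ?',
                undef, $oid, $mid);
        $dbh->do('DELETE FROM over WHERE oid = ? AND mid = ?',
                undef, $oid, $mid);
        @$nums;
}

for my $num (remove_oid($dbh, 'abc123', '<a@example>')) {
        say "would remove doc #$num from the search index";
}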
 
-sub unindex_oid ($$$;$) {
-        my ($self, $git, $oid, $unindexed) = @_;
+sub unindex_oid ($$;$) { # git->cat_async callback
+        my ($bref, $oid, $type, $size, $arg) = @_;
+        is_bad_blob($oid, $type, $size, $arg->{oid}) and
+                return index_finalize($arg, 0);
+        my $self = $arg->{self};
+        local $self->{current_info} = "$self->{current_info} $oid";
+        my $unindexed = $arg->{in_unindex} ? $arg->{unindexed} : undef;
         my $mm = $self->{mm};
-        my $msgref = $git->cat_file($oid);
-        my $mime = PublicInbox::MIME->new($msgref);
-        my $mids = mids($mime->header_obj);
-        $mime = $msgref = undef;
-        my $over = $self->{over};
+        my $mids = mids(PublicInbox::Eml->new($bref));
+        undef $$bref;
+        my $oidx = $self->{oidx};
         foreach my $mid (@$mids) {
                 my %gone;
                 my ($id, $prev);
-                while (my $smsg = $over->next_by_mid($mid, \$id, \$prev)) {
+                while (my $smsg = $oidx->next_by_mid($mid, \$id, \$prev)) {
                         $gone{$smsg->{num}} = 1 if $oid eq $smsg->{blob};
-                        1; # continue
                 }
-                my $n = scalar keys %gone;
-                next unless $n;
+                my $n = scalar(keys(%gone)) or next;
                 if ($n > 1) {
                         warn "BUG: multiple articles linked to $oid\n",
                                 join(',',sort keys %gone), "\n";
                 }
-                foreach my $num (keys %gone) {
+                # reuse (num => mid) mapping in ascending numeric order
+                for my $num (sort { $a <=> $b } keys %gone) {
+                        $num += 0;
                         if ($unindexed) {
                                 my $mid0 = $mm->mid_for($num);
-                                $unindexed->{$mid0} = $num;
+                                my $oidbin = pack('H*', $oid);
+                                push @{$unindexed->{$oidbin}}, $num, $mid0;
                         }
                         $mm->num_delete($num);
                 }
-                unindex_oid_remote($self, $oid, $mid);
+                unindex_oid_aux($self, $oid, $mid);
         }
+        index_finalize($arg, 0);
 }
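
A minimal sketch of the {unindexed} bookkeeping above: hex OIDs are packed to
binary hash keys and unpacked back to hex where needed (as the leftover
handling in sync_prepare() does).  The OID and values below are made up.

use strict;
use v5.10.1;

my $oid = 'deadbeef' x 5;      # 40 hex characters, like a SHA-1 blob OID
my $oidbin = pack('H*', $oid); # 20 raw bytes, half the key size
my %unindexed;
push @{$unindexed{$oidbin}}, 42, '<msgid@example>';
say unpack('H*', $_), ' => ', "@{$unindexed{$_}}" for keys %unindexed;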
 
-my $x40 = qr/[a-f0-9]{40}/;
-sub unindex ($$$$) {
-        my ($self, $sync, $git, $unindex_range) = @_;
-        my $unindexed = $self->{unindexed} ||= {}; # $mid0 => $num
+sub git { $_[0]->{ibx}->git }
+
+# this is rare, it only happens when we get discontiguous history in
+# a mirror because the source used -purge or -edit
+sub unindex_todo ($$$) {
+        my ($self, $sync, $unit) = @_;
+        my $unindex_range = delete($unit->{unindex_range}) // return;
+        my $unindexed = $sync->{unindexed} //= {}; # $oidbin => [$num, $mid0]
         my $before = scalar keys %$unindexed;
         # order does not matter, here:
-        my @cmd = qw(log --raw -r
-                        --no-notes --no-color --no-abbrev --no-renames);
-        my $fh = $self->{reindex_pipe} = $git->popen(@cmd, $unindex_range);
+        my $fh = $unit->{git}->popen(qw(log --raw -r --no-notes --no-color
+                                --no-abbrev --no-renames), $unindex_range);
+        local $sync->{in_unindex} = 1;
+        my $unindex_oid = $self->can('unindex_oid');
         while (<$fh>) {
-                /\A:\d{6} 100644 $x40 ($x40) [AM]\tm$/o or next;
-                unindex_oid($self, $git, $1, $unindexed);
+                /\A:\d{6} 100644 $OID ($OID) [AM]\tm$/o or next;
+                $self->git->cat_async($1, $unindex_oid, { %$sync, oid => $1 });
         }
-        delete $self->{reindex_pipe};
-        close $fh or die "git log failed: \$?=$?";
+        $fh->close or die "git log failed: \$?=$?";
+        $self->git->async_wait_all;
 
         return unless $sync->{-opt}->{prune};
         my $after = scalar keys %$unindexed;
         return if $before == $after;
 
         # ensure any blob can no longer be accessed via dumb HTTP
         # ensure any blob can no longer be accessed via dumb HTTP
-        PublicInbox::Import::run_die(['git', "--git-dir=$git->{git_dir}",
+        run_die(['git', "--git-dir=$unit->{git}->{git_dir}",
                 qw(-c gc.reflogExpire=now gc --prune=all --quiet)]);
 }
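
unindex_todo() only queues blobs from `git log --raw --no-abbrev` lines that
add or modify the 'm' (message) file.  A self-contained sketch against a
synthetic raw line; the all-zero and all-'a' blob IDs are placeholders.

use strict;
use v5.10.1;

my $OID = qr/[a-f0-9]{40,}/;
my $raw = ':000000 100644 ' . ('0' x 40) . ' ' . ('a' x 40) . " A\tm";
if ($raw =~ /\A:\d{6} 100644 $OID ($OID) [AM]\tm$/) {
        say "blob $1 would be queued for unindexing";
}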
 
-sub sync_ranges ($$$) {
-        my ($self, $sync, $epoch_max) = @_;
+sub sync_ranges ($$) {
+        my ($self, $sync) = @_;
         my $reindex = $sync->{reindex};
-
-        return last_commits($self, $epoch_max) unless $reindex;
+        return $self->last_commits($sync) unless $reindex;
         return [] if ref($reindex) ne 'HASH';
 
         my $ranges = $reindex->{from}; # arrayref;
@@ -1252,119 +1088,233 @@ sub sync_ranges ($$$) {
         $ranges;
 }
 
-sub index_epoch ($$$) {
-        my ($self, $sync, $i) = @_;
+sub index_xap_only { # git->cat_async callback
+        my ($bref, $oid, $type, $size, $smsg) = @_;
+        my $self = delete $smsg->{self};
+        my $idx = idx_shard($self, $smsg->{num});
+        $idx->index_eml(PublicInbox::Eml->new($bref), $smsg);
+        $self->{transact_bytes} += $smsg->{bytes};
+}
 
-        my $git_dir = git_dir_n($self, $i);
-        die 'BUG: already reindexing!' if $self->{reindex_pipe};
-        -d $git_dir or return; # missing epochs are fine
-        fill_alternates($self, $i);
-        my $git = PublicInbox::Git->new($git_dir);
-        if (my $unindex_range = delete $sync->{unindex_range}->{$i}) {
-                unindex($self, $sync, $git, $unindex_range);
-        }
-        defined(my $range = $sync->{ranges}->[$i]) or return;
+sub index_xap_step ($$$;$) {
+        my ($self, $sync, $beg, $step) = @_;
+        my $end = $sync->{art_end};
+        return if $beg > $end; # nothing to do
+
+        $step //= $self->{shards};
+        my $ibx = $self->{ibx};
         if (my $pr = $sync->{-opt}->{-progress}) {
-                $pr->("$i.git indexing $range\n");
+                $pr->("Xapian indexlevel=$ibx->{indexlevel} ".
+                        "$beg..$end (% $step)\n");
+        }
+        for (my $num = $beg; $num <= $end; $num += $step) {
+                last if $sync->{quit};
+                my $smsg = $ibx->over->get_art($num) or next;
+                $smsg->{self} = $self;
+                $ibx->git->cat_async($smsg->{blob}, \&index_xap_only, $smsg);
+                if ($self->{transact_bytes} >= $self->{batch_bytes}) {
+                        ${$sync->{nr}} = $num;
+                        reindex_checkpoint($self, $sync);
+                }
         }
+}
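
index_xap_step() strides through article numbers by the shard count (unless a
$step is passed), so the sequential per-shard passes started from xapian_only()
below partition the number range without overlap.  A small demonstration
assuming 3 shards and articles 1..10:

use strict;
use v5.10.1;

my ($shards, $beg, $end) = (3, 1, 10);
for my $i (0 .. $shards - 1) { # one sequential pass per shard
        my @nums;
        for (my $num = $beg + $i; $num <= $end; $num += $shards) {
                push @nums, $num;
        }
        say "pass $i: @nums";
}
# pass 0: 1 4 7 10
# pass 1: 2 5 8
# pass 2: 3 6 9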
 
-        my @cmd = qw(log --raw -r --pretty=tformat:%H
-                        --no-notes --no-color --no-abbrev --no-renames);
-        my $fh = $self->{reindex_pipe} = $git->popen(@cmd, $range);
-        my $cmt;
-        while (<$fh>) {
-                chomp;
-                $self->{current_info} = "$i.git $_";
-                if (/\A$x40$/o && !defined($cmt)) {
-                        $cmt = $_;
-                } elsif (/\A:\d{6} 100644 $x40 ($x40) [AM]\tm$/o) {
-                        reindex_oid($self, $sync, $git, $1);
-                } elsif (/\A:\d{6} 100644 $x40 ($x40) [AM]\td$/o) {
-                        mark_deleted($self, $sync, $git, $1);
+sub index_todo ($$$) {
+        my ($self, $sync, $unit) = @_;
+        return if $sync->{quit};
+        unindex_todo($self, $sync, $unit);
+        my $stk = delete($unit->{stack}) or return;
+        my $all = $self->git;
+        my $index_oid = $self->can('index_oid');
+        my $unindex_oid = $self->can('unindex_oid');
+        my $pfx;
+        if ($unit->{git}->{git_dir} =~ m!/([^/]+)/git/([0-9]+\.git)\z!) {
+                $pfx = "$1 $2"; # v2
+        } else { # v1
+                ($pfx) = ($unit->{git}->{git_dir} =~ m!/([^/]+)\z!g);
+                $pfx //= $unit->{git}->{git_dir};
+        }
+        local $self->{current_info} = "$pfx ";
+        local $sync->{latest_cmt} = \(my $latest_cmt);
+        local $sync->{unit} = $unit;
+        while (my ($f, $at, $ct, $oid, $cmt) = $stk->pop_rec) {
+                if ($sync->{quit}) {
+                        warn "waiting to quit...\n";
+                        $all->async_wait_all;
+                        $self->update_last_commit($sync);
+                        return;
+                }
+                my $req = {
+                        %$sync,
+                        autime => $at,
+                        cotime => $ct,
+                        oid => $oid,
+                        cur_cmt => $cmt
+                };
+                if ($f eq 'm') {
+                        if ($sync->{max_size}) {
+                                $req->{git} = $all;
+                                $all->check_async($oid, \&check_size, $req);
+                        } else {
+                                $all->cat_async($oid, $index_oid, $req);
+                        }
+                } elsif ($f eq 'd') {
+                        $all->cat_async($oid, $unindex_oid, $req);
+                }
+                if (${$sync->{need_checkpoint}}) {
+                        reindex_checkpoint($self, $sync);
+                }
+        }
+        $all->async_wait_all;
+        $self->update_last_commit($sync, $stk);
+}
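
index_todo() only checkpoints between records, when ${$sync->{need_checkpoint}}
is set; the flag is a plain scalar ref created in index_sync() below, so
whatever notices that a batch is full merely flips it and the loop commits at a
safe boundary.  A toy sketch of the pattern with invented sizes (fake_index()
is not a real callback):

use strict;
use v5.10.1;

my $need_checkpoint = \(my $bool = 0);
my ($bytes, $batch_bytes) = (0, 100);

sub fake_index { # stands in for an async indexing callback
        my ($size) = @_;
        $bytes += $size;
        $$need_checkpoint = 1 if $bytes >= $batch_bytes;
}

for my $rec (1 .. 10) {
        fake_index(30); # pretend every record is 30 bytes
        if ($$need_checkpoint) { # only checkpoint at a record boundary
                say "checkpoint after record $rec ($bytes bytes)";
                ($bytes, $$need_checkpoint) = (0, 0);
        }
}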
+
+sub xapian_only {
+        my ($self, $opt, $sync, $art_beg) = @_;
+        my $seq = $opt->{'sequential-shard'};
+        $art_beg //= 0;
+        local $self->{parallel} = 0 if $seq;
+        $self->idx_init($opt); # acquire lock
+        if (my $art_end = $self->{ibx}->mm->max) {
+                $sync //= {
+                        need_checkpoint => \(my $bool = 0),
+                        -opt => $opt,
+                        self => $self,
+                        nr => \(my $nr = 0),
+                        -regen_fmt => "%u/?\n",
+                };
+                $sync->{art_end} = $art_end;
+                if ($seq || !$self->{parallel}) {
+                        my $shard_end = $self->{shards} - 1;
+                        for my $i (0..$shard_end) {
+                                last if $sync->{quit};
+                                index_xap_step($self, $sync, $art_beg + $i);
+                                if ($i != $shard_end) {
+                                        reindex_checkpoint($self, $sync);
+                                }
+                        }
+                } else { # parallel (maybe)
+                        index_xap_step($self, $sync, $art_beg, 1);
                 }
         }
-        close $fh or die "git log failed: \$?=$?";
-        delete $self->{reindex_pipe};
-        update_last_commit($self, $git, $i, $cmt) if defined $cmt;
+        $self->git->async_wait_all;
+        $self->{ibx}->cleanup;
+        $self->done;
 }
 
 # public, called by public-inbox-index
 sub index_sync {
         my ($self, $opt) = @_;
-        $opt ||= {};
-        my $pr = $opt->{-progress};
+        $opt //= {};
+        return xapian_only($self, $opt) if $opt->{xapian_only};
+
         my $epoch_max;
-        my $latest = git_dir_latest($self, \$epoch_max);
-        return unless defined $latest;
+        my $latest = $self->{ibx}->git_dir_latest(\$epoch_max) // return;
+        if ($opt->{'fast-noop'}) { # nanosecond (st_ctim) comparison
+                use Time::HiRes qw(stat);
+                if (my @mm = stat("$self->{ibx}->{inboxdir}/msgmap.sqlite3")) {
+                        my $c = $mm[10]; # 10 = ctime (nsec NV)
+                        my @hd = stat("$latest/refs/heads");
+                        my @pr = stat("$latest/packed-refs");
+                        return if $c > ($hd[10] // 0) && $c > ($pr[10] // 0);
+                }
+        }
+
+        my $pr = $opt->{-progress};
+        my $seq = $opt->{'sequential-shard'};
+        my $art_beg; # the NNTP article number we start xapian_only at
+        my $idxlevel = $self->{ibx}->{indexlevel};
+        local $self->{ibx}->{indexlevel} = 'basic' if $seq;
+
         $self->idx_init($opt); # acquire lock
+        $self->{mg}->fill_alternates;
+        $self->{oidx}->rethread_prepare($opt);
         my $sync = {
-                D => {}, # "$mid\0$cid" => $oid
-                unindex_range => {}, # EPOCH => oid_old..oid_new
+                need_checkpoint => \(my $bool = 0),
                 reindex => $opt->{reindex},
-                -opt => $opt
+                -opt => $opt,
+                self => $self,
+                ibx => $self->{ibx},
+                epoch_max => $epoch_max,
         };
-        $sync->{ranges} = sync_ranges($self, $sync, $epoch_max);
-        $sync->{regen} = sync_prepare($self, $sync, $epoch_max);
+        my $quit = PublicInbox::SearchIdx::quit_cb($sync);
+        local $SIG{QUIT} = $quit;
+        local $SIG{INT} = $quit;
+        local $SIG{TERM} = $quit;
 
-        if ($sync->{regen}) {
+        if (sync_prepare($self, $sync)) {
                 # tmp_clone seems to fail if inside a transaction, so
                 # we rollback here (because we opened {mm} for reading)
                 # Note: we do NOT rely on DBI transactions for atomicity;
                 # only for batch performance.
                 $self->{mm}->{dbh}->rollback;
                 $self->{mm}->{dbh}->begin_work;
-                $sync->{mm_tmp} = $self->{mm}->tmp_clone;
-        }
+                $sync->{mm_tmp} =
+                        $self->{mm}->tmp_clone($self->{ibx}->{inboxdir});
 
-        # work backwards through history
-        for (my $i = $epoch_max; $i >= 0; $i--) {
-                index_epoch($self, $sync, $i);
-        }
-
-        # unindex is required for leftovers if "deletes" affect messages
-        # in a previous fetch+index window:
-        my $git;
-        if (my @leftovers = values %{delete $sync->{D}}) {
-                $git = $self->{-inbox}->git;
-                for my $oid (@leftovers) {
-                        $self->{current_info} = "leftover $oid";
-                        unindex_oid($self, $git, $oid);
-                }
-        }
-        if (my $multi_mid = delete $sync->{multi_mid}) {
-                $git //= $self->{-inbox}->git;
-                my ($min, $max) = $multi_mid->minmax;
-                if ($sync->{reindex}) {
-                        # we may need to create new Message-IDs if mirrors
-                        # were initially indexed with old versions
-                        for (my $i = $max; $i >= $min; $i--) {
-                                my $oid = $multi_mid->mid_for($i);
-                                next unless defined $oid;
-                                reindex_oid_m($self, $sync, $git, $oid);
-                        }
-                } else { # regen on initial index
-                        for my $num ($min..$max) {
-                                my $oid = $multi_mid->mid_for($num);
-                                next unless defined $oid;
-                                reindex_oid_m($self, $sync, $git, $oid, $num);
-                        }
+                # xapian_only works incrementally w/o --reindex
+                if ($seq && !$opt->{reindex}) {
+                        $art_beg = $sync->{mm_tmp}->max || -1;
+                        $art_beg++;
                 }
         }
-        $git->cleanup if $git;
+        # work forwards through history
+        index_todo($self, $sync, $_) for @{delete($sync->{todo}) // []};
+        $self->{oidx}->rethread_done($opt) unless $sync->{quit};
         $self->done;
 
         if (my $nr = $sync->{nr}) {
                 my $pr = $sync->{-opt}->{-progress};
-                $pr->('all.git '.sprintf($sync->{-regen_fmt}, $nr)) if $pr;
+                $pr->('all.git '.sprintf($sync->{-regen_fmt}, $$nr)) if $pr;
+        }
+
+        my $quit_warn;
+        # deal with Xapian shards sequentially
+        if ($seq && delete($sync->{mm_tmp})) {
+                if ($sync->{quit}) {
+                        $quit_warn = 1;
+                } else {
+                        $self->{ibx}->{indexlevel} = $idxlevel;
+                        xapian_only($self, $opt, $sync, $art_beg);
+                        $quit_warn = 1 if $sync->{quit};
+                }
+        }
+
+        # --reindex on the command-line
+        if (!$sync->{quit} && $opt->{reindex} &&
+                        !ref($opt->{reindex}) && $idxlevel ne 'basic') {
+                $self->lock_acquire;
+                my $s0 = PublicInbox::SearchIdx->new($self->{ibx}, 0, 0);
+                if (my $xdb = $s0->idx_acquire) {
+                        my $n = $xdb->get_metadata('has_threadid');
+                        $xdb->set_metadata('has_threadid', '1') if $n ne '1';
+                }
+                $s0->idx_release;
+                $self->lock_release;
         }
 
         # reindex does not pick up new changes, so we rerun w/o it:
-        if ($opt->{reindex}) {
+        if ($opt->{reindex} && !$sync->{quit} &&
+                        !grep(defined, @$opt{qw(since until)})) {
                 my %again = %$opt;
                 $sync = undef;
-                delete @again{qw(reindex -skip_lock)};
+                delete @again{qw(rethread reindex -skip_lock)};
                 index_sync($self, \%again);
+                $opt->{quit} = $again{quit}; # propagate to caller
+        }
+        warn <<EOF if $quit_warn;
+W: interrupted, --xapian-only --reindex required upon restart
+EOF
+}
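
The --fast-noop branch near the top of index_sync() compares ctimes at
sub-second resolution, which is why it imports Time::HiRes::stat (the core
stat() only returns whole seconds).  A standalone sketch with placeholder
paths rather than the real inbox layout:

use strict;
use v5.10.1;
use Time::HiRes qw(stat); # stat() now yields fractional timestamps

my @mm = stat('/tmp/msgmap.sqlite3'); # placeholder path
my @hd = stat('/tmp/refs-heads');     # placeholder path
my @pr = stat('/tmp/packed-refs');    # placeholder path
if (@mm && $mm[10] > ($hd[10] // 0) && $mm[10] > ($pr[10] // 0)) {
        say 'msgmap is newer than the refs, indexing can be skipped';
}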
+
+sub ipc_atfork_child {
+        my ($self) = @_;
+        if (my $lei = delete $self->{lei}) {
+                $lei->_lei_atfork_child;
+                my $pkt_op_p = delete $lei->{pkt_op_p};
+                close($pkt_op_p->{op_p});
         }
+        $self->SUPER::ipc_atfork_child;
 }
 
 1;