user/dev discussion of public-inbox itself
From: "Eric Wong (Contractor, The Linux Foundation)" <e@80x24.org>
To: meta@public-inbox.org
Subject: [PATCH 3/8] over: remove forked subprocess
Date: Sat,  7 Apr 2018 03:41:49 +0000	[thread overview]
Message-ID: <20180407034154.2309-4-e@80x24.org> (raw)
In-Reply-To: <20180407034154.2309-1-e@80x24.org>

Since the overview DB is a synchronization point anyway, move it
into the main V2Writable process; this lets us drop a good deal
of code.  It is another step towards making Xapian optional for v2.

In other words, the fan-out point moves, and the Xapian partitions
no longer need to synchronize with each other:

Before:

                     /-------->\
                    /---------->\
     v2writable -->+----parts----> over
                    \---------->/
                     \-------->/

After:

                          /---------->
                         /----------->
  v2writable --> over-->+----parts--->
                         \----------->
                          \---------->

Since the overview/threading logic now runs on the same core that
feeds git-fast-import, small imports become slightly slower; the
difference is not noticeable in large imports, where I/O wait in
the Xapian partitions dominates.
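
For illustration only (not part of the patch): a minimal,
self-contained Perl sketch of the kind of pipe-based barrier the
patch below introduces in V2Writable (barrier_init/barrier_wait).
Each partition worker writes a short "barrier $part\n" note to a
shared pipe; writes of at most PIPE_BUF bytes (at least 512 under
POSIX) are atomic, so no lock is needed, and the parent simply
reads until every partition has checked in.  Names like $nparts
and %pending are hypothetical.

  use strict;
  use warnings;
  use IO::Handle;

  pipe(my ($r, $w)) or die "pipe failed: $!";
  $w->autoflush(1);

  my $nparts = 4; # hypothetical partition count
  for my $part (0 .. $nparts - 1) {
      my $pid = fork;
      defined $pid or die "fork failed: $!\n";
      if ($pid == 0) { # partition worker
          close $r;
          # short writes are atomic, so no flock is needed here:
          print $w "barrier $part\n" or die "write failed: $!\n";
          exit 0;
      }
  }
  close $w; # the parent only reads barrier notes

  # barrier_wait: block until every partition has checked in
  my %pending = map { $_ => 1 } (0 .. $nparts - 1);
  while (keys %pending) {
      defined(my $line = <$r>) or die "EOF on barrier_wait\n";
      $line =~ /\Abarrier (\d+)/ or die "bad line: $line";
      delete $pending{$1} or die "unknown part[$1]\n";
  }
  1 while wait() != -1; # reap the workers

In the actual patch, the read end stays with the parent as
$self->{bnote}->[0] and the write end is handed to each
SearchIdxPart worker via atfork_child.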
---
 MANIFEST                         |   1 -
 lib/PublicInbox/OverIdx.pm       |  57 ++++++++++++-
 lib/PublicInbox/OverIdxFork.pm   | 180 ---------------------------------------
 lib/PublicInbox/SearchIdx.pm     |  62 +++++---------
 lib/PublicInbox/SearchIdxPart.pm |  14 +--
 lib/PublicInbox/V2Writable.pm    |  89 +++++++++++++------
 6 files changed, 144 insertions(+), 259 deletions(-)
 delete mode 100644 lib/PublicInbox/OverIdxFork.pm

diff --git a/MANIFEST b/MANIFEST
index 82cc67d..58b3634 100644
--- a/MANIFEST
+++ b/MANIFEST
@@ -84,7 +84,6 @@ lib/PublicInbox/NNTPD.pm
 lib/PublicInbox/NewsWWW.pm
 lib/PublicInbox/Over.pm
 lib/PublicInbox/OverIdx.pm
-lib/PublicInbox/OverIdxFork.pm
 lib/PublicInbox/ParentPipe.pm
 lib/PublicInbox/ProcessPipe.pm
 lib/PublicInbox/Qspawn.pm
diff --git a/lib/PublicInbox/OverIdx.pm b/lib/PublicInbox/OverIdx.pm
index 28e4aa9..08f8744 100644
--- a/lib/PublicInbox/OverIdx.pm
+++ b/lib/PublicInbox/OverIdx.pm
@@ -2,14 +2,21 @@
 # License: AGPL-3.0+ <https://www.gnu.org/licenses/agpl-3.0.txt>
 
 # for XOVER, OVER in NNTP, and feeds/homepage/threads in PSGI
-# Unlike Msgmap, this is an _UNSTABLE_ database which can be
+# Unlike Msgmap, this is an _UNSTABLE_ cache which can be
 # tweaked/updated over time and rebuilt.
+#
+# Ghost messages (messages which are only referenced in References/In-Reply-To)
+# are denoted by a negative NNTP article number.
 package PublicInbox::OverIdx;
 use strict;
 use warnings;
 use base qw(PublicInbox::Over);
 use IO::Handle;
 use DBI qw(:sql_types); # SQL_BLOB
+use PublicInbox::MID qw/id_compress mids references/;
+use PublicInbox::SearchMsg;
+use Compress::Zlib qw(compress);
+use PublicInbox::Search;
 
 sub dbh_new {
 	my ($self) = @_;
@@ -200,6 +207,54 @@ sub link_refs {
 	$tid;
 }
 
+sub parse_references ($$$$) {
+	my ($self, $smsg, $mid0, $mids) = @_;
+	my $mime = $smsg->{mime};
+	my $hdr = $mime->header_obj;
+	my $refs = references($hdr);
+	push(@$refs, @$mids) if scalar(@$mids) > 1;
+	return $refs if scalar(@$refs) == 0;
+
+	# prevent circular references here:
+	my %seen = ( $mid0 => 1 );
+	my @keep;
+	foreach my $ref (@$refs) {
+		if (length($ref) > PublicInbox::MID::MAX_MID_SIZE) {
+			warn "References: <$ref> too long, ignoring\n";
+			next;
+		}
+		next if $seen{$ref}++;
+		push @keep, $ref;
+	}
+	$smsg->{references} = '<'.join('> <', @keep).'>' if @keep;
+	\@keep;
+}
+
+sub add_overview {
+	my ($self, $mime, $bytes, $num, $oid, $mid0) = @_;
+	my $lines = $mime->body_raw =~ tr!\n!\n!;
+	my $smsg = bless {
+		mime => $mime,
+		mid => $mid0,
+		bytes => $bytes,
+		lines => $lines,
+		blob => $oid,
+	}, 'PublicInbox::SearchMsg';
+	my $mids = mids($mime->header_obj);
+	my $refs = $self->parse_references($smsg, $mid0, $mids);
+	my $subj = $smsg->subject;
+	my $xpath;
+	if ($subj ne '') {
+		$xpath = PublicInbox::Search::subject_path($subj);
+		$xpath = id_compress($xpath);
+	}
+	my $dd = $smsg->to_doc_data($oid, $mid0);
+	utf8::encode($dd);
+	$dd = compress($dd);
+	my $values = [ $smsg->ts, $smsg->ds, $num, $mids, $refs, $xpath, $dd ];
+	add_over($self, $values);
+}
+
 sub add_over {
 	my ($self, $values) = @_;
 	my ($ts, $ds, $num, $mids, $refs, $xpath, $ddd) = @$values;
diff --git a/lib/PublicInbox/OverIdxFork.pm b/lib/PublicInbox/OverIdxFork.pm
deleted file mode 100644
index ec96528..0000000
--- a/lib/PublicInbox/OverIdxFork.pm
+++ /dev/null
@@ -1,180 +0,0 @@
-# Copyright (C) 2018 all contributors <meta@public-inbox.org>
-# License: AGPL-3.0+ <https://www.gnu.org/licenses/agpl-3.0.txt>
-package PublicInbox::OverIdxFork;
-use strict;
-use warnings;
-use base qw(PublicInbox::OverIdx PublicInbox::Lock);
-use Storable qw(freeze thaw);
-use IO::Handle;
-
-sub create {
-	my ($self, $v2writable) = @_;
-	$self->SUPER::create();
-	$self->spawn_worker($v2writable) if $v2writable->{parallel};
-}
-
-sub spawn_worker {
-	my ($self, $v2writable) = @_;
-	my ($r, $w);
-	pipe($r, $w) or die "pipe failed: $!\n";
-	my ($barrier_wait, $barrier_note);
-	pipe($barrier_wait, $barrier_note) or die "pipe failed: $!\n";
-	binmode $_, ':raw' foreach ($r, $w, $barrier_wait, $barrier_note);
-	my $pid = fork;
-	defined $pid or die "fork failed: $!\n";
-	if ($pid == 0) {
-		$v2writable->atfork_child;
-		$v2writable = undef;
-		close $w;
-		close $barrier_wait;
-
-		# F_SETPIPE_SZ = 1031 on Linux; increasing the pipe size here
-		# speeds V2Writable batch imports across 8 cores by nearly 20%
-		fcntl($r, 1031, 1048576) if $^O eq 'linux';
-
-		eval { over_worker_loop($self, $r, $barrier_note) };
-		die "over worker died: $@\n" if $@;
-		exit;
-	}
-	$self->{w} = $w;
-	$self->{pid} = $pid;
-	$self->{lock_path} = "$self->{filename}.pipe.lock";
-	close $r;
-	close $barrier_note;
-	$self->{barrier_wait} = $barrier_wait;
-	$w->autoflush(1);
-}
-
-sub over_worker_loop {
-	my ($self, $r, $barrier_note) = @_;
-	$barrier_note->autoflush(1);
-	$0 = 'pi-v2-overview';
-	$self->begin_lazy;
-	my $barrier = undef;
-	while (my $line = $r->getline) {
-		if ($line eq "commit\n") {
-			$self->commit_lazy;
-		} elsif ($line eq "close\n") {
-			$self->disconnect;
-		} elsif ($line =~ /\Abarrier_init (\d+)\n\z/) {
-			my $n = $1 - 1;
-			die "barrier in-progress\n" if defined $barrier;
-			$barrier = { map { $_ => 1 } (0..$n) };
-		} elsif ($line =~ /\Abarrier (\d+)\n\z/) {
-			my $part = $1;
-			die "no barrier in-progress\n" unless defined $barrier;
-			delete $barrier->{$1} or die "unknown barrier: $part\n";
-			if ((scalar keys %$barrier) == 0) {
-				$barrier = undef;
-				$self->commit_lazy;
-				print $barrier_note "barrier_done\n" or die
-					"print failed to barrier note: $!";
-			}
-		} elsif ($line =~ /\AD ([a-f0-9]{40,}) (.*)\n\z/s) {
-			my ($oid, $mid) = ($1, $2);
-			$self->remove_oid($oid, $mid);
-		} else {
-			my $len = int($line);
-			my $n = read($r, my $msg, $len) or die "read: $!\n";
-			$n == $len or die "short read: $n != $len\n";
-			$msg = thaw($msg); # should raise on error
-			defined $msg or die "failed to thaw buffer\n";
-			eval { add_over($self, $msg) };
-			warn "failed to index message <$msg->[-1]>: $@\n" if $@;
-		}
-	}
-	die "$$ $0 dbh not released\n" if $self->{dbh};
-	die "$$ $0 still in transaction\n" if $self->{txn};
-}
-
-# called by a partition worker
-# values: [ DS, NUM, BYTES, LINES, TS, MIDS, XPATH, doc_data ]
-sub add_over {
-	my ($self, $values) = @_;
-	if (my $w = $self->{w}) {
-		my $err;
-		my $str = freeze($values);
-		$str = length($str) . "\n" . $str;
-
-		# multiple processes write to the same pipe, so use flock
-		# We can't avoid this lock for <=PIPE_BUF writes, either,
-		# because those atomic writes can break up >PIPE_BUF ones
-		$self->lock_acquire;
-		print $w $str or $err = $!;
-		$self->lock_release;
-
-		die "print failed: $err\n" if $err;
-	} else {
-		$self->SUPER::add_over($values);
-	}
-}
-
-sub remove_oid {
-	my ($self, $oid, $mid) = @_;
-	if (my $w = $self->{w}) {
-		my $err;
-		$self->lock_acquire;
-		print $w "D $oid $mid\n" or $err = $!;
-		$self->lock_release;
-		die $err if $err;
-	} else {
-		$self->SUPER::remove_oid($oid, $mid); # OverIdx
-	}
-}
-
-# write to the subprocess
-sub barrier_init {
-	my ($self, $nparts) = @_;
-	my $w = $self->{w} or return;
-	my $err;
-	$self->lock_acquire;
-	print $w "barrier_init $nparts\n" or $err = $!;
-	$self->lock_release;
-	die $err if $err;
-}
-
-sub barrier_wait {
-	my ($self) = @_;
-	if (my $bw = $self->{barrier_wait}) {
-		my $l = $bw->getline;
-		$l eq "barrier_done\n" or die "bad response from barrier_wait: $l\n";
-	} else {
-		$self->commit_lazy;
-	}
-}
-
-sub remote_commit {
-	my ($self) = @_;
-	if (my $w = $self->{w}) {
-		my $err;
-		$self->lock_acquire;
-		print $w "commit\n" or $err = $!;
-		$self->lock_release;
-		die $err if $err;
-	} else {
-		$self->commit_lazy;
-	}
-}
-
-# prevent connections when using forked subprocesses
-sub connect {
-	my ($self) = @_;
-	return if $self->{w};
-	$self->SUPER::connect;
-}
-
-sub remote_close {
-	my ($self) = @_;
-	if (my $w = delete $self->{w}) {
-		my $pid = delete $self->{pid} or die "no process to wait on\n";
-		print $w "close\n" or die "failed to write to pid:$pid: $!\n";
-		close $w or die "failed to close pipe for pid:$pid: $!\n";
-		waitpid($pid, 0) == $pid or die "remote process did not finish";
-		$? == 0 or die ref($self)." pid:$pid exited with: $?";
-	} else {
-		die "transaction in progress $self\n" if $self->{txn};
-		$self->disconnect;
-	}
-}
-
-1;
diff --git a/lib/PublicInbox/SearchIdx.pm b/lib/PublicInbox/SearchIdx.pm
index 3596972..7cfa745 100644
--- a/lib/PublicInbox/SearchIdx.pm
+++ b/lib/PublicInbox/SearchIdx.pm
@@ -12,7 +12,7 @@ use warnings;
 use base qw(PublicInbox::Search PublicInbox::Lock);
 use PublicInbox::MIME;
 use PublicInbox::InboxWritable;
-use PublicInbox::MID qw/mid_clean id_compress mid_mime mids references/;
+use PublicInbox::MID qw/mid_clean id_compress mid_mime mids/;
 use PublicInbox::MsgIter;
 use Carp qw(croak);
 use POSIX qw(strftime);
@@ -76,8 +76,7 @@ sub new {
 	if ($version == 1) {
 		$self->{lock_path} = "$mainrepo/ssoma.lock";
 		my $dir = $self->xdir;
-		$self->{over_ro} = $self->{over} =
-				PublicInbox::OverIdx->new("$dir/over.sqlite3");
+		$self->{over} = PublicInbox::OverIdx->new("$dir/over.sqlite3");
 	} elsif ($version == 2) {
 		defined $part or die "partition is required for v2\n";
 		# partition is a number
@@ -274,11 +273,6 @@ sub add_message {
 		my $smsg = PublicInbox::SearchMsg->new($mime);
 		my $doc = $smsg->{doc};
 		my $subj = $smsg->subject;
-		my $xpath;
-		if ($subj ne '') {
-			$xpath = $self->subject_path($subj);
-			$xpath = id_compress($xpath);
-		}
 
 		$smsg->{lines} = $mime->body_raw =~ tr!\n!\n!;
 		defined $bytes or $bytes = length($mime->as_string);
@@ -340,7 +334,6 @@ sub add_message {
 		});
 
 		# populates smsg->references for smsg->to_doc_data
-		my $refs = parse_references($smsg, $mid0, $mids);
 		my $data = $smsg->to_doc_data($oid, $mid0);
 		foreach my $mid (@$mids) {
 			$tg->index_text($mid, 1, 'XM');
@@ -359,10 +352,19 @@ sub add_message {
 
 		$self->delete_article($num) if defined $num; # for reindexing
 
-		utf8::encode($data);
-		$data = compress($data);
-		push @vals, $num, $mids, $refs, $xpath, $data;
-		$self->{over}->add_over(\@vals);
+		if (my $over = $self->{over}) {
+			utf8::encode($data);
+			$data = compress($data);
+			my $refs = $over->parse_references($smsg, $mid0, $mids);
+			my $xpath;
+			if ($subj ne '') {
+				$xpath = $self->subject_path($subj);
+				$xpath = id_compress($xpath);
+			}
+
+			push @vals, $num, $mids, $refs, $xpath, $data;
+			$over->add_over(\@vals);
+		}
 		$doc->add_boolean_term('Q' . $_) foreach @$mids;
 		$doc->add_boolean_term('XNUM' . $num) if defined $num;
 		$doc_id = $self->{xdb}->add_document($doc);
@@ -432,6 +434,8 @@ sub remove_by_oid {
 	my ($self, $oid, $mid) = @_;
 	my $db = $self->{xdb};
 
+	$self->{over}->remove_oid($oid, $mid) if $self->{over};
+
 	# XXX careful, we cannot use batch_do here since we conditionally
 	# delete documents based on other factors, so we cannot call
 	# find_doc_ids twice.
@@ -441,7 +445,6 @@ sub remove_by_oid {
 	# there is only ONE element in @delete unless we
 	# have bugs in our v2writable deduplication check
 	my @delete;
-	my @over_del;
 	for (; $head != $tail; $head->inc) {
 		my $docid = $head->get_docid;
 		my $doc = $db->get_document($docid);
@@ -449,11 +452,9 @@ sub remove_by_oid {
 		$smsg->load_expand;
 		if ($smsg->{blob} eq $oid) {
 			push(@delete, $docid);
-			push(@over_del, $smsg->num);
 		}
 	}
 	$db->delete_document($_) foreach @delete;
-	$self->{over}->remove_oid($oid, $mid);
 	scalar(@delete);
 }
 
@@ -469,29 +470,6 @@ sub term_generator { # write-only
 	$self->{term_generator} = $tg;
 }
 
-sub parse_references ($$$) {
-	my ($smsg, $mid0, $mids) = @_;
-	my $mime = $smsg->{mime};
-	my $hdr = $mime->header_obj;
-	my $refs = references($hdr);
-	push(@$refs, @$mids) if scalar(@$mids) > 1;
-	return $refs if scalar(@$refs) == 0;
-
-	# prevent circular references here:
-	my %seen = ( $mid0 => 1 );
-	my @keep;
-	foreach my $ref (@$refs) {
-		if (length($ref) > PublicInbox::MID::MAX_MID_SIZE) {
-			warn "References: <$ref> too long, ignoring\n";
-			next;
-		}
-		next if $seen{$ref}++;
-		push @keep, $ref;
-	}
-	$smsg->{references} = '<'.join('> <', @keep).'>' if @keep;
-	\@keep;
-}
-
 sub index_git_blob_id {
 	my ($doc, $pfx, $objid) = @_;
 
@@ -619,7 +597,7 @@ sub _git_log {
 				--raw -r --no-abbrev/, $range);
 }
 
-# indexes all unindexed messages
+# indexes all unindexed messages (v1 only)
 sub _index_sync {
 	my ($self, $opts) = @_;
 	my $tip = $opts->{ref} || 'HEAD';
@@ -750,7 +728,7 @@ sub begin_txn_lazy {
 	my ($self) = @_;
 	return if $self->{txn};
 	my $xdb = $self->{xdb} || $self->_xdb_acquire;
-	$self->{over}->begin_lazy;
+	$self->{over}->begin_lazy if $self->{over};
 	$xdb->begin_transaction;
 	$self->{txn} = 1;
 	$xdb;
@@ -760,7 +738,7 @@ sub commit_txn_lazy {
 	my ($self) = @_;
 	delete $self->{txn} or return;
 	$self->{xdb}->commit_transaction;
-	$self->{over}->commit_lazy;
+	$self->{over}->commit_lazy if $self->{over};
 }
 
 sub worker_done {
diff --git a/lib/PublicInbox/SearchIdxPart.pm b/lib/PublicInbox/SearchIdxPart.pm
index e5766a8..078d2df 100644
--- a/lib/PublicInbox/SearchIdxPart.pm
+++ b/lib/PublicInbox/SearchIdxPart.pm
@@ -11,7 +11,6 @@ sub new {
 	# create the DB before forking:
 	$self->_xdb_acquire;
 	$self->_xdb_release;
-	$self->{over} = $v2writable->{over};
 	$self->spawn_worker($v2writable, $part) if $v2writable->{parallel};
 	$self;
 }
@@ -25,7 +24,7 @@ sub spawn_worker {
 	my $pid = fork;
 	defined $pid or die "fork failed: $!\n";
 	if ($pid == 0) {
-		$v2writable->atfork_child;
+		my $bnote = $v2writable->atfork_child;
 		$v2writable = undef;
 		close $w or die "failed to close: $!";
 
@@ -33,7 +32,7 @@ sub spawn_worker {
 		# speeds V2Writable batch imports across 8 cores by nearly 20%
 		fcntl($r, 1031, 1048576) if $^O eq 'linux';
 
-		eval { partition_worker_loop($self, $r, $part) };
+		eval { partition_worker_loop($self, $r, $part, $bnote) };
 		die "worker $part died: $@\n" if $@;
 		die "unexpected MM $self->{mm}" if $self->{mm};
 		exit;
@@ -43,8 +42,8 @@ sub spawn_worker {
 	close $r or die "failed to close: $!";
 }
 
-sub partition_worker_loop ($$$) {
-	my ($self, $r, $part) = @_;
+sub partition_worker_loop ($$$$) {
+	my ($self, $r, $part, $bnote) = @_;
 	$0 = "pi-v2-partition[$part]";
 	$self->begin_txn_lazy;
 	while (my $line = $r->getline) {
@@ -54,8 +53,9 @@ sub partition_worker_loop ($$$) {
 			$self->_xdb_release;
 		} elsif ($line eq "barrier\n") {
 			$self->commit_txn_lazy;
-			print { $self->{over}->{w} } "barrier $part\n" or
-					die "write failed to overview $!\n";
+			# no need to lock < 512 bytes is atomic under POSIX
+			print $bnote "barrier $part\n" or
+					die "write failed for barrier $!\n";
 		} elsif ($line =~ /\AD ([a-f0-9]{40,}) (.+)\n\z/s) {
 			my ($oid, $mid) = ($1, $2);
 			$self->begin_txn_lazy;
diff --git a/lib/PublicInbox/V2Writable.pm b/lib/PublicInbox/V2Writable.pm
index 877a459..8361d09 100644
--- a/lib/PublicInbox/V2Writable.pm
+++ b/lib/PublicInbox/V2Writable.pm
@@ -13,7 +13,7 @@ use PublicInbox::Import;
 use PublicInbox::MID qw(mids);
 use PublicInbox::ContentId qw(content_id content_digest);
 use PublicInbox::Inbox;
-use PublicInbox::OverIdxFork;
+use PublicInbox::OverIdx;
 use PublicInbox::Msgmap;
 use PublicInbox::Spawn;
 use IO::Handle;
@@ -67,7 +67,7 @@ sub new {
 		parallel => 1,
 		transact_bytes => 0,
 		xpfx => $xpfx,
-		over => PublicInbox::OverIdxFork->new("$xpfx/over.sqlite3"),
+		over => PublicInbox::OverIdx->new("$xpfx/over.sqlite3", 1),
 		lock_path => "$dir/inbox.lock",
 		# limit each git repo (epoch) to 1GB or so
 		rotate_bytes => int((1024 * 1024 * 1024) / $PACKING_FACTOR),
@@ -111,11 +111,12 @@ sub add {
 	my $im = $self->importer;
 	my $cmt = $im->add($mime);
 	$cmt = $im->get_mark($cmt);
-	my ($oid, $len, $msgref) = @{$im->{last_object}};
+	$self->{last_commit}->[$self->{epoch_max}] = $cmt;
 
+	my ($oid, $len, $msgref) = @{$im->{last_object}};
+	$self->{over}->add_overview($mime, $len, $num, $oid, $mid0);
 	my $nparts = $self->{partitions};
 	my $part = $num % $nparts;
-	$self->{last_commit}->[$self->{epoch_max}] = $cmt;
 	my $idx = $self->idx_part($part);
 	$idx->index_raw($len, $msgref, $num, $oid, $mid0, $mime);
 	my $n = $self->{transact_bytes} += $len;
@@ -208,11 +209,17 @@ sub idx_init {
 	# frequently activated.
 	delete $ibx->{$_} foreach (qw(git mm search));
 
+       if ($self->{parallel}) {
+               pipe(my ($r, $w)) or die "pipe failed: $!";
+               $self->{bnote} = [ $r, $w ];
+               $w->autoflush(1);
+       }
+
 	my $over = $self->{over};
 	$ibx->umask_prepare;
 	$ibx->with_umask(sub {
 		$self->lock_acquire;
-		$over->create($self);
+		$over->create;
 
 		# -compact can change partition count while -watch is idle
 		my $nparts = count_partitions($self);
@@ -256,7 +263,7 @@ sub remove_internal {
 	$self->idx_init;
 	my $im = $self->importer unless $purge;
 	my $ibx = $self->{-inbox};
-	my $srch = $ibx->search;
+	my $over = $self->{over};
 	my $cid = content_id($mime);
 	my $parts = $self->{idx_parts};
 	my $mm = $self->{mm};
@@ -272,7 +279,7 @@ sub remove_internal {
 	foreach my $mid (@$mids) {
 		my %gone;
 		my ($id, $prev);
-		while (my $smsg = $srch->next_by_mid($mid, \$id, \$prev)) {
+		while (my $smsg = $over->next_by_mid($mid, \$id, \$prev)) {
 			my $msg = $ibx->msg_by_smsg($smsg);
 			if (!defined($msg)) {
 				warn "broken smsg for $mid\n";
@@ -304,9 +311,7 @@ sub remove_internal {
 				($mark, undef) = $im->remove($orig, $cmt_msg);
 			}
 			$orig = undef;
-			foreach my $idx (@$parts) {
-				$idx->remote_remove($oid, $mid);
-			}
+			$self->unindex_oid_remote($oid, $mid);
 		}
 		$self->barrier;
 	}
@@ -371,8 +376,8 @@ sub done {
 	}
 
 	my $over = $self->{over};
-	$over->remote_commit;
-	$over->remote_close;
+	$over->commit_lazy;
+	$over->disconnect;
 
 	if ($mm) {
 		$mm->{dbh}->begin_work;
@@ -381,6 +386,7 @@ sub done {
 		delete $self->{mm};
 	}
 
+	delete $self->{bnote};
 	$self->{transact_bytes} = 0;
 	$self->lock_release if $parts;
 }
@@ -389,7 +395,25 @@ sub checkpoint {
 	my ($self) = @_;
 	my $im = $self->{im};
 	$im->checkpoint if $im; # PublicInbox::Import::checkpoint
-	$self->barrier(1);
+	$self->barrier;
+}
+
+sub barrier_init {
+	my ($self, $n) = @_;
+	$self->{bnote} or return;
+	--$n;
+	my $barrier = { map { $_ => 1 } (0..$n) };
+}
+
+sub barrier_wait {
+	my ($self, $barrier) = @_;
+	my $bnote = $self->{bnote} or return;
+	my $r = $bnote->[0];
+	while (scalar keys %$barrier) {
+		defined(my $l = $r->getline) or die "EOF on barrier_wait: $!";
+		$l =~ /\Abarrier (\d+)/ or die "bad line on barrier_wait: $l";
+		delete $barrier->{$1} or die "bad part[$1] on barrier wait";
+	}
 }
 
 # issue a write barrier to ensure all data is visible to other processes
@@ -403,17 +427,19 @@ sub barrier {
 	my $parts = $self->{idx_parts};
 	if ($parts) {
 		my $dbh = $self->{mm}->{dbh};
-		$dbh->commit; # SQLite msgmap data is second in importance
 
-		my $over = $self->{over};
+		# SQLite msgmap data is second in importance
+		$dbh->commit;
 
-		# Now deal with Xapian and overview DB
-		$over->barrier_init(scalar(@$parts));
+		# SQLite overview is third
+		$self->{over}->commit_lazy;
 
-		# each partition needs to issue a barrier command to over
-		$_->remote_barrier foreach @$parts;
+		# Now deal with Xapian
+		my $barrier = $self->barrier_init(scalar @$parts);
 
-		$over->barrier_wait; # wait for each Xapian partition
+		# each partition needs to issue a barrier command
+		$_->remote_barrier for @$parts;
+		$self->barrier_wait($barrier); # wait for each Xapian partition
 
 		# last_commit is special, don't commit these until
 		# remote partitions are done:
@@ -486,7 +512,7 @@ sub importer {
 		} else {
 			$self->{im} = undef;
 			$im->done;
-			$self->barrier(1);
+			$self->barrier;
 			$im = undef;
 			my $git_dir = $self->git_init(++$self->{epoch_max});
 			my $git = PublicInbox::Git->new($git_dir);
@@ -546,12 +572,11 @@ sub diff ($$$) {
 sub lookup_content {
 	my ($self, $mime, $mid) = @_;
 	my $ibx = $self->{-inbox};
-
-	my $srch = $ibx->search->reopen;
+	my $over = $self->{over};
 	my $cid = content_id($mime);
 	my $found;
 	my ($id, $prev);
-	while (my $smsg = $srch->next_by_mid($mid, \$id, \$prev)) {
+	while (my $smsg = $over->next_by_mid($mid, \$id, \$prev)) {
 		my $msg = $ibx->msg_by_smsg($smsg);
 		if (!defined($msg)) {
 			warn "broken smsg for $mid\n";
@@ -581,6 +606,8 @@ sub atfork_child {
 		$im->atfork_child;
 	}
 	die "unexpected mm" if $self->{mm};
+	close $self->{bnote}->[0] or die "close bnote[0]: $!\n";
+	$self->{bnote}->[1];
 }
 
 sub mark_deleted {
@@ -654,6 +681,7 @@ sub reindex_oid {
 	$mm_tmp->mid_delete($mid0) or
 		die "failed to delete <$mid0> for article #$num\n";
 
+	$self->{over}->add_overview($mime, $len, $num, $oid, $mid0);
 	my $nparts = $self->{partitions};
 	my $part = $num % $nparts;
 	my $idx = $self->idx_part($part);
@@ -759,17 +787,23 @@ $range
 	\$regen_max;
 }
 
+sub unindex_oid_remote {
+	my ($self, $oid, $mid) = @_;
+	$_->remote_remove($oid, $mid) foreach @{$self->{idx_parts}};
+	$self->{over}->remove_oid($oid, $mid);
+}
+
 sub unindex_oid {
 	my ($self, $git, $oid) = @_;
 	my $msgref = $git->cat_file($oid);
 	my $mime = PublicInbox::MIME->new($msgref);
 	my $mids = mids($mime->header_obj);
 	$mime = $msgref = undef;
-	my $srch = $self->{-inbox}->search;
+	my $over = $self->{over};
 	foreach my $mid (@$mids) {
 		my %gone;
 		my ($id, $prev);
-		while (my $smsg = $srch->next_by_mid($mid, \$id, \$prev)) {
+		while (my $smsg = $over->next_by_mid($mid, \$id, \$prev)) {
 			$gone{$smsg->num} = 1 if $oid eq $smsg->{blob};
 			1; # continue
 		}
@@ -780,8 +814,7 @@ sub unindex_oid {
 				join(',',sort keys %gone), "\n";
 		}
 		$self->{unindexed}->{$_}++ foreach keys %gone;
-		$_->remote_remove($oid, $mid) foreach @{$self->{idx_parts}};
-		$self->{over}->remove_oid($oid, $mid);
+		$self->unindex_oid_remote($oid, $mid);
 		$self->barrier;
 	}
 }
-- 
EW


Thread overview: 9+ messages
2018-04-07  3:41 [PATCH 0/8] less code, less memory, more speed Eric Wong (Contractor, The Linux Foundation)
2018-04-07  3:41 ` [PATCH 1/8] psgi: ensure /$INBOX/$MESSAGE_ID/T/ endpoint is chronological Eric Wong (Contractor, The Linux Foundation)
2018-04-07  3:41 ` [PATCH 2/8] over: avoid excessive SELECT Eric Wong (Contractor, The Linux Foundation)
2018-04-07  3:41 ` [PATCH 3/8] over: remove forked subprocess Eric Wong (Contractor, The Linux Foundation) [this message]
2018-04-07  3:41 ` [PATCH 4/8] v2writable: reduce barriers Eric Wong (Contractor, The Linux Foundation)
2018-04-07  3:41 ` [PATCH 5/8] index: allow specifying --jobs=0 to disable multiprocess Eric Wong (Contractor, The Linux Foundation)
2018-04-07  3:41 ` [PATCH 6/8] convert: support converting with altid defined Eric Wong (Contractor, The Linux Foundation)
2018-04-07  3:41 ` [PATCH 7/8] store less data in the Xapian document Eric Wong (Contractor, The Linux Foundation)
2018-04-07  3:41 ` [PATCH 8/8] msgmap: speed up minmax with separate queries Eric Wong (Contractor, The Linux Foundation)
