git@vger.kernel.org mailing list mirror (one of many)
 help / color / mirror / code / Atom feed
* Fix potential hang in https handshake.
@ 2012-10-18 21:35 szager
  2012-10-18 22:59 ` Junio C Hamano
  0 siblings, 1 reply; 10+ messages in thread
From: szager @ 2012-10-18 21:35 UTC (permalink / raw)
  To: git; +Cc: gitster, sop

>From 700b8075c578941c8f951711825c390ac68b190f Mon Sep 17 00:00:00 2001
From: Stefan Zager <szager@google.com>
Date: Thu, 18 Oct 2012 14:03:59 -0700
Subject: [PATCH] Fix potential hang in https handshake.

It will sometimes happen that curl_multi_fdset() doesn't
return any file descriptors.  In that case, it's recommended
that the application sleep for a short time before running
curl_multi_perform() again.

http://curl.haxx.se/libcurl/c/curl_multi_fdset.html

Signed-off-by: Stefan Zager <szager@google.com>
---
 http.c |   40 ++++++++++++++++++++++++++--------------
 1 files changed, 26 insertions(+), 14 deletions(-)

diff --git a/http.c b/http.c
index df9bb71..a6f66c0 100644
--- a/http.c
+++ b/http.c
@@ -602,35 +602,47 @@ void run_active_slot(struct active_request_slot *slot)
 	int max_fd;
 	struct timeval select_timeout;
 	int finished = 0;
+	long curl_timeout;
 
 	slot->finished = &finished;
 	while (!finished) {
 		step_active_slots();
 
 		if (slot->in_use) {
+			max_fd = -1;
+			FD_ZERO(&readfds);
+			FD_ZERO(&writefds);
+			FD_ZERO(&excfds);
+			curl_multi_fdset(curlm, &readfds, &writefds, &excfds, &max_fd);
+
 #if LIBCURL_VERSION_NUM >= 0x070f04
-			long curl_timeout;
-			curl_multi_timeout(curlm, &curl_timeout);
-			if (curl_timeout == 0) {
-				continue;
-			} else if (curl_timeout == -1) {
+			/* It will sometimes happen that curl_multi_fdset() doesn't
+			   return any file descriptors.  In that case, it's recommended
+			   that the application sleep for a short time before running
+			   curl_multi_perform() again.
+
+			   http://curl.haxx.se/libcurl/c/curl_multi_fdset.html
+			*/
+			if (max_fd == -1) {
 				select_timeout.tv_sec  = 0;
 				select_timeout.tv_usec = 50000;
 			} else {
-				select_timeout.tv_sec  =  curl_timeout / 1000;
-				select_timeout.tv_usec = (curl_timeout % 1000) * 1000;
+				curl_timeout = 0;
+				curl_multi_timeout(curlm, &curl_timeout);
+				if (curl_timeout == 0) {
+					continue;
+				} else if (curl_timeout == -1) {
+					select_timeout.tv_sec  = 0;
+					select_timeout.tv_usec = 50000;
+				} else {
+					select_timeout.tv_sec  =  curl_timeout / 1000;
+					select_timeout.tv_usec = (curl_timeout % 1000) * 1000;
+				}
 			}
 #else
 			select_timeout.tv_sec  = 0;
 			select_timeout.tv_usec = 50000;
 #endif
-
-			max_fd = -1;
-			FD_ZERO(&readfds);
-			FD_ZERO(&writefds);
-			FD_ZERO(&excfds);
-			curl_multi_fdset(curlm, &readfds, &writefds, &excfds, &max_fd);
-
 			select(max_fd+1, &readfds, &writefds, &excfds, &select_timeout);
 		}
 	}
-- 
1.7.7.3

^ permalink raw reply related	[flat|nested] 10+ messages in thread

* Re: Fix potential hang in https handshake.
  2012-10-18 21:35 Fix potential hang in https handshake szager
@ 2012-10-18 22:59 ` Junio C Hamano
  2012-10-19 10:36   ` Jeff King
  0 siblings, 1 reply; 10+ messages in thread
From: Junio C Hamano @ 2012-10-18 22:59 UTC (permalink / raw)
  To: szager; +Cc: git, sop

szager@google.com writes:

> From 700b8075c578941c8f951711825c390ac68b190f Mon Sep 17 00:00:00 2001
> From: Stefan Zager <szager@google.com>
> Date: Thu, 18 Oct 2012 14:03:59 -0700
> Subject: [PATCH] Fix potential hang in https handshake.
>
> It will sometimes happen that curl_multi_fdset() doesn't
> return any file descriptors.  In that case, it's recommended
> that the application sleep for a short time before running
> curl_multi_perform() again.
>
> http://curl.haxx.se/libcurl/c/curl_multi_fdset.html
>
> Signed-off-by: Stefan Zager <szager@google.com>
> ---

Thanks.  Would it be a better idea to "patch up" in problematic
case, instead of making this logic too deeply nested, like this
instead, I have to wonder...


	... all the existing code above unchanged ...
	curl_multi_fdset(..., &max_fd);
+	if (max_fd < 0) {    
+		/* nothing actionable??? */
+		select_timeout.tv_sec = 0;
+		select_timeout.tv_usec = 50000;
+	}

	select(max_fd+1, ..., &select_timeout);



>  http.c |   40 ++++++++++++++++++++++++++--------------
>  1 files changed, 26 insertions(+), 14 deletions(-)
>
> diff --git a/http.c b/http.c
> index df9bb71..a6f66c0 100644
> --- a/http.c
> +++ b/http.c
> @@ -602,35 +602,47 @@ void run_active_slot(struct active_request_slot *slot)
>  	int max_fd;
>  	struct timeval select_timeout;
>  	int finished = 0;
> +	long curl_timeout;
>  
>  	slot->finished = &finished;
>  	while (!finished) {
>  		step_active_slots();
>  
>  		if (slot->in_use) {
> +			max_fd = -1;
> +			FD_ZERO(&readfds);
> +			FD_ZERO(&writefds);
> +			FD_ZERO(&excfds);
> +			curl_multi_fdset(curlm, &readfds, &writefds, &excfds, &max_fd);
> +
>  #if LIBCURL_VERSION_NUM >= 0x070f04
> -			long curl_timeout;
> -			curl_multi_timeout(curlm, &curl_timeout);
> -			if (curl_timeout == 0) {
> -				continue;
> -			} else if (curl_timeout == -1) {
> +			/* It will sometimes happen that curl_multi_fdset() doesn't
> +			   return any file descriptors.  In that case, it's recommended
> +			   that the application sleep for a short time before running
> +			   curl_multi_perform() again.
> +
> +			   http://curl.haxx.se/libcurl/c/curl_multi_fdset.html
> +			*/
> +			if (max_fd == -1) {
>  				select_timeout.tv_sec  = 0;
>  				select_timeout.tv_usec = 50000;
>  			} else {
> -				select_timeout.tv_sec  =  curl_timeout / 1000;
> -				select_timeout.tv_usec = (curl_timeout % 1000) * 1000;
> +				curl_timeout = 0;
> +				curl_multi_timeout(curlm, &curl_timeout);
> +				if (curl_timeout == 0) {
> +					continue;
> +				} else if (curl_timeout == -1) {
> +					select_timeout.tv_sec  = 0;
> +					select_timeout.tv_usec = 50000;
> +				} else {
> +					select_timeout.tv_sec  =  curl_timeout / 1000;
> +					select_timeout.tv_usec = (curl_timeout % 1000) * 1000;
> +				}
>  			}
>  #else
>  			select_timeout.tv_sec  = 0;
>  			select_timeout.tv_usec = 50000;
>  #endif
> -
> -			max_fd = -1;
> -			FD_ZERO(&readfds);
> -			FD_ZERO(&writefds);
> -			FD_ZERO(&excfds);
> -			curl_multi_fdset(curlm, &readfds, &writefds, &excfds, &max_fd);
> -
>  			select(max_fd+1, &readfds, &writefds, &excfds, &select_timeout);
>  		}
>  	}

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: Fix potential hang in https handshake.
  2012-10-18 22:59 ` Junio C Hamano
@ 2012-10-19 10:36   ` Jeff King
  2012-10-19 14:10     ` Shawn Pearce
  0 siblings, 1 reply; 10+ messages in thread
From: Jeff King @ 2012-10-19 10:36 UTC (permalink / raw)
  To: Junio C Hamano; +Cc: szager, git, sop

On Thu, Oct 18, 2012 at 03:59:41PM -0700, Junio C Hamano wrote:

> > It will sometimes happen that curl_multi_fdset() doesn't
> > return any file descriptors.  In that case, it's recommended
> > that the application sleep for a short time before running
> > curl_multi_perform() again.
> >
> > http://curl.haxx.se/libcurl/c/curl_multi_fdset.html
> >
> > Signed-off-by: Stefan Zager <szager@google.com>
> > ---
> 
> Thanks.  Would it be a better idea to "patch up" in problematic
> case, instead of making this logic too deeply nested, like this
> instead, I have to wonder...
> 
> 
> 	... all the existing code above unchanged ...
> 	curl_multi_fdset(..., &max_fd);
> +	if (max_fd < 0) {    
> +		/* nothing actionable??? */
> +		select_timeout.tv_sec = 0;
> +		select_timeout.tv_usec = 50000;
> +	}
> 
> 	select(max_fd+1, ..., &select_timeout);

But wouldn't that override a potentially shorter timeout that curl gave
us via curl_multi_timeout, making us unnecessarily slow to hand control
back to curl?

The current logic is:

  - if curl says there is something to do now (timeout == 0), do it
    immediately

  - if curl gives us a timeout, use it with select

  - otherwise, feed 50ms to select

It should not matter what we get from curl_multi_fdset. If there are
fds, great, we will feed them to select with the timeout, and we may
break out early if there is work to do. If not, then we are already
doing this wait.

IOW, it seems like we are _already_ following the advice referenced in
curl's manpage. Is there some case I am missing? Confused...

-Peff

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: Fix potential hang in https handshake.
  2012-10-19 10:36   ` Jeff King
@ 2012-10-19 14:10     ` Shawn Pearce
       [not found]       ` <CAHOQ7J9W8FdKqzqbuDqj4bcFyN02kUigWtbL_xCen-PYWF9LUg@mail.gmail.com>
                         ` (2 more replies)
  0 siblings, 3 replies; 10+ messages in thread
From: Shawn Pearce @ 2012-10-19 14:10 UTC (permalink / raw)
  To: Jeff King; +Cc: Junio C Hamano, szager, git, sop

On Fri, Oct 19, 2012 at 3:36 AM, Jeff King <peff@peff.net> wrote:
> On Thu, Oct 18, 2012 at 03:59:41PM -0700, Junio C Hamano wrote:
>
>> > It will sometimes happen that curl_multi_fdset() doesn't
>> > return any file descriptors.  In that case, it's recommended
>> > that the application sleep for a short time before running
>> > curl_multi_perform() again.
>> >
>> > http://curl.haxx.se/libcurl/c/curl_multi_fdset.html
>> >
>> > Signed-off-by: Stefan Zager <szager@google.com>
>> > ---
>>
>> Thanks.  Would it be a better idea to "patch up" in problematic
>> case, instead of making this logic too deeply nested, like this
>> instead, I have to wonder...
>>
>>
>>       ... all the existing code above unchanged ...
>>       curl_multi_fdset(..., &max_fd);
>> +     if (max_fd < 0) {
>> +             /* nothing actionable??? */
>> +             select_timeout.tv_sec = 0;
>> +             select_timeout.tv_usec = 50000;
>> +     }
>>
>>       select(max_fd+1, ..., &select_timeout);
>
> But wouldn't that override a potentially shorter timeout that curl gave
> us via curl_multi_timeout, making us unnecessarily slow to hand control
> back to curl?
>
> The current logic is:
>
>   - if curl says there is something to do now (timeout == 0), do it
>     immediately
>
>   - if curl gives us a timeout, use it with select
>
>   - otherwise, feed 50ms to select
>
> It should not matter what we get from curl_multi_fdset. If there are
> fds, great, we will feed them to select with the timeout, and we may
> break out early if there is work to do. If not, then we are already
> doing this wait.
>
> IOW, it seems like we are _already_ following the advice referenced in
> curl's manpage. Is there some case I am missing? Confused...

The issue with the current code is sometimes when libcurl is opening a
CONNECT style connection through an HTTP proxy it returns a crazy high
timeout (>240 seconds) and no fds. In this case Git waits forever.
Stefan observed that using a timeout of 50 ms in this situation to
poll libcurl is better, as it figures out a lot more quickly that it
is connected to the proxy and can issue the request.

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: Fix potential hang in https handshake.
       [not found]       ` <CAHOQ7J9W8FdKqzqbuDqj4bcFyN02kUigWtbL_xCen-PYWF9LUg@mail.gmail.com>
@ 2012-10-19 17:02         ` Junio C Hamano
  0 siblings, 0 replies; 10+ messages in thread
From: Junio C Hamano @ 2012-10-19 17:02 UTC (permalink / raw)
  To: Stefan Zager; +Cc: Shawn Pearce, git, Jeff King, sop

Stefan Zager <szager@google.com> writes:

> On Oct 19, 2012 7:11 AM, "Shawn Pearce" <spearce@spearce.org> wrote:
>>
>> The issue with the current code is sometimes when libcurl is opening a
>> CONNECT style connection through an HTTP proxy it returns a crazy high
>> timeout (>240 seconds) and no fds. In this case Git waits forever.
>> Stefan observed that using a timeout of 50 ms in this situation to
>> poll libcurl is better, as it figures out a lot more quickly that it
>> is connected to the proxy and can issue the request.
>
> Correct.  Anecdotally, the zero-file-descriptor situation happens only once
> per process invocation, so the risk of passing a too-long timeout to
> select() is small.

Thanks.

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: Fix potential hang in https handshake.
  2012-10-19 14:10     ` Shawn Pearce
       [not found]       ` <CAHOQ7J9W8FdKqzqbuDqj4bcFyN02kUigWtbL_xCen-PYWF9LUg@mail.gmail.com>
@ 2012-10-19 17:08       ` Daniel Stenberg
  2012-10-19 20:27       ` Jeff King
  2 siblings, 0 replies; 10+ messages in thread
From: Daniel Stenberg @ 2012-10-19 17:08 UTC (permalink / raw)
  To: Shawn Pearce; +Cc: Jeff King, Junio C Hamano, szager, git, sop

On Fri, 19 Oct 2012, Shawn Pearce wrote:

> The issue with the current code is sometimes when libcurl is opening a 
> CONNECT style connection through an HTTP proxy it returns a crazy high 
> timeout (>240 seconds) and no fds. In this case Git waits forever.

Is this repeatable with a recent libcurl? It certainly sounds like a bug to 
me, and I might be interested in giving a try at tracking it down...

-- 

  / daniel.haxx.se

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: Fix potential hang in https handshake.
  2012-10-19 14:10     ` Shawn Pearce
       [not found]       ` <CAHOQ7J9W8FdKqzqbuDqj4bcFyN02kUigWtbL_xCen-PYWF9LUg@mail.gmail.com>
  2012-10-19 17:08       ` Daniel Stenberg
@ 2012-10-19 20:27       ` Jeff King
  2012-10-19 20:37         ` Stefan Zager
  2012-10-19 20:40         ` Junio C Hamano
  2 siblings, 2 replies; 10+ messages in thread
From: Jeff King @ 2012-10-19 20:27 UTC (permalink / raw)
  To: Shawn Pearce; +Cc: Junio C Hamano, szager, git, sop

On Fri, Oct 19, 2012 at 07:10:46AM -0700, Shawn O. Pearce wrote:

> > IOW, it seems like we are _already_ following the advice referenced in
> > curl's manpage. Is there some case I am missing? Confused...
> 
> The issue with the current code is sometimes when libcurl is opening a
> CONNECT style connection through an HTTP proxy it returns a crazy high
> timeout (>240 seconds) and no fds. In this case Git waits forever.
> Stefan observed that using a timeout of 50 ms in this situation to
> poll libcurl is better, as it figures out a lot more quickly that it
> is connected to the proxy and can issue the request.

Ah. That sounds like a bug in curl to me. But either way, if we want to
work around it, wouldn't the right thing be to override curl's timeout
in that instance? Like:

diff --git a/http.c b/http.c
index df9bb71..cd07cdf 100644
--- a/http.c
+++ b/http.c
@@ -631,6 +631,19 @@ void run_active_slot(struct active_request_slot *slot)
 			FD_ZERO(&excfds);
 			curl_multi_fdset(curlm, &readfds, &writefds, &excfds, &max_fd);
 
+			/*
+			 * Sometimes curl will give a really long timeout for a
+			 * CONNECT when there are no fds to read, but we can
+			 * get better results by running curl_multi_perform
+			 * more frequently.
+			 */
+			if (max_fd < 0 &&
+			    (select_timeout.tv_sec > 0 ||
+			     select_timeout.tv_usec > 50000)) {
+				select_timeout.tv_sec = 0;
+				select_timeout.tv_usec = 50000;
+			}
+
 			select(max_fd+1, &readfds, &writefds, &excfds, &select_timeout);
 		}
 	}

-Peff

^ permalink raw reply related	[flat|nested] 10+ messages in thread

* Re: Fix potential hang in https handshake.
  2012-10-19 20:27       ` Jeff King
@ 2012-10-19 20:37         ` Stefan Zager
  2012-10-19 20:40           ` Jeff King
  2012-10-19 20:40         ` Junio C Hamano
  1 sibling, 1 reply; 10+ messages in thread
From: Stefan Zager @ 2012-10-19 20:37 UTC (permalink / raw)
  To: Jeff King; +Cc: Shawn Pearce, Junio C Hamano, git, Shawn Pearce

On Fri, Oct 19, 2012 at 1:27 PM, Jeff King <peff@peff.net> wrote:
>
> On Fri, Oct 19, 2012 at 07:10:46AM -0700, Shawn O. Pearce wrote:
>
> > > IOW, it seems like we are _already_ following the advice referenced in
> > > curl's manpage. Is there some case I am missing? Confused...
> >
> > The issue with the current code is sometimes when libcurl is opening a
> > CONNECT style connection through an HTTP proxy it returns a crazy high
> > timeout (>240 seconds) and no fds. In this case Git waits forever.
> > Stefan observed that using a timeout of 50 ms in this situation to
> > poll libcurl is better, as it figures out a lot more quickly that it
> > is connected to the proxy and can issue the request.
>
> Ah. That sounds like a bug in curl to me. But either way, if we want to
> work around it, wouldn't the right thing be to override curl's timeout
> in that instance? Like:
>
> diff --git a/http.c b/http.c
> index df9bb71..cd07cdf 100644
> --- a/http.c
> +++ b/http.c
> @@ -631,6 +631,19 @@ void run_active_slot(struct active_request_slot *slot)
>                         FD_ZERO(&excfds);
>                         curl_multi_fdset(curlm, &readfds, &writefds, &excfds, &max_fd);
>
> +                       /*
> +                        * Sometimes curl will give a really long timeout for a
> +                        * CONNECT when there are no fds to read, but we can
> +                        * get better results by running curl_multi_perform
> +                        * more frequently.
> +                        */
> +                       if (max_fd < 0 &&
> +                           (select_timeout.tv_sec > 0 ||
> +                            select_timeout.tv_usec > 50000)) {
> +                               select_timeout.tv_sec = 0;
> +                               select_timeout.tv_usec = 50000;
> +                       }
> +
>                         select(max_fd+1, &readfds, &writefds, &excfds, &select_timeout);
>                 }
>         }
>
> -Peff

I have no objection to this; any one else?

Stefan

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: Fix potential hang in https handshake.
  2012-10-19 20:37         ` Stefan Zager
@ 2012-10-19 20:40           ` Jeff King
  0 siblings, 0 replies; 10+ messages in thread
From: Jeff King @ 2012-10-19 20:40 UTC (permalink / raw)
  To: Stefan Zager; +Cc: Shawn Pearce, Junio C Hamano, git, Shawn Pearce

On Fri, Oct 19, 2012 at 01:37:06PM -0700, Stefan Zager wrote:

> > diff --git a/http.c b/http.c
> > index df9bb71..cd07cdf 100644
> > --- a/http.c
> > +++ b/http.c
> > @@ -631,6 +631,19 @@ void run_active_slot(struct active_request_slot *slot)
> >                         FD_ZERO(&excfds);
> >                         curl_multi_fdset(curlm, &readfds, &writefds, &excfds, &max_fd);
> >
> > +                       /*
> > +                        * Sometimes curl will give a really long timeout for a
> > +                        * CONNECT when there are no fds to read, but we can
> > +                        * get better results by running curl_multi_perform
> > +                        * more frequently.
> > +                        */
> > +                       if (max_fd < 0 &&
> > +                           (select_timeout.tv_sec > 0 ||
> > +                            select_timeout.tv_usec > 50000)) {
> > +                               select_timeout.tv_sec = 0;
> > +                               select_timeout.tv_usec = 50000;
> > +                       }
> > +
> >                         select(max_fd+1, &readfds, &writefds, &excfds, &select_timeout);
> >                 }
> >         }
> >
> I have no objection to this; any one else?

If you wouldn't mind, I was hoping you could flesh out the comment a bit
more with real details of when this happens (and/or put them in the
commit message). If this is indeed a bug to be worked around, it will be
a huge help to somebody reading this code in a year who can confirm that
modern curl does not need it anymore.

-Peff

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: Fix potential hang in https handshake.
  2012-10-19 20:27       ` Jeff King
  2012-10-19 20:37         ` Stefan Zager
@ 2012-10-19 20:40         ` Junio C Hamano
  1 sibling, 0 replies; 10+ messages in thread
From: Junio C Hamano @ 2012-10-19 20:40 UTC (permalink / raw)
  To: Jeff King; +Cc: Shawn Pearce, szager, git, sop

Jeff King <peff@peff.net> writes:

> On Fri, Oct 19, 2012 at 07:10:46AM -0700, Shawn O. Pearce wrote:
>
>> > IOW, it seems like we are _already_ following the advice referenced in
>> > curl's manpage. Is there some case I am missing? Confused...
>> 
>> The issue with the current code is sometimes when libcurl is opening a
>> CONNECT style connection through an HTTP proxy it returns a crazy high
>> timeout (>240 seconds) and no fds. In this case Git waits forever.
>> Stefan observed that using a timeout of 50 ms in this situation to
>> poll libcurl is better, as it figures out a lot more quickly that it
>> is connected to the proxy and can issue the request.
>
> Ah. That sounds like a bug in curl to me. But either way, if we want to
> work around it, wouldn't the right thing be to override curl's timeout
> in that instance? Like:

Yeah, that sounds like a more targetted workaround (read: better).

>
> diff --git a/http.c b/http.c
> index df9bb71..cd07cdf 100644
> --- a/http.c
> +++ b/http.c
> @@ -631,6 +631,19 @@ void run_active_slot(struct active_request_slot *slot)
>  			FD_ZERO(&excfds);
>  			curl_multi_fdset(curlm, &readfds, &writefds, &excfds, &max_fd);
>  
> +			/*
> +			 * Sometimes curl will give a really long timeout for a
> +			 * CONNECT when there are no fds to read, but we can
> +			 * get better results by running curl_multi_perform
> +			 * more frequently.
> +			 */
> +			if (max_fd < 0 &&
> +			    (select_timeout.tv_sec > 0 ||
> +			     select_timeout.tv_usec > 50000)) {
> +				select_timeout.tv_sec = 0;
> +				select_timeout.tv_usec = 50000;
> +			}
> +
>  			select(max_fd+1, &readfds, &writefds, &excfds, &select_timeout);
>  		}
>  	}
>
> -Peff

^ permalink raw reply	[flat|nested] 10+ messages in thread

end of thread, other threads:[~2012-10-19 20:41 UTC | newest]

Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2012-10-18 21:35 Fix potential hang in https handshake szager
2012-10-18 22:59 ` Junio C Hamano
2012-10-19 10:36   ` Jeff King
2012-10-19 14:10     ` Shawn Pearce
     [not found]       ` <CAHOQ7J9W8FdKqzqbuDqj4bcFyN02kUigWtbL_xCen-PYWF9LUg@mail.gmail.com>
2012-10-19 17:02         ` Junio C Hamano
2012-10-19 17:08       ` Daniel Stenberg
2012-10-19 20:27       ` Jeff King
2012-10-19 20:37         ` Stefan Zager
2012-10-19 20:40           ` Jeff King
2012-10-19 20:40         ` Junio C Hamano

Code repositories for project(s) associated with this public inbox

	https://80x24.org/mirrors/git.git

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).