unofficial mirror of libc-alpha@sourceware.org
 help / color / mirror / Atom feed
* [PATCH] benchtests: improve argument parsing through argparse library
@ 2018-07-13 17:44 leonardo.sandoval.gonzalez
  2018-07-16 13:23 ` Siddhesh Poyarekar
  0 siblings, 1 reply; 2+ messages in thread
From: leonardo.sandoval.gonzalez @ 2018-07-13 17:44 UTC (permalink / raw
  To: libc-alpha; +Cc: hjl.tools, siddhesh, Leonardo Sandoval

From: Leonardo Sandoval <leonardo.sandoval.gonzalez@linux.intel.com>

The argparse library is used in the compare_bench script to improve command-line
argument parsing. The 'schema validation file' is now optional, reducing the
number of required parameters by one.

            * benchtests/scripts/compare_bench.py (__main__): Use the argparse
              library to improve command line parsing.
              (__main__): Make the schema file an optional parameter (--schema),
              defaulting to benchtests/scripts/benchout.schema.json.
              (main): Move the argument parsing out to __main__ and leave it
              only as the caller of the main comparison functions.
---
 benchtests/scripts/compare_bench.py | 40 ++++++++++++++---------------
 1 file changed, 19 insertions(+), 21 deletions(-)

diff --git a/benchtests/scripts/compare_bench.py b/benchtests/scripts/compare_bench.py
index ea25f778c09..88e8911d812 100755
--- a/benchtests/scripts/compare_bench.py
+++ b/benchtests/scripts/compare_bench.py
@@ -25,6 +25,7 @@ import sys
 import os
 import pylab
 import import_bench as bench
+import argparse
 
 def do_compare(func, var, tl1, tl2, par, threshold):
     """Compare one of the aggregate measurements
@@ -151,26 +152,9 @@ def plot_graphs(bench1, bench2):
             print('Writing out %s' % filename)
             pylab.savefig(filename)
 
-
-def main(args):
-    """Program Entry Point
-
-    Take two benchmark output files and compare their timings.
-    """
-    if len(args) > 4 or len(args) < 3:
-        print('Usage: %s <schema> <file1> <file2> [threshold in %%]' % sys.argv[0])
-        sys.exit(os.EX_USAGE)
-
-    bench1 = bench.parse_bench(args[1], args[0])
-    bench2 = bench.parse_bench(args[2], args[0])
-    if len(args) == 4:
-        threshold = float(args[3])
-    else:
-        threshold = 10.0
-
-    if (bench1['timing_type'] != bench2['timing_type']):
-        print('Cannot compare benchmark outputs: timing types are different')
-        return
+def main(bench1, bench2, schema, threshold):
+    bench1 = bench.parse_bench(bench1, schema)
+    bench2 = bench.parse_bench(bench2, schema)
 
     plot_graphs(bench1, bench2)
 
@@ -181,4 +165,18 @@ def main(args):
 
 
 if __name__ == '__main__':
-    main(sys.argv[1:])
+    parser = argparse.ArgumentParser(description='Take two benchmark and compare their timings.')
+
+    # Required parameters
+    parser.add_argument('bench1', help='First bench to compare')
+    parser.add_argument('bench2', help='Second bench to compare')
+
+    # Optional parameters
+    parser.add_argument('--schema',
+                        default=os.path.join(os.path.dirname(os.path.realpath(__file__)),'benchout.schema.json'),
+                        help='JSON file to validate source/dest files (default: %(default)s)')
+    parser.add_argument('--threshold', default=10.0, help='Only print those with equal or higher threshold (default: %(default)s)')
+
+    args = parser.parse_args()
+
+    main(args.bench1, args.bench2, args.schema, args.threshold)
-- 
2.18.0


^ permalink raw reply related	[flat|nested] 2+ messages in thread

* Re: [PATCH] benchtests: improve argument parsing through argparse library
  2018-07-13 17:44 [PATCH] benchtests: improve argument parsing through argparse library leonardo.sandoval.gonzalez
@ 2018-07-16 13:23 ` Siddhesh Poyarekar
  0 siblings, 0 replies; 2+ messages in thread
From: Siddhesh Poyarekar @ 2018-07-16 13:23 UTC (permalink / raw
  To: leonardo.sandoval.gonzalez, libc-alpha; +Cc: hjl.tools

This is OK, thanks.

Siddhesh

On 07/13/2018 11:14 PM, leonardo.sandoval.gonzalez@linux.intel.com wrote:
> From: Leonardo Sandoval <leonardo.sandoval.gonzalez@linux.intel.com>
> 
> The argparse library is used in the compare_bench script to improve command-line
> argument parsing. The 'schema validation file' is now optional, reducing the
> number of required parameters by one.
> 
>              * benchtests/scripts/compare_bench.py (__main__): Use the argparse
>                library to improve command line parsing.
>                (__main__): Make the schema file an optional parameter (--schema),
>                defaulting to benchtests/scripts/benchout.schema.json.
>                (main): Move the argument parsing out to __main__ and leave it
>                only as the caller of the main comparison functions.
> ---
>   benchtests/scripts/compare_bench.py | 40 ++++++++++++++---------------
>   1 file changed, 19 insertions(+), 21 deletions(-)
> 
> diff --git a/benchtests/scripts/compare_bench.py b/benchtests/scripts/compare_bench.py
> index ea25f778c09..88e8911d812 100755
> --- a/benchtests/scripts/compare_bench.py
> +++ b/benchtests/scripts/compare_bench.py
> @@ -25,6 +25,7 @@ import sys
>   import os
>   import pylab
>   import import_bench as bench
> +import argparse
>   
>   def do_compare(func, var, tl1, tl2, par, threshold):
>       """Compare one of the aggregate measurements
> @@ -151,26 +152,9 @@ def plot_graphs(bench1, bench2):
>               print('Writing out %s' % filename)
>               pylab.savefig(filename)
>   
> -
> -def main(args):
> -    """Program Entry Point
> -
> -    Take two benchmark output files and compare their timings.
> -    """
> -    if len(args) > 4 or len(args) < 3:
> -        print('Usage: %s <schema> <file1> <file2> [threshold in %%]' % sys.argv[0])
> -        sys.exit(os.EX_USAGE)
> -
> -    bench1 = bench.parse_bench(args[1], args[0])
> -    bench2 = bench.parse_bench(args[2], args[0])
> -    if len(args) == 4:
> -        threshold = float(args[3])
> -    else:
> -        threshold = 10.0
> -
> -    if (bench1['timing_type'] != bench2['timing_type']):
> -        print('Cannot compare benchmark outputs: timing types are different')
> -        return
> +def main(bench1, bench2, schema, threshold):
> +    bench1 = bench.parse_bench(bench1, schema)
> +    bench2 = bench.parse_bench(bench2, schema)
>   
>       plot_graphs(bench1, bench2)
>   
> @@ -181,4 +165,18 @@ def main(args):
>   
>   
>   if __name__ == '__main__':
> -    main(sys.argv[1:])
> +    parser = argparse.ArgumentParser(description='Take two benchmark and compare their timings.')
> +
> +    # Required parameters
> +    parser.add_argument('bench1', help='First bench to compare')
> +    parser.add_argument('bench2', help='Second bench to compare')
> +
> +    # Optional parameters
> +    parser.add_argument('--schema',
> +                        default=os.path.join(os.path.dirname(os.path.realpath(__file__)),'benchout.schema.json'),
> +                        help='JSON file to validate source/dest files (default: %(default)s)')
> +    parser.add_argument('--threshold', default=10.0, help='Only print those with equal or higher threshold (default: %(default)s)')
> +
> +    args = parser.parse_args()
> +
> +    main(args.bench1, args.bench2, args.schema, args.threshold)
> 

^ permalink raw reply	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2018-07-16 13:24 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-07-13 17:44 [PATCH] benchtests: improve argument parsing through argparse library leonardo.sandoval.gonzalez
2018-07-16 13:23 ` Siddhesh Poyarekar

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).