#!/usr/bin/env python

"""
compare.py - versatile benchmark output compare tool
"""

import argparse
from argparse import ArgumentParser
import os
import sys
import unittest

import gbench
from gbench import util, report
from gbench.util import *

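# Typical invocations, one per mode defined in create_parser() below
# (angle-bracket names are placeholders, not real paths):
#
#   compare.py benchmarks <baseline> <contender> [benchmark options]...
#   compare.py filters <benchmark> <filter_baseline> <filter_contender> [benchmark options]...
#   compare.py benchmarksfiltered <baseline> <filter_baseline> <contender> <filter_contender> [benchmark options]...
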
def check_inputs(in1, in2, flags):
    """
    Validate the user-provided inputs and diagnose any abnormalities.
    """
    in1_kind, in1_err = classify_input_file(in1)
    in2_kind, in2_err = classify_input_file(in2)
    output_file = find_benchmark_flag('--benchmark_out=', flags)
    output_type = find_benchmark_flag('--benchmark_out_format=', flags)
    if in1_kind == IT_Executable and in2_kind == IT_Executable and output_file:
        print(("WARNING: '--benchmark_out=%s' will be passed to both "
               "benchmarks, causing it to be overwritten") % output_file)
    if in1_kind == IT_JSON and in2_kind == IT_JSON and len(flags) > 0:
        print("WARNING: passing optional flags has no effect since both "
              "inputs are JSON")
    if output_type is not None and output_type != 'json':
        print(("ERROR: passing '--benchmark_out_format=%s' to 'compare.py'"
               " is not supported.") % output_type)
        sys.exit(1)


def create_parser():
    """Build the argument parser and its three sub-command modes."""
    parser = ArgumentParser(
        description='versatile benchmark output compare tool')
    subparsers = parser.add_subparsers(
        help='This tool has multiple modes of operation:',
        dest='mode')

    parser_a = subparsers.add_parser(
        'benchmarks',
        help='The simplest use-case: compare the entire output of two benchmarks')
    baseline = parser_a.add_argument_group(
        'baseline', 'The benchmark baseline')
    baseline.add_argument(
        'test_baseline',
        metavar='test_baseline',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    contender = parser_a.add_argument_group(
        'contender', 'The benchmark that will be compared against the baseline')
    contender.add_argument(
        'test_contender',
        metavar='test_contender',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    parser_a.add_argument(
        'benchmark_options',
        metavar='benchmark_options',
        nargs=argparse.REMAINDER,
        help='Arguments to pass when running benchmark executables')

    parser_b = subparsers.add_parser(
        'filters', help='Compare two different filters of one benchmark')
    baseline = parser_b.add_argument_group(
        'baseline', 'The benchmark baseline')
    baseline.add_argument(
        'test',
        metavar='test',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    baseline.add_argument(
        'filter_baseline',
        metavar='filter_baseline',
        type=str,
        nargs=1,
        help='The first filter, which will be used as the baseline')
    contender = parser_b.add_argument_group(
        'contender', 'The benchmark that will be compared against the baseline')
    contender.add_argument(
        'filter_contender',
        metavar='filter_contender',
        type=str,
        nargs=1,
        help='The second filter, which will be compared against the baseline')
    parser_b.add_argument(
        'benchmark_options',
        metavar='benchmark_options',
        nargs=argparse.REMAINDER,
        help='Arguments to pass when running benchmark executables')

    parser_c = subparsers.add_parser(
        'benchmarksfiltered',
        help='Compare one filter of the first benchmark with another filter of the second benchmark')
    baseline = parser_c.add_argument_group(
        'baseline', 'The benchmark baseline')
    baseline.add_argument(
        'test_baseline',
        metavar='test_baseline',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    baseline.add_argument(
        'filter_baseline',
        metavar='filter_baseline',
        type=str,
        nargs=1,
        help='The first filter, which will be used as the baseline')
    contender = parser_c.add_argument_group(
        'contender', 'The benchmark that will be compared against the baseline')
    contender.add_argument(
        'test_contender',
        metavar='test_contender',
        type=argparse.FileType('r'),
        nargs=1,
        help='The second benchmark executable or JSON output file, which will be compared against the baseline')
    contender.add_argument(
        'filter_contender',
        metavar='filter_contender',
        type=str,
        nargs=1,
        help='The second filter, which will be compared against the baseline')
    parser_c.add_argument(
        'benchmark_options',
        metavar='benchmark_options',
        nargs=argparse.REMAINDER,
        help='Arguments to pass when running benchmark executables')

    return parser


def main():
    # Parse the command line flags
    parser = create_parser()
    args, unknown_args = parser.parse_known_args()
    if args.mode is None:
        parser.print_help()
        sys.exit(1)
    assert not unknown_args
    benchmark_options = args.benchmark_options

    if args.mode == 'benchmarks':
        test_baseline = args.test_baseline[0].name
        test_contender = args.test_contender[0].name
        filter_baseline = ''
        filter_contender = ''

        # NOTE: if test_baseline == test_contender, you are analyzing the stdev

        description = 'Comparing %s to %s' % (test_baseline, test_contender)
    elif args.mode == 'filters':
        test_baseline = args.test[0].name
        test_contender = args.test[0].name
        filter_baseline = args.filter_baseline[0]
        filter_contender = args.filter_contender[0]

        # NOTE: if filter_baseline == filter_contender, you are analyzing the
        # stdev

        description = 'Comparing %s to %s (from %s)' % (
            filter_baseline, filter_contender, args.test[0].name)
    elif args.mode == 'benchmarksfiltered':
        test_baseline = args.test_baseline[0].name
        test_contender = args.test_contender[0].name
        filter_baseline = args.filter_baseline[0]
        filter_contender = args.filter_contender[0]

        # NOTE: if test_baseline == test_contender and
        # filter_baseline == filter_contender, you are analyzing the stdev

        description = 'Comparing %s (from %s) to %s (from %s)' % (
            filter_baseline, test_baseline, filter_contender, test_contender)
    else:
        # should never happen
        print("Unrecognized mode of operation: '%s'" % args.mode)
        parser.print_help()
        sys.exit(1)

    check_inputs(test_baseline, test_contender, benchmark_options)

    options_baseline = []
    options_contender = []

    if filter_baseline and filter_contender:
        options_baseline = ['--benchmark_filter=%s' % filter_baseline]
        options_contender = ['--benchmark_filter=%s' % filter_contender]

    # Run the benchmarks and report the results
    json1 = json1_orig = gbench.util.run_or_load_benchmark(
        test_baseline, benchmark_options + options_baseline)
    json2 = json2_orig = gbench.util.run_or_load_benchmark(
        test_contender, benchmark_options + options_contender)

    # Now, filter the benchmarks so that the difference report can work.
    # Rewriting the matched portion of each name with a common replacement
    # tag lets the baseline and contender entries line up in the report.
    if filter_baseline and filter_contender:
        replacement = '[%s vs. %s]' % (filter_baseline, filter_contender)
        json1 = gbench.report.filter_benchmark(
            json1_orig, filter_baseline, replacement)
        json2 = gbench.report.filter_benchmark(
            json2_orig, filter_contender, replacement)

    # Diff and output
    output_lines = gbench.report.generate_difference_report(json1, json2)
    print(description)
    for ln in output_lines:
        print(ln)


class TestParser(unittest.TestCase):
    """Sanity-check the three parser modes against the bundled JSON inputs."""

    def setUp(self):
        self.parser = create_parser()
        testInputs = os.path.join(
            os.path.dirname(
                os.path.realpath(__file__)),
            'gbench',
            'Inputs')
        self.testInput0 = os.path.join(testInputs, 'test1_run1.json')
        self.testInput1 = os.path.join(testInputs, 'test1_run2.json')

    def test_benchmarks_basic(self):
        parsed = self.parser.parse_args(
            ['benchmarks', self.testInput0, self.testInput1])
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_with_remainder(self):
        parsed = self.parser.parse_args(
            ['benchmarks', self.testInput0, self.testInput1, 'd'])
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.benchmark_options, ['d'])

    def test_benchmarks_with_remainder_after_doubleminus(self):
        parsed = self.parser.parse_args(
            ['benchmarks', self.testInput0, self.testInput1, '--', 'e'])
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.benchmark_options, ['e'])

    def test_filters_basic(self):
        parsed = self.parser.parse_args(
            ['filters', self.testInput0, 'c', 'd'])
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.filter_contender[0], 'd')
        self.assertFalse(parsed.benchmark_options)

    def test_filters_with_remainder(self):
        parsed = self.parser.parse_args(
            ['filters', self.testInput0, 'c', 'd', 'e'])
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.filter_contender[0], 'd')
        self.assertEqual(parsed.benchmark_options, ['e'])

    def test_filters_with_remainder_after_doubleminus(self):
        parsed = self.parser.parse_args(
            ['filters', self.testInput0, 'c', 'd', '--', 'f'])
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.filter_contender[0], 'd')
        self.assertEqual(parsed.benchmark_options, ['f'])

    def test_benchmarksfiltered_basic(self):
        parsed = self.parser.parse_args(
            ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e'])
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], 'e')
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarksfiltered_with_remainder(self):
        parsed = self.parser.parse_args(
            ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', 'f'])
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], 'e')
        self.assertEqual(parsed.benchmark_options[0], 'f')

    def test_benchmarksfiltered_with_remainder_after_doubleminus(self):
        parsed = self.parser.parse_args(
            ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', '--', 'g'])
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], 'e')
        self.assertEqual(parsed.benchmark_options[0], 'g')
if __name__ == '__main__':
    # unittest.main()
    main()

# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate: indent-mode python; remove-trailing-spaces modified;