# encoding: utf-8
######################################################################
# This file is imported from the minitest project.
# DO NOT make modifications in this repo. They _will_ be reverted!
# File a patch instead and assign it to Ryan Davis.
######################################################################

require 'minitest/unit'
require 'minitest/spec'

class MiniTest::Unit # :nodoc:
  def run_benchmarks # :nodoc:
    _run_anything :benchmark
  end

  def benchmark_suite_header suite # :nodoc:
    "\n#{suite}\t#{suite.bench_range.join("\t")}"
  end

  class TestCase
    ##
    # Returns a set of ranges stepped exponentially from +min+ to
    # +max+ by powers of +base+. Eg:
    #
    #   bench_exp(2, 16, 2) # => [2, 4, 8, 16]

    def self.bench_exp min, max, base = 10
      min = (Math.log10(min) / Math.log10(base)).to_i
      max = (Math.log10(max) / Math.log10(base)).to_i

      (min..max).map { |m| base ** m }.to_a
    end

    ##
    # Returns a set of ranges stepped linearly from +min+ to +max+ by
    # +step+. Eg:
    #
    #   bench_linear(20, 40, 10) # => [20, 30, 40]

    def self.bench_linear min, max, step = 10
      (min..max).step(step).to_a
    rescue LocalJumpError # 1.8.6
      r = []; (min..max).step(step) { |n| r << n }; r
    end

    ##
    # Returns the benchmark methods (methods that start with bench_)
    # for that class.

    def self.benchmark_methods # :nodoc:
      public_instance_methods(true).grep(/^bench_/).map { |m| m.to_s }.sort
    end

    ##
    # Returns all test suites that have benchmark methods.

    def self.benchmark_suites
      TestCase.test_suites.reject { |s| s.benchmark_methods.empty? }
    end

    ##
    # Specifies the ranges used for benchmarking for that class.
    # Defaults to exponential growth from 1 to 10k by powers of 10.
    # Override if you need different ranges for your benchmarks.
    #
    # See also: ::bench_exp and ::bench_linear.

    def self.bench_range
      bench_exp 1, 10_000
    end

    ##
    # Runs the given +work+, gathering the times of each run. Range
    # and times are then passed to a given +validation+ proc. Outputs
    # the benchmark name and times in tab-separated format, making it
    # easy to paste into a spreadsheet for graphing or further
    # analysis.
    #
    # Ranges are specified by ::bench_range.
    #
    # Eg:
    #
    #   def bench_algorithm
    #     validation = proc { |x, y| ... }
    #     assert_performance validation do |n|
    #       @obj.algorithm(n)
    #     end
    #   end

    def assert_performance validation, &work
      range = self.class.bench_range

      io.print "#{__name__}"

      times = []

      range.each do |x|
        GC.start
        t0 = Time.now
        instance_exec(x, &work)
        t = Time.now - t0

        io.print "\t%9.6f" % t
        times << t
      end
      io.puts

      validation[range, times]
    end

    ##
    # Runs the given +work+ and asserts that the times gathered fit to
    # match a constant rate (eg, linear slope == 0) within a given
    # +threshold+. Note: because we're testing for a slope of 0, R^2
    # is not a good determining factor for the fit, so the threshold
    # is applied against the slope itself. As such, you probably want
    # to tighten it from the default.
    #
    # See http://www.graphpad.com/curvefit/goodness_of_fit.htm for
    # more details.
    #
    # Fit is calculated by #fit_linear.
    #
    # Ranges are specified by ::bench_range.
    #
    # Eg:
    #
    #   def bench_algorithm
    #     assert_performance_constant 0.9999 do |n|
    #       @obj.algorithm(n)
    #     end
    #   end

    def assert_performance_constant threshold = 0.99, &work
      validation = proc do |range, times|
        a, b, rr = fit_linear range, times
        assert_in_delta 0, b, 1 - threshold
        [a, b, rr]
      end

      assert_performance validation, &work
    end

    ##
    # Runs the given +work+ and asserts that the times gathered fit to
    # match an exponential curve within a given error +threshold+.
    #
    # Fit is calculated by #fit_exponential.
    #
    # Ranges are specified by ::bench_range.
    #
    # Eg:
    #
    #   def bench_algorithm
    #     assert_performance_exponential 0.9999 do |n|
    #       @obj.algorithm(n)
    #     end
    #   end

    def assert_performance_exponential threshold = 0.99, &work
      assert_performance validation_for_fit(:exponential, threshold), &work
    end

    ##
    # Runs the given +work+ and asserts that the times gathered fit to
    # match a straight line within a given error +threshold+.
    #
    # Fit is calculated by #fit_linear.
    #
    # Ranges are specified by ::bench_range.
    #
    # Eg:
    #
    #   def bench_algorithm
    #     assert_performance_linear 0.9999 do |n|
    #       @obj.algorithm(n)
    #     end
    #   end

    def assert_performance_linear threshold = 0.99, &work
      assert_performance validation_for_fit(:linear, threshold), &work
    end

    ##
    # Runs the given +work+ and asserts that the times gathered curve
    # fit to match a power curve within a given error +threshold+.
    #
    # Fit is calculated by #fit_power.
    #
    # Ranges are specified by ::bench_range.
    #
    # Eg:
    #
    #   def bench_algorithm
    #     assert_performance_power 0.9999 do |x|
    #       @obj.algorithm
    #     end
    #   end

    def assert_performance_power threshold = 0.99, &work
      assert_performance validation_for_fit(:power, threshold), &work
    end

    ##
    # Takes an array of x/y pairs and calculates the general R^2 value.
    #
    # See: http://en.wikipedia.org/wiki/Coefficient_of_determination

    def fit_error xys
      y_bar  = sigma(xys) { |x, y| y } / xys.size.to_f
      ss_tot = sigma(xys) { |x, y| (y - y_bar) ** 2 }
      ss_err = sigma(xys) { |x, y| (yield(x) - y) ** 2 }

      1 - (ss_err / ss_tot)
    end

    ##
    # To fit a functional form: y = ae^(bx).
    #
    # Takes x and y values and returns [a, b, r^2].
    #
    # See: http://mathworld.wolfram.com/LeastSquaresFittingExponential.html

    def fit_exponential xs, ys
      n     = xs.size
      xys   = xs.zip(ys)
      sxlny = sigma(xys) { |x,y| x * Math.log(y) }
      slny  = sigma(xys) { |x,y| Math.log(y) }
      sx2   = sigma(xys) { |x,y| x * x }
      sx    = sigma xs

      c = n * sx2 - sx ** 2
      a = (slny * sx2 - sx * sxlny) / c
      b = ( n * sxlny - sx * slny ) / c

      return Math.exp(a), b, fit_error(xys) { |x| Math.exp(a + b * x) }
    end

    ##
    # Fits the functional form: a + bx.
    #
    # Takes x and y values and returns [a, b, r^2].
    #
    # See: http://mathworld.wolfram.com/LeastSquaresFitting.html

    def fit_linear xs, ys
      n   = xs.size
      xys = xs.zip(ys)
      sx  = sigma xs
      sy  = sigma ys
      sx2 = sigma(xs) { |x| x ** 2 }
      sxy = sigma(xys) { |x,y| x * y }

      c = n * sx2 - sx**2
      a = (sy * sx2 - sx * sxy) / c
      b = ( n * sxy - sx * sy ) / c

      return a, b, fit_error(xys) { |x| a + b * x }
    end

    ##
    # To fit a functional form: y = ax^b.
    #
    # Takes x and y values and returns [a, b, r^2].
    #
    # See: http://mathworld.wolfram.com/LeastSquaresFittingPowerLaw.html

    def fit_power xs, ys
      n       = xs.size
      xys     = xs.zip(ys)
      slnxlny = sigma(xys) { |x, y| Math.log(x) * Math.log(y) }
      slnx    = sigma(xs)  { |x   | Math.log(x) }
      slny    = sigma(ys)  { |   y| Math.log(y) }
      slnx2   = sigma(xs)  { |x   | Math.log(x) ** 2 }

      b = (n * slnxlny - slnx * slny) / (n * slnx2 - slnx ** 2)
      a = (slny - b * slnx) / n

      return Math.exp(a), b, fit_error(xys) { |x| (Math.exp(a) * (x ** b)) }
    end

    ##
    # Enumerates over +enum+ mapping +block+ if given, returning the
    # sum of the result. Eg:
    #
    #   sigma([1, 2, 3])                # => 1 + 2 + 3 => 6
    #   sigma([1, 2, 3]) { |n| n ** 2 } # => 1 + 4 + 9 => 14

    def sigma enum, &block
      enum = enum.map(&block) if block
      enum.inject { |sum, n| sum + n }
    end

    ##
    # Returns a proc that calls the specified fit method and asserts
    # that the error is within a tolerable threshold.

    def validation_for_fit msg, threshold
      proc do |range, times|
        a, b, rr = send "fit_#{msg}", range, times
        assert_operator rr, :>=, threshold
        [a, b, rr]
      end
    end
  end
end

class MiniTest::Spec
  ##
  # This is used to define a new benchmark method. You usually don't
  # use this directly and is intended for those needing to write new
  # performance curve fits (eg: you need a specific polynomial fit).
  #
  # See ::bench_performance_linear for an example of how to use this.

  def self.bench name, &block
    define_method "bench_#{name.gsub(/\W+/, '_')}", &block
  end

  ##
  # Specifies the ranges used for benchmarking for that class.
  #
  #   bench_range do
  #     bench_exp(2, 16, 2)
  #   end
  #
  # See Unit::TestCase.bench_range for more details.

  def self.bench_range &block
    return super unless block

    meta = (class << self; self; end)
    meta.send :define_method, "bench_range", &block
  end

  ##
  # Create a benchmark that verifies that the performance is linear.
  #
  #   describe "my class" do
  #     bench_performance_linear "fast_algorithm", 0.9999 do |n|
  #       @obj.fast_algorithm(n)
  #     end
  #   end

  def self.bench_performance_linear name, threshold = 0.99, &work
    bench name do
      assert_performance_linear threshold, &work
    end
  end

  ##
  # Create a benchmark that verifies that the performance is constant.
  #
  #   describe "my class" do
  #     bench_performance_constant "zoom_algorithm!" do |n|
  #       @obj.zoom_algorithm!(n)
  #     end
  #   end

  def self.bench_performance_constant name, threshold = 0.99, &work
    bench name do
      assert_performance_constant threshold, &work
    end
  end

  ##
  # Create a benchmark that verifies that the performance is exponential.
  #
  #   describe "my class" do
  #     bench_performance_exponential "algorithm" do |n|
  #       @obj.algorithm(n)
  #     end
  #   end

  def self.bench_performance_exponential name, threshold = 0.99, &work
    bench name do
      assert_performance_exponential threshold, &work
    end
  end
end