time-to-botec

Benchmark sampling in different programming languages

commit 1ebc3ce7b9f0373a56387f730fb7147a8e8616aa
parent 6454b2eeabdf28d85fc67b459e92f85297abfc6a
Author: NunoSempere <nuno.sempere@protonmail.com>
Date:   Sun, 21 May 2023 01:29:57 -0400

move hardcore defs to a different folder, use stdlib math

Makes this way faster. Don't roll your own stdlib, Nuño
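For context, a minimal, self-contained sketch of what the sampling code looks like after this change, pieced together from the "+" lines in the diff below. The bodies of normal and lognormal are not shown in the hunks, so they are filled in here the standard way (shift-and-scale of a unit normal, and exponentiation of a normal draw) as an assumption.

import std/[math, random]

randomize()

# Unit normal via the Box-Muller transform, now using stdlib ln/sin
# instead of the hand-rolled log/sine (the slow part).
proc ur_normal(): float =
  let u1 = rand(1.0)
  let u2 = rand(1.0)
  return sqrt(-2.0 * ln(u1)) * sin(2.0 * PI * u2)

# Assumed standard body: shift and scale the unit normal.
proc normal(mean: float, sigma: float): float =
  return mean + sigma * ur_normal()

# Assumed standard body: exponentiate a normal draw.
proc lognormal(logmean: float, logsigma: float): float =
  return exp(normal(logmean, logsigma))

# Interpret (low, high) as a 90% confidence interval of a lognormal
# (taken from the diff, modulo the log -> ln rename).
proc to(low: float, high: float): float =
  let normal95confidencePoint = 1.6448536269514722
  let loglow = ln(low)
  let loghigh = ln(high)
  let logmean = (loglow + loghigh) / 2.0
  let logsigma = (loghigh - loglow) / (2.0 * normal95confidencePoint)
  return lognormal(logmean, logsigma)

echo to(1.0, 10.0)  # one sample from a lognormal with 90% CI [1, 10]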

Diffstat:
A wip/nim/hardcore/README.md | 4 ++++
A wip/nim/hardcore/makefile | 11 +++++++++++
C wip/nim/samples -> wip/nim/hardcore/samples | 0
C wip/nim/samples.nim -> wip/nim/hardcore/samples.nim | 0
M wip/nim/samples | 0
M wip/nim/samples.nim | 62 +++-----------------------------------------------------------
6 files changed, 18 insertions(+), 59 deletions(-)

diff --git a/wip/nim/hardcore/README.md b/wip/nim/hardcore/README.md
@@ -0,0 +1,4 @@
+This is a version of time-to-botec for nim for which I define logarithms, normals & lognormals pretty much
+from scratch, without relying on libraries except for random numbers, sqrts, or taking a to the power of b.
+
+This is interesting because it leads to better understanding. But it doesn't help compare the efficiency of various languages. So I'm setting it aside for now, even though I think it was really interesting.
diff --git a/wip/nim/hardcore/makefile b/wip/nim/hardcore/makefile
@@ -0,0 +1,11 @@
+SHELL := /bin/bash
+
+build: samples.nim
+	nim c --verbosity:0 samples.nim
+
+run: samples
+	./samples --verbosity:0
+
+examine: samples
+	# nim c --verbosity:0 --opt:speed -d:release -d:danger --checks:off samples.nim && time ./samples --verbosity:0 --checks:off
+	nim c -d:release samples.nim && time ./samples
diff --git a/wip/nim/samples b/wip/nim/hardcore/samples
Binary files differ.
diff --git a/wip/nim/samples.nim b/wip/nim/hardcore/samples.nim
diff --git a/wip/nim/samples b/wip/nim/samples
Binary files differ.
diff --git a/wip/nim/samples.nim b/wip/nim/samples.nim
@@ -5,62 +5,6 @@ import std/sequtils
 randomize()
 
-## Basic math functions
-proc factorial(n: int): int =
-  if n == 0 or n < 0:
-    return 1
-  else:
-    return n * factorial(n - 1)
-
-proc sine(x: float): float =
-  let n = 8
-  # ^ Taylor will converge really quickly
-  # notice that the factorial of 17 is
-  # already pretty gigantic
-  var acc = 0.0
-  for i in 0..n:
-    var k = 2*i + 1
-    var taylor = pow(-1, i.float) * pow(x, k.float) / factorial(k).float
-    acc = acc + taylor
-  return acc
-
-## Log function
-## <https://en.wikipedia.org/wiki/Natural_logarithm#High_precision>
-
-## Arithmetic-geomtric mean
-proc ag(x: float, y: float): float =
-  let n = 16 # just some high number
-  var a = (x + y)/2.0
-  var b = sqrt(x * y)
-  for i in 0..n:
-    let temp = a
-    a = (a+b)/2.0
-    b = sqrt(b*temp)
-  return a
-
-## Find m such that x * 2^m > 2^precision/2
-proc find_m(x:float): float =
-  var m = 0.0;
-  let precision = 32 # bits
-  let c = pow(2.0, precision.float / 2.0)
-  while x * pow(2.0, m) < c:
-    m = m + 1
-  return m
-
-proc log(x: float): float =
-  let m = find_m(x)
-  let s = x * pow(2.0, m)
-  let ln2 = 0.6931471805599453
-  return ( PI / (2.0 * ag(1, 4.0/s)) ) - m * ln2
-
-## Test these functions
-## echo factorial(5)
-## echo sine(1.0)
-## echo log(0.1)
-## echo log(2.0)
-## echo log(3.0)
-## echo pow(2.0, 32.float)
-
 ## Distribution functions
 ## Normal
@@ -68,7 +12,7 @@ proc log(x: float): float =
 proc ur_normal(): float =
   let u1 = rand(1.0)
   let u2 = rand(1.0)
-  let z = sqrt(-2.0 * log(u1)) * sine(2 * PI * u2)
+  let z = sqrt(-2.0 * ln(u1)) * sin(2 * PI * u2)
   return z
 
 proc normal(mean: float, sigma: float): float =
@@ -80,8 +24,8 @@ proc lognormal(logmean: float, logsigma: float): float =
 proc to(low: float, high: float): float =
   let normal95confidencePoint = 1.6448536269514722
-  let loglow = log(low)
-  let loghigh = log(high)
+  let loglow = ln(low)
+  let loghigh = ln(high)
   let logmean = (loglow + loghigh)/2
   let logsigma = (loghigh - loglow) / (2.0 * normal95confidencePoint);
   return lognormal(logmean, logsigma)
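For readers curious about the removed "hardcore" code, its natural log follows the AGM-based high-precision formula from the linked Wikipedia article: ln(x) ~ PI / (2 * M(1, 4/s)) - m * ln(2), with s = x * 2^m pushed above 2^(p/2) for p bits of precision. Below is a rough standalone sketch of that technique, not the exact file that was moved to wip/nim/hardcore/; the name agmLog is chosen here to avoid clashing with std/math (the moved file just calls it log), and the iteration count and precision are illustrative.

import std/math

# Arithmetic-geometric mean M(x, y): repeatedly replace the pair with its
# arithmetic and geometric means; 16 iterations is plenty for doubles.
proc ag(x: float, y: float): float =
  var a = (x + y) / 2.0
  var b = sqrt(x * y)
  for i in 0 .. 16:
    let temp = a
    a = (a + b) / 2.0
    b = sqrt(b * temp)
  return a

# High-precision natural log via the AGM:
#   ln(x) ~ PI / (2 * M(1, 4/s)) - m * ln(2),  with s = x * 2^m > 2^(p/2)
# Here p = 32 bits, as in the removed code.
proc agmLog(x: float): float =
  let c = pow(2.0, 16.0)   # 2^(p/2) with p = 32
  var m = 0.0
  while x * pow(2.0, m) < c:
    m = m + 1.0
  let s = x * pow(2.0, m)
  let ln2 = 0.6931471805599453
  return PI / (2.0 * ag(1.0, 4.0 / s)) - m * ln2

# Compare against the stdlib ln that this commit switches to.
echo agmLog(2.0), " vs ", ln(2.0)
echo agmLog(0.1), " vs ", ln(0.1)

As the commit message says, the stdlib versions are much faster; the hand-rolled ones were useful for understanding but not for comparing languages, hence the move into wip/nim/hardcore/.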