squiggle.c

Self-contained Monte Carlo estimation in C99

commit 66d33c0fb75ce010c5bc54b9f949f5faadda8ab0
parent 5f6cc0fe4f9e57744970f9b5e3bd9d78b28ef91d
Author: NunoSempere <nuno.sempere@protonmail.com>
Date:   Mon, 27 Nov 2023 14:13:50 +0000

generalize parallelism to an arbitrary number of samples, fix off-by-one error

Diffstat:
M examples/more/00_example_template/example | 0
M examples/more/01_sample_from_cdf/example | 0
M examples/more/02_sample_from_cdf_beta/example | 0
M examples/more/03_ci_beta/example | 0
M examples/more/04_nuclear_war/example | 0
M examples/more/05_burn_10kg_fat/example | 0
M examples/more/06_nuclear_recovery/example | 0
M examples/more/07_algebra/example | 0
M examples/more/08_algebra_and_conversion/example | 0
M examples/more/09_ergonomic_algebra/example | 0
M examples/more/10_twitter_thread_example/example | 0
M examples/more/11_billion_lognormals_paralell/example | 0
M examples/more/12_time_to_botec_parallel/example | 0
A examples/more/13_parallelize_min/example | 0
A examples/more/13_parallelize_min/example.c | 67 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
M examples/more/makefile | 3 +++
M scratchpad/makefile | 2 +-
M scratchpad/scratchpad | 0
M scratchpad/scratchpad.c | 14 +++++++++++++-
M squiggle_more.c | 33 ++++++++++++++++++++++++++-------
20 files changed, 110 insertions(+), 9 deletions(-)

diff --git a/examples/more/00_example_template/example b/examples/more/00_example_template/example
Binary files differ.
diff --git a/examples/more/01_sample_from_cdf/example b/examples/more/01_sample_from_cdf/example
Binary files differ.
diff --git a/examples/more/02_sample_from_cdf_beta/example b/examples/more/02_sample_from_cdf_beta/example
Binary files differ.
diff --git a/examples/more/03_ci_beta/example b/examples/more/03_ci_beta/example
Binary files differ.
diff --git a/examples/more/04_nuclear_war/example b/examples/more/04_nuclear_war/example
Binary files differ.
diff --git a/examples/more/05_burn_10kg_fat/example b/examples/more/05_burn_10kg_fat/example
Binary files differ.
diff --git a/examples/more/06_nuclear_recovery/example b/examples/more/06_nuclear_recovery/example
Binary files differ.
diff --git a/examples/more/07_algebra/example b/examples/more/07_algebra/example
Binary files differ.
diff --git a/examples/more/08_algebra_and_conversion/example b/examples/more/08_algebra_and_conversion/example
Binary files differ.
diff --git a/examples/more/09_ergonomic_algebra/example b/examples/more/09_ergonomic_algebra/example
Binary files differ.
diff --git a/examples/more/10_twitter_thread_example/example b/examples/more/10_twitter_thread_example/example
Binary files differ.
diff --git a/examples/more/11_billion_lognormals_paralell/example b/examples/more/11_billion_lognormals_paralell/example
Binary files differ.
diff --git a/examples/more/12_time_to_botec_parallel/example b/examples/more/12_time_to_botec_parallel/example
Binary files differ.
diff --git a/examples/more/13_parallelize_min/example b/examples/more/13_parallelize_min/example
Binary files differ.
diff --git a/examples/more/13_parallelize_min/example.c b/examples/more/13_parallelize_min/example.c
@@ -0,0 +1,67 @@
+#include "../../../squiggle.h"
+#include "../../../squiggle_more.h"
+#include <stdio.h>
+#include <stdlib.h>
+
+int main()
+{
+    /* Question: can we parallelize this?
+         A = normal(5,2)
+         B = min(A)
+         B * 20
+    */
+
+    /* Option 1: parallelize taking from n samples */
+    // Question being asked: what is the distribution of sampling 1000 times and taking the min?
+    double sample_min_of_n(uint64_t* seed, int n){
+        double min = sample_normal(5, 2, seed);
+        for(int i=0; i<(n-2); i++){
+            double sample = sample_normal(5, 2, seed);
+            if(sample < min){
+                min = sample;
+            }
+        }
+        return min;
+    }
+    double sampler_min_of_1000(uint64_t* seed) {
+        return sample_min_of_n(seed, 1000);
+    }
+
+    int n_samples = 1000000, n_threads = 16;
+    double* results = malloc(n_samples * sizeof(double));
+    parallel_sampler(sampler_min_of_1000, results, n_threads, n_samples);
+    printf("Mean of the distribution of (taking the min of 1000 samples of a normal(5,2)): %f\n", array_mean(results, n_samples));
+    free(results);
+
+    /* Option 2: take the min from n samples cleverly using parallelism */
+    // Question being asked: can we take the min of n samples cleverly?
+    double sample_n_parallel(int n){
+
+        int n_threads = 16;
+        int quotient = n / 16;
+        int remainder = n % 16;
+
+        uint64_t seed = 1000;
+        double result_remainder = sample_min_of_n(&seed, remainder);
+
+        double sample_min_of_quotient(uint64_t* seed) {
+            return sample_min_of_n(seed, quotient);
+        }
+        double* results_quotient = malloc(quotient * sizeof(double));
+        parallel_sampler(sample_min_of_quotient, results_quotient, n_threads, quotient);
+
+        double min = results_quotient[0];
+        for(int i=1; i<quotient; i++){
+            if(min > results_quotient[i]){
+                min = results_quotient[i];
+            }
+        }
+        if(min > result_remainder){
+            min = result_remainder;
+        }
+        free(results_quotient);
+        return min;
+    }
+    printf("Minimum of 1M samples of normal(5,2): %f\n", sample_n_parallel(1000000));
+
+}
diff --git a/examples/more/makefile b/examples/more/makefile
@@ -48,6 +48,7 @@ all:
 	$(CC) $(OPTIMIZED) $(DEBUG) 10_twitter_thread_example/$(SRC) $(DEPS) -o 10_twitter_thread_example/$(OUTPUT)
 	$(CC) $(OPTIMIZED) $(DEBUG) 11_billion_lognormals_paralell/$(SRC) $(DEPS) -o 11_billion_lognormals_paralell/$(OUTPUT)
 	$(CC) $(OPTIMIZED) $(DEBUG) 12_time_to_botec_parallel/$(SRC) $(DEPS) -o 12_time_to_botec_parallel/$(OUTPUT)
+	$(CC) $(OPTIMIZED) $(DEBUG) 13_parallelize_min/$(SRC) $(DEPS) -o 13_parallelize_min/$(OUTPUT)
 
 format-all:
 	$(FORMATTER) 00_example_template/$(SRC)
@@ -63,6 +64,7 @@ format-all:
 	$(FORMATTER) 10_twitter_thread_example/$(SRC)
 	$(FORMATTER) 11_billion_lognormals_paralell/$(SRC)
 	$(FORMATTER) 12_time_to_botec_parallel/$(SRC)
+	$(FORMATTER) 13_parallelize_min/$(SRC)
 
 run-all:
 	00_example_template/$(OUTPUT)
@@ -78,6 +80,7 @@ run-all:
 	10_twitter_thread_example/$(OUTPUT)
 	11_billion_lognormals_paralell/$(OUTPUT)
 	12_time_to_botec_parallel/$(OUTPUT)
+	13_parallelize_min/$(OUTPUT)
 
 ## make one DIR=06_nuclear_recovery
 one: $(DIR)/$(SRC)
diff --git a/scratchpad/makefile b/scratchpad/makefile
@@ -9,7 +9,7 @@ CC=gcc
 # CC=tcc # <= faster compilation
 
 # Main file
-SRC=scratchpad.c ../squiggle.c
+SRC=scratchpad.c ../squiggle.c ../squiggle_more.c
 OUTPUT=scratchpad
 
 ## Dependencies
diff --git a/scratchpad/scratchpad b/scratchpad/scratchpad
Binary files differ.
diff --git a/scratchpad/scratchpad.c b/scratchpad/scratchpad.c
@@ -1,4 +1,5 @@
 #include "../squiggle.h"
+#include "../squiggle_more.h"
 #include <math.h>
 #include <stdint.h>
 #include <stdio.h>
@@ -16,7 +17,18 @@ int main()
     }*/
 
     // Test division
-    printf("\n%d\n", 10 % 3);
+    // printf("\n%d\n", 10 % 3);
+    //
+
+    int n_samples = 100, n_threads = 16;
+    double* results = malloc(n_samples * sizeof(double));
+    double sampler_scratchpad(uint64_t* seed){
+        return 1;
+    }
+    parallel_sampler(sampler_scratchpad, results, n_threads, n_samples);
+    for(int i=0; i<n_samples; i++){
+        printf("Sample %d: %f\n", i, results[i]);
+    }
 
     free(seed);
 }
diff --git a/squiggle_more.c b/squiggle_more.c
@@ -306,10 +306,25 @@ ci convert_lognormal_params_to_ci(lognormal_params y)
 
 /* Parallel sampler */
 void parallel_sampler(double (*sampler)(uint64_t* seed), double* results, int n_threads, int n_samples){
-    if((n_samples % n_threads) != 0){
-        fprintf(stderr, "Number of samples isn't divisible by number of threads, aborting\n");
-        exit(1);
-    }
+
+    // Division terminology:
+    // a = b * quotient + reminder
+    // a = (a/b)*b + (a%b)
+    // dividend: a
+    // divisor: b
+    // quotient = a / b
+    // reminder = a % b
+    // "divisor's multiple" := (a/b)*b
+
+    // now, we have n_samples and n_threads
+    // to make our life easy, each thread will have a number of samples of: a/b (quotient)
+    // and we'll compute the remainder of samples separately
+    // to possibly do by Jorge: improve so that the remainder is included in the threads
+
+    int quotient = n_samples / n_threads;
+    int remainder = n_samples % n_threads;
+    int divisor_multiple = quotient * n_threads;
+
     uint64_t** seeds = malloc(n_threads * sizeof(uint64_t*));
     for (uint64_t i = 0; i < n_threads; i++) {
         seeds[i] = malloc(sizeof(uint64_t));
@@ -321,14 +336,18 @@ void parallel_sampler(double (*sampler)(uint64_t* seed), double* results, int n_
     {
     #pragma omp for
         for (i = 0; i < n_threads; i++) {
-            int lower_bound = i * (n_samples / n_threads);
-            int upper_bound = ((i+1) * (n_samples / n_threads)) - 1;
+            int lower_bound_inclusive = i * quotient;
+            int upper_bound_not_inclusive = ((i+1) * quotient); // note the < in the for loop below,
             // printf("Lower bound: %d, upper bound: %d\n", lower_bound, upper_bound);
-            for (int j = lower_bound; j < upper_bound; j++) {
+            for (int j = lower_bound_inclusive; j < upper_bound_not_inclusive; j++) {
                 results[j] = sampler(seeds[i]);
             }
         }
     }
+    for(int j=divisor_multiple; j<n_samples; j++){
+        results[j] = sampler(seeds[0]);
+        // we can just reuse a seed, this isn't problematic because we are not doing multithreading
+    }
 
     for (uint64_t i = 0; i < n_threads; i++) {
         free(seeds[i]);
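
The heart of the squiggle_more.c change is the index bookkeeping: each thread fills one block of quotient results, and a short serial loop fills the remainder left over at the end. A minimal standalone sketch of just that arithmetic follows (no OpenMP, no sampling; the values of n_samples and n_threads and the helper names hits and covered_once are arbitrary illustrative choices, not part of the library). It checks that every index of the results array would now be written exactly once, which is also where the off-by-one fix from the commit message shows up: the upper bound is half-open instead of inclusive-minus-one.

#include <stdio.h>
#include <stdlib.h>

int main()
{
    // Arbitrary illustrative values; n_samples no longer needs to be divisible by n_threads
    int n_samples = 1000003, n_threads = 16;

    int quotient = n_samples / n_threads;        // samples handled per thread
    int remainder = n_samples % n_threads;       // samples left over
    int divisor_multiple = quotient * n_threads; // samples covered by the threaded loops

    // Count how many times each results[] index would be written
    int* hits = calloc((size_t)n_samples, sizeof(int));

    // What the omp for loop does: one half-open block [i*quotient, (i+1)*quotient) per thread.
    // The old code paired an inclusive upper bound minus one with <, which skipped the last
    // index of every block; the half-open bound is the off-by-one fix.
    for (int i = 0; i < n_threads; i++) {
        int lower_bound_inclusive = i * quotient;
        int upper_bound_not_inclusive = (i + 1) * quotient;
        for (int j = lower_bound_inclusive; j < upper_bound_not_inclusive; j++) {
            hits[j]++;
        }
    }

    // The serial cleanup loop for the remainder, as in parallel_sampler
    for (int j = divisor_multiple; j < n_samples; j++) {
        hits[j]++;
    }

    int covered_once = 1;
    for (int j = 0; j < n_samples; j++) {
        if (hits[j] != 1) { covered_once = 0; break; }
    }
    printf("quotient: %d, remainder: %d, every index written exactly once: %s\n",
        quotient, remainder, covered_once ? "yes" : "no");

    free(hits);
    return 0;
}

With the illustrative values above this should print quotient: 62500, remainder: 3, every index written exactly once: yes.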
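
Option 2 in example.c leans on the fact that a minimum can be taken blockwise: the minimum over all n values equals the minimum of the per-block minima together with the minimum of the leftover tail. A second standalone sketch of that identity, with arbitrary deterministic values standing in for normal samples (xs, n_chunks and blockwise_min are illustrative names, not part of squiggle.h):

#include <stdio.h>
#include <stdlib.h>

int main()
{
    // Arbitrary deterministic stand-ins for samples
    int n = 1000003, n_chunks = 16;
    double* xs = malloc((size_t)n * sizeof(double));
    for (int i = 0; i < n; i++) {
        xs[i] = (double)((unsigned)i * 2654435761u % 1000000u) / 1000.0;
    }

    // Serial reference: min over the whole array
    double serial_min = xs[0];
    for (int i = 1; i < n; i++) {
        if (xs[i] < serial_min) serial_min = xs[i];
    }

    // Blockwise reduction, mirroring Option 2: per-chunk minima, then the min of those
    // plus the min of the leftover tail (here we partition one array; in example.c each
    // block of samples is drawn by parallel_sampler instead)
    int quotient = n / n_chunks;
    int divisor_multiple = quotient * n_chunks;
    double blockwise_min = xs[0];
    for (int c = 0; c < n_chunks; c++) {
        double chunk_min = xs[c * quotient];
        for (int j = c * quotient + 1; j < (c + 1) * quotient; j++) {
            if (xs[j] < chunk_min) chunk_min = xs[j];
        }
        if (chunk_min < blockwise_min) blockwise_min = chunk_min;
    }
    for (int j = divisor_multiple; j < n; j++) {
        if (xs[j] < blockwise_min) blockwise_min = xs[j];
    }

    printf("serial min: %f, blockwise min: %f, equal: %s\n",
        serial_min, blockwise_min, serial_min == blockwise_min ? "yes" : "no");

    free(xs);
    return 0;
}

In the real example the blocks are drawn in parallel with independent seeds, so the parallel and serial minima agree statistically rather than bit for bit; the identity above is what makes the blockwise reduction valid.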