ssumpw.c (3276B)
/**
 * @license Apache-2.0
 *
 * Copyright (c) 2020 The Stdlib Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "stdlib/blas/ext/base/ssumpw.h"
#include <stdint.h>

/**
 * Computes the sum of single-precision floating-point strided array elements using pairwise summation.
 *
 * ## Method
 *
 * -   This implementation uses pairwise summation, which accrues rounding error `O(log2 N)` instead of `O(N)`. The recursion depth is also `O(log2 N)`.
 *
 * ## References
 *
 * -   Higham, Nicholas J. 1993. "The Accuracy of Floating Point Summation." _SIAM Journal on Scientific Computing_ 14 (4): 783-99. doi:[10.1137/0914050](https://doi.org/10.1137/0914050).
 *
 * @param N       number of indexed elements
 * @param X       input array
 * @param stride  stride length
 * @return        output value
 */
float stdlib_strided_ssumpw( const int64_t N, const float *X, const int64_t stride ) {
    float *xp1;
    float *xp2;
    int64_t ix;
    int64_t M;
    int64_t n;
    int64_t i;
    float sum;
    float s0;
    float s1;
    float s2;
    float s3;
    float s4;
    float s5;
    float s6;
    float s7;

    if ( N <= 0 ) {
        return 0.0f;
    }
    if ( N == 1 || stride == 0 ) {
        return X[ 0 ];
    }
    if ( stride < 0 ) {
        ix = (1-N) * stride;
    } else {
        ix = 0;
    }
    if ( N < 8 ) {
        // Use simple summation...
        sum = 0.0f;
        for ( i = 0; i < N; i++ ) {
            sum += X[ ix ];
            ix += stride;
        }
        return sum;
    }
    // Blocksize for pairwise summation: 128 (NOTE: decreasing the blocksize decreases rounding error as more pairs are summed, but also decreases performance. Because the inner loop is unrolled eight times, the blocksize is effectively `16`.)
    if ( N <= 128 ) {
        // Sum a block with 8 accumulators (by loop unrolling, we lower the effective blocksize to 16)...
        s0 = X[ ix ];
        s1 = X[ ix+stride ];
        s2 = X[ ix+(2*stride) ];
        s3 = X[ ix+(3*stride) ];
        s4 = X[ ix+(4*stride) ];
        s5 = X[ ix+(5*stride) ];
        s6 = X[ ix+(6*stride) ];
        s7 = X[ ix+(7*stride) ];
        ix += 8 * stride;

        M = N % 8;
        for ( i = 8; i < N-M; i += 8 ) {
            s0 += X[ ix ];
            s1 += X[ ix+stride ];
            s2 += X[ ix+(2*stride) ];
            s3 += X[ ix+(3*stride) ];
            s4 += X[ ix+(4*stride) ];
            s5 += X[ ix+(5*stride) ];
            s6 += X[ ix+(6*stride) ];
            s7 += X[ ix+(7*stride) ];
            ix += 8 * stride;
        }
        // Pairwise sum the accumulators:
        sum = ((s0+s1) + (s2+s3)) + ((s4+s5) + (s6+s7));

        // Clean-up loop...
        for (; i < N; i++ ) {
            sum += X[ ix ];
            ix += stride;
        }
        return sum;
    }
    // Recurse by dividing by two, but avoiding non-multiples of unroll factor...
    n = N / 2;
    n -= n % 8;
    if ( stride < 0 ) {
        xp1 = (float *)X + ( (n-N)*stride );
        xp2 = (float *)X;
    } else {
        xp1 = (float *)X;
        xp2 = (float *)X + ( n*stride );
    }
    return stdlib_strided_ssumpw( n, xp1, stride ) + stdlib_strided_ssumpw( N-n, xp2, stride );
}
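
A minimal usage sketch follows. It is not part of ssumpw.c: the `main` driver and the example data are illustrative assumptions, and it presumes the stdlib headers and the compiled routine are available on the include and link paths.

#include "stdlib/blas/ext/base/ssumpw.h"
#include <stdint.h>
#include <stdio.h>

int main( void ) {
    // Hypothetical example data; any single-precision strided array works:
    const float x[] = { 1.0f, -2.0f, 3.0f, -4.0f, 5.0f, -6.0f, 7.0f, 8.0f };

    // Sum every other element (N = 4, stride = 2): 1 + 3 + 5 + 7 = 16.
    float v = stdlib_strided_ssumpw( 4, x, 2 );
    printf( "sum = %f\n", v );

    return 0;
}

A negative stride walks the array from the end toward the beginning; the routine offsets the starting index internally, so `X` should still point to the first element of the underlying buffer.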