module rmathd.nbeta;

public import rmathd.common;
public import rmathd.beta;
public import rmathd.toms708;
public import rmathd.nchisq;
public import rmathd.chisq;

/*
** poisson.d gamma.d
** dmd nbeta.d common.d beta.d toms708.d nchisq.d chisq.d normal.d && ./nbeta
*/

T dnbeta(T: double)(T x, T a, T b, T ncp, int give_log)
{
    const static T eps = 1.0e-15;

    int kMax;
    T k, ncp2, dx2, d, D;
    real sum, term, p_k, q;
    mixin R_D__0!give_log;

    if (isNaN(x) || isNaN(a) || isNaN(b) || isNaN(ncp))
        return x + a + b + ncp;

    if (ncp < 0 || a <= 0 || b <= 0)
        return T.nan;

    if (!isFinite(a) || !isFinite(b) || !isFinite(ncp))
        return T.nan;

    if (x < 0 || x > 1)
        return R_D__0;
    if(ncp == 0)
        return dbeta!T(x, a, b, give_log);

    /* New algorithm, starting with *largest* term : */
    ncp2 = 0.5 * ncp;
    dx2 = ncp2*x;
    d = (dx2 - a - 1)/2;
    D = d*d + dx2 * (a + b) - a;
    if(D <= 0) {
        kMax = 0;
    } else {
        D = ceil(d + sqrt(D));
        kMax = (D > 0) ? cast(int)D : 0;
    }

    /* The starting "middle term" --- first look at its log scale: */
    term = dbeta!T(x, a + kMax, b, /* log = */ 1);
    p_k = dpois_raw!T(kMax, ncp2, 1);
    if(x == 0. || !isFinite(term) || !isFinite(cast(T)p_k)) /* if term = +Inf */
        return R_D_exp!T(cast(T)(p_k + term), give_log);

    /* Now if s_k := p_k * t_k {here = exp(p_k + term)} would underflow,
     * we should rather scale everything and re-scale at the end: */

    p_k += term; /* = log(p_k) + log(t_k) == log(s_k) -- used at end to rescale */
    /* mid = 1 = the rescaled value, instead of mid = exp(p_k); */

    /* Now sum from the inside out */
    sum = term = 1. /* = mid term */;
    /* middle to the left */
    k = kMax;
    while(k > 0 && term > sum * eps) {
        k--;
        q = /* 1 / r_k = */ (k + 1)*(k + a)/(k + a + b)/dx2;
        term *= q;
        sum += term;
    }
    /* middle to the right */
    term = 1.;
    k = kMax;
    do {
        q = /* r_{old k} = */ dx2 * (k + a + b)/(k + a)/(k + 1);
        k++;
        term *= q;
        sum += term;
    } while (term > sum * eps);

    return R_D_exp!T(cast(T)(p_k + log(sum)), give_log);
}


real pnbeta_raw(T)(T x, T o_x, T a, T b, T ncp)
{
    /* o_x == 1 - x but maybe more accurate */

    /* change errmax and itrmax if desired;
     * original (AS 226, R84) had (errmax; itrmax) = (1e-6; 100) */
    const static T errmax = 1.0e-9;
    const int itrmax = 10000; /* 100 is not enough for pf(ncp=200)
                                 see PR#11277 */

    T a0, lbeta, c, errbd, x0, temp, tmp_c;
    int ierr;

    real ans, ax, gx, q, sumq;

    if (ncp < 0. || a <= 0. || b <= 0.)
        return T.nan;

    if(x < 0. || o_x > 1. || (x == 0. && o_x == 1.))
        return 0.;
    if(x > 1. || o_x < 0. || (x == 1. && o_x == 0.))
        return 1.;

    c = ncp / 2.;

    /* initialize the series */

    x0 = floor(fmax2!T(c - 7. * sqrt(c), 0.));
    a0 = a + x0;
    lbeta = lgammafn!T(a0) + lgammafn!T(b) - lgammafn!T(a0 + b);
    /* temp = pbeta_raw(x, a0, b, TRUE, FALSE), but using (x, o_x): */
    bratio!T(a0, b, x, o_x, &temp, &tmp_c, &ierr, 0);

    gx = exp(a0 * log(x) + b * (x < .5 ? log1p(-x) : log(o_x))
             - lbeta - log(a0));
    if (a0 > a)
        q = exp(-c + x0 * log(c) - lgammafn!T(x0 + 1.));
    else
        q = exp(-c);

    sumq = 1. - q;
    ans = ax = q * temp;

    /* recurse over subsequent terms until convergence is achieved */
    T j = floor(x0); // x0 could be billions, and is in package EnvStats
    do {
        j++;
        temp -= cast(T) gx;
        gx *= x * (a + b + j - 1.) / (a + j);
        q *= c / j;
        sumq -= q;
        ax = temp * q;
        ans += ax;
        errbd = cast(T)((temp - gx) * sumq);
    } while (errbd > errmax && j < itrmax + x0);

    if (errbd > errmax){
        //ML_ERROR(ME_PRECISION, "pnbeta");
        doNothing();
    }
    if (j >= itrmax + x0){
        //ML_ERROR(ME_NOCONV, "pnbeta");
        doNothing();
    }

    return ans;
}

T pnbeta2(T)(T x, T o_x, T a, T b, T ncp,
             /* o_x == 1 - x but maybe more accurate */
             int lower_tail, int log_p)
{
    real ans = pnbeta_raw!T(x, o_x, a, b, ncp);

    /* return R_DT_val(ans), but we want to warn about cancellation here */
    if (lower_tail)
        return cast(T)(log_p ? log(ans) : ans);
    else {
        if (ans > 1. - 1e-10){
            //ML_ERROR(ME_PRECISION, "pnbeta");
            doNothing();
        }
        if (ans > 1.0)
            ans = 1.0; /* Precaution */
        return cast(T) (log_p ? log1p(-ans) : (1. - ans));
    }
}

T pnbeta(T: double)(T x, T a, T b, T ncp, int lower_tail, int log_p)
{
    if (isNaN(x) || isNaN(a) || isNaN(b) || isNaN(ncp))
        return x + a + b + ncp;

    mixin (R_P_bounds_01!(x, 0., 1.));
    return pnbeta2!T(x, 1 - x, a, b, ncp, lower_tail, log_p);
}


T qnbeta(T: double)(T p, T a, T b, T ncp, int lower_tail, int log_p)
{
    const static T accu = 1e-15;
    const static T Eps = 1e-14; /* must be > accu */

    T ux, lx, nx, pp;

    if (isNaN(p) || isNaN(a) || isNaN(b) || isNaN(ncp))
        return p + a + b + ncp;

    if (!isFinite(a))
        return T.nan;

    if (ncp < 0. || a <= 0. || b <= 0.)
        return T.nan;

    mixin (R_Q_P01_boundaries!(p, 0, 1));
    mixin R_DT_qIv!(p);

    p = R_DT_qIv;

    /* Invert pnbeta(.) :
     * 1. finding an upper and lower bound */
    if(p > 1 - DBL_EPSILON)
        return 1.0;
    pp = fmin2!T(1 - DBL_EPSILON, p * (1 + Eps));
    for(ux = 0.5; ux < 1 - DBL_EPSILON && pnbeta!T(ux, a, b, ncp, 1, 0) < pp; ux = 0.5*(1 + ux)){}
    pp = p * (1 - Eps);
    for(lx = 0.5; lx > DBL_MIN && pnbeta!T(lx, a, b, ncp, 1, 0) > pp; lx *= 0.5){}

    /* 2. interval (lx,ux) halving : */
    do {
        nx = 0.5 * (lx + ux);
        if (pnbeta!T(nx, a, b, ncp, 1, 0) > p)
            ux = nx;
        else
            lx = nx;
    } while ((ux - lx) / nx > accu);

    return 0.5*(ux + lx);
}


T rnbeta(T: double)(T a, T b, T ncp)
{
    T X = rnchisq!T(2*a, ncp);
    return X/(X + rchisq(2*b));
}


void test_nbeta()
{
    import std.stdio: writeln;
    writeln("dnbeta: ", dnbeta(0.7, 6., 8., 20., 1));
    writeln("qnbeta: ", qnbeta(0.7, 6., 8., 20., 1, 0));
    writeln("pnbeta: ", pnbeta(0.7, 6., 8., 20., 1, 0));
    writeln("rnbeta: ", rnbeta(6., 8., 20.), ", rnbeta: ", rnbeta(6., 8., 20.), ", rnbeta: ", rnbeta(6., 8., 20.));
}
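

/* A minimal consistency sketch, not part of the original module: it assumes
 * the module builds as in the comment at the top of this file, and the
 * parameter values and tolerance below are illustrative only. It checks that
 * pnbeta returns a probability in (0, 1) and that qnbeta approximately
 * inverts pnbeta on the lower tail. */
unittest
{
    import std.math: fabs;

    // CDF value on the lower (non-log) scale should lie strictly in (0, 1).
    double p = pnbeta(0.7, 6., 8., 20., 1, 0);
    assert(p > 0. && p < 1.);

    // Round trip: qnbeta(pnbeta(x)) should recover x up to the combined
    // tolerance of the series (errmax = 1e-9) and the bisection (accu = 1e-15).
    double x = qnbeta(p, 6., 8., 20., 1, 0);
    assert(fabs(x - 0.7) < 1e-6);
}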