cloudy trunk
grains_qheat.cpp
Go to the documentation of this file.
1/* This file is part of Cloudy and is copyright (C)1978-2013 by Gary J. Ferland and
2 * others. For conditions of distribution and use see copyright notice in license.txt */
3/*GrainMakeDiffuse main routine for generating the grain diffuse emission, called by RT_diffuse */
4#include "cddefines.h"
5#include "physconst.h"
6#include "rfield.h"
7#include "phycon.h"
8#include "dense.h"
9#include "hmi.h"
10#include "thermal.h"
11#include "trace.h"
12#include "thirdparty.h"
13#include "iterations.h"
14#include "grainvar.h"
15#include "grains.h"
16
17inline double no_atoms(size_t nd)
18{
19 return gv.bin[nd]->AvVol*gv.bin[nd]->dustp[0]/ATOMIC_MASS_UNIT/gv.bin[nd]->atomWeight;
20}
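
A standalone sketch (not part of this file) of the arithmetic behind no_atoms(): the number of atoms per grain is the average grain volume times the bulk density, divided by the atomic mass unit times the mean atomic weight. The grain radius below is an illustrative value only; the silicate constants match DEN_SIL and MW_SIL defined further down.

    #include <cstdio>

    int main()
    {
        /* hypothetical 0.1 micron silicate grain */
        const double AMU = 1.66053e-24;                /* atomic mass unit in g */
        const double a = 1.e-5;                        /* grain radius in cm */
        const double AvVol = 4./3.*3.14159265*a*a*a;   /* grain volume in cm^3 */
        const double rho = 3.30, mw = 24.6051;         /* bulk density, mean atomic weight */
        std::printf( "atoms per grain: %.3e\n", AvVol*rho/AMU/mw ); /* ~3.4e8 */
        return 0;
    }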
21
22/* NB NB -- the sequence below has been carefully chosen and should NEVER be
23 * altered unless you really know what you are doing !! */
24/* >>chng 03 jan 16, introduced QH_THIGH_FAIL and started using splint_safe and spldrv_safe
25 * throughout the code; this solves the parispn_a031_sil.in bug, PvH */
26/* >>chng 03 jan 16, rescheduled QH_STEP_FAIL as non-fatal; it can be triggered at very low temps, PvH */
27typedef enum {
28 /* the following are OK */
29 /* 0 1 2 3 */
30 QH_OK, QH_ANALYTIC, QH_ANALYTIC_RELAX, QH_DELTA,
31 /* the following are mild errors we already recovered from */
32 /* 4 5 6 7 */
33 QH_NEGRATE_FAIL, QH_LOOP_FAIL, QH_ARRAY_FAIL, QH_THIGH_FAIL,
34 /* any of these errors will prompt qheat to do another iteration */
35 /* 8 9 10 11 */
36 QH_RETRY, QH_CONV_FAIL, QH_BOUND_FAIL, QH_DELTA_FAIL,
37 /* any error larger than QH_NO_REBIN will cause GetProbDistr_LowLimit to return
38 * before even attempting to rebin the results; we may be able to recover though... */
39 /* 12 13 14 15 */
40 QH_NO_REBIN, QH_LOW_FAIL, QH_HIGH_FAIL, QH_STEP_FAIL,
41 /* any case larger than QH_FATAL is truly pathological; there is no hope of recovery */
42 /* 16 17 18 19 */
43 QH_FATAL, QH_WIDE_FAIL, QH_NBIN_FAIL, QH_NEGRATE_FATAL
44} QH_Code;
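
A minimal sketch (not part of this file; the helper names are hypothetical) of how the severity ordering above is used further down in qheat() and GetProbDistr_LowLimit(): only the position of a code relative to the cut points QH_RETRY, QH_NO_REBIN and QH_FATAL matters, which is why the sequence must never be reordered.

    /* a usable result: qheat() sets lgOK = ( ErrorCode < QH_RETRY ) */
    inline bool lgResultOK(QH_Code code) { return code < QH_RETRY; }
    /* these codes make qheat() try another iteration with a new qtmin estimate */
    inline bool lgNeedsRetry(QH_Code code) { return code >= QH_RETRY && code < QH_FATAL; }
    /* these codes make GetProbDistr_LowLimit() return before attempting to rebin */
    inline bool lgSkipRebin(QH_Code code) { return code >= QH_NO_REBIN; }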
45
46/*================================================================================*/
47/* definitions relating to quantum heating routines */
48
49/* this is the minimum number of bins for quantum heating calculation to be valid */
50static const long NQMIN = 10L;
51
52/* this is the lowest value for dPdlnT that should be included in the modeling */
53static const double PROB_CUTOFF_LO = 1.e-15;
54static const double PROB_CUTOFF_HI = 1.e-20;
55static const double SAFETY = 1.e+8;
56
57/* during the first NSTARTUP steps, use special algorithm to calculate stepsize */
58static const long NSTARTUP = 5L;
59
60/* if the average number of multiple events is above this number
61 * don't try full quantum heating treatment. */
62static const double MAX_EVENTS = 150.;
63
64/* maximum number of tries for quantum heating routine */
65/* >>chng 02 jan 30, changed LOOP_MAX from 10 -> 20, PvH */
66static const long LOOP_MAX = 20L;
67
68/* if all else fails, divide temp estimate by this factor */
69static const double DEF_FAC = 3.;
70
71/* total probability for all bins should not deviate more than this from 1. */
72static const double PROB_TOL = 0.02;
73
74/* after NQTEST steps, make an estimate if prob distr will converge in NQGRID steps */
75/* >>chng 02 jan 30, change NQTEST from 1000 -> 500, PvH */
76static const long NQTEST = 500L;
77
78/* if the ratio fwhm/Umax is lower than this number
79 * don't try full quantum heating treatment. */
80static const double FWHM_RATIO = 0.1;
81/* if the ratio fwhm/Umax is lower than this number
82 * don't even try analytic approximation, use delta function instead */
83static const double FWHM_RATIO2 = 0.007;
84
85/* maximum number of steps for integrating quantum heating probability distribution */
86static const long MAX_LOOP = 2*NQGRID;
87
88/* this is the tolerance used while integrating the temperature distribution of the grains */
89static const double QHEAT_TOL = 5.e-3;
90
91/* maximum number of tries before we declare that probability distribution simply won't fit */
92static const long WIDE_FAIL_MAX = 3;
93
94/* multipliers for PROB_TOL used in GetProbDistr_HighLimit */
95static const double STRICT = 1.;
96static const double RELAX = 15.;
97
98/* when rebinning quantum heating results, make ln(qtemp[i]/qtemp[i-1]) == QT_RATIO */
99/* this constant determines the accuracy with which the Wien tail of the grain emission
100 * is calculated; if x = h*nu/k*T_gr and d = QT_RATIO-1., then the relative accuracy of
101 * the flux in the Wien tail is Acc = fabs(x*d^2/12 - x^2*d^2/24). A typical value of x
102 * would be x = 15, so QT_RATIO = 1.03 should converge the spectrum to better than 1% */
103static const double QT_RATIO = 1.03;
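
A standalone check (not part of this file) of the accuracy estimate quoted above: with x = h*nu/(k*T_gr) = 15 and d = QT_RATIO - 1 = 0.03, the formula Acc = fabs(x*d^2/12 - x^2*d^2/24) indeed comes out just below 1%.

    #include <cstdio>
    #include <cmath>

    int main()
    {
        const double x = 15., d = 0.03;
        const double Acc = std::fabs( x*d*d/12. - x*x*d*d/24. );
        std::printf( "relative accuracy of Wien tail: %.2e\n", Acc ); /* ~7.3e-3 */
        return 0;
    }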
104
105
106/*================================================================================*/
107/* global variables */
108
109/* these data define the enthalpy function for silicates
110 * derived from:
111 * >>refer grain physics Guhathakurta & Draine, 1989, ApJ, 345, 230
112 * coefficients converted to rydberg energy units, per unit atom
113 * assuming a density of 3.3 g/cm^3 and pure MgSiFeO4.
114 * this is not right, but the result is correct because number
115 * of atoms will be calculated using the same assumption. */
116
117/* this is the specific density of silicate in g/cm^3 */
118static const double DEN_SIL = 3.30;
119
120/* these are the mean molecular weights per atom for MgSiFeO4 and SiO2 in amu */
121static const double MW_SIL = 24.6051;
122/*static const double MW_SIO2 = 20.0283;*/
123
124static const double tlim[5]={0.,50.,150.,500.,DBL_MAX};
125static const double ppower[4]={2.00,1.30,0.68,0.00};
131
132
133/* initialize phiTilde */
134STATIC void qheat_init(size_t,/*@out@*/vector<double>&,/*@out@*/double*);
135/* worker routine, this implements the algorithm of Guhathakurta & Draine */
136STATIC void GetProbDistr_LowLimit(size_t,double,double,double,/*@in@*/const vector<double>&,
137 /*@in@*/const vector<double>&,/*@out@*/vector<double>&,
138 /*@out@*/vector<double>&,/*@out@*/vector<double>&,
139 /*@out@*/long*,/*@out@*/double*,long*,/*@out@*/QH_Code*);
140/* try two consecutive integration steps using stepsize "step/2." (yielding p[k]),
141 * and also one double integration step using stepsize "step" (yielding p2k). */
142STATIC double TryDoubleStep(vector<double>&,vector<double>&,vector<double>&,vector<double>&,
143 vector<double>&,const vector<double>&,const vector<double>&,double,
144 /*@out@*/double*,long,size_t,/*@out@*/bool*);
145/* calculate logarithmic integral from (x1,y1) to (x2,y2) */
146STATIC double log_integral(double,double,double,double);
147/* scan the probability distribution for valid range */
148STATIC void ScanProbDistr(const vector<double>&,const vector<double>&,long,double,long,double,
149 /*@out@*/long*,/*@out@*/long*,/*@out@*/long*,long*,QH_Code*);
150/* rebin the quantum heating results to speed up RT_diffuse */
151STATIC long RebinQHeatResults(size_t,long,long,vector<double>&,vector<double>&,vector<double>&,
152 vector<double>&,vector<double>&,vector<double>&,vector<double>&,QH_Code*);
153/* calculate approximate probability distribution in high intensity limit */
154STATIC void GetProbDistr_HighLimit(long,double,double,double,/*@out@*/vector<double>&,/*@out@*/vector<double>&,
155 /*@out@*/vector<double>&,/*@out@*/double*,
156 /*@out@*/long*,/*@out@*/double*,/*@out@*/QH_Code*);
157/* derivative of the enthalpy function dU/dT */
158STATIC double uderiv(double,size_t);
159/* enthalpy function */
160STATIC double ufunct(double,size_t,/*@out@*/bool*);
161/* inverse enthalpy function */
162STATIC double inv_ufunct(double,size_t,/*@out@*/bool*);
163/* helper function for calculating enthalpy, uses Debye theory */
164STATIC double DebyeDeriv(double,long);
165
166/* >>chng 01 oct 29, introduced gv.bin[nd]->cnv_H_pGR, cnv_GR_pH, etc. PvH */
167
168
169/* main routine for generating the grain diffuse emission, called by RT_diffuse */
170void GrainMakeDiffuse(void)
171{
172 long i, j;
173 double bfac,
174 f,
175 factor,
176 flux,
177 x;
178
179# ifndef NDEBUG
180 bool lgNoTdustFailures;
181 double BolFlux,
182 Comparison1,
183 Comparison2;
184# endif
185
186 /* this assures 6-digit precision in the evaluation of the exponential below */
187 const double LIM1 = pow(2.e-6,1./2.);
188 const double LIM2 = pow(6.e-6,1./3.);
189
190 DEBUG_ENTRY( "GrainMakeDiffuse()" );
191
192 factor = 2.*PI4*POW2(FR1RYD/SPEEDLIGHT)*FR1RYD;
193 /* >>chng 96 apr 26 upper limit chng from 15 to 75 Peter van Hoof */
194 /* >>chng 00 apr 10 use constants appropriate for double precision, by PvH */
195 x = log(0.999*DBL_MAX);
196
197 /* save grain emission per unit volume */
198 for( i=0; i < rfield.nflux; i++ )
199 {
200 /* used in RT_diffuse to derive total emission */
201 gv.GrainEmission[i] = 0.;
202 gv.SilicateEmission[i] = 0.;
203 gv.GraphiteEmission[i] = 0.;
204 }
205
206 vector<double> qtemp(NQGRID);
207 vector<double> qprob(NQGRID);
208
209 for( size_t nd=0; nd < gv.bin.size(); nd++ )
210 {
211 bool lgLocalQHeat;
212 long qnbin=-200;
213 realnum threshold;
214 double xx;
215
216 /* this local copy is necessary to keep lint happy */
217 lgLocalQHeat = gv.bin[nd]->lgQHeat;
218 /* >>chng 04 nov 09, do not evaluate quantum heating if abundance is negligible, PvH
219 * this prevents PAH's deep inside molecular regions from failing if GrnVryDepth is used */
220 /* >>chng 04 dec 31, introduced separate thresholds near I-front and in molecular region, PvH */
221 threshold = ( dense.xIonDense[ipHYDROGEN][0]+dense.xIonDense[ipHYDROGEN][1] > hmi.H2_total ) ?
222 gv.dstAbundThresholdNear : gv.dstAbundThresholdFar;
223
224 if( lgLocalQHeat && gv.bin[nd]->dstAbund >= threshold )
225 {
226 qheat(qtemp,qprob,&qnbin,nd);
227
228 if( gv.bin[nd]->lgUseQHeat )
229 {
230 ASSERT( qnbin > 0 );
231 }
232 }
233 else
234 {
235 /* >> chng 04 dec 31, repaired bug lgUseQHeat not set when abundance below threshold, PvH */
236 gv.bin[nd]->lgUseQHeat = false;
237 }
238
239 flux = 1.;
240 /* flux can only become zero in the Wien tail */
241 /* >>chng 04 jan 25, replaced flux > 0. with (realnum)flux > 0.f, PvH */
242 for( i=0; i < rfield.nflux && (realnum)flux > 0.f; i++ )
243 {
244 flux = 0.;
245 if( lgLocalQHeat && gv.bin[nd]->lgUseQHeat )
246 {
247 xx = 1.;
248 /* we start at high temperature end and work our way down
249 * until contribution becomes negligible */
250 for( j=qnbin-1; j >= 0 && xx > flux*DBL_EPSILON; j-- )
251 {
252 f = TE1RYD/qtemp[j]*rfield.anu[i];
253 if( f < x )
254 {
255 /* want the number exp(hnu/kT) - 1, two expansions */
256 /* >>chng 04 jan 25, changed 1.e-5 -> LIM1, LIM2, PvH */
257 if( f > LIM2 )
258 bfac = exp(f) - 1.;
259 else if( f > LIM1 )
260 bfac = (1. + 0.5*f)*f;
261 else
262 bfac = f;
263 xx = qprob[j]*gv.bin[nd]->dstab1[i]*rfield.anu2[i]*
264 rfield.widflx[i]/bfac;
265 flux += xx;
266 }
267 else
268 {
269 xx = 0.;
270 }
271 }
272 }
273 else
274 {
275 f = TE1RYD/gv.bin[nd]->tedust*rfield.anu[i];
276 if( f < x )
277 {
278 /* >>chng 04 jan 25, changed 1.e-5 -> LIM1, LIM2, PvH */
279 if( f > LIM2 )
280 bfac = exp(f) - 1.;
281 else if( f > LIM1 )
282 bfac = (1. + 0.5*f)*f;
283 else
284 bfac = f;
285 flux = gv.bin[nd]->dstab1[i]*rfield.anu2[i]*rfield.widflx[i]/bfac;
286 }
287 }
288
289 /* do multiplications outside loop, PvH */
290 flux *= factor*gv.bin[nd]->cnv_H_pCM3;
291
292 /* remember local emission; these are zeroed out for each zone
293 * above, and incremented here, so this is the unit emission from this zone */
294 gv.GrainEmission[i] += (realnum)flux;
295 /* unit emission for each different grain type */
296 strg_type scase = gv.which_strg[gv.bin[nd]->matType];
297 switch( scase )
298 {
299 case STRG_SIL:
300 gv.SilicateEmission[i] += (realnum)flux;
301 break;
302 case STRG_CAR:
303 gv.GraphiteEmission[i] += (realnum)flux;
304 break;
305 default:
306 TotalInsanity();
307 }
308 }
309 }
310
311# ifndef NDEBUG
312 /*********************************************************************************
313 *
314 * Following are three checks on energy and charge conservation by the grain code.
315 * Their primary function is as an internal consistency check, so that coding
316 * errors get caught as early as possible. This is why the program terminates as
317 * soon as any one of the checks fails.
318 *
319 * NB NB - despite appearances, these checks do NOT guarantee overall energy
320 * conservation in the Cloudy model to the asserted tolerance, see note 1B !
321 *
322 * Note 1: there are two sources for energy imbalance in the grain code (see A & B).
323 * A: Interpolation in dstems. The code calculates how much energy the grains
324 * emit in thermal radiation (gv.bin[nd]->GrainHeat), and converts that into
325 * an (average) grain temperature by reverse interpolation in dstems. If
326 * quantum heating is not used, that temperature is used directly to generate
327 * the local diffuse emission. Hence the finite resolution of the dstems grid
328 * can lead to small errors in flux. This is tested in Check 1. The maximum
329 * error of interpolating in dstems scales with NDEMS^-3. The same problem
330 * can also occur when quantum heating is used, however, the fact that many
331 * bins are used will probably lead to a cancellation effect of the errors.
332 * B: RT_OTS_Update gets called AFTER grain() has completed, so the grain heating
333 * was calculated using a different version of the OTS fields than the one
334 * that gets fed into the RT routines (where the actual attenuation of the
335 * radiation fields by the grain opacity is done). This can lead to an energy
336 * imbalance, depending on how accurate the convergence of the OTS fields is.
337 * This is outside the control of the grain code and is therefore NOT checked.
338 * Rather, the grain code remembers the contribution from the old OTS fields
339 * (through gv.bin[nd]->BolFlux) and uses that in Check 3. In most models the
340 * difference will be well below 0.1%, but in AGN type models where OTS continua
341 * are important, the energy imbalance can be of the order of 0.5% of the grain
342 * heating (status nov 2001). On 04 jan 25 the initialization of phiTilde has
343 * been moved to qheat, implying that phiTilde now uses the updated version of
344 * the OTS fields. The total amount of radiated energy however is still based
345 * on gv.bin[nd]->GrainHeat which uses the old version of the OTS fields.
346 * C: Energy conservation for collisional processes is guaranteed by adding in
347 * (very small) correction terms. These corrections are needed to cover up
348 * small imperfections in the theory, and cannot be avoided without making the
349 * already very complex theory even more complex.
350 * D: Photo-electric heating and collisional cooling can have an important effect
351 * on the total heating balance of the gas. Both processes depend strongly on
352 * the grain charge, so assuring proper charge balance is important as well.
353 * This is tested in Check 2.
354 *
355 * Note 2: for quantum heating it is important to resolve the Maxwell distribution
356 * of the electrons and ions far enough into the tail that the total amount of
357 * energy contained in the remaining tail is negligible. If this is not the case,
358 * the assert at the beginning of the qheat() routine will fail. This is because
359 * the code filling up the phiTilde array in GrainCollHeating() assumes a value for
360 * the average particle energy based on a Maxwell distribution going to infinity.
361 * If the maximum energy used is too low, the assumed average energy would be
362 * incorrect.
363 *
364 *********************************************************************************/
365
366 lgNoTdustFailures = true;
367 for( size_t nd=0; nd < gv.bin.size(); nd++ )
368 {
369 if( !gv.bin[nd]->lgTdustConverged )
370 {
371 lgNoTdustFailures = false;
372 break;
373 }
374 }
375
376 /* CHECK 1: does the grain thermal emission conserve energy ? */
377 BolFlux = 0.;
378 for( i=0; i < rfield.nflux; i++ )
379 {
380 BolFlux += gv.GrainEmission[i]*rfield.anu[i]*EN1RYD;
381 }
382 Comparison1 = 0.;
383 for( size_t nd=0; nd < gv.bin.size(); nd++ )
384 {
385 if( gv.bin[nd]->tedust < gv.bin[nd]->Tsublimat )
386 Comparison1 += CONSERV_TOL*gv.bin[nd]->GrainHeat;
387 else
388 /* for high temperatures the interpolation in dstems
389 * is less accurate, so we have to be more lenient */
390 Comparison1 += 10.*CONSERV_TOL*gv.bin[nd]->GrainHeat;
391 }
392
393 /* >>chng 04 mar 11, add constant grain temperature to pass assert */
394 /* >>chng 04 jun 01, deleted test for constant grain temperature, PvH */
395 ASSERT( fabs(BolFlux-gv.GrainHeatSum) < Comparison1 );
396
397 /* CHECK 2: assert charging balance */
398 for( size_t nd=0; nd < gv.bin.size(); nd++ )
399 {
400 if( gv.bin[nd]->lgChrgConverged )
401 {
402 double ave = 0.5*(gv.bin[nd]->RateDn+gv.bin[nd]->RateUp);
403 /* >>chng 04 dec 16, add lgAbort - do not throw assert if we are in
404 * process of aborting */
405 ASSERT( lgAbort || fabs(gv.bin[nd]->RateDn-gv.bin[nd]->RateUp) < CONSERV_TOL*ave );
406 }
407 }
408
409 if( lgNoTdustFailures && gv.lgDHetOn && gv.lgDColOn && thermal.ConstGrainTemp == 0. )
410 {
411 /* CHECK 3: calculate the total energy donated to grains, must be balanced by
412 * the energy emitted in thermal radiation plus various forms of gas heating */
413 Comparison1 = 0.;
414 for( size_t nd=0; nd < gv.bin.size(); nd++ )
415 {
416 Comparison1 += gv.bin[nd]->BolFlux;
417 }
418 /* add in collisional heating of grains by plasma (if positive) */
419 Comparison1 += MAX2(gv.GasCoolColl,0.);
420 /* add in net amount of chemical energy donated by recombining ions and molecule formation */
421 Comparison1 += gv.GrainHeatChem;
422
423 /* thermal emis PE effect gas heating by coll thermionic emis */
424 Comparison2 = gv.GrainHeatSum+thermal.heating[0][13]+thermal.heating[0][14]+thermal.heating[0][25];
425
426 /* >>chng 06 jun 02, add test on gv.GrainHeatScaleFactor so that assert not thrown
427 * when set grain heat command is used */
428 ASSERT( lgAbort || gv.GrainHeatScaleFactor != 1.f || gv.lgBakesPAH_heat ||
429 fabs(Comparison1-Comparison2)/Comparison2 < CONSERV_TOL );
430 }
431# endif
432 return;
433}
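
A standalone check (not part of this file) of the switch points LIM1 and LIM2 used for bfac in GrainMakeDiffuse() above: at the point where each Taylor expansion of exp(f)-1 is abandoned, the truncation error is of order 1e-6, consistent with the precision claimed in the comment that defines LIM1 and LIM2.

    #include <cstdio>
    #include <cmath>

    int main()
    {
        const double LIM1 = std::pow( 2.e-6, 1./2. );   /* ~1.41e-3 */
        const double LIM2 = std::pow( 6.e-6, 1./3. );   /* ~1.82e-2 */
        /* error of bfac = f at f = LIM1, and of bfac = (1+f/2)*f at f = LIM2 */
        const double err1 = std::fabs( std::expm1(LIM1) - LIM1 );
        const double err2 = std::fabs( std::expm1(LIM2) - (1. + 0.5*LIM2)*LIM2 );
        std::printf( "truncation errors: %.2e %.2e\n", err1, err2 ); /* both ~1e-6 */
        return 0;
    }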
434
435
436/****************************************************************************
437 *
438 * qheat: driver routine for non-equilibrium heating of grains
439 *
440 * This routine calls GetProbDistr_LowLimit, GetProbDistr_HighLimit
441 * (which do the actual non-equilibrium calculations), and does the
442 * subsequent quality control.
443 *
444 * Written by Peter van Hoof (UK, CITA, QUB).
445 *
446 ****************************************************************************/
447
448/* this is the new version of the quantum heating code, used starting Cloudy 96 beta 3 */
449
450void qheat(/*@out@*/ vector<double>& qtemp, /* qtemp[NQGRID] */
451 /*@out@*/ vector<double>& qprob, /* qprob[NQGRID] */
452 /*@out@*/ long int *qnbin,
453 size_t nd)
454{
455 bool lgBoundErr,
456 lgDelta,
457 lgNegRate,
458 lgOK,
459 lgTried;
460 long int i,
461 nWideFail;
462 QH_Code ErrorCode,
463 ErrorCode2,
464 ErrorStart;
465 double c0,
466 c1,
467 c2,
468 check,
469 DefFac,
470 deriv,
471 fwhm,
472 FwhmRatio,
473 integral,
474 minBracket,
475 maxBracket,
476 new_tmin,
477 NumEvents,
478 oldy,
479 rel_tol,
480 Tmax,
481 tol,
482 Umax,
483 xx,
484 y;
485
486
487 DEBUG_ENTRY( "qheat()" );
488
489 /* sanity checks */
490 ASSERT( gv.bin[nd]->lgQHeat );
491 ASSERT( nd < gv.bin.size() );
492
493 if( trace.lgTrace && trace.lgDustBug )
494 {
495 fprintf( ioQQQ, "\n >>>>qheat called for grain %s\n", gv.bin[nd]->chDstLab );
496 }
497
498 /* >>chng 01 aug 22, allocate space */
499 /* phiTilde is continuum corrected for photo-electric effect, in events/H/s/cell, default depl */
500 vector<double> phiTilde(rfield.nupper);
501 vector<double> Phi(rfield.nupper);
502 vector<double> PhiDrv(rfield.nupper);
503 vector<double> dPdlnT(NQGRID);
504
505 qheat_init( nd, phiTilde, &check );
506
507 check += gv.bin[nd]->GrainHeatColl-gv.bin[nd]->GrainCoolTherm;
508
509 xx = integral = 0.;
510 c0 = c1 = c2 = 0.;
511 lgNegRate = false;
512 oldy = 0.;
513 tol = 1.;
514
515 /* >>chng 01 nov 29, rfield.nflux -> gv.qnflux, PvH */
516 /* >>chng 03 jan 26, gv.qnflux -> gv.bin[nd]->qnflux, PvH */
517 for( i=gv.bin[nd]->qnflux-1; i >= 0; i-- )
518 {
519 /* >>chng 97 jul 17, to summed continuum */
520 /* >>chng 00 mar 30, to phiTilde, to keep track of photo-electric effect and collisions, by PvH */
521 /* >>chng 01 oct 10, use trapezoidal rule for integrating Phi, reverse direction of integration.
522 * Phi[i] is now integral from exactly rfield.anu[i] to infinity to second-order precision, PvH */
523 /* >>chng 01 oct 30, change normalization of Phi, PhiDrv from <unit>/cm^3 -> <unit>/grain, PvH */
524 double fac = ( i < gv.bin[nd]->qnflux-1 ) ? 1./rfield.widflx[i] : 0.;
525 /* phiTilde has units events/H/s, y has units events/grain/s/Ryd */
526 y = phiTilde[i]*gv.bin[nd]->cnv_H_pGR*fac;
527 /* PhiDrv[i] = (Phi[i+1]-Phi[i])/(anu[i+1]-anu[i]), units events/grain/s/Ryd */
528 PhiDrv[i] = -0.5*(y + oldy);
529 /* there is a minus sign here because we are integrating from infinity downwards */
530 xx -= PhiDrv[i]*(rfield.anu[i+1]-rfield.anu[i]);
531 /* Phi has units events/grain/s */
532 Phi[i] = xx;
533
534# ifndef NDEBUG
535 /* trapezoidal rule is not needed for integral, this is also second-order correct */
536 integral += phiTilde[i]*gv.bin[nd]->cnv_H_pCM3*rfield.anu[i]*EN1RYD;
537# endif
538
539 /* c<n> has units Ryd^(n+1)/grain/s */
540 c0 += Phi[i]*rfield.widflx[i];
541 c1 += Phi[i]*rfield.anu[i]*rfield.widflx[i];
542 c2 += Phi[i]*POW2(rfield.anu[i])*rfield.widflx[i];
543
544 lgNegRate = lgNegRate || ( phiTilde[i] < 0. );
545
546 oldy = y;
547 }
548
549 /* sanity check */
550 ASSERT( fabs(check-integral)/check <= CONSERV_TOL );
551
552# if 0
553 {
554 char fnam[50];
555 FILE *file;
556
557 sprintf(fnam,"Lambda_%2.2ld.asc",nd);
558 file = fopen(fnam,"w");
559 for( i=0; i < NDEMS; ++i )
560 fprintf(file,"%e %e %e\n",
561 exp(gv.dsttmp[i]),
562 ufunct(exp(gv.dsttmp[i]),nd,&lgBoundErr),
563 exp(gv.bin[nd]->dstems[i])*gv.bin[nd]->cnv_H_pGR/EN1RYD);
564 fclose(file);
565
566 sprintf(fnam,"Phi_%2.2ld.asc",nd);
567 file = fopen(fnam,"w");
568 for( i=0; i < gv.bin[nd]->qnflux; ++i )
569 fprintf(file,"%e %e\n", rfield.anu[i],Phi[i]);
570 fclose(file);
571 }
572# endif
573
574 /* Tmax is where p(U) will peak (at least in high intensity limit) */
575 Tmax = gv.bin[nd]->tedust;
576 /* grain enthalpy at peak of p(U), in Ryd */
577 Umax = ufunct(Tmax,nd,&lgBoundErr);
578 ASSERT( !lgBoundErr ); /* this should never happen */
579 /* y is dln(Lambda)/dlnT at Tmax */
580 spldrv_safe(gv.dsttmp,gv.bin[nd]->dstems,gv.bin[nd]->dstslp2,NDEMS,log(Tmax),&y,&lgBoundErr);
581 ASSERT( !lgBoundErr ); /* this should never happen */
582 /* deriv is dLambda/dU at Umax, in 1/grain/s */
583 deriv = y*c0/(uderiv(Tmax,nd)*Tmax);
584 /* estimated FWHM of probability distribution, in Ryd */
585 fwhm = sqrt(8.*LN_TWO*c1/deriv);
586
587 NumEvents = POW2(fwhm)*c0/(4.*LN_TWO*c2);
588 FwhmRatio = fwhm/Umax;
589
590 /* >>chng 01 nov 15, change ( NumEvents > MAX_EVENTS2 ) --> ( FwhmRatio < FWHM_RATIO2 ), PvH */
591 lgDelta = ( FwhmRatio < FWHM_RATIO2 );
592 /* high intensity case is always OK since we will use equilibrium treatment */
593 lgOK = lgDelta;
594
595 ErrorStart = QH_OK;
596
597 if( lgDelta )
598 {
599 /* in this case we ignore negative rates, equilibrium treatment is good enough */
600 lgNegRate = false;
601 ErrorStart = MAX2(ErrorStart,QH_DELTA);
602 }
603
604 if( lgNegRate )
605 ErrorStart = MAX2(ErrorStart,QH_NEGRATE_FAIL);
606
607 ErrorCode = ErrorStart;
608
609 if( trace.lgTrace && trace.lgDustBug )
610 {
611 double Rate2 = 0.;
612 for( int nz=0; nz < gv.bin[nd]->nChrg; nz++ )
613 Rate2 += gv.bin[nd]->chrg[nz]->FracPop*gv.bin[nd]->chrg[nz]->HeatingRate2;
614
615 fprintf( ioQQQ, " grain heating: %.4e, integral %.4e, total rate %.4e lgNegRate %c\n",
616 gv.bin[nd]->GrainHeat,integral,Phi[0],TorF(lgNegRate));
617 fprintf( ioQQQ, " av grain temp %.4e av grain enthalpy (Ryd) %.4e\n",
618 gv.bin[nd]->tedust,Umax);
619 fprintf( ioQQQ, " fwhm^2/(4ln2*c2/c0): %.4e fwhm (Ryd) %.4e fwhm/Umax %.4e\n",
620 NumEvents,fwhm,FwhmRatio );
621 fprintf( ioQQQ, " HeatingRate1 %.4e HeatingRate2 %.4e lgQHTooWide %c\n",
622 gv.bin[nd]->HeatingRate1*gv.bin[nd]->cnv_H_pCM3, Rate2*gv.bin[nd]->cnv_H_pCM3,
623 TorF(gv.bin[nd]->lgQHTooWide) );
624 }
625
626 /* these two variables will bracket qtmin, they should only be needed during the initial search phase */
627 minBracket = GRAIN_TMIN;
628 maxBracket = gv.bin[nd]->tedust;
629
630 /* >>chng 02 jan 30, introduced lgTried to avoid running GetProbDistr_HighLimit over and over..., PvH */
631 lgTried = false;
632 /* >>chng 02 aug 06, introduced QH_WIDE_FAIL and nWideFail, PvH */
633 nWideFail = 0;
634 /* >>chng 03 jan 27, introduced DefFac to increase factor for repeated LOW_FAIL's, PvH */
635 DefFac = DEF_FAC;
636 /* >>chng 04 nov 10, introduced rel_tol to increase precision in case of repeated CONV_FAIL's, PvH */
637 rel_tol = 1.;
638
639 /* if average number of multiple photon events is too high, lgOK is set to true */
640 /* >>chng 02 aug 12, added gv.bin[nd]->lgQHTooWide to prevent unnecessary looping here.
641 * In general the number of integration steps needed to integrate the probability distribution
642 * will increase monotonically with depth into the cloud. Hence, once the distribution becomes
643 * too wide to fit into NQGRID steps (which only happens for extremely cold grains in deeply
644 * shielded conditions) there is no hope of ever converging GetProbDistr_LowLimit and the code
645 * will waste a lot of CPU time establishing this for every zone again. So once the distribution
646 * becomes too wide we immediately skip to the analytic approximation to save time, PvH */
647 for( i=0; i < LOOP_MAX && !lgOK && !gv.bin[nd]->lgQHTooWide; i++ )
648 {
649 if( gv.bin[nd]->qtmin >= gv.bin[nd]->tedust )
650 {
651 /* >>chng 02 jul 31, was gv.bin[nd]->qtmin = 0.7*gv.bin[nd]->tedust */
652 /* >>chng 03 nov 10, changed Umax/exp(+... to Umax*exp(-... to avoid overflow, PvH */
653 double Ulo = Umax*exp(-sqrt(-log(PROB_CUTOFF_LO)/(4.*LN_TWO))*fwhm/Umax);
654 double MinEnth = exp(gv.bin[nd]->DustEnth[0]);
655 Ulo = MAX2(Ulo,MinEnth);
656 gv.bin[nd]->qtmin = inv_ufunct(Ulo,nd,&lgBoundErr);
657 ASSERT( !lgBoundErr ); /* this should never happen */
658 /* >>chng 02 jul 30, added this test; prevents problems with ASSERT below, PvH */
659 if( gv.bin[nd]->qtmin <= minBracket || gv.bin[nd]->qtmin >= maxBracket )
660 gv.bin[nd]->qtmin = sqrt(minBracket*maxBracket);
661 }
662 gv.bin[nd]->qtmin = MAX2(gv.bin[nd]->qtmin,GRAIN_TMIN);
663
664 ASSERT( minBracket <= gv.bin[nd]->qtmin && gv.bin[nd]->qtmin <= maxBracket );
665
666 ErrorCode = ErrorStart;
667
668 /* >>chng 01 nov 15, add ( FwhmRatio >= FWHM_RATIO ), PvH */
669 if( FwhmRatio >= FWHM_RATIO && NumEvents <= MAX_EVENTS )
670 {
671 GetProbDistr_LowLimit(nd,rel_tol,Umax,fwhm,Phi,PhiDrv,qtemp,qprob,dPdlnT,qnbin,
672 &new_tmin,&nWideFail,&ErrorCode);
673
674 /* >>chng 02 jan 07, various changes to improve convergence for very cold grains, PvH */
675 if( ErrorCode == QH_DELTA_FAIL && fwhm < Umax && !lgTried )
676 {
677 double dummy;
678
679 /* this situation can mean two things: either the photon rate is so high that
680 * the code needs too many steps to integrate the probability distribution,
681 * or alternatively, tmin is far too low and the code needs too many steps
682 * to overcome the rising side of the probability distribution.
683 * So we call GetProbDistr_HighLimit first to determine if the former is the
684 * case; if that fails then the latter must be true and we reset QH_DELTA_FAIL */
685 ErrorCode = MAX2(ErrorStart,QH_ANALYTIC);
686 /* use dummy to avoid losing estimate for new_tmin from GetProbDistr_LowLimit */
687 /* >>chng 02 aug 06, introduced STRICT and RELAX, PvH */
688 GetProbDistr_HighLimit(nd,STRICT,Umax,fwhm,qtemp,qprob,dPdlnT,&tol,qnbin,&dummy,
689 &ErrorCode);
690
691 if( ErrorCode >= QH_RETRY )
692 {
693 ErrorCode = QH_DELTA_FAIL;
694 lgTried = true;
695 }
696 }
697
698 /* >>chng 02 aug 07 major rewrite of the logic below */
699 if( ErrorCode < QH_NO_REBIN )
700 {
701 if( new_tmin < minBracket || new_tmin > maxBracket )
702 ++nWideFail;
703
704 if( nWideFail < WIDE_FAIL_MAX )
705 {
706 if( new_tmin <= minBracket )
707 new_tmin = sqrt(gv.bin[nd]->qtmin*minBracket);
708 if( new_tmin >= maxBracket )
709 new_tmin = sqrt(gv.bin[nd]->qtmin*maxBracket);
710 }
711 else
712 {
713 ErrorCode = MAX2(ErrorCode,QH_WIDE_FAIL);
714 }
715
716 if( ErrorCode == QH_CONV_FAIL )
717 {
718 rel_tol *= 0.9;
719 }
720 }
721 else if( ErrorCode == QH_LOW_FAIL )
722 {
723 double help1 = gv.bin[nd]->qtmin*sqrt(DefFac);
724 double help2 = sqrt(gv.bin[nd]->qtmin*maxBracket);
725 minBracket = gv.bin[nd]->qtmin;
726 new_tmin = MIN2(help1,help2);
727 /* increase factor in case we get repeated LOW_FAIL's */
728 DefFac += 1.5;
729 }
730 else if( ErrorCode == QH_HIGH_FAIL )
731 {
732 double help = sqrt(gv.bin[nd]->qtmin*minBracket);
733 maxBracket = gv.bin[nd]->qtmin;
734 new_tmin = MAX2(gv.bin[nd]->qtmin/DEF_FAC,help);
735 }
736 else
737 {
738 new_tmin = sqrt(minBracket*maxBracket);
739 }
740 }
741 else
742 {
743 GetProbDistr_HighLimit(nd,STRICT,Umax,fwhm,qtemp,qprob,dPdlnT,&tol,qnbin,&new_tmin,
744 &ErrorCode);
745 }
746
747 gv.bin[nd]->qtmin = new_tmin;
748
749 lgOK = ( ErrorCode < QH_RETRY );
750
751 if( ErrorCode >= QH_FATAL )
752 break;
753
754 if( ErrorCode != QH_LOW_FAIL )
755 DefFac = DEF_FAC;
756
757 if( trace.lgTrace && trace.lgDustBug )
758 {
759 fprintf( ioQQQ, " GetProbDistr returns code %d\n", ErrorCode );
760 if( !lgOK )
761 {
762 fprintf( ioQQQ, " >>>>qheat trying another iteration, qtmin bracket %.4e %.4e",
763 minBracket,maxBracket );
764 fprintf( ioQQQ, " nWideFail %ld\n", nWideFail );
765 }
766 }
767 }
768
769 if( ErrorCode == QH_WIDE_FAIL )
770 gv.bin[nd]->lgQHTooWide = true;
771
772 /* >>chng 03 jan 17, added test for !lgDelta, PvH */
773 /* if( gv.bin[nd]->lgQHTooWide ) */
774 if( gv.bin[nd]->lgQHTooWide && !lgDelta )
775 ErrorCode = MAX2(ErrorCode,QH_WIDE_FAIL);
776
777/* if( ErrorCode >= QH_RETRY ) */
778/* printf( " *** PROBLEM loop not converged, errorcode %d\n",ErrorCode ); */
779
780 /* The quantum heating code tends to run into trouble when it goes deep into the neutral zone,
781 * especially if the original spectrum was very hard, as is the case in high excitation PNe or AGN.
782 * You then get a bipartition in the spectrum where most of the photons have low energy, while
783 * there still are hard X-ray photons left. The fact that the average energy per photon is low
784 * forces the quantum code to take tiny little steps when integrating the probability distribution,
785 * while the fact that X-ray photons are still present implies that big temperature spikes still
786 * occur and hence the temperature distribution is very wide. Therefore the code needs a zillion
787 * steps to integrate the probability distribution and simply runs out of room. As a last resort
788 * try the analytic approximation with relaxed constraints used below. */
789 /* >>chng 02 oct 03, vary Umax and fwhm to force fit with fwhm/Umax remaining constant */
790 /* >>chng 03 jan 17, changed test so that last resort always gets tried when lgOK = lgDelta = false, PvH */
791 /* if( !lgOK && FwhmRatio >= FWHM_RATIO && NumEvents <= MAX_EVENTS ) */
792 if( !lgOK && !lgDelta )
793 {
794 double Umax2 = Umax*sqrt(tol);
795 double fwhm2 = fwhm*sqrt(tol);
796
797 for( i=0; i < LOOP_MAX; ++i )
798 {
799 double dummy;
800
801 ErrorCode2 = MAX2(ErrorStart,QH_ANALYTIC);
802 GetProbDistr_HighLimit(nd,RELAX,Umax2,fwhm2,qtemp,qprob,dPdlnT,&tol,qnbin,&dummy,
803 &ErrorCode2);
804
805 lgOK = ( ErrorCode2 < QH_RETRY );
806 if( lgOK )
807 {
808 gv.bin[nd]->qtmin = dummy;
809 ErrorCode = ErrorCode2;
810 break;
811 }
812 else
813 {
814 Umax2 *= sqrt(tol);
815 fwhm2 *= sqrt(tol);
816 }
817 }
818 }
819
820 if( nzone == 1 )
821 gv.bin[nd]->qtmin_zone1 = gv.bin[nd]->qtmin;
822
823 gv.bin[nd]->lgUseQHeat = ( lgOK && !lgDelta );
824 gv.bin[nd]->lgEverQHeat = ( gv.bin[nd]->lgEverQHeat || gv.bin[nd]->lgUseQHeat );
825
826 if( lgOK )
827 {
828 if( trace.lgTrace && trace.lgDustBug )
829 fprintf( ioQQQ, " >>>>qheat converged with code: %d\n", ErrorCode );
830 }
831 else
832 {
833 *qnbin = 0;
834 ++gv.bin[nd]->QHeatFailures;
835 fprintf( ioQQQ, " PROBLEM qheat did not converge grain %s in zone %ld, error code = %d\n",
836 gv.bin[nd]->chDstLab,nzone,ErrorCode );
837 }
838
839 if( gv.QHSaveFile != NULL && ( iterations.lgLastIt || !gv.lgQHPunLast ) )
840 {
841 fprintf( gv.QHSaveFile, "\nDust Temperature Distribution: grain %s zone %ld\n",
842 gv.bin[nd]->chDstLab,nzone );
843
844 fprintf( gv.QHSaveFile, "Equilibrium temperature: %.2f\n", gv.bin[nd]->tedust );
845
846 if( gv.bin[nd]->lgUseQHeat )
847 {
848 /* >>chng 01 oct 09, remove qprob from output, it depends on step size, PvH */
849 fprintf( gv.QHSaveFile, "Number of bins: %ld\n", *qnbin );
850 fprintf( gv.QHSaveFile, " Tgrain dP/dlnT\n" );
851 for( i=0; i < *qnbin; i++ )
852 {
853 fprintf( gv.QHSaveFile, "%.4e %.4e\n", qtemp[i],dPdlnT[i] );
854 }
855 }
856 else
857 {
858 fprintf( gv.QHSaveFile, "**** quantum heating was not used\n" );
859 }
860 }
861 return;
862}
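
A simplified standalone sketch (not part of this file; ReverseCumTrapz is a hypothetical helper) of the integration scheme used above to build Phi: the photon rate density is accumulated downward from the highest frequency with the trapezoidal rule, so that Phi[i] approximates the integral from anu[i] to infinity to second order.

    #include <vector>

    std::vector<double> ReverseCumTrapz( const std::vector<double>& anu, const std::vector<double>& y )
    {
        std::vector<double> Phi( anu.size(), 0. );
        if( anu.empty() )
            return Phi;
        /* no flux is assumed above the last grid point, hence Phi.back() == 0 */
        for( size_t i = anu.size()-1; i > 0; --i )
            Phi[i-1] = Phi[i] + 0.5*( y[i] + y[i-1] )*( anu[i] - anu[i-1] );
        return Phi;
    }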
863
864
865/* initialize phiTilde */
866STATIC void qheat_init(size_t nd,
867 /*@out@*/ vector<double>& phiTilde, /* phiTilde[rfield.nupper] */
868 /*@out@*/ double *check)
869{
870 long i,
871 nz;
872 double sum = 0.;
873
874 /*@-redef@*/
875 enum {DEBUG_LOC=false};
876 /*@+redef@*/
877
878 DEBUG_ENTRY( "qheat_init()" );
879
880 /* sanity checks */
881 ASSERT( gv.bin[nd]->lgQHeat );
882 ASSERT( nd < gv.bin.size() );
883
884 *check = 0.;
885
886 /* >>chng 01 nov 29, rfield.nflux -> gv.qnflux, PvH */
887 /* >>chng 03 jan 26, gv.qnflux -> gv.bin[nd]->qnflux, PvH */
888 for( i=0; i < gv.bin[nd]->qnflux; i++ )
889 {
890 phiTilde[i] = 0.;
891 }
892
893 /* fill in auxiliary array for quantum heating routine
894 * it reshuffles the input spectrum times the cross section to take
895 * the photo-electric effect into account. this prevents the quantum
896 * heating routine from having to calculate this effect over and over
897 * again; it can do a straight integration instead, making the code
898 * a lot simpler and faster. this initializes the array for non-ionizing
899 * energies, the reshuffling for higher energies is done in the next loop
900 * phiTilde has units events/H/s/cell at default depletion */
901
902 double NegHeatingRate = 0.;
903
904 for( nz=0; nz < gv.bin[nd]->nChrg; nz++ )
905 {
906 double check1 = 0.;
907 ChargeBin *gptr = gv.bin[nd]->chrg[nz];
908
909 /* integrate over incident continuum for non-ionizing energies */
910 for( i=0; i < MIN2(gptr->ipThresInf,rfield.nflux); i++ )
911 {
912 check1 += rfield.SummedCon[i]*gv.bin[nd]->dstab1[i]*rfield.anu[i];
913 phiTilde[i] += gptr->FracPop*rfield.SummedCon[i]*gv.bin[nd]->dstab1[i];
914 }
915
916 /* >>chng 01 mar 02, use new expressions for grain cooling and absorption
917 * cross sections following the discussion with Joe Weingartner, PvH */
918 for( i=gptr->ipThresInf; i < rfield.nflux; i++ )
919 {
920 long ipLo2 = gptr->ipThresInfVal;
921 double cs1 = ( i >= ipLo2 ) ? gv.bin[nd]->dstab1[i]*gptr->yhat_primary[i] : 0.;
922
923 check1 += rfield.SummedCon[i]*gptr->fac1[i];
924 /* this accounts for the photons that are fully absorbed by grain */
925 phiTilde[i] += gptr->FracPop*rfield.SummedCon[i]*MAX2(gv.bin[nd]->dstab1[i]-cs1,0.);
926
927 /* >>chng 01 oct 10, use bisection search to find ip. On C scale now */
928
929 /* this accounts for photons that eject an electron from the valence band */
930 if( cs1 > 0. )
931 {
932 /* we treat photo-ejection and all subsequent de-excitation cascades
933 * from the conduction/valence band as one simultaneous event */
934 /* the condition cs1 > 0. assures i >= ipLo2 */
935 /* ratio is number of ejected electrons per primary ionization */
936 double ratio = ( gv.lgWD01 ) ? 1. : gptr->yhat[i]/gptr->yhat_primary[i];
937 /* ehat is average energy of ejected electron at infinity */
938 double ehat = gptr->ehat[i];
939 double cool1, sign = 1.;
940 realnum xx;
941
942 if( gptr->DustZ <= -1 )
943 cool1 = gptr->ThresSurf + gptr->PotSurf + ehat;
944 else
945 cool1 = gptr->ThresSurfVal + gptr->PotSurf + ehat;
946 /* secondary electrons are assumed to have the same Elo and Ehi as the
947 * primary electrons that excite them. This neglects the threshold for
948 * the ejection of the secondary electron and can cause xx to become
949 * negative if Ehi is less than that threshold. To conserve energy we
950 * will simply assume a negative rate here. Since secondary electrons
951 * are generally not important this should have little impact on the
952 * overall temperature distribution */
953 xx = rfield.anu[i] - (realnum)(ratio*cool1);
954 if( xx < 0.f )
955 {
956 xx = -xx;
957 sign = -1.;
958 }
959 long ipLo = hunt_bisect( &gv.anumin[0], i+1, xx );
960 /* for grains in hard X-ray environments, the coarseness of the grid can
961 * lead to inaccuracies in the integral over phiTilde that would trip the
962 * sanity check in qheat(), here we correct for the energy mismatch */
963 double corr = xx/rfield.anu[ipLo];
964 phiTilde[ipLo] += sign*corr*gptr->FracPop*rfield.SummedCon[i]*cs1;
965 }
966
967 /* no need to account for photons that eject an electron from the conduction band */
968 /* >>chng 01 dec 11, cool2 always equal to rfield.anu[i] -> no grain heating */
969 }
970
971 *check += gptr->FracPop*check1*EN1RYD*gv.bin[nd]->cnv_H_pCM3;
972
973 sum += gptr->FracPop*check1*EN1RYD*gv.bin[nd]->cnv_H_pCM3;
974
975 if( DEBUG_LOC )
976 {
977 double integral = 0.;
978 for( i=0; i < gv.bin[nd]->qnflux; i++ )
979 {
980 integral += phiTilde[i]*gv.bin[nd]->cnv_H_pCM3*rfield.anu[i]*EN1RYD;
981 }
982 dprintf( ioQQQ, " integral test 1: integral %.6e %.6e\n", integral, sum );
983 }
984
985 /* add quantum heating due to recombination of electrons, subtract thermionic cooling */
986
987 /* gptr->HeatingRate2 is net heating rate in erg/H/s at standard depl
988 * includes contributions for recombining electrons, autoionizing electrons
989 * subtracted by thermionic emissions here since it is inverse process
990 *
991 * NB - in extreme conditions this rate may be negative (if there
992 * is an intense radiation field leading to very hot grains, but no ionizing
993 * photons, hence very few free electrons). we assume that the photon rates
994 * are high enough under those circumstances to avoid phiTilde becoming negative,
995 * but we will check that in qheat1 anyway. */
996
997 /* >>chng 03 nov 06, check for extremely low HeatingRate and save CPU time, pah_crash.in, PvH */
998 if( gptr->HeatingRate2*gv.bin[nd]->cnv_H_pCM3 > 0.05*CONSERV_TOL*gv.bin[nd]->GrainHeat )
999 {
1000 double Sum,ESum,DSum,Delta,E_av2,Corr;
1001 double fac = BOLTZMANN/EN1RYD*phycon.te;
1002 /* E0 is barrier that electron needs to overcome, zero for positive grains */
1003 /* >>chng 03 jan 23, added second term to correctly account for autoionizing states
1004 * where ThresInfInc is negative, tested in small_grain.in, PvH */
1005 double E0 = -(MIN2(gptr->PotSurfInc,0.) + MIN2(gptr->ThresInfInc,0.));
1006 /* >>chng 01 mar 02, this should be energy gap between top electron and infinity, PvH */
1007 /* >>chng 01 nov 21, use correct barrier: ThresInf[nz] -> ThresInfInc[nz], PvH */
1008 /* >>chng 03 jan 23, replaced -E0 with MIN2(PotSurfInc[nz],0.), PvH */
1009 double Einf = gptr->ThresInfInc + MIN2(gptr->PotSurfInc,0.);
1010 /* this is average energy deposited by one event, in erg
1011 * this value is derived from distribution assumed here, and may
1012 * not be the same as HeatElectrons/(CollisionRateElectr*eta) !! */
1013 /* >>chng 01 nov 21, use correct barrier: ThresInf[nz] -> ThresInfInc[nz], PvH */
1014 /* >>chng 03 jan 23, replaced ThresInfInc[nz] with MAX2(ThresInfInc[nz],0.), PvH */
1015 double E_av = MAX2(gptr->ThresInfInc,0.)*EN1RYD + 2.*BOLTZMANN*phycon.te;
1016 /* this is rate in events/H/s at standard depletion */
1017 double rate = gptr->HeatingRate2/E_av;
1018
1019 double ylo = -exp(-E0/fac);
1020 /* this is highest kinetic energy of electron that can be represented in phiTilde */
1021 /* >>chng 01 nov 29, rfield.nflux -> gv.qnflux, PvH */
1022 /* >>chng 03 jan 26, gv.qnflux -> gv.bin[nd]->qnflux, PvH */
1023 double Ehi = gv.anumax[gv.bin[nd]->qnflux-1]-Einf;
1024 double yhi = ((E0-Ehi)/fac-1.)*exp(-Ehi/fac);
1025 /* renormalize rate so that integral over phiTilde*anu gives correct total energy */
1026 rate /= yhi-ylo;
1027
1028 /* correct for fractional population of this charge state */
1029 rate *= gptr->FracPop;
1030
1031 /* >>chng 03 jan 24, add code to correct for discretization errors, hotdust.in, PvH */
1032 vector<double> RateArr(gv.bin[nd]->qnflux);
1033 Sum = ESum = DSum = 0.;
1034
1035 /* >>chng 04 jan 21, replaced gv.bin[nd]->qnflux -> gv.bin[nd]->qnflux2, PvH */
1036 for( i=0; i < gv.bin[nd]->qnflux2; i++ )
1037 {
1038 Ehi = gv.anumax[i] - Einf;
1039 if( Ehi >= E0 )
1040 {
1041 /* Ehi is kinetic energy of electron at infinity */
1042 yhi = ((E0-Ehi)/fac-1.)*exp(-Ehi/fac);
1043 /* >>chng 01 mar 24, use MAX2 to protect against roundoff error, PvH */
1044 RateArr[i] = rate*MAX2(yhi-ylo,0.);
1045 Sum += RateArr[i];
1046 ESum += rfield.anu[i]*RateArr[i];
1047# ifndef NDEBUG
1048 DSum += rfield.widflx[i]*RateArr[i];
1049# endif
1050 ylo = yhi;
1051 }
1052 else
1053 {
1054 RateArr[i] = 0.;
1055 }
1056 }
1057 E_av2 = ESum/Sum*EN1RYD;
1058 Delta = DSum/Sum*EN1RYD;
1059 ASSERT( fabs(E_av-E_av2) <= Delta );
1060 Corr = E_av/E_av2;
1061
1062 for( i=0; i < gv.bin[nd]->qnflux2; i++ )
1063 {
1064 phiTilde[i] += RateArr[i]*Corr;
1065 }
1066
1067 sum += gptr->FracPop*gptr->HeatingRate2*gv.bin[nd]->cnv_H_pCM3;
1068
1069 if( DEBUG_LOC )
1070 {
1071 double integral = 0.;
1072 for( i=0; i < gv.bin[nd]->qnflux; i++ )
1073 {
1074 integral += phiTilde[i]*gv.bin[nd]->cnv_H_pCM3*rfield.anu[i]*EN1RYD;
1075 }
1076 dprintf( ioQQQ, " integral test 2: integral %.6e %.6e\n", integral, sum );
1077 }
1078 }
1079 else
1080 {
1081 NegHeatingRate += gptr->FracPop*gptr->HeatingRate2*gv.bin[nd]->cnv_H_pCM3;
1082 }
1083 }
1084
1085 /* ============================================================================= */
1086
1087 /* add quantum heating due to molecule/ion collisions */
1088
1089 /* gv.bin[nd]->HeatingRate1 is heating rate in erg/H/s at standard depl
1090 * includes contributions from molecules/neutral atoms and recombining ions
1091 *
1092 * in fully ionized conditions electron heating rates will be much higher
1093 * than ion and molecule rates since electrons are so much faster and grains
1094 * tend to be positive. in non-ionized conditions the main contribution will
1095 * come from neutral atoms and molecules, so it is appropriate to treat both
1096 * the same. in fully ionized conditions we don't care since unimportant.
1097 *
1098 * NB - if grains are hotter than ambient gas, the heating rate may become negative.
1099 * if photon rates are not high enough to prevent phiTilde from becoming negative,
1100 * we will raise a flag while calculating the quantum heating in qheat1 */
1101
1102 /* >>chng 03 nov 06, check for extremely low HeatingRate and save CPU time, PvH */
1103 if( gv.bin[nd]->HeatingRate1*gv.bin[nd]->cnv_H_pCM3 > 0.05*CONSERV_TOL*gv.bin[nd]->GrainHeat )
1104 {
1105 /* limits for Taylor expansion of (1+x)*exp(-x) */
1106 /* these choices will assure only 6 digits precision */
1107 const double LIM2 = pow(3.e-6,1./3.);
1108 const double LIM3 = pow(8.e-6,1./4.);
1109 /* if gas temperature is higher than grain temperature we will
1110 * consider Maxwell-Boltzmann distribution of incoming particles
1111 * and ignore distribution of outgoing particles, if grains
1112 * are hotter than ambient gas, we use reverse treatment */
1113 double fac = BOLTZMANN/EN1RYD*MAX2(phycon.te,gv.bin[nd]->tedust);
1114 /* this is average energy deposited/extracted by one event, in erg */
1115 double E_av = 2.*BOLTZMANN*MAX2(phycon.te,gv.bin[nd]->tedust);
1116 /* this is rate in events/H/s at standard depletion */
1117 double rate = gv.bin[nd]->HeatingRate1/E_av;
1118
1119 double ylo = -1.;
1120 /* this is highest energy of incoming/outgoing particle that can be represented in phiTilde */
1121 /* >>chng 01 nov 29, rfield.nflux -> gv.qnflux, PvH */
1122 /* >>chng 03 jan 26, gv.qnflux -> gv.bin[nd]->qnflux, PvH */
1123 double Ehi = gv.anumax[gv.bin[nd]->qnflux-1];
1124 double yhi = -(Ehi/fac+1.)*exp(-Ehi/fac);
1125 /* renormalize rate so that integral over phiTilde*anu gives correct total energy */
1126 rate /= yhi-ylo;
1127
1128 for( i=0; i < gv.bin[nd]->qnflux2; i++ )
1129 {
1130 /* Ehi is kinetic energy of incoming/outgoing particle
1131 * we assume that Ehi-E0 is deposited/extracted from grain */
1132 /* Ehi = gv.anumax[i]; */
1133 double x = gv.anumax[i]/fac;
1134 /* (1+x)*exp(-x) = 1 - 1/2*x^2 + 1/3*x^3 - 1/8*x^4 + O(x^5)
1135 * = 1 - Sum_n=2^infty (-x)^n/(n*(n-2)!) */
1136 if( x > LIM3 )
1137 yhi = -(x+1.)*exp(-x);
1138 else if( x > LIM2 )
1139 yhi = -(((1./3.)*x - 0.5)*x*x + 1.);
1140 else
1141 yhi = -(1. - 0.5*x*x);
1142
1143 /* >>chng 01 mar 24, use MAX2 to protect against roundoff error, PvH */
1144 phiTilde[i] += rate*MAX2(yhi-ylo,0.);
1145 ylo = yhi;
1146 }
1147
1148 sum += gv.bin[nd]->HeatingRate1*gv.bin[nd]->cnv_H_pCM3;
1149
1150 if( DEBUG_LOC )
1151 {
1152 double integral = 0.;
1153 for( i=0; i < gv.bin[nd]->qnflux; i++ )
1154 {
1155 integral += phiTilde[i]*gv.bin[nd]->cnv_H_pCM3*rfield.anu[i]*EN1RYD;
1156 }
1157 dprintf( ioQQQ, " integral test 3: integral %.6e %.6e\n", integral, sum );
1158 }
1159 }
1160 else
1161 {
1162 NegHeatingRate += gv.bin[nd]->HeatingRate1*gv.bin[nd]->cnv_H_pCM3;
1163 }
1164
1165 /* here we account for the negative heating rates, we simply do that by scaling the entire
1166 * phiTilde array down by a constant factor such that the total amount of energy is conserved
1167 * This treatment assures that phiTilde never goes negative, which avoids problems further on */
1168 if( NegHeatingRate < 0. )
1169 {
1170 double scale_fac = (sum+NegHeatingRate)/sum;
1171 for( i=0; i < gv.bin[nd]->qnflux; i++ )
1172 phiTilde[i] *= scale_fac;
1173
1174 sum += NegHeatingRate;
1175
1176 if( DEBUG_LOC )
1177 {
1178 double integral = 0.;
1179 for( i=0; i < gv.bin[nd]->qnflux; i++ )
1180 {
1181 integral += phiTilde[i]*gv.bin[nd]->cnv_H_pCM3*rfield.anu[i]*EN1RYD;
1182 }
1183 dprintf( ioQQQ, " integral test 4: integral %.6e %.6e\n", integral, sum );
1184 }
1185 }
1186
1187 return;
1188}
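
A standalone check (not part of this file) of the Taylor expansion of (1+x)*exp(-x) used in qheat_init() above for the thermal particle distribution: at the switch points LIM2 and LIM3 the dropped terms, x^3/3 and x^4/8 respectively, are of order 1e-6.

    #include <cstdio>
    #include <cmath>

    int main()
    {
        const double LIM2 = std::pow( 3.e-6, 1./3. );   /* ~1.44e-2 */
        const double LIM3 = std::pow( 8.e-6, 1./4. );   /* ~5.32e-2 */
        const double e2 = std::fabs( (1.+LIM2)*std::exp(-LIM2) - (1. - 0.5*LIM2*LIM2) );
        const double e3 = std::fabs( (1.+LIM3)*std::exp(-LIM3) - (((1./3.)*LIM3 - 0.5)*LIM3*LIM3 + 1.) );
        std::printf( "truncation errors: %.2e %.2e\n", e2, e3 ); /* both ~1e-6 */
        return 0;
    }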
1189
1190
1191/*******************************************************************************************
1192 *
1193 * GetProbDistr_LowLimit: main routine for calculating non-equilibrium heating of grains
1194 *
1195 * This routine implements the algorithm outlined in:
1196 * >>refer grain physics Guhathakurta & Draine, 1989, ApJ, 345, 230
1197 *
1198 * The original (fortran) version of the code was written by Kevin Volk.
1199 *
1200 * Heavily modified and adapted for new style grains by Peter van Hoof.
1201 *
1202 *******************************************************************************************/
1203
1204STATIC void GetProbDistr_LowLimit(size_t nd,
1205 double rel_tol,
1206 double Umax,
1207 double fwhm,
1208 /*@in@*/ const vector<double>& Phi, /* Phi[NQGRID] */
1209 /*@in@*/ const vector<double>& PhiDrv, /* PhiDrv[NQGRID] */
1210 /*@out@*/ vector<double>& qtemp, /* qtemp[NQGRID] */
1211 /*@out@*/ vector<double>& qprob, /* qprob[NQGRID] */
1212 /*@out@*/ vector<double>& dPdlnT, /* dPdlnT[NQGRID] */
1213 /*@out@*/ long int *qnbin,
1214 /*@out@*/ double *new_tmin,
1215 long *nWideFail,
1216 /*@out@*/ QH_Code *ErrorCode)
1217{
1218 bool lgAllNegSlope,
1219 lgBoundErr;
1220 long int j,
1221 k,
1222 l,
1223 nbad,
1224 nbin,
1225 nend=0,
1226 nmax,
1227 nok,
1228 nstart=0,
1229 nstart2=0;
1230 double dCool=0.,
1231 dlnLdlnT,
1232 dlnpdlnU,
1233 fac = 0.,
1234 maxVal,
1235 NextStep,
1236 qtmin1,
1237 RadCooling,
1238 sum,
1239 y;
1240 vector<double> delu(NQGRID);
1241 vector<double> Lambda(NQGRID);
1242 vector<double> p(NQGRID);
1243 vector<double> u1(NQGRID);
1244
1245
1246 DEBUG_ENTRY( "GetProbDistr_LowLimit()" );
1247
1248 /* sanity checks */
1249 ASSERT( nd < gv.bin.size() );
1250
1251 if( trace.lgTrace && trace.lgDustBug )
1252 {
1253 fprintf( ioQQQ, " GetProbDistr_LowLimit called for grain %s\n", gv.bin[nd]->chDstLab );
1254 fprintf( ioQQQ, " got qtmin1 %.4e\n", gv.bin[nd]->qtmin);
1255 }
1256
1257 qtmin1 = gv.bin[nd]->qtmin;
1258 qtemp[0] = qtmin1;
1259 /* u1 holds enthalpy in Ryd/grain */
1260 u1[0] = ufunct(qtemp[0],nd,&lgBoundErr);
1261 ASSERT( !lgBoundErr ); /* this should never happen */
1262 /* >>chng 00 mar 22, taken out factor 4, factored in hden and dstAbund
1263 * interpolate in dstems array instead of integrating explicitly, by PvH */
1264 splint_safe(gv.dsttmp,gv.bin[nd]->dstems,gv.bin[nd]->dstslp2,NDEMS,log(qtemp[0]),&y,&lgBoundErr);
1265 ASSERT( !lgBoundErr ); /* this should never happen */
1266 /* Lambda holds the radiated energy for grains in this bin, in Ryd/s/grain */
1267 Lambda[0] = exp(y)*gv.bin[nd]->cnv_H_pGR/EN1RYD;
1268 /* set up first step of integration */
1269 /* >>chng 01 nov 14, set to 2.*Lambda[0]/Phi[0] instead of u1[0],
1270 * this choice assures that p[1] doesn't make a large jump from p[0], PvH */
1271 delu[0] = 2.*Lambda[0]/Phi[0];
1272 p[0] = PROB_CUTOFF_LO;
1273 dPdlnT[0] = p[0]*qtemp[0]*uderiv(qtemp[0],nd);
1274 RadCooling = 0.5*p[0]*Lambda[0]*delu[0];
1275 NextStep = 0.01*Lambda[0]/Phi[0];
1276 /* >>chng 03 nov 10, added extra safeguard against stepsize too small, PvH */
1277 if( NextStep < 0.05*DBL_EPSILON*u1[0] )
1278 {
1279 *ErrorCode = MAX2(*ErrorCode,QH_STEP_FAIL);
1280 return;
1281 }
1282 else if( NextStep < 5.*DBL_EPSILON*u1[0] )
1283 {
1284 /* try a last resort... */
1285 NextStep *= 100.;
1286 }
1287
1288 nbad = 0;
1289 k = 0;
1290
1291 *qnbin = 0;
1292 *new_tmin = qtmin1;
1293 lgAllNegSlope = true;
1294 maxVal = dPdlnT[0];
1295 nmax = 0;
1296
1297 /* this test neglects a negative contribution which is impossible to calculate, so it may
1298 * fail to detect cases where the probability distribution starts dropping immediately,
1299 * we will use a second test using the variable lgAllNegSlope below to catch those cases, PvH */
1300 spldrv_safe(gv.dsttmp,gv.bin[nd]->dstems,gv.bin[nd]->dstslp2,NDEMS,log(qtemp[0]),&dlnLdlnT,&lgBoundErr);
1301 ASSERT( !lgBoundErr ); /* this should never happen */
1302 dlnpdlnU = u1[0]*Phi[0]/Lambda[0] - dlnLdlnT*u1[0]/(qtemp[0]*uderiv(qtemp[0],nd));
1303 if( dlnpdlnU < 0. )
1304 {
1305 /* >>chng 03 nov 06, confirm this by integrating first step..., pah_crash.in, PvH */
1306 (void)TryDoubleStep(u1,delu,p,qtemp,Lambda,Phi,PhiDrv,NextStep,&dCool,k,nd,&lgBoundErr);
1307 dPdlnT[2] = p[2]*qtemp[2]*uderiv(qtemp[2],nd);
1308
1309 if( dPdlnT[2] < dPdlnT[0] )
1310 {
1311 /* if dPdlnT starts falling immediately,
1312 * qtmin1 was too high and convergence is impossible */
1313 *ErrorCode = MAX2(*ErrorCode,QH_HIGH_FAIL);
1314 return;
1315 }
1316 }
1317
1318 /* NB NB -- every break in this loop should set *ErrorCode (except for regular stop criterion) !! */
1319 for( l=0; l < MAX_LOOP; ++l )
1320 {
1321 double RelError = TryDoubleStep(u1,delu,p,qtemp,Lambda,Phi,PhiDrv,NextStep,&dCool,k,nd,&lgBoundErr);
1322
1323 /* this happens if the grain temperature in qtemp becomes higher than GRAIN_TMAX
1324 * nothing that TryDoubleStep returns can be trusted, so this check should be first */
1325 if( lgBoundErr )
1326 {
1327 nbad += 2;
1328 *ErrorCode = MAX2(*ErrorCode,QH_THIGH_FAIL);
1329 break;
1330 }
1331
1332 /* estimate new stepsize */
1333 if( RelError > rel_tol*QHEAT_TOL )
1334 {
1335 nbad += 2;
1336
1337 /* step is rejected, decrease stepsize and try again */
1338 NextStep *= sqrt(0.9*rel_tol*QHEAT_TOL/RelError);
1339
1340 /* stepsize too small, this can happen at extreme low temperatures */
1341 if( NextStep < 5.*DBL_EPSILON*u1[k] )
1342 {
1343 *ErrorCode = MAX2(*ErrorCode,QH_STEP_FAIL);
1344 break;
1345 }
1346
1347 continue;
1348 }
1349 else
1350 {
1351 /* step was OK, adjust stepsize */
1352 k += 2;
1353
1354 /* >>chng 03 nov 10, safeguard against division by zero, PvH */
1355 NextStep *= MIN2(pow(0.9*rel_tol*QHEAT_TOL/MAX2(RelError,1.e-50),1./3.),4.);
1356 NextStep = MIN2(NextStep,Lambda[k]/Phi[0]);
1357 }
1358
1359 dPdlnT[k-1] = p[k-1]*qtemp[k-1]*uderiv(qtemp[k-1],nd);
1360 dPdlnT[k] = p[k]*qtemp[k]*uderiv(qtemp[k],nd);
1361
1362 lgAllNegSlope = lgAllNegSlope && ( dPdlnT[k] < dPdlnT[k-2] );
1363
1364 if( dPdlnT[k-1] > maxVal )
1365 {
1366 maxVal = dPdlnT[k-1];
1367 nmax = k-1;
1368 }
1369 if( dPdlnT[k] > maxVal )
1370 {
1371 maxVal = dPdlnT[k];
1372 nmax = k;
1373 }
1374
1375 RadCooling += dCool;
1376
1377// if( nzone >= 24 && nd == 0 ) {
1378// printf(" k %ld T[k] %.6e U[k] %.6e p[k] %.6e dPdlnT[k] %.6e\n",k-1,qtemp[k-1],u1[k-1],p[k-1],dPdlnT[k-1]);
1379// printf(" k %ld T[k] %.6e U[k] %.6e p[k] %.6e dPdlnT[k] %.6e\n",k,qtemp[k],u1[k],p[k],dPdlnT[k]);
1380// }
1381
1382 /* if qtmin is far too low, p[k] can become extremely large, exceeding
1383 * even double precision range. the following check should prevent overflows */
1384 /* >>chng 01 nov 07, sqrt(DBL_MAX) -> sqrt(DBL_MAX/100.) so that sqrt(p[k]*p[k+1]) is safe */
1385 if( p[k] > sqrt(DBL_MAX/100.) )
1386 {
1387 *ErrorCode = MAX2(*ErrorCode,QH_LOW_FAIL);
1388 break;
1389
1390#if 0
1391 /* >>chng 01 apr 21, change DBL_MAX -> DBL_MAX/100. to work around
1392 * a bug in the Compaq C compiler V6.3-025 when invoked with -fast, PvH */
1393 for( j=0; j <= k; j++ )
1394 {
1395 p[j] /= DBL_MAX/100.;
1396 dPdlnT[j] /= DBL_MAX/100.;
1397 }
1398 RadCooling /= DBL_MAX/100.;
1399#endif
1400 }
1401
1402 /* this may catch a bug in the Compaq C compiler V6.3-025
1403 * if this gets triggered, try compiling with -ieee */
1404 ASSERT( p[k] > 0. && dPdlnT[k] > 0. && RadCooling > 0. );
1405
1406 /* do a check for negative slope and if there will be enough room to store results */
1407 if( k > 0 && k%NQTEST == 0 )
1408 {
1409 double wid, avStep, factor;
1410 /* >>chng 02 jul 31, do additional test for HIGH_FAIL,
1411 * first test before main loop doesn't catch everything, PvH */
1412 if( lgAllNegSlope )
1413 {
1414 *ErrorCode = MAX2(*ErrorCode,QH_HIGH_FAIL);
1415 break;
1416 }
1417
1418 /* this is a lower limit for the total width of the probability distr */
1419 /* >>chng 02 jan 30, corrected calculation of wid and avStep, PvH */
1420 wid = (sqrt(-log(PROB_CUTOFF_LO)/(4.*LN_TWO)) +
1421 sqrt(-log(PROB_CUTOFF_HI)/(4.*LN_TWO)))*fwhm/Umax;
1422 avStep = log(u1[k]/u1[0])/(double)k;
1423 /* make an estimate for the number of steps needed */
1424 /* >>chng 02 jan 30, included factor 1.5 because stepsize increases near peak, PvH */
1425 /* >>chng 02 aug 06, changed 1.5 to sliding scale because of laqheat2.in test, PvH */
1426 factor = 1.1 + 3.9*(1.0 - sqrt((double)k/(double)NQGRID));
1427 if( wid/avStep > factor*(double)NQGRID )
1428 {
1429 *ErrorCode = MAX2(*ErrorCode,QH_ARRAY_FAIL);
1430 break;
1431 }
1432 }
1433
1434 /* if we run out of room to store results, do regular break
1435 * the code below will sort out if integration is valid or not */
1436 if( k >= NQGRID-2 )
1437 {
1438 *ErrorCode = MAX2(*ErrorCode,QH_ARRAY_FAIL);
1439 break;
1440 }
1441
1442 /* force thermal equilibrium of the grains */
1443 fac = RadCooling*gv.bin[nd]->cnv_GR_pCM3*EN1RYD/gv.bin[nd]->GrainHeat;
1444
1445 /* this is regular stop criterion */
1446 if( dPdlnT[k] < dPdlnT[k-1] && dPdlnT[k]/fac < PROB_CUTOFF_HI )
1447 {
1448 break;
1449 }
1450 }
1451
1452 if( l == MAX_LOOP )
1453 *ErrorCode = MAX2(*ErrorCode,QH_LOOP_FAIL);
1454
1455 nok = k;
1456 nbin = k+1;
1457
1458 /* there are insufficient bins to attempt rebinning */
1459 if( *ErrorCode < QH_NO_REBIN && nbin < NQMIN )
1460 *ErrorCode = MAX2(*ErrorCode,QH_NBIN_FAIL);
1461
1462 /* >>chng 02 aug 07, do some basic checks on the distribution first */
1463 if( *ErrorCode < QH_NO_REBIN )
1464 ScanProbDistr(u1,dPdlnT,nbin,maxVal,nmax,qtmin1,&nstart,&nstart2,&nend,nWideFail,ErrorCode);
1465
1466 if( *ErrorCode >= QH_NO_REBIN )
1467 {
1468 return;
1469 }
1470
1471 for( j=0; j < nbin; j++ )
1472 {
1473 p[j] /= fac;
1474 dPdlnT[j] /= fac;
1475 }
1476 RadCooling /= fac;
1477
1478 /* >>chng 02 aug 08, moved RebinQHeatResults from here further down, this improves new_tmin estimate */
1479 *new_tmin = 0.;
1480 for( j=nstart; j < nbin; j++ )
1481 {
1482 if( dPdlnT[j] < PROB_CUTOFF_LO )
1483 {
1484 *new_tmin = qtemp[j];
1485 }
1486 else
1487 {
1488 if( j == nstart )
1489 {
1490 /* if dPdlnT[nstart] is too high, but qtmin1 is already close to GRAIN_TMIN,
1491 * then don't bother signaling a QH_BOUND_FAIL. grains below GRAIN_TMIN have the
1492 * peak of their thermal emission beyond 3 meter, so they really are irrelevant
1493 * since free-free emission from electrons will drown this grain emission... */
1494 if( dPdlnT[j] > SAFETY*PROB_CUTOFF_LO && qtmin1 > 1.2*GRAIN_TMIN )
1495 *ErrorCode = MAX2(*ErrorCode,QH_BOUND_FAIL);
1496
1497 /* >>chng 02 aug 11, use nstart2 for more reliable extrapolation */
1498 if( dPdlnT[nstart2] < 0.999*dPdlnT[nstart2+NSTARTUP] )
1499 {
1500 /* >>chng 02 aug 09, new formula for extrapolating new_tmin, PvH */
1501 /* this assumes that at low temperatures the behaviour
1502 * is as follows: dPdlnT(T) = C1 * exp( -C2/T**3 ) */
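					/* with that ansatz the two sample points T1 = qtemp[nstart2] and
					 * T2 = qtemp[nstart2+NSTARTUP] give C2 = ln(y2/y1)/(1/T1^3 - 1/T2^3),
					 * and solving C1*exp(-C2/T^3) = PROB_CUTOFF_LO for T yields
					 * T = ( C2/( C2/T1^3 + ln(y1/PROB_CUTOFF_LO) ) )^(1/3), as coded below */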
1503 double T1 = qtemp[nstart2];
1504 double T2 = qtemp[nstart2+NSTARTUP];
1505 double delta_y = log(dPdlnT[nstart2+NSTARTUP]/dPdlnT[nstart2]);
1506 double c2 = delta_y/(1./POW3(T1)-1./POW3(T2));
1507 double help = c2/POW3(T1) + log(dPdlnT[nstart2]/PROB_CUTOFF_LO);
1508 *new_tmin = pow(c2/help,1./3.);
1509 }
1510
1511 /* >>chng 04 nov 09, in case of lower bound failure, assure qtmin is lowered, PvH */
1512 if( dPdlnT[j] > SAFETY*PROB_CUTOFF_LO && *new_tmin >= qtmin1 )
1513 {
1514 double delta_x = log(qtemp[nstart2+NSTARTUP]/qtemp[nstart2]);
1515 double delta_y = log(dPdlnT[nstart2+NSTARTUP]/dPdlnT[nstart2]);
1516 delta_x *= log(PROB_CUTOFF_LO/dPdlnT[nstart2])/delta_y;
1517 *new_tmin = qtemp[nstart2]*exp(delta_x);
1518 if( *new_tmin < qtmin1 )
1519 /* in general this estimate will be too low -> use geometric mean */
1520 *new_tmin = sqrt( *new_tmin*qtmin1 );
1521 else
1522 /* last resort... */
1523 *new_tmin = qtmin1/DEF_FAC;
1524 }
1525 }
1526 break;
1527 }
1528 }
1529 *new_tmin = MAX3(*new_tmin,qtmin1/DEF_FAC,GRAIN_TMIN);
1530
1531 ASSERT( *new_tmin < gv.bin[nd]->tedust );
1532
1533 /* >>chng 02 jan 30, prevent excessive looping when prob distribution simply won't fit, PvH */
1534 if( dPdlnT[nbin-1] > SAFETY*PROB_CUTOFF_HI )
1535 {
1536 if( *ErrorCode == QH_ARRAY_FAIL || *ErrorCode == QH_LOOP_FAIL )
1537 {
1538 ++(*nWideFail);
1539
1540 if( *nWideFail < WIDE_FAIL_MAX )
1541 {
1542 * this indicates that the low end was OK, but we ran out of room
1543 * to store the high end -> try GetProbDistr_HighLimit instead */
1544 *ErrorCode = MAX2(*ErrorCode,QH_DELTA_FAIL);
1545 }
1546 else
1547 {
1548 *ErrorCode = MAX2(*ErrorCode,QH_WIDE_FAIL);
1549 }
1550 }
1551 else
1552 {
1553 *ErrorCode = MAX2(*ErrorCode,QH_BOUND_FAIL);
1554 }
1555 }
1556
1557 /* >>chng 01 may 11, rebin the quantum heating results
1558 *
1559 * for grains in intense radiation fields, the code needs high resolution for stability
1560 * and therefore produces lots of small bins, even though the grains never make large
1561 * excursions from the equilibrium temperature; adding in the resulting spectra in RT_diffuse
1562 * takes up an excessive amount of CPU time where most CPU is spent on grains for which
1563 * the quantum treatment matters least, and moreover on temperature bins with very low
1564 * probability; rebinning the results on a coarser grid should help reduce the overhead */
1565 /* >>chng 02 aug 07, use nstart and nend while rebinning */
1566
1567 nbin = RebinQHeatResults(nd,nstart,nend,p,qtemp,qprob,dPdlnT,u1,delu,Lambda,ErrorCode);
1568
1569 /* >>chng 01 jul 13, add fail-safe for failure in RebinQHeatResults */
1570 if( nbin == 0 )
1571 {
1572 return;
1573 }
1574
1575 *qnbin = nbin;
1576
1577 sum = 0.;
1578 for( j=0; j < nbin; j++ )
1579 {
1580 sum += qprob[j];
1581 }
1582
1583 /* the fact that the probability normalization fails may indicate that the distribution is
1584 * too close to a delta function to resolve, but another possibility is that the radiation
1585 * field is extremely diluted allowing a sizable fraction of the grains to cool below
1586 * GRAIN_TMIN. In the latter case we don't raise QH_CONV_FAIL since these very cool grains
1587 * only contribute at very long radio wavelengths (more than 1 meter) */
1588 if( fabs(sum-1.) > PROB_TOL && qtmin1 > 1.2*GRAIN_TMIN )
1589 *ErrorCode = MAX2(*ErrorCode,QH_CONV_FAIL);
1590
1591 if( trace.lgTrace && trace.lgDustBug )
1592 {
1593 fprintf( ioQQQ,
1594 " zone %ld %s nbin %ld nok %ld nbad %ld total prob %.4e rel_tol %.3e new_tmin %.4e\n",
1595 nzone,gv.bin[nd]->chDstLab,nbin,nok,nbad,sum,rel_tol,*new_tmin );
1596 }
1597 return;
1598}
1599
1600
1601/* try two consecutive integration steps using stepsize "step/2." (yielding p[k]),
1602 * and also one double integration step using stepsize "step" (yielding p2k).
1603 * the difference fabs(p2k-p[k])/(3.*p[k]) can be used to estimate the relative
1604 * accuracy of p[k] and will be used to adapt the stepsize to an optimal value */
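/* a note on the factor 3: for a trapezoidal scheme the local truncation error scales as
 * step^3, so the error in the two half-steps is roughly one quarter of that in the single
 * double step; the difference |p2k - p[k]| is therefore about three times the error that
 * remains in p[k], which is why the returned estimate is divided by 3 */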
1605STATIC double TryDoubleStep(vector<double>& u1,
1606 vector<double>& delu,
1607 vector<double>& p,
1608 vector<double>& qtemp,
1609 vector<double>& Lambda,
1610 const vector<double>& Phi,
1611 const vector<double>& PhiDrv,
1612 double step,
1613 /*@out@*/ double *cooling,
1614 long index,
1615 size_t nd,
1616 /*@out@*/ bool *lgBoundFail)
1617{
1618 long i,
1619 j,
1620 jlo,
1621 k=-1000;
1622 double bval_jk,
1623 cooling2,
1624 p2k,
1625 p_max,
1626 RelErrCool,
1627 RelErrPk,
1628 sum,
1629 sum2 = -DBL_MAX,
1630 trap1,
1631 trap12 = -DBL_MAX,
1632 trap2,
1633 uhi,
1634 ulo,
1635 umin,
1636 y;
1637
1638 DEBUG_ENTRY( "TryDoubleStep()" );
1639
1640 /* sanity checks */
1641 ASSERT( index >= 0 && index < NQGRID-2 && nd < gv.bin.size() && step > 0. );
1642
1643 ulo = rfield.anu[0];
1644 /* >>chng 01 nov 29, rfield.nflux -> gv.qnflux, PvH */
1645 /* >>chng 03 jan 26, gv.qnflux -> gv.bin[nd]->qnflux, PvH */
1646 uhi = rfield.anu[gv.bin[nd]->qnflux-1];
1647
1648 /* >>chng 01 nov 21, skip initial bins if they have very low probability */
1649 p_max = 0.;
1650 for( i=0; i <= index; i++ )
1651 p_max = MAX2(p_max,p[i]);
1652 jlo = 0;
1653 while( p[jlo] < PROB_CUTOFF_LO*p_max )
1654 jlo++;
1655
1656 for( i=1; i <= 2; i++ )
1657 {
1658 bool lgErr;
1659 long ipLo = 0;
1660 long ipHi = gv.bin[nd]->qnflux-1;
1661 k = index + i;
1662 delu[k] = step/2.;
1663 u1[k] = u1[k-1] + delu[k];
1664 qtemp[k] = inv_ufunct(u1[k],nd,lgBoundFail);
1665 splint_safe(gv.dsttmp,gv.bin[nd]->dstems,gv.bin[nd]->dstslp2,NDEMS,log(qtemp[k]),&y,&lgErr);
1666 *lgBoundFail = *lgBoundFail || lgErr;
1667 Lambda[k] = exp(y)*gv.bin[nd]->cnv_H_pGR/EN1RYD;
1668
1669 sum = sum2 = 0.;
1670 trap1 = trap2 = trap12 = 0.;
1671
1672 /* this loop uses up a large fraction of the total CPU time, it should be well optimized */
1673 for( j=jlo; j < k; j++ )
1674 {
1675 umin = u1[k] - u1[j];
1676
1677 if( umin >= uhi )
1678 {
1679 /* for increasing j, umin will decrease monotonically. If ( umin > uhi ) holds for
1680 * the current value of j, it must have held for the previous value as well. Hence
1681 * both trap1 and trap2 are zero at this point and we would only be adding zero
1682 * to sum. Therefore we skip this step to save CPU time */
1683 continue;
1684 }
1685 else if( umin > ulo )
1686 {
1687 /* do a bisection search such that rfield.anu[ipLo] <= umin < rfield.anu[ipHi]
1688 * ipoint doesn't always give the correct index into anu (it works for AnuOrg)
1689 * bisection search is also faster, which is important here to save CPU time.
1690 * on the first iteration ipLo equals 0 and the first while loop will be skipped;
1691 * after that umin is monotonically decreasing, and ipHi is retained from the
1692 * previous iteration since it is a valid upper limit; ipLo will equal ipHi-1 */
1693 long ipStep = 1;
1694 /* >>chng 03 feb 03 rjrw: hunt for lower bracket */
1695 while( rfield.anu[ipLo] > (realnum)umin )
1696 {
1697 ipHi = ipLo;
1698 ipLo -= ipStep;
1699 if( ipLo <= 0 )
1700 {
1701 ipLo = 0;
1702 break;
1703 }
1704 ipStep *= 2;
1705 }
1706 /* now do regular bisection search */
1707 while( ipHi-ipLo > 1 )
1708 {
1709 long ipMd = (ipLo+ipHi)/2;
1710 if( rfield.anu[ipMd] > (realnum)umin )
1711 ipHi = ipMd;
1712 else
1713 ipLo = ipMd;
1714 }
1715 bval_jk = Phi[ipLo] + (umin - rfield.anu[ipLo])*PhiDrv[ipLo];
1716 }
1717 else
1718 {
1719 bval_jk = Phi[0];
1720 }
1721
1722 /* these two quantities are needed to take double step from index -> index+2 */
1723 trap12 = trap1;
1724 sum2 = sum;
1725
1726 /* bval_jk*gv.bin[nd]->cnv_CM3_pGR is the total excitation rate from j to k and
1727 * higher due to photon absorptions and particle collisions, it already implements
1728 * Eq. 2.17 of Guhathakurta & Draine, in events/grain/s */
1729 /* >>chng 00 mar 27, factored in hden (in Phi), by PvH */
1730 /* >>chng 00 mar 29, add in contribution due to particle collisions, by PvH */
1731 /* >>chng 01 mar 30, moved multiplication of bval_jk with gv.bin[nd]->cnv_CM3_pGR
1732 * out of loop, PvH */
1733 trap2 = p[j]*bval_jk;
1734 /* Trapezoidal rule, multiplication with factor 0.5 is done outside loop */
1735 sum += (trap1 + trap2)*delu[j];
1736 trap1 = trap2;
1737 }
1738
1739 /* >>chng 00 mar 27, multiplied with delu, by PvH */
1740 /* >>chng 00 apr 05, taken out Lambda[0], improves convergence at low end dramatically!, by PvH */
1741 /* qprob[k] = sum*gv.bin[nd]->cnv_CM3_pGR*delu[k]/(Lambda[k] - Lambda[0]); */
1742 /* this expression includes factor 0.5 from trapezoidal rule above */
1743 /* p[k] = 0.5*(sum + (trap1 + p[k]*Phi[0])*delu[k])/Lambda[k] */
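			/* p[k] appears on both sides of the relation above (the closing endpoint term of
			 * the trapezoidal sum contains p[k]*Phi[0]); moving that term to the left-hand side
			 * and solving for p[k] gives the explicit expression below */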
1744 p[k] = (sum + trap1*delu[k])/(2.*Lambda[k] - Phi[0]*delu[k]);
1745
1746 // total failure -> force next step to be smaller
1747 if( p[k] <= 0. )
1748 return 3.*QHEAT_TOL;
1749 }
1750
1751 /* this is estimate for p[k] using one double step of size "step" */
1752 p2k = (sum2 + trap12*step)/(2.*Lambda[k] - Phi[0]*step);
1753
1754 // total failure -> force next step to be smaller
1755 if( p2k <= 0. )
1756 return 3.*QHEAT_TOL;
1757
1758 RelErrPk = fabs(p2k-p[k])/p[k];
1759
1760 /* this is radiative cooling due to the two probability bins we just added */
1761 /* simple trapezoidal rule will not do here, RelErrCool would never converge */
1762 *cooling = log_integral(u1[k-2],p[k-2]*Lambda[k-2],u1[k-1],p[k-1]*Lambda[k-1]);
1763 *cooling += log_integral(u1[k-1],p[k-1]*Lambda[k-1],u1[k],p[k]*Lambda[k]);
1764
1765 /* same as cooling, but now for double step of size "step" */
1766 cooling2 = log_integral(u1[k-2],p[k-2]*Lambda[k-2],u1[k],p2k*Lambda[k]);
1767
1768 /* p[0] is not reliable, so ignore convergence test on cooling on first step */
1769 RelErrCool = ( index > 0 ) ? fabs(cooling2-(*cooling))/(*cooling) : 0.;
1770
1771/* printf( " TryDoubleStep p[k-1] %.4e p[k] %.4e p2k %.4e\n",p[k-1],p[k],p2k ); */
1772 /* error scales as O(step^3), so this is relative accuracy of p[k] or cooling */
1773 return MAX2(RelErrPk,RelErrCool)/3.;
1774}
1775
1776
1777/* calculate logarithmic integral from (xx1,yy1) to (xx2,yy2) */
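/* the integrand is assumed to be a power law between the two points, y = yy1*(x/xx1)^s with
 * s = ln(yy2/yy1)/ln(xx2/xx1); integrating gives (xx2*yy2 - xx1*yy1)/(s+1), where s+1 = eps/xx
 * in the notation below; the series branch handles the case s -> -1 (i.e. xx2*yy2 ~ xx1*yy1),
 * where the direct formula would suffer from cancellation */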
1778STATIC double log_integral(double xx1,
1779 double yy1,
1780 double xx2,
1781 double yy2)
1782{
1783 double eps,
1784 result,
1785 xx;
1786
1787 DEBUG_ENTRY( "log_integral()" );
1788
1789 /* sanity checks */
1790 ASSERT( xx1 > 0. && yy1 > 0. && xx2 > 0. && yy2 > 0. );
1791
1792 xx = log(xx2/xx1);
1793 eps = log((xx2/xx1)*(yy2/yy1));
1794 if( fabs(eps) < 1.e-4 )
1795 {
1796 result = xx1*yy1*xx*((eps/6. + 0.5)*eps + 1.);
1797 }
1798 else
1799 {
1800 result = (xx2*yy2 - xx1*yy1)*xx/eps;
1801 }
1802 return result;
1803}
1804
1805
1806/* scan the probability distribution for valid range */
1807STATIC void ScanProbDistr(const vector<double>& u1, /* u1[nbin] */
1808 const vector<double>& dPdlnT, /* dPdlnT[nbin] */
1809 long nbin,
1810 double maxVal,
1811 long nmax,
1812 double qtmin1,
1813 /*@out@*/long *nstart,
1814 /*@out@*/long *nstart2,
1815 /*@out@*/long *nend,
1816 long *nWideFail,
1817 QH_Code *ErrorCode)
1818{
1819 bool lgSetLo,
1820 lgSetHi;
1821 long i;
1822 double deriv_max,
1823 minVal;
1824 const double MIN_FAC_LO = 1.e4;
1825 const double MIN_FAC_HI = 1.e4;
1826
1827 DEBUG_ENTRY( "ScanProbDistr()" );
1828
1829 /* sanity checks */
1830 ASSERT( nbin > 0 && nmax >= 0 && nmax < nbin && maxVal > 0. );
1831
1832 /* sometimes the probability distribution will start falling before settling on
1833 * a rising slope. In such a case nstart will point to the first rising point,
1834 * while nstart2 will point to the point with the steepest derivative on the
1835 * rising slope. The code will use the distribution from nstart to nend as a
1836 * valid probability distribution, but it will use the points near nstart2
1837 * to extrapolate a new value for qtmin if needed */
1838 minVal = maxVal;
1839 *nstart = nmax;
1840 for( i=nmax; i >= 0; --i )
1841 {
1842 if( dPdlnT[i] < minVal )
1843 {
1844 *nstart = i;
1845 minVal = dPdlnT[i];
1846 }
1847 }
1848 deriv_max = 0.;
1849 *nstart2 = nmax;
1850 for( i=nmax; i > *nstart; --i )
1851 {
1852 double deriv = log(dPdlnT[i]/dPdlnT[i-1])/log(u1[i]/u1[i-1]);
1853 if( deriv > deriv_max )
1854 {
1855 *nstart2 = i-1;
1856 deriv_max = deriv;
1857 }
1858 }
1859 *nend = nbin-1;
1860
1861 /* now do quality control; these checks are more stringent than the ones in GetProbDistr_LowLimit */
1862 lgSetLo = ( nmax >= *nend || maxVal/dPdlnT[*nend] < MIN_FAC_HI );
1863 /* >>chng 03 jan 22, prevent problems if both dPdlnT and its derivative are continuously rising,
1864 * in which case both lgSetLo and lgSetHi are set and QH_WIDE_FAIL is triggered;
1865 * this can happen if qtmin is far too low; triggered by pahtest.in, PvH */
1866 if( lgSetLo )
1867 /* use relaxed test if lgSetLo is already set */
1868 lgSetHi = ( nmax <= *nstart || maxVal/dPdlnT[*nstart] < MIN_FAC_LO );
1869 else
1870 lgSetHi = ( nmax <= *nstart2 || maxVal/dPdlnT[*nstart2] < MIN_FAC_LO );
1871
1872 if( lgSetLo && lgSetHi )
1873 {
1874 ++(*nWideFail);
1875
1876 if( *nWideFail >= WIDE_FAIL_MAX )
1877 *ErrorCode = MAX2(*ErrorCode,QH_WIDE_FAIL);
1878 }
1879
1880 if( lgSetLo )
1881 *ErrorCode = MAX2(*ErrorCode,QH_LOW_FAIL);
1882
1883 /* if dPdlnT[nstart] is too high, but qtmin1 is already close to GRAIN_TMIN,
1884 * then don't bother signaling a QH_HIGH_FAIL. grains below GRAIN_TMIN have the
1885 * peak of their thermal emission beyond 3 meters, so they really are irrelevant
1886 * since free-free emission from electrons will drown this grain emission... */
1887 if( lgSetHi && qtmin1 > 1.2*GRAIN_TMIN )
1888 *ErrorCode = MAX2(*ErrorCode,QH_HIGH_FAIL);
1889
1890 /* there are insufficient bins to attempt rebinning */
1891 if( *ErrorCode < QH_NO_REBIN && (*nend - *nstart) < NQMIN )
1892 *ErrorCode = MAX2(*ErrorCode,QH_NBIN_FAIL);
1893
1894 if( trace.lgTrace && trace.lgDustBug )
1895 {
1896 fprintf( ioQQQ, " ScanProbDistr nstart %ld nstart2 %ld nend %ld nmax %ld maxVal %.3e",
1897 *nstart,*nstart2,*nend,nmax,maxVal );
1898 fprintf( ioQQQ, " dPdlnT[nstart] %.3e dPdlnT[nstart2] %.3e dPdlnT[nend] %.3e code %d\n",
1899 dPdlnT[*nstart],dPdlnT[*nstart2],dPdlnT[*nend],*ErrorCode );
1900 }
1901
1902 if( *ErrorCode >= QH_NO_REBIN )
1903 {
1904 *nstart = -1;
1905 *nstart2 = -1;
1906 *nend = -2;
1907 }
1908 return;
1909}
1910
1911
1912/* rebin the quantum heating results to speed up RT_diffuse */
1913STATIC long RebinQHeatResults(size_t nd,
1914 long nstart,
1915 long nend,
1916 vector<double>& p,
1917 vector<double>& qtemp,
1918 vector<double>& qprob,
1919 vector<double>& dPdlnT,
1920 vector<double>& u1,
1921 vector<double>& delu,
1922 vector<double>& Lambda,
1923 QH_Code *ErrorCode)
1924{
1925 long i,
1926 newnbin;
1927 double fac,
1928 help,
1929 mul_fac,
1930 PP1,
1931 PP2,
1932 RadCooling,
1933 T1,
1934 T2,
1935 Ucheck,
1936 uu1,
1937 uu2;
1938
1939 DEBUG_ENTRY( "RebinQHeatResults()" );
1940
1941 /* sanity checks */
1942 ASSERT( nd < gv.bin.size() );
1943 /* >>chng 02 aug 07, changed oldnbin -> nstart..nend */
1944 ASSERT( nstart >= 0 && nstart < nend && nend < NQGRID );
1945
1946 /* leading entries may have become very small or zero -> skip */
1947 for( i=nstart; i <= nend && dPdlnT[i] < PROB_CUTOFF_LO; i++ ) {}
1948
1949 /* >>chng 04 oct 17, add fail-safe to keep lint happy, but this should never happen... */
1950 if( i >= NQGRID )
1951 {
1952 *ErrorCode = MAX2(*ErrorCode,QH_REBIN_FAIL);
1953 return 0;
1954 }
1955
1956 /* >>chng 02 aug 15, use malloc to prevent stack overflows */
1957 vector<double> new_delu(NQGRID);
1958 vector<double> new_dPdlnT(NQGRID);
1959 vector<double> new_Lambda(NQGRID);
1960 vector<double> new_p(NQGRID);
1961 vector<double> new_qprob(NQGRID);
1962 vector<double> new_qtemp(NQGRID);
1963 vector<double> new_u1(NQGRID);
1964
1965 newnbin = 0;
1966
1967 T1 = qtemp[i];
1968 PP1 = p[i];
1969 uu1 = u1[i];
1970
1971 /* >>chng 04 feb 01, change 2.*NQMIN -> 1.5*NQMIN, PvH */
1972 help = pow(qtemp[nend]/qtemp[i],1./(1.5*NQMIN));
1973 mul_fac = MIN2(QT_RATIO,help);
1974
1975 Ucheck = u1[i];
1976 RadCooling = 0.;
1977
1978 while( i < nend )
1979 {
1980 bool lgBoundErr;
1981 bool lgDone = false;
1982 double s0 = 0.;
1983 double s1 = 0.;
1984 double wid = 0.;
1985 double xx,y;
1986
1987 T2 = T1*mul_fac;
1988
1989 do
1990 {
1991 double p1,p2,L1,L2,frac,slope;
1992 if( qtemp[i] <= T1 && T1 <= qtemp[i+1] )
1993 {
1994 /* >>chng 01 nov 15, copy uu2 into uu1 instead, PvH */
1995 /* uu1 = ufunct(T1,nd); */
1996 double xrlog = log(qtemp[i+1]/qtemp[i]);
1997 if( xrlog > 0. )
1998 {
1999 frac = log(T1/qtemp[i]);
2000 slope = log(p[i+1]/p[i])/xrlog;
2001 p1 = p[i]*exp(frac*slope);
2002 slope = log(Lambda[i+1]/Lambda[i])/xrlog;
2003 L1 = Lambda[i]*exp(frac*slope);
2004 }
2005 else
2006 {
2007 /* pathological case where slope is extremely steep (daniela.in) */
2008 p1 = sqrt(p[i]*p[i+1]);
2009 L1 = sqrt(Lambda[i]*Lambda[i+1]);
2010 }
2011 }
2012 else
2013 {
2014 /* >>chng 01 nov 15, copy uu2 into uu1 instead, PvH */
2015 /* uu1 = u1[i]; */
2016 p1 = p[i];
2017 L1 = Lambda[i];
2018 }
2019 if( qtemp[i] <= T2 && T2 <= qtemp[i+1] )
2020 {
2021 /* >>chng 02 apr 30, make sure this doesn't point beyond valid range, PvH */
2022 help = ufunct(T2,nd,&lgBoundErr);
2023 uu2 = MIN2(help,u1[i+1]);
2024 ASSERT( !lgBoundErr ); /* this should never be triggered */
2025 double xrlog = log(qtemp[i+1]/qtemp[i]);
2026 if( xrlog > 0. )
2027 {
2028 frac = log(T2/qtemp[i]);
2029 slope = log(p[i+1]/p[i])/xrlog;
2030 p2 = p[i]*exp(frac*slope);
2031 slope = log(Lambda[i+1]/Lambda[i])/xrlog;
2032 L2 = Lambda[i]*exp(frac*slope);
2033 }
2034 else
2035 {
2036 /* pathological case where slope is extremely steep */
2037 p2 = sqrt(p[i]*p[i+1]);
2038 L2 = sqrt(Lambda[i]*Lambda[i+1]);
2039 }
2040 lgDone = true;
2041 }
2042 else
2043 {
2044 uu2 = u1[i+1];
2045 p2 = p[i+1];
2046 L2 = Lambda[i+1];
2047 /* >>chng 01 nov 15, this caps the range in p(U) integrated in one bin
2048 * it helps avoid spurious QH_BOUND_FAIL's when flank is very steep, PvH */
2049 if( MAX2(p2,PP1)/MIN2(p2,PP1) > sqrt(SAFETY) )
2050 {
2051 lgDone = true;
2052 T2 = qtemp[i+1];
2053 }
2054 ++i;
2055 }
2056 PP2 = p2;
2057 wid += uu2 - uu1;
2058 /* sanity check */
2059 ASSERT( wid >= 0. );
2060 s0 += log_integral(uu1,p1,uu2,p2);
2061 s1 += log_integral(uu1,p1*L1,uu2,p2*L2);
2062 uu1 = uu2;
2063
2064 } while( i < nend && ! lgDone );
2065
2066 /* >>chng 01 nov 14, if T2 == qtemp[oldnbin-1], the code will try another iteration
2067 * break here to avoid zero divide, the assert on Ucheck tests if we are really finished */
2068 /* >>chng 01 dec 04, change ( s0 == 0. ) to ( s0 <= 0. ), PvH */
2069 if( s0 <= 0. )
2070 {
2071 ASSERT( wid == 0. );
2072 break;
2073 }
2074
2075 new_qprob[newnbin] = s0;
2076 new_Lambda[newnbin] = s1/s0;
2077 xx = log(new_Lambda[newnbin]*EN1RYD*gv.bin[nd]->cnv_GR_pH);
2078 splint_safe(gv.bin[nd]->dstems,gv.dsttmp,gv.bin[nd]->dstslp,NDEMS,xx,&y,&lgBoundErr);
2079 ASSERT( !lgBoundErr ); /* this should never be triggered */
2080 new_qtemp[newnbin] = exp(y);
2081 new_u1[newnbin] = ufunct(new_qtemp[newnbin],nd,&lgBoundErr);
2082 ASSERT( !lgBoundErr ); /* this should never be triggered */
2083 new_delu[newnbin] = wid;
2084 new_p[newnbin] = new_qprob[newnbin]/new_delu[newnbin];
2085 new_dPdlnT[newnbin] = new_p[newnbin]*new_qtemp[newnbin]*uderiv(new_qtemp[newnbin],nd);
2086
2087 Ucheck += wid;
2088 RadCooling += new_qprob[newnbin]*new_Lambda[newnbin];
2089
2090 T1 = T2;
2091 PP1 = PP2;
2092 ++newnbin;
2093 }
2094
2095 /* >>chng 01 jul 13, add fail-safe */
2096 if( newnbin < NQMIN )
2097 {
2098 *ErrorCode = MAX2(*ErrorCode,QH_REBIN_FAIL);
2099 return 0;
2100 }
2101
2102 fac = RadCooling*EN1RYD*gv.bin[nd]->cnv_GR_pCM3/gv.bin[nd]->GrainHeat;
2103
2104 if( trace.lgTrace && trace.lgDustBug )
2105 {
2106 fprintf( ioQQQ, " RebinQHeatResults found tol1 %.4e tol2 %.4e\n",
2107 fabs(u1[nend]/Ucheck-1.), fabs(fac-1.) );
2108 }
2109
2110 /* do quality control */
2111 /* >>chng 02 apr 30, tighten up check, PvH */
2112 ASSERT( fabs(u1[nend]/Ucheck-1.) < 10.*sqrt((double)(nend-nstart+newnbin))*DBL_EPSILON );
2113
2114 if( fabs(fac-1.) > CONSERV_TOL )
2115 *ErrorCode = MAX2(*ErrorCode,QH_CONV_FAIL);
2116
2117 for( i=0; i < newnbin; i++ )
2118 {
2119 /* renormalize the distribution to ensure energy conservation */
2120 p[i] = new_p[i]/fac;
2121 qtemp[i] = new_qtemp[i];
2122 qprob[i] = new_qprob[i]/fac;
2123 dPdlnT[i] = new_dPdlnT[i]/fac;
2124 u1[i] = new_u1[i];
2125 delu[i] = new_delu[i];
2126 Lambda[i] = new_Lambda[i];
2127
2128 /* sanity checks */
2129 ASSERT( qtemp[i] > 0. && qprob[i] > 0. );
2130
2131/* printf(" rk %ld T[k] %.6e U[k] %.6e p[k] %.6e dPdlnT[k] %.6e\n",i,qtemp[i],u1[i],p[i],dPdlnT[i]); */
2132 }
2133 return newnbin;
2134}
2135
2136
2137/* calculate approximate probability distribution in high intensity limit */
2138STATIC void GetProbDistr_HighLimit(long nd,
2139 double TolFac,
2140 double Umax,
2141 double fwhm,
2142 /*@out@*/vector<double>& qtemp,
2143 /*@out@*/vector<double>& qprob,
2144 /*@out@*/vector<double>& dPdlnT,
2145 /*@out@*/double *tol,
2146 /*@out@*/long *qnbin,
2147 /*@out@*/double *new_tmin,
2148 /*@out@*/QH_Code *ErrorCode)
2149{
2150 bool lgBoundErr,
2151 lgErr;
2152 long i,
2153 nbin;
2154 double c1,
2155 c2,
2156 delu[NQGRID],
2157 fac,
2158 fac1,
2159 fac2,
2160 help1,
2161 help2,
2162 L1,
2163 L2,
2164 Lambda[NQGRID],
2165 mul_fac,
2166 p[NQGRID],
2167 p1,
2168 p2,
2169 RadCooling,
2170 sum,
2171 T1,
2172 T2,
2173 Tlo,
2174 Thi,
2175 Ulo,
2176 Uhi,
2177 uu1,
2178 uu2,
2179 xx,
2180 y;
2181
2182 DEBUG_ENTRY( "GetProbDistr_HighLimit()" );
2183
2184 if( trace.lgTrace && trace.lgDustBug )
2185 {
2186 fprintf( ioQQQ, " GetProbDistr_HighLimit called for grain %s\n", gv.bin[nd]->chDstLab );
2187 }
2188
2189 c1 = sqrt(4.*LN_TWO/PI)/fwhm*exp(-POW2(fwhm/Umax)/(16.*LN_TWO));
2190 c2 = 4.*LN_TWO*POW2(Umax/fwhm);
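	/* the analytic approximation used here is a Gaussian in ln(U) centered on Umax,
	 * p(U) = c1*exp(-c2*ln(U/Umax)^2) with c2 = 4*ln(2)*(Umax/fwhm)^2, i.e. a profile with
	 * FWHM fwhm/Umax in ln(U); c1 normalizes the distribution so that Int p(U) dU = 1,
	 * which is where the extra factor exp(-(fwhm/Umax)^2/(16*ln(2))) comes from */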
2191
2192 fac1 = fwhm/Umax*sqrt(-log(PROB_CUTOFF_LO)/(4.*LN_TWO));
2193 /* >>chng 03 nov 10, safeguard against underflow, PvH */
2194 help1 = Umax*exp(-fac1);
2195 help2 = exp(gv.bin[nd]->DustEnth[0]);
2196 Ulo = MAX2(help1,help2);
2197 /* >>chng 03 jan 28, ignore lgBoundErr on lower boundary, low-T grains have negligible emission, PvH */
2198 Tlo = inv_ufunct(Ulo,nd,&lgBoundErr);
2199
2200 fac2 = fwhm/Umax*sqrt(-log(PROB_CUTOFF_HI)/(4.*LN_TWO));
2201 /* >>chng 03 nov 10, safeguard against overflow, PvH */
2202 if( fac2 > log(DBL_MAX/10.) )
2203 {
2204 *ErrorCode = MAX2(*ErrorCode,QH_WIDE_FAIL);
2205 return;
2206 }
2207 Uhi = Umax*exp(fac2);
2208 Thi = inv_ufunct(Uhi,nd,&lgBoundErr);
2209
2210 nbin = 0;
2211
2212 T1 = Tlo;
2213 uu1 = ufunct(T1,nd,&lgErr);
2214 lgBoundErr = lgBoundErr || lgErr;
2215 help1 = log(uu1/Umax);
2216 p1 = c1*exp(-c2*POW2(help1));
2217 splint_safe(gv.dsttmp,gv.bin[nd]->dstems,gv.bin[nd]->dstslp2,NDEMS,log(T1),&y,&lgErr);
2218 lgBoundErr = lgBoundErr || lgErr;
2219 L1 = exp(y)*gv.bin[nd]->cnv_H_pGR/EN1RYD;
2220
2221 /* >>chng 03 nov 10, safeguard against underflow, PvH */
2222 if( uu1*p1*L1 < 1.e5*DBL_MIN )
2223 {
2224 *ErrorCode = MAX2(*ErrorCode,QH_WIDE_FAIL);
2225 return;
2226 }
2227
2228 /* >>chng 04 feb 01, change 2.*NQMIN -> 1.2*NQMIN, PvH */
2229 help1 = pow(Thi/Tlo,1./(1.2*NQMIN));
2230 mul_fac = MIN2(QT_RATIO,help1);
2231
2232 sum = 0.;
2233 RadCooling = 0.;
2234
2235 do
2236 {
2237 double s0,s1,wid;
2238
2239 T2 = T1*mul_fac;
2240 uu2 = ufunct(T2,nd,&lgErr);
2241 lgBoundErr = lgBoundErr || lgErr;
2242 help1 = log(uu2/Umax);
2243 p2 = c1*exp(-c2*POW2(help1));
2244 splint_safe(gv.dsttmp,gv.bin[nd]->dstems,gv.bin[nd]->dstslp2,NDEMS,log(T2),&y,&lgErr);
2245 lgBoundErr = lgBoundErr || lgErr;
2246 L2 = exp(y)*gv.bin[nd]->cnv_H_pGR/EN1RYD;
2247
2248 wid = uu2 - uu1;
2249 s0 = log_integral(uu1,p1,uu2,p2);
2250 s1 = log_integral(uu1,p1*L1,uu2,p2*L2);
2251
2252 qprob[nbin] = s0;
2253 Lambda[nbin] = s1/s0;
2254 xx = log(Lambda[nbin]*EN1RYD*gv.bin[nd]->cnv_GR_pH);
2255 splint_safe(gv.bin[nd]->dstems,gv.dsttmp,gv.bin[nd]->dstslp,NDEMS,xx,&y,&lgErr);
2256 lgBoundErr = lgBoundErr || lgErr;
2257 qtemp[nbin] = exp(y);
2258 delu[nbin] = wid;
2259 p[nbin] = qprob[nbin]/delu[nbin];
2260 dPdlnT[nbin] = p[nbin]*qtemp[nbin]*uderiv(qtemp[nbin],nd);
2261
2262 sum += qprob[nbin];
2263 RadCooling += qprob[nbin]*Lambda[nbin];
2264
2265 T1 = T2;
2266 uu1 = uu2;
2267 p1 = p2;
2268 L1 = L2;
2269
2270 ++nbin;
2271
2272 } while( T2 < Thi && nbin < NQGRID );
2273
2274 fac = RadCooling*EN1RYD*gv.bin[nd]->cnv_GR_pCM3/gv.bin[nd]->GrainHeat;
2275
2276 for( i=0; i < nbin; ++i )
2277 {
2278 qprob[i] /= fac;
2279 dPdlnT[i] /= fac;
2280 }
2281
2282 *tol = sum/fac;
2283 *qnbin = nbin;
2284 *new_tmin = qtemp[0];
2285 *ErrorCode = MAX2(*ErrorCode,QH_ANALYTIC);
2286
2287 /* do quality control */
2288 if( TolFac > STRICT )
2289 *ErrorCode = MAX2(*ErrorCode,QH_ANALYTIC_RELAX);
2290
2291 if( lgBoundErr )
2292 *ErrorCode = MAX2(*ErrorCode,QH_THIGH_FAIL);
2293
2294 if( fabs(sum/fac-1.) > PROB_TOL )
2295 *ErrorCode = MAX2(*ErrorCode,QH_CONV_FAIL);
2296
2297 if( dPdlnT[0] > SAFETY*PROB_CUTOFF_LO || dPdlnT[nbin-1] > SAFETY*PROB_CUTOFF_HI )
2298 *ErrorCode = MAX2(*ErrorCode,QH_BOUND_FAIL);
2299
2300 if( trace.lgTrace && trace.lgDustBug )
2301 {
2302 fprintf( ioQQQ, " GetProbDistr_HighLimit found tol1 %.4e tol2 %.4e\n",
2303 fabs(sum-1.), fabs(sum/fac-1.) );
2304 fprintf( ioQQQ, " zone %ld %s nbin %ld total prob %.4e new_tmin %.4e\n",
2305 nzone,gv.bin[nd]->chDstLab,nbin,sum/fac,*new_tmin );
2306 }
2307 return;
2308}
2309
2310
2311/* calculate derivative of the enthalpy function dU/dT (aka the specific heat) at a given temperature, in Ryd/K */
2312STATIC double uderiv(double temp,
2313 size_t nd)
2314{
2315 enth_type ecase;
2316 long int i,
2317 j;
2318 double N_C,
2319 N_H;
2320 double deriv = 0.,
2321 hok[3] = {1275., 1670., 4359.},
2322 numer,
2323 dnumer,
2324 denom,
2325 ddenom,
2326 x;
2327
2328
2329 DEBUG_ENTRY( "uderiv()" );
2330
2331 if( temp <= 0. )
2332 {
2333 fprintf( ioQQQ, " uderiv called with non-positive temperature: %.6e\n" , temp );
2334 cdEXIT(EXIT_FAILURE);
2335 }
2336 ASSERT( nd < gv.bin.size() );
2337
2338 ecase = gv.which_enth[gv.bin[nd]->matType];
2339 switch( ecase )
2340 {
2341 case ENTH_CAR:
2342 numer = (4.15e-22/EN1RYD)*pow(temp,3.3);
2343 dnumer = (3.3*4.15e-22/EN1RYD)*pow(temp,2.3);
2344 denom = 1. + 6.51e-03*temp + 1.5e-06*temp*temp + 8.3e-07*pow(temp,2.3);
2345 ddenom = 6.51e-03 + 2.*1.5e-06*temp + 2.3*8.3e-07*pow(temp,1.3);
2346 /* dU/dT for pah/graphitic grains in Ryd/K, derived from:
2347 * >>refer grain physics Guhathakurta & Draine, 1989, ApJ, 345, 230 */
2348 deriv = (dnumer*denom - numer*ddenom)/POW2(denom);
2349 break;
2350 case ENTH_CAR2:
2351 /* dU/dT for graphite grains in Ryd/K, using eq 9 of */
2352 /* >>refer grain physics Draine B.T., and Li A., 2001, ApJ, 551, 807 */
2353 deriv = (DebyeDeriv(temp/863.,2) + 2.*DebyeDeriv(temp/2504.,2))*BOLTZMANN/EN1RYD;
2354 break;
2355 case ENTH_SIL:
2356 /* dU/dT for silicate grains (and grey grains) in Ryd/K */
2357 /* limits of tlim set above, 0 and DBL_MAX, so always OK */
2358 /* >>refer grain physics Guhathakurta & Draine, 1989, ApJ, 345, 230 */
2359 for( j = 0; j < 4; j++ )
2360 {
2361 if( temp > tlim[j] && temp <= tlim[j+1] )
2362 {
2363 deriv = cval[j]*pow(temp,ppower[j]);
2364 break;
2365 }
2366 }
2367 break;
2368 case ENTH_SIL2:
2369 /* dU/dT for silicate grains in Ryd/K, using eq 11 of */
2370 /* >>refer grain physics Draine B.T., and Li A., 2001, ApJ, 551, 807 */
2371 deriv = (2.*DebyeDeriv(temp/500.,2) + DebyeDeriv(temp/1500.,3))*BOLTZMANN/EN1RYD;
2372 break;
2373 case ENTH_PAH:
2374 /* dU/dT for PAH grains in Ryd/K, using eq A.4 of */
2375 /* >>refer grain physics Dwek E., Arendt R.G., Fixsen D.J. et al., 1997, ApJ, 475, 565 */
2376 * this expression is only valid up to 2000 K */
2377 x = log10(MIN2(temp,2000.));
2378 deriv = pow(10.,-21.26+3.1688*x-0.401894*POW2(x))/EN1RYD;
2379 break;
2380 case ENTH_PAH2:
2381 /* dU/dT for PAH grains in Ryd/K, approximately using eq 33 of */
2382 /* >>refer grain physics Draine B.T., and Li A., 2001, ApJ, 551, 807 */
2383 /* N_C and N_H should actually be nint(N_C) and nint(N_H),
2384 * but this can lead to FP overflow for very large grains */
2385 N_C = no_atoms(nd);
2386 if( N_C <= 25. )
2387 {
2388 N_H = 0.5*N_C;
2389 }
2390 else if( N_C <= 100. )
2391 {
2392 N_H = 2.5*sqrt(N_C);
2393 }
2394 else
2395 {
2396 N_H = 0.25*N_C;
2397 }
2398 deriv = 0.;
2399 for( i=0; i < 3; i++ )
2400 {
2401 double help1 = hok[i]/temp;
2402 if( help1 < 300. )
2403 {
2404 double help2 = exp(help1);
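				/* help3 below is exp(help1)-1; for very small arguments the truncated series
				 * x*(1+x/2) is used instead of help2-1. to avoid cancellation */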
2405 double help3 = ( help1 < 1.e-7 ) ? help1*(1.+0.5*help1) : help2-1.;
2406 deriv += N_H/(N_C-2.)*POW2(help1)*help2/POW2(help3)*BOLTZMANN/EN1RYD;
2407 }
2408 }
2409 deriv += (DebyeDeriv(temp/863.,2) + 2.*DebyeDeriv(temp/2504.,2))*BOLTZMANN/EN1RYD;
2410 break;
2411 default:
2412 fprintf( ioQQQ, " uderiv called with unknown type for enthalpy function: %d\n", ecase );
2413 cdEXIT(EXIT_FAILURE);
2414 }
2415
2416 /* >>chng 00 mar 23, use formula 3.1 of Guhathakurta & Draine, by PvH */
2417 /* >>chng 03 jan 17, use MAX2(..,1) to prevent crash for extremely small grains, PvH */
2418 deriv *= MAX2(no_atoms(nd)-2.,1.);
2419
2420 if( deriv <= 0. )
2421 {
2422 fprintf( ioQQQ, " uderiv finds non-positive derivative: %.6e, what's up?\n" , deriv );
2423 cdEXIT(EXIT_FAILURE);
2424 }
2425 return( deriv );
2426}
2427
2428
2429/* calculate the enthalpy of a grain at a given temperature, in Ryd */
2430STATIC double ufunct(double temp,
2431 size_t nd,
2432 /*@out@*/ bool *lgBoundErr)
2433{
2434 double enthalpy,
2435 y;
2436
2437 DEBUG_ENTRY( "ufunct()" );
2438
2439 if( temp <= 0. )
2440 {
2441 fprintf( ioQQQ, " ufunct called with non-positive temperature: %.6e\n" , temp );
2442 cdEXIT(EXIT_FAILURE);
2443 }
2444 ASSERT( nd < gv.bin.size() );
2445
2446 /* >>chng 02 apr 22, interpolate in DustEnth array to get enthalpy, by PvH */
2447 splint_safe(gv.dsttmp,gv.bin[nd]->DustEnth,gv.bin[nd]->EnthSlp,NDEMS,log(temp),&y,lgBoundErr);
2448 enthalpy = exp(y);
2449
2450 ASSERT( enthalpy > 0. );
2451 return( enthalpy );
2452}
2453
2454
2455/* this is the inverse of ufunct: determine grain temperature as a function of enthalpy */
2456STATIC double inv_ufunct(double enthalpy,
2457 size_t nd,
2458 /*@out@*/ bool *lgBoundErr)
2459{
2460 double temp,
2461 y;
2462
2463 DEBUG_ENTRY( "inv_ufunct()" );
2464
2465 if( enthalpy <= 0. )
2466 {
2467 fprintf( ioQQQ, " inv_ufunct called with non-positive enthalpy: %.6e\n" , enthalpy );
2468 cdEXIT(EXIT_FAILURE);
2469 }
2470 ASSERT( nd < gv.bin.size() );
2471
2472 /* >>chng 02 apr 22, interpolate in DustEnth array to get temperature, by PvH */
2473 splint_safe(gv.bin[nd]->DustEnth,gv.dsttmp,gv.bin[nd]->EnthSlp2,NDEMS,log(enthalpy),&y,lgBoundErr);
2474 temp = exp(y);
2475
2476 ASSERT( temp > 0. );
2477 return( temp );
2478}
2479
2480
2481/* initialize interpolation arrays for grain enthalpy */
2482void InitEnthalpy(void)
2483{
2484 double C_V1,
2485 C_V2,
2486 tdust1,
2487 tdust2;
2488
2489 DEBUG_ENTRY( "InitEnthalpy()" );
2490
2491 for( size_t nd=0; nd < gv.bin.size(); nd++ )
2492 {
2493 tdust2 = GRAIN_TMIN;
2494 C_V2 = uderiv(tdust2,nd);
2495 /* at low temps, C_V = C*T^3 -> U = C*T^4/4 = C_V*T/4 */
2496 gv.bin[nd]->DustEnth[0] = C_V2*tdust2/4.;
2497 tdust1 = tdust2;
2498 C_V1 = C_V2;
2499
2500 for( long i=1; i < NDEMS; i++ )
2501 {
2502 double tmid,Cmid;
2503 tdust2 = exp(gv.dsttmp[i]);
2504 C_V2 = uderiv(tdust2,nd);
2505 tmid = sqrt(tdust1*tdust2);
2506 /* this ensures accuracy for silicate enthalpy */
2507 for( long j=1; j < 4; j++ )
2508 {
2509 if( tdust1 < tlim[j] && tlim[j] < tdust2 )
2510 {
2511 tmid = tlim[j];
2512 break;
2513 }
2514 }
2515 Cmid = uderiv(tmid,nd);
2516 gv.bin[nd]->DustEnth[i] = gv.bin[nd]->DustEnth[i-1] +
2517 log_integral(tdust1,C_V1,tmid,Cmid) +
2518 log_integral(tmid,Cmid,tdust2,C_V2);
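			/* the enthalpy is accumulated as U(T_i) = U(T_i-1) + Int C_V dT over the interval;
			 * the integral is split at tmid (a breakpoint of the silicate specific heat where
			 * one falls inside the interval) and each piece is evaluated with the power-law
			 * integrator log_integral */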
2519 tdust1 = tdust2;
2520 C_V1 = C_V2;
2521 }
2522 }
2523
2524 /* conversion for logarithmic interpolation */
2525 for( size_t nd=0; nd < gv.bin.size(); nd++ )
2526 {
2527 for( long i=0; i < NDEMS; i++ )
2528 {
2529 gv.bin[nd]->DustEnth[i] = log(gv.bin[nd]->DustEnth[i]);
2530 }
2531 }
2532
2533 /* now find slopes from spline fit */
2534 for( size_t nd=0; nd < gv.bin.size(); nd++ )
2535 {
2536 /* set up coefficients for splint */
2537 spline(gv.dsttmp,gv.bin[nd]->DustEnth,NDEMS,2e31,2e31,gv.bin[nd]->EnthSlp);
2538 spline(gv.bin[nd]->DustEnth,gv.dsttmp,NDEMS,2e31,2e31,gv.bin[nd]->EnthSlp2);
2539 }
2540 return;
2541}
2542
2543
2544/* helper function for calculating specific heat, uses Debye theory */
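/* for an n-dimensional Debye spectrum with Debye temperature Theta, the specific heat per
 * vibrational degree of freedom is
 *    C_V/k = n*(T/Theta)^n * Int_0^(Theta/T) u^(n+1)*exp(u)/(exp(u)-1)^2 du,
 * which is what this routine returns for x = T/Theta; for T << Theta the integral approaches
 * Gamma(n+2)*zeta(n+1), which is the limit used in the small-x branch below */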
2545STATIC double DebyeDeriv(double x,
2546 long n)
2547{
2548 long i,
2549 nn;
2550 double res;
2551
2552 DEBUG_ENTRY( "DebyeDeriv()" );
2553
2554 ASSERT( x > 0. );
2555 ASSERT( n == 2 || n == 3 );
2556
2557 if( x < 0.001 )
2558 {
2559 /* for general n this is Gamma(n+2)*zeta(n+1)*powi(x,n) */
2560 if( n == 2 )
2561 {
2562 res = 6.*1.202056903159594*POW2(x);
2563 }
2564 else if( n == 3 )
2565 {
2566 res = 24.*1.082323233711138*POW3(x);
2567 }
2568 else
2569 /* added to keep lint happy - note that assert above confirmed that n is 2 or 3,
2570 * but lint flagged possible flow without defn of res */
2571 TotalInsanity();
2572 }
2573 else
2574 {
2575 nn = 4*MAX2(4,2*(long)(0.05/x));
2576 vector<double> xx(nn);
2577 vector<double> rr(nn);
2578 vector<double> aa(nn);
2579 vector<double> ww(nn);
2580 gauss_legendre(nn,xx,aa);
2581 gauss_init(nn,0.,1.,xx,aa,rr,ww);
2582
2583 res = 0.;
2584 for( i=0; i < nn; i++ )
2585 {
2586 double help1 = rr[i]/x;
2587 if( help1 < 300. )
2588 {
2589 double help2 = exp(help1);
2590 double help3 = ( help1 < 1.e-7 ) ? help1*(1.+0.5*help1) : help2-1.;
2591 res += ww[i]*powi(rr[i],n+1)*help2/POW2(help3);
2592 }
2593 }
2594 res /= POW2(x);
2595 }
2596 return (double)n*res;
2597}