SundanceDiffOpEvaluator.cpp
Go to the documentation of this file.
00001 /* @HEADER@ */
00002 // ************************************************************************
00003 // 
00004 //                              Sundance
00005 //                 Copyright (2005) Sandia Corporation
00006 // 
00007 // Copyright (year first published) Sandia Corporation.  Under the terms 
00008 // of Contract DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government 
00009 // retains certain rights in this software.
00010 // 
00011 // This library is free software; you can redistribute it and/or modify
00012 // it under the terms of the GNU Lesser General Public License as
00013 // published by the Free Software Foundation; either version 2.1 of the
00014 // License, or (at your option) any later version.
00015 //  
00016 // This library is distributed in the hope that it will be useful, but
00017 // WITHOUT ANY WARRANTY; without even the implied warranty of
00018 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00019 // Lesser General Public License for more details.
00020 //                                                                                 
00021 // You should have received a copy of the GNU Lesser General Public
00022 // License along with this library; if not, write to the Free Software
00023 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
00024 // USA                                                                                
00025 // Questions? Contact Kevin Long (krlong@sandia.gov), 
00026 // Sandia National Laboratories, Livermore, California, USA
00027 // 
00028 // ************************************************************************
00029 /* @HEADER@ */
00030 
00031 #include "SundanceDiffOpEvaluator.hpp"
00032 #include "SundanceEvalManager.hpp"
00033 #include "SundanceDiffOp.hpp"
00034 
00035 
00036 #include "PlayaTabs.hpp"
00037 #include "SundanceOut.hpp"
00038 #include "SundanceUnknownFuncElement.hpp"
00039 #include "SundanceTestFuncElement.hpp"
00040 #include "SundanceDiscreteFuncElement.hpp"
00041 #include "SundanceDiscreteFuncEvaluator.hpp"
00042 #include "SundanceEvaluatableExpr.hpp"
00043 #include "SundanceZeroExpr.hpp"
00044 
00045 using namespace Sundance;
00046 using namespace Sundance;
00047 using namespace Sundance;
00048 using namespace Teuchos;
00049 
00050 
00051 
00052 
00053 
00054 DiffOpEvaluator
00055 ::DiffOpEvaluator(const DiffOp* expr,
00056   const EvalContext& context)
00057   : UnaryEvaluator<DiffOp>(expr, context),
00058     isConstant_(this->sparsity()->numDerivs()),
00059     resultIndices_(this->sparsity()->numDerivs()),
00060     constantMonomials_(this->sparsity()->numDerivs()),
00061     vectorMonomials_(this->sparsity()->numDerivs()),
00062     constantFuncCoeffs_(this->sparsity()->numDerivs()),
00063     vectorFuncCoeffs_(this->sparsity()->numDerivs()),
00064     funcEvaluators_(),
00065     constantCoeffFuncIndices_(this->sparsity()->numDerivs()),
00066     constantCoeffFuncMi_(this->sparsity()->numDerivs()),
00067     vectorCoeffFuncIndices_(this->sparsity()->numDerivs()),
00068     vectorCoeffFuncMi_(this->sparsity()->numDerivs())
00069 {
00070   int verb = context.setupVerbosity();
00071   Tabs tabs;
00072   SUNDANCE_MSG1(verb, tabs << "initializing diff op evaluator for " 
00073     << expr->toString());
00074 
00075   {
00076     Tabs tab0;
00077   
00078     SUNDANCE_MSG2(verb, tab0 << "return sparsity " << std::endl << *(this->sparsity)());
00079 
00080     SUNDANCE_MSG2(verb, tab0 << "argument sparsity subset " << std::endl 
00081       << *(argSparsitySuperset()));
00082 
00083     Map<const DiscreteFuncElementEvaluator*, int> funcToIndexMap;
00084 
00085     int vecResultIndex = 0;
00086     int constResultIndex = 0;
00087   
00088     for (int i=0; i<this->sparsity()->numDerivs(); i++)
00089     {
00090       Tabs tab1;
00091       const MultipleDeriv& resultDeriv = this->sparsity()->deriv(i);
00092       SUNDANCE_MSG3(verb, tab0 << "working out procedure for computing " 
00093         << resultDeriv);
00094 
00095       if (this->sparsity()->state(i)==ConstantDeriv)
00096       {
00097         Tabs tab2;
00098         addConstantIndex(i, constResultIndex);
00099         resultIndices_[i] = constResultIndex++;
00100         isConstant_[i] = true;
00101         SUNDANCE_MSG3(verb, tab2 << "deriv is constant, will be stored at index "
00102           << resultIndices_[i] << " in the const result array");
00103       }
00104       else
00105       {
00106         Tabs tab2;
00107         addVectorIndex(i, vecResultIndex);
00108         resultIndices_[i] = vecResultIndex++;
00109         isConstant_[i] = false;
00110         SUNDANCE_MSG3(verb, tab2 << "deriv is variable, will be stored at index "
00111           << resultIndices_[i] << " in the var result array");
00112       }
00113 
00114       int order = resultDeriv.order();
00115       const Set<MultipleDeriv>& RArg 
00116         = argExpr()->findR(order, context);
00117       const Set<MultipleDeriv>& RArgPlus
00118         = argExpr()->findR(order+1, context);
00119       const Set<MultipleDeriv>& W1Arg 
00120         = argExpr()->findW(1, context);
00121 
00122         
00123       SUNDANCE_MSG3(verb, tab1 << "RArg = " << RArg);
00124       SUNDANCE_MSG3(verb, tab1 << "RArgPlus = " << RArgPlus);
00125       SUNDANCE_MSG3(verb, tab1 << "W1Arg = " << W1Arg);
00126 
00127       Set<MultipleDeriv> funcTermCoeffs 
00128         = RArgPlus.intersection(increasedDerivs(resultDeriv, W1Arg, verb));
00129       SUNDANCE_MSG3(verb, tab1 << "function term coeffs = " << funcTermCoeffs);
00130 
00131       
00132       if (funcTermCoeffs.size()==0)
00133       {
00134         SUNDANCE_MSG3(verb, tab1 << "no direct chain rule terms");
00135       }
00136       else
00137       {
00138         SUNDANCE_MSG3(verb, tab1 << "getting direct chain rule terms");
00139       }
00140 
00141 
00142       for (Set<MultipleDeriv>::const_iterator 
00143              j=funcTermCoeffs.begin(); j != funcTermCoeffs.end(); j++)
00144       {
00145         Tabs tab2;
00146         SUNDANCE_MSG3(verb, tab2 << "getting coefficient of " << *j);
00147 
00148         int argIndex = argSparsitySuperset()->getIndex(*j);
00149         TEUCHOS_TEST_FOR_EXCEPTION(argIndex==-1, std::runtime_error,
00150           "Derivative " << *j << " expected in argument "
00151           "but not found");
00152 
00153         Deriv lambda = remainder(*j, resultDeriv, verb);
00154 
00155         if (lambda.isCoordDeriv())
00156         {
00157           Tabs tab3;
00158           SUNDANCE_MSG3(verb, tab2 << "detected coordinate deriv");
00159           if (lambda.coordDerivDir()!=expr->mi().firstOrderDirection())
00160           {
00161             SUNDANCE_MSG3(verb, tab2 << "direction mismatch, skipping");
00162             continue;
00163           }
00164           const DerivState& argState = argSparsitySuperset()->state(argIndex);
00165           if (argState==ConstantDeriv)
00166           {
00167             int constArgIndex = argEval()->constantIndexMap().get(argIndex);
00168             constantMonomials_[i].append(constArgIndex);
00169           }
00170           else
00171           {
00172             int vectorArgIndex = argEval()->vectorIndexMap().get(argIndex);
00173             vectorMonomials_[i].append(vectorArgIndex);
00174           }
00175         }
00176         else if (lambda.opOnFunc().isPartial() || lambda.opOnFunc().isIdentity())
00177         {
00178           Tabs tab3;
00179           SUNDANCE_MSG3(verb, tab3 << "detected functional deriv " << lambda);
00180           const SymbolicFuncElement* f = lambda.symbFuncElem();
00181           const MultiIndex& mi = expr->mi() + lambda.opOnFunc().mi(); 
00182           SUNDANCE_MSG3(verb, tab3 << "modified multiIndex is " << mi);
00183 
00184           const TestFuncElement* t 
00185             = dynamic_cast<const TestFuncElement*>(f);
00186           if (t != 0) continue;
00187 
00188           const UnknownFuncElement* u 
00189             = dynamic_cast<const UnknownFuncElement*>(f);
00190           TEUCHOS_TEST_FOR_EXCEPTION(u==0, std::logic_error,
00191             "Non-unknown function detected where an unknown "
00192             "function was expected in "
00193             "DiffOpEvaluator ctor");
00194 
00195 
00196           const EvaluatableExpr* evalPt = u->evalPt();
00197           const ZeroExpr* z = dynamic_cast<const ZeroExpr*>(evalPt);
00198           if (z != 0) continue;
00199           TEUCHOS_TEST_FOR_EXCEPTION(z != 0, std::logic_error,
00200             "DiffOpEvaluator detected identically zero "
00201             "function");
00202 
00203           const DiscreteFuncElement* df 
00204             = dynamic_cast<const DiscreteFuncElement*>(evalPt);
00205           
00206           TEUCHOS_TEST_FOR_EXCEPTION(df==0, std::logic_error,
00207             "DiffOpEvaluator ctor: evaluation point of "
00208             "unknown function " << u->toString() 
00209             << " is not a discrete function");
00210 
00211           const SymbolicFuncElementEvaluator* uEval 
00212             = dynamic_cast<const SymbolicFuncElementEvaluator*>(u->evaluator(context).get());
00213 
00214           const DiscreteFuncElementEvaluator* dfEval = uEval->dfEval();
00215 
00216 
00217           TEUCHOS_TEST_FOR_EXCEPTION(dfEval==0, std::logic_error,
00218             "DiffOpEvaluator ctor: evaluator for "
00219             "evaluation point is not a "
00220             "DiscreteFuncElementEvaluator");
00221 
00222           TEUCHOS_TEST_FOR_EXCEPTION(!dfEval->hasMultiIndex(mi), std::logic_error,
00223             "DiffOpEvaluator ctor: evaluator for "
00224             "discrete function " << df->toString()
00225             << " does not know about multiindex "
00226             << mi.toString());
00227           
00228           int fIndex;
00229           int miIndex = dfEval->miIndex(mi);
00230           
00231           if (funcToIndexMap.containsKey(dfEval))
00232           {
00233             fIndex = funcToIndexMap.get(dfEval);
00234           }
00235           else
00236           {
00237             fIndex = funcEvaluators_.size();
00238             funcEvaluators_.append(dfEval);
00239             funcToIndexMap.put(dfEval, fIndex);
00240           }
00241 
00242             
00243           const DerivState& argState = argSparsitySuperset()->state(argIndex);
00244           if (argState==ConstantDeriv)
00245           {
00246             int constArgIndex = argEval()->constantIndexMap().get(argIndex);
00247             constantCoeffFuncIndices_[i].append(fIndex);
00248             constantCoeffFuncMi_[i].append(miIndex);
00249             constantFuncCoeffs_[i].append(constArgIndex);
00250           }
00251           else
00252           {
00253             int vectorArgIndex = argEval()->vectorIndexMap().get(argIndex);
00254             vectorCoeffFuncIndices_[i].append(fIndex);
00255             vectorCoeffFuncMi_[i].append(miIndex);
00256             vectorFuncCoeffs_[i].append(vectorArgIndex);
00257           }
00258         }
00259         else
00260         {
00261           TEUCHOS_TEST_FOR_EXCEPTION(true, std::logic_error,
00262             "DiffOpEvaluator has been asked to preprocess a Deriv that "
00263             "is not a simple partial derivative. The problem child is: "
00264             << lambda);
00265         }
00266       }
00267       
00268       
00269       Set<MultipleDeriv> isolatedTerms 
00270         = RArg.intersection(backedDerivs(resultDeriv, W1Arg, verb));
00271       
00272       if (isolatedTerms.size()==0)
00273       {
00274         SUNDANCE_MSG3(verb, tab1 << "no indirect chain rule terms");
00275       }
00276       else
00277       {
00278         SUNDANCE_MSG3(verb, tab1 << "getting indirect chain rule terms");
00279         SUNDANCE_MSG3(verb, tab1 << "isolated terms = " << isolatedTerms);
00280       }
00281 
00282       for (Set<MultipleDeriv>::const_iterator 
00283              j=isolatedTerms.begin(); j != isolatedTerms.end(); j++)
00284       {
00285         int argIndex = argSparsitySuperset()->getIndex(*j);
00286         TEUCHOS_TEST_FOR_EXCEPTION(argIndex==-1, std::runtime_error,
00287           "Derivative " << *j << " expected in argument "
00288           "but not found");
00289         const DerivState& argState = argSparsitySuperset()->state(argIndex);
00290         if (argState==ConstantDeriv)
00291         {
00292           int constArgIndex = argEval()->constantIndexMap().get(argIndex);
00293           constantMonomials_[i].append(constArgIndex);
00294         }
00295         else
00296         {
00297           int vectorArgIndex = argEval()->vectorIndexMap().get(argIndex);
00298           vectorMonomials_[i].append(vectorArgIndex);
00299         }
00300       }
00301     }
00302   }
00303 
00304   if (verb > 2)
00305   {
00306     Out::os() << tabs << "instruction tables for summing spatial/functional chain rule" << std::endl;
00307     for (int i=0; i<this->sparsity()->numDerivs(); i++)
00308     {
00309       Tabs tab1;
00310       Out::os() << tab1 << "deriv " << sparsity()->deriv(i) << std::endl;
00311       {
00312         Tabs tab2;
00313         Out::os() << tab2 << "constant monomials: " << constantMonomials_[i]
00314                   << std::endl;
00315         Out::os() << tab2 << "vector monomials: " << vectorMonomials_[i]
00316                   << std::endl;
00317             
00318         Out::os() << tab2 << "constant coeff functions: " << std::endl;
00319         for (int j=0; j<constantFuncCoeffs_[i].size(); j++)
00320         {
00321           Tabs tab3;
00322           Out::os() << tab3 << "func=" << constantCoeffFuncIndices_[i][j]
00323                     << " mi=" << constantCoeffFuncMi_[i][j] << std::endl;
00324         } 
00325         Out::os() << tab2 << "vector coeff functions: " << std::endl;
00326         for (int j=0; j<vectorFuncCoeffs_[i].size(); j++)
00327         {
00328           Tabs tab3;
00329           Out::os() << tab3 << "func=" << vectorCoeffFuncIndices_[i][j]
00330                     << " mi=" << vectorCoeffFuncMi_[i][j] << std::endl;
00331         }
00332             
00333       }
00334     }
00335   }
00336 }
00337 
00338 
00339 Deriv DiffOpEvaluator::remainder(const MultipleDeriv& big, 
00340   const MultipleDeriv& little, int verb) const 
00341 {
00342   Tabs tab;
00343   SUNDANCE_MSG5(verb, tab << "computing remainder: big=" << big << ", little="
00344     << little);
00345   TEUCHOS_TEST_FOR_EXCEPT(big.order()-little.order() != 1);
00346 
00347   MultipleDeriv r;
00348   if (little.order()==0) r = big;
00349   else r = big.factorOutDeriv(little);
00350 
00351   SUNDANCE_MSG5(verb, tab << "remainder = " << r);
00352 
00353   TEUCHOS_TEST_FOR_EXCEPT(r.order() != 1);
00354 
00355   return *(r.begin());
00356 }
00357 
00358 Set<MultipleDeriv> DiffOpEvaluator
00359 ::increasedDerivs(const MultipleDeriv& mu,
00360   const Set<MultipleDeriv>& W1, int verb) const
00361 {
00362   Tabs tabs;
00363   SUNDANCE_MSG3(verb, tabs << "computing increased derivs");
00364   Set<MultipleDeriv> rtn;
00365   for (Set<MultipleDeriv>::const_iterator i=W1.begin(); i!=W1.end(); i++)
00366   {
00367     MultipleDeriv md = *i;
00368     TEUCHOS_TEST_FOR_EXCEPT(md.order() != 1);
00369     Deriv lambda = *(md.begin());
00370     MultipleDeriv lambdaMu = mu;
00371     lambdaMu.put(lambda);
00372     rtn.put(lambdaMu);
00373   }
00374   SUNDANCE_MSG3(verb, tabs << "increased derivs = " << rtn);
00375   return rtn;
00376 }
00377 
00378 Set<MultipleDeriv> DiffOpEvaluator
00379 ::backedDerivs(const MultipleDeriv& mu,
00380   const Set<MultipleDeriv>& W1, int verb) const
00381 {
00382   Tabs tabs;
00383   SUNDANCE_MSG3(verb, tabs << "computing backed-out derivs for mu= " << mu
00384     << ", W1=" << W1);
00385   Set<MultipleDeriv> rtn;
00386   if (mu.order() != 0) 
00387   {
00388     const MultiIndex& alpha = expr()->mi();
00389 
00390     for (Set<MultipleDeriv>::const_iterator i=W1.begin(); i!=W1.end(); i++)
00391     {
00392       const MultipleDeriv& md = *i;
00393       TEUCHOS_TEST_FOR_EXCEPT(md.order() != 1);
00394       Deriv lambda = *(md.begin());
00395       if (lambda.isCoordDeriv()) continue;
00396       TEUCHOS_TEST_FOR_EXCEPT(!lambda.isFunctionalDeriv());
00397       FunctionIdentifier lambda_fid = lambda.fid();
00398       const MultiIndex& lambda_mi = lambda.opOnFunc().mi(); 
00399       for (MultipleDeriv::const_iterator j=mu.begin(); j!=mu.end(); j++)
00400       {
00401         const Deriv& d = *j;
00402         if (d.isCoordDeriv()) continue;
00403         FunctionIdentifier d_fid = d.fid();
00404         const MultiIndex& d_mi = d.opOnFunc().mi(); 
00405         if (d_fid != lambda_fid) continue;
00406         if (!(alpha + lambda_mi == d_mi)) continue;
00407         MultipleDeriv z = mu.factorOutDeriv(d);
00408         z.put(lambda);
00409         rtn.put(z);
00410       }
00411     }
00412   }
00413   SUNDANCE_MSG3(verb, tabs << "backed-out derivs = " << rtn);
00414   return rtn;
00415 }
00416 
00417 
00418 
/*
 * Evaluate the differential operator by replaying the instruction tables
 * built in the ctor: first evaluate the argument and each required discrete
 * function, then for every deriv i in the sparsity pattern accumulate
 *
 *   sum(constant monomials) + sum(vector monomials)
 *     + sum_j coeff_j * (discrete function value_j)
 *
 * into constantResults or vectorResults at slot resultIndices_[i].
 * Result vectors are allocated lazily (vecHasBeenAllocated) so the first
 * contributing term initializes the vector and later terms add into it.
 */
void DiffOpEvaluator::internalEval(const EvalManager& mgr,
  Array<double>& constantResults,
  Array<RCP<EvalVector> >& vectorResults)  const
{
  Tabs tabs;
  SUNDANCE_MSG1(mgr.verb(), tabs << "DiffOpEvaluator::eval() expr=" 
    << expr()->toString());

  /* evaluate the argument */
  Array<RCP<EvalVector> > argVectorResults;
  Array<double> argConstantResults;

  SUNDANCE_MSG2(mgr.verb(), tabs << "evaluating operand");
  evalOperand(mgr, argConstantResults, argVectorResults);


  if (mgr.verb() > 2)
  {
    Tabs tab1;
    Out::os() << tabs << "DiffOp operand results" << std::endl;
    mgr.showResults(Out::os(), argSparsitySuperset(), argVectorResults,
      argConstantResults);
  }



  /* evaluate the required discrete functions */
  SUNDANCE_MSG2(mgr.verb(), tabs << "evaluating discrete functions, num funcs= " << funcEvaluators_.size());

  /* one result array per registered discrete-function evaluator; the
   * constant-results array is shared scratch across the calls */
  Array<Array<RCP<EvalVector> > > funcVectorResults(funcEvaluators_.size());
  Array<double> funcConstantResults;
  for (int i=0; i<funcEvaluators_.size(); i++)
  {
    funcEvaluators_[i]->eval(mgr, funcConstantResults, funcVectorResults[i]);
  }
  
  constantResults.resize(this->sparsity()->numConstantDerivs());
  vectorResults.resize(this->sparsity()->numVectorDerivs());
  
  SUNDANCE_MSG3(mgr.verb(), tabs << "summing spatial/functional chain rule");

  for (int i=0; i<this->sparsity()->numDerivs(); i++)
  {
    Tabs tab1;
    SUNDANCE_MSG4(mgr.verb(), tab1 << "working on deriv " 
      << this->sparsity()->deriv(i));

    /* add constant monomials */
    SUNDANCE_MSG4(mgr.verb(), tab1 << "have " <<  constantMonomials_[i].size()
      << " constant monomials");
    double constantVal = 0.0;
    for (int j=0; j<constantMonomials_[i].size(); j++)
    {
      SUNDANCE_MSG4(mgr.verb(), tab1 << "adding in constant monomial (index "
        << constantMonomials_[i][j] 
        << " in arg results)");
      constantVal += argConstantResults[constantMonomials_[i][j]];
    }
    /* a fully-constant deriv is done: store the scalar sum and move on */
    if (isConstant_[i])
    {
      constantResults[resultIndices_[i]] = constantVal;
      SUNDANCE_MSG4(mgr.verb(), tab1 << "result is constant: value=" 
        << constantVal);
      continue;
    }

    /* variable deriv: build the result vector lazily; the constant sum
     * accumulated above is folded into whichever term allocates it */
    RCP<EvalVector> result;
    bool vecHasBeenAllocated = false;

    /* add in the vector monomials */
    const Array<int>& vm = vectorMonomials_[i];
    SUNDANCE_MSG4(mgr.verb(), tab1 << "have " << vm.size() 
      << " vector monomials");
    for (int j=0; j<vm.size(); j++)
    {
      Tabs tab2;

      const RCP<EvalVector>& v = argVectorResults[vm[j]];

      SUNDANCE_MSG4(mgr.verb(), tab2 << "found vec monomial term " << v->str());

      /* if we've not yet allocated a vector for the results, 
       * do so now, and set it to the initial value */ 
      if (!vecHasBeenAllocated)
      {
        SUNDANCE_MSG4(mgr.verb(), tab2 << "allocated result vector");
        result = mgr.popVector();
        vecHasBeenAllocated = true;
        if (isZero(constantVal))
        {
          result->setTo_V(v.get());
        }
        else
        {
          result->setTo_S_add_V(constantVal, v.get());
        }
      }
      else
      {
        result->add_V(v.get());
      }
      SUNDANCE_MSG4(mgr.verb(), tab2 << "result is " << result->str());
    }
      
    /* add in the function terms with constant coeffs */
    const Array<int>& cf = constantFuncCoeffs_[i];
    SUNDANCE_MSG4(mgr.verb(), tab1 << "adding " << cf.size()
      << " func terms with constant coeffs");
    for (int j=0; j<cf.size(); j++)
    {
      Tabs tab2;
      /* coeff comes from the argument's constant results; the function
       * value comes from the discrete-function evaluation above */
      const double& coeff = argConstantResults[cf[j]];
      int fIndex = constantCoeffFuncIndices_[i][j];
      int miIndex = constantCoeffFuncMi_[i][j];
      const RCP<EvalVector>& fValue 
        = funcVectorResults[fIndex][miIndex];

      SUNDANCE_MSG4(mgr.verb(), tab2 << "found term: coeff= " 
        << coeff << ", func value=" << fValue->str());
          
      /* if we've not yet allocated a vector for the results, 
       * do so now, and set it to the initial value */ 
      if (!vecHasBeenAllocated)
      {
        SUNDANCE_MSG4(mgr.verb(), tab2 << "allocated result vector");
        result = mgr.popVector();
        vecHasBeenAllocated = true;
        /* pick the cheapest initialization for the coeff==1 and
         * constantVal==0 special cases */
        if (isOne(coeff))
        {
          if (isZero(constantVal))
          {
            result->setTo_V(fValue.get());
          }
          else
          {
            result->setTo_S_add_V(constantVal, fValue.get());
          }
        }
        else
        {
          if (isZero(constantVal))
          {
            result->setTo_SV(coeff, fValue.get());
          }
          else
          {
            result->setTo_S_add_SV(constantVal, coeff, fValue.get());
          }
        }
      }
      else
      {
        if (isOne(coeff))
        {
          result->add_V(fValue.get());
        }
        else
        {
          result->add_SV(coeff, fValue.get());
        }
      }
      SUNDANCE_MSG4(mgr.verb(), tab2 << "result is " << result->str());
    }

      
    /* add in the function terms with vector coeffs */
    const Array<int>& vf = vectorFuncCoeffs_[i];
    SUNDANCE_MSG4(mgr.verb(), tab1 << "adding " << vf.size()
      << " func terms with vector coeffs");
    for (int j=0; j<vf.size(); j++)
    {
      Tabs tab2;

      const RCP<EvalVector>& coeff = argVectorResults[vf[j]];
      int fIndex = vectorCoeffFuncIndices_[i][j];
      int miIndex = vectorCoeffFuncMi_[i][j];
      const RCP<EvalVector>& fValue 
        = funcVectorResults[fIndex][miIndex];

      SUNDANCE_MSG4(mgr.verb(), tab2 << "found term: coeff= " 
        << coeff->str() << ", func value=" 
        << fValue->str());
          
      /* if we've not yet allocated a vector for the results, 
       * do so now, and set it to the initial value */ 
      /* NOTE(review): unlike the earlier loops, this initialization drops
       * constantVal; presumably a nonzero constant sum never coexists with
       * vector-coeff-only terms — confirm against the ctor's tables. */
      if (!vecHasBeenAllocated)
      {
        SUNDANCE_MSG4(mgr.verb(), tab2 << "allocated result vector");
        result = mgr.popVector();
        vecHasBeenAllocated = true;
        result->setTo_VV(coeff.get(), fValue.get());
      }
      else
      {
        result->add_VV(coeff.get(), fValue.get());
      }
      SUNDANCE_MSG4(mgr.verb(), tab2 << "result is " << result->str());
    }

    /* a variable deriv must have received at least one vector term */
    TEUCHOS_TEST_FOR_EXCEPTION(!vecHasBeenAllocated, std::logic_error,
      "created empty vector in DiffOpEvaluator::internalEval");
    vectorResults[resultIndices_[i]] = result;
  }

  if (mgr.verb() > 1)
  {
    Out::os() << tabs << "diff op results" << std::endl;
    mgr.showResults(Out::os(), sparsity(), vectorResults,
      constantResults);
  }
  SUNDANCE_MSG1(mgr.verb(), tabs << "done spatial/functional chain rule");
}
00631 
00632 
00633 
00634 void DiffOpEvaluator::resetNumCalls() const 
00635 {
00636   argEval()->resetNumCalls();
00637   for (int i=0; i<funcEvaluators_.size(); i++) 
00638   {
00639     funcEvaluators_[i]->resetNumCalls();
00640   }
00641   Evaluator::resetNumCalls();
00642 }

Site Contact