// NLPInterfacePack: C++ Interfaces and Implementation for Non-Linear Programs
// Version of the Day
00001 // @HEADER 00002 // *********************************************************************** 00003 // 00004 // Moocho: Multi-functional Object-Oriented arCHitecture for Optimization 00005 // Copyright (2003) Sandia Corporation 00006 // 00007 // Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive 00008 // license for use of this work by or on behalf of the U.S. Government. 00009 // 00010 // Redistribution and use in source and binary forms, with or without 00011 // modification, are permitted provided that the following conditions are 00012 // met: 00013 // 00014 // 1. Redistributions of source code must retain the above copyright 00015 // notice, this list of conditions and the following disclaimer. 00016 // 00017 // 2. Redistributions in binary form must reproduce the above copyright 00018 // notice, this list of conditions and the following disclaimer in the 00019 // documentation and/or other materials provided with the distribution. 00020 // 00021 // 3. Neither the name of the Corporation nor the names of the 00022 // contributors may be used to endorse or promote products derived from 00023 // this software without specific prior written permission. 00024 // 00025 // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY 00026 // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 00027 // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 00028 // PURPOSE ARE DISCLAIMED. 
// IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Roscoe A. Bartlett (rabartl@sandia.gov)
//
// ***********************************************************************
// @HEADER

#include <assert.h>
#include <math.h>

#include <typeinfo>
#include <iomanip>
#include <sstream>
#include <limits>

#include "NLPInterfacePack_CalcFiniteDiffProd.hpp"
#include "NLPInterfacePack_NLP.hpp"
#include "AbstractLinAlgPack_VectorSpace.hpp"
#include "AbstractLinAlgPack_VectorOut.hpp"
#include "AbstractLinAlgPack_VectorMutable.hpp"
#include "AbstractLinAlgPack_LinAlgOpPack.hpp"
#include "AbstractLinAlgPack_assert_print_nan_inf.hpp"
#include "AbstractLinAlgPack_VectorAuxiliaryOps.hpp"
#include "Teuchos_FancyOStream.hpp"
#include "Teuchos_Assert.hpp"

namespace NLPInterfacePack {

// Constructor: records the finite-difference method order, the step-size
// selection mode (absolute vs. relative to ||xo||inf), and the various
// step sizes.  Negative step sizes mean "choose a default later" (see
// calc_deriv_product(), where uh < 0 selects the optimal default step).
CalcFiniteDiffProd::CalcFiniteDiffProd(
  EFDMethodOrder fd_method_order
  ,EFDStepSelect fd_step_select
  ,value_type fd_step_size
  ,value_type fd_step_size_min
  ,value_type fd_step_size_f
  ,value_type fd_step_size_c
  )
  :fd_method_order_(fd_method_order)
  ,fd_step_select_(fd_step_select)
  ,fd_step_size_(fd_step_size)
  ,fd_step_size_min_(fd_step_size_min)
  ,fd_step_size_f_(fd_step_size_f)
  ,fd_step_size_c_(fd_step_size_c)
{}
// Compute the finite-difference directional derivative products Gf'*v
// (objective) and/or Gc'*v (constraints) at the base point xo along the
// direction v.
//
// If the bounds xl and xu are given (both or neither), the step length is
// shrunk -- and possibly negated -- so that all evaluation points stay
// within the relaxed bounds given by nlp->max_var_bounds_viol().
//
// If fo (and/or co) is given, it is reused as the base-point value for
// stencil points with uh_i == 0.0 instead of re-evaluating the NLP at xo.
//
// Returns true if the products were computed; returns false (computing
// nothing) when the largest feasible step is smaller than the minimum
// allowable step size.
//
// Note: the NLP's f and c storage pointers are temporarily retargeted to
// local storage and restored before returning, even on exception.
bool CalcFiniteDiffProd::calc_deriv_product(
  const Vector &xo
  ,const Vector *xl
  ,const Vector *xu
  ,const Vector &v
  ,const value_type *fo
  ,const Vector *co
  ,bool check_nan_inf
  ,NLP *nlp
  ,value_type *Gf_prod
  ,VectorMutable *Gc_prod
  ,std::ostream *out_arg
  ,bool trace
  ,bool dump_all
  ) const
{

  using std::setw;
  using std::endl;
  using std::right;

  using BLAS_Cpp::rows;
  using BLAS_Cpp::cols;

  typedef VectorSpace::vec_mut_ptr_t vec_mut_ptr_t;
  using AbstractLinAlgPack::Vt_S;
  using AbstractLinAlgPack::Vp_StV;
  using AbstractLinAlgPack::max_near_feas_step;
  using AbstractLinAlgPack::assert_print_nan_inf;
  using LinAlgOpPack::V_StV;

  // Wrap the raw output stream in a non-owning (false) FancyOStream so that
  // trace output below can be indented uniformly via OSTab.
  Teuchos::RCP<Teuchos::FancyOStream>
    out = Teuchos::getFancyOStream(Teuchos::rcp(out_arg,false));
  Teuchos::OSTab tab(out);

  //
  // The gradient of the constraints is defined as the matrix Gc as:
  //
  // Gc= [ Gc1, Gc2, ..., Gcm ]
  //
  //     [ dc1/dx(1)  dc2/dx(1) ... dcm/dx(1) ]
  //     [ dc1/dx(2)  dc2/dx(2) ... dcm/dx(2) ]
  // Gc= [ .          .         ... .         ]
  //     [ dc1/dx(n)  dc2/dx(n) ... dcm/dx(n) ]
  //
  //     [ (dc/dx(1))' ]
  //     [ (dc/dx(2))' ]
  // Gc= [ .           ]
  //     [ (dc/dx(n))' ]
  //
  // The gradient of the objective function is defined as the
  // vector Gf as:
  //
  //     [ (df/dx(1))' ]
  //     [ (df/dx(2))' ]
  // Gf= [ .           ]
  //     [ (df/dx(n))' ]
  //
  // To illustrate the theory behind this implementation consider
  // the generic multi-variable function g(x) <: R^n -> R. Now let's
  // consider we have the base point xo and the vector v to
  // perturb g(x) along. First form the function g(xo+a*v) and then
  // let's compute dg/da at a = 0:
  //
  // (1) d(g(xo+a*v))/d(a) at a = 0
  //     = sum( dg/dx(i) * dx(i)/da, i = 1...n )
  //     = sum( dg/dx(i) * v(i), i = 1...n )
  //     = Gg'*v
  //
  // Now we can approximate (1) using central differences as:
  //
  // (2) d(g(xo+a*v))/d(a) at a = 0
  //     \approx ( g(xo+h*v) - g(xo-h*v) ) / (2*h)
  //
  // If we equate (1) and (2) we have the approximation:
  //
  // (3) Gg' * v \approx ( g(xo+h*v) - g(xo-h*v) ) / (2*h)
  //
  // It should be clear how this applies to computing Gf'*v and Gc'*v.
  //

  const size_type
    n = nlp->n(),
    m = nlp->m();

  const value_type
    max_bnd_viol = nlp->max_var_bounds_viol();

  // /////////////////////////////////////////
  // Validate the input

  TEUCHOS_TEST_FOR_EXCEPTION(
    m==0 && Gc_prod, std::invalid_argument
    ,"CalcFiniteDiffProd::calc_deriv(...) : "
    "Error, if nlp->m() == 0, then Gc_prod must equal NULL" );
  TEUCHOS_TEST_FOR_EXCEPTION(
    Gc_prod && !Gc_prod->space().is_compatible(*nlp->space_c())
    ,std::invalid_argument
    ,"CalcFiniteDiffProd::calc_deriv(...) : "
    "Error, Gc_prod (type \' "<<typeName(*Gc_prod)<<"\' "
    "is not compatible with the NLP" );
  // The bounds must be supplied together or not at all.
  TEUCHOS_TEST_FOR_EXCEPTION(
    (xl && !xu) || (!xl && xu), std::invalid_argument
    ,"CalcFiniteDiffProd::calc_deriv(...) : "
    "Error, both xl = "<<xl<<" and xu = "<<xu
    <<" must be NULL or not NULL" );

  assert_print_nan_inf(xo,"xo",true,out.get());

  // Announce the method that was configured (the *_AUTO choices are
  // resolved to a concrete method further below).
  switch(this->fd_method_order()) {
    case FD_ORDER_ONE:
      if(out.get()&&trace) *out<<"\nUsing one-sided, first-order finite differences ...\n";
      break;
    case FD_ORDER_TWO:
      if(out.get()&&trace) *out<<"\nUsing one-sided, second-order finite differences ...\n";
      break;
    case FD_ORDER_TWO_CENTRAL:
      if(out.get()&&trace) *out<<"\nUsing second-order central finite differences ...\n";
      break;
    case FD_ORDER_TWO_AUTO:
      if(out.get()&&trace) *out<<"\nUsing auto selection of some second-order finite difference method ...\n";
      break;
    case FD_ORDER_FOUR:
      if(out.get()&&trace) *out<<"\nUsing one-sided, fourth-order finite differences ...\n";
      break;
    case FD_ORDER_FOUR_CENTRAL:
      if(out.get()&&trace) *out<<"\nUsing fourth-order central finite differences ...\n";
      break;
    case FD_ORDER_FOUR_AUTO:
      if(out.get()&&trace) *out<<"\nUsing auto selection of some fourth-order finite difference method ...\n";
      break;
    default:
      TEUCHOS_TEST_FOR_EXCEPT(true); // Should not get here!
  }

  // ////////////////////////
  // Find the step size

  //
  // Get defaults for the optimal step sizes
  //
  // As computed below: u_optimal_1 = eps^(1/2), u_optimal_2 = eps^(1/4),
  // u_optimal_4 = eps^(1/8), where eps is machine epsilon for value_type.
  //

  const value_type
    sqrt_epsilon = ::pow(std::numeric_limits<value_type>::epsilon(),1.0/2.0),
    u_optimal_1 = sqrt_epsilon,
    u_optimal_2 = ::pow(sqrt_epsilon,1.0/2.0),
    u_optimal_4 = ::pow(sqrt_epsilon,1.0/4.0),
    xo_norm_inf = xo.norm_inf();

  // In FD_STEP_RELATIVE mode the default step is scaled by ||xo||inf + 1.
  value_type
    uh_opt = 0.0;
  switch(this->fd_method_order()) {
    case FD_ORDER_ONE:
      uh_opt = u_optimal_1 * ( fd_step_select() == FD_STEP_ABSOLUTE ? 1.0 : xo_norm_inf + 1.0 );
      break;
    case FD_ORDER_TWO:
    case FD_ORDER_TWO_CENTRAL:
    case FD_ORDER_TWO_AUTO:
      uh_opt = u_optimal_2 * ( fd_step_select() == FD_STEP_ABSOLUTE ? 1.0 : xo_norm_inf + 1.0 );
      break;
    case FD_ORDER_FOUR:
    case FD_ORDER_FOUR_CENTRAL:
    case FD_ORDER_FOUR_AUTO:
      uh_opt = u_optimal_4 * ( fd_step_select() == FD_STEP_ABSOLUTE ? 1.0 : xo_norm_inf + 1.0 );
      break;
    default:
      TEUCHOS_TEST_FOR_EXCEPT(true); // Should not get here!
  }

  if(out.get()&&trace) *out<<"\nDefault optimal step length uh_opt = " << uh_opt << " ...\n";

  //
  // Set the step sizes used.
  //

  value_type
    uh = this->fd_step_size(),
    uh_f = this->fd_step_size_f(),
    uh_c = this->fd_step_size_c(),
    uh_min = this->fd_step_size_min();

  // uh : negative means "use the default optimal step"
  if( uh < 0 )
    uh = uh_opt;
  else if(fd_step_select() == FD_STEP_RELATIVE)
    uh *= (xo_norm_inf + 1.0);
  // uh_f : objective-specific step; defaults to uh when negative
  if( uh_f < 0 )
    uh_f = uh;
  else if(fd_step_select() == FD_STEP_RELATIVE)
    uh_f *= (xo_norm_inf + 1.0);
  // uh_c : constraints-specific step; defaults to uh when negative
  if( uh_c < 0 )
    uh_c = uh;
  else if(fd_step_select() == FD_STEP_RELATIVE)
    uh_c *= (xo_norm_inf + 1.0);

  if(out.get()&&trace) *out<<"\nIndividual step sizes initally set: uh="<<uh<<",uh_f="<<uh_f<<",uh_c="<<uh_c<<"\n";

  //
  // Determine the maximum step size that can be used and
  // still stay in the relaxed bounds.
  //
  // ToDo: Consider cramped bounds, one sided differences!
  //

  value_type max_u_feas = std::numeric_limits<value_type>::max();
  if( xl ) {
    std::pair<value_type,value_type>
      u_pn
      = max_near_feas_step(
        xo
        ,v
        ,*xl
        ,*xu
        ,max_bnd_viol
        );
    // Pick whichever direction (u_pn.first is the negative-side step,
    // u_pn.second the positive-side step) allows the larger magnitude;
    // max_u_feas may therefore come out negative.
    if( u_pn.first < -u_pn.second )
      max_u_feas = u_pn.first;
    else
      max_u_feas = u_pn.second;
    const value_type abs_max_u_feas = ::fabs(max_u_feas);
    if( abs_max_u_feas < uh ) {
      if( abs_max_u_feas < uh_min ) {
        // The bounds are too cramped to take even the minimum step:
        // give up without computing anything.
        if(out.get())
          *out
            << "Warning, the size of the maximum finite difference step length\n"
            << "that does not violate the relaxed variable bounds uh = "
            << max_u_feas << " is less than the mimimum allowable step length\n"
            << "uh_min = " << uh_min << " and the finite difference "
            << "derivatives are not computed!\n";
        return false;
      }
      if(out.get())
        *out
          << "Warning, the size of the maximum finite difference step length\n"
          << "that does not violate the relaxed variable bounds uh = "
          << max_u_feas << " is less than the desired step length\n"
          << "uh = " << uh << " and the finite difference "
          << "derivatives may be much less accurate!\n";
    }
  }

  //
  // Set the actual method being used
  //
  // ToDo: Consider cramped bounds and method order.
  //

  // The *_AUTO selections currently always resolve to the central methods.
  EFDMethodOrder fd_method_order = this->fd_method_order();
  switch(fd_method_order) {
    case FD_ORDER_TWO_AUTO:
      fd_method_order = FD_ORDER_TWO_CENTRAL;
      break;
    case FD_ORDER_FOUR_AUTO:
      fd_method_order = FD_ORDER_FOUR_CENTRAL;
      break;
  }

  // Compute the actual individual step size so as to stay in bounds
  const value_type
    abs_max_u_feas = ::fabs(max_u_feas);
  // num_u_i = the largest |uh_i| multiplier used by the chosen stencil,
  // i.e. how many base steps away from xo the farthest evaluation point is.
  value_type
    num_u_i = 0;
  switch(fd_method_order) {
    case FD_ORDER_ONE:
      num_u_i = 1.0;
      break;
    case FD_ORDER_TWO:
      num_u_i = 2.0;
      break;
    case FD_ORDER_TWO_CENTRAL:
      num_u_i = 1.0;
      break;
    case FD_ORDER_FOUR:
      num_u_i = 4.0;
      break;
    case FD_ORDER_FOUR_CENTRAL:
      num_u_i = 2.0;
      break;
    default:
      TEUCHOS_TEST_FOR_EXCEPT(true); // Should not get here!
  }

  // Shrink (and possibly negate) the steps so the farthest stencil point
  // stays inside the relaxed bounds.
  uh = ( abs_max_u_feas/num_u_i < uh ? max_u_feas/num_u_i : uh ); // This can be a negative number!
  uh_f = ( abs_max_u_feas/num_u_i < uh_f ? max_u_feas/num_u_i : uh_f ); //""
  uh_c = ( abs_max_u_feas/num_u_i < uh_c ? max_u_feas/num_u_i : uh_c ); //""

  // Default the minimum step to 1% of the actual step if not specified.
  if( uh_min < 0 ) {
    uh_min = uh / 100.0;
  }

  if(out.get()&&trace) *out<<"\nIndividual step sizes to fit in bounds: uh="<<uh<<",uh_f="<<uh_f<<",uh_c="<<uh_c<<"\n";

  //
  // Remember some stuff
  //
  // Save the NLP's current f and c storage pointers (and the stream
  // precision) so they can be restored below; the finite differencing
  // temporarily retargets the NLP's outputs to local storage.
  //

  value_type *f_saved = NULL;
  VectorMutable *c_saved = NULL;

  f_saved = nlp->get_f();
  if(m) c_saved = nlp->get_c();

  int p_saved;
  if(out.get())
    p_saved = out->precision();

  // ///////////////////////////////////////////////
  // Compute the directional derivatives

  try {

    value_type
      f;
    vec_mut_ptr_t
      x = nlp->space_x()->create_member();
    // Only allocate constraint storage if constraint products are requested.
    vec_mut_ptr_t
      c = m && Gc_prod ? nlp->space_c()->create_member() : Teuchos::null;

    // Set the quantities used to compute with

    nlp->set_f(&f);
    if(m) nlp->set_c( c.get() );

    const int dbl_p = 15;
    if(out.get())
      *out << std::setprecision(dbl_p);

    //
    // Compute the weighted sum of the terms
    //
    // Each method is a stencil sum( wgt_i * g(xo + uh_i*uh*v) ) scaled by
    // 1/(dwgt*uh) at the end.
    //

    int num_evals = 0;
    value_type dwgt = 0.0;
    switch(fd_method_order) {
      case FD_ORDER_ONE: // may only need one eval if f(xo) etc is passed in
        num_evals = 2;
        dwgt = 1.0;
        break;
      case FD_ORDER_TWO: // may only need two evals if c(xo) etc is passed in
        num_evals = 3;
        dwgt = 2.0;
        break;
      case FD_ORDER_TWO_CENTRAL:
        num_evals = 2;
        dwgt = 2.0;
        break;
      case FD_ORDER_FOUR:
        num_evals = 5;
        dwgt = 12.0;
        break;
      case FD_ORDER_FOUR_CENTRAL:
        // NOTE(review): only four stencil points (uh_i = -2,-1,+1,+2) are
        // defined below for this method; the fifth loop iteration gets
        // uh_i = wgt_i = 0.0 so it adds nothing to the sums, but it still
        // triggers one extra NLP evaluation at xo (unless fo/co are
        // supplied).  Looks like num_evals should be 4 here -- confirm
        // before changing.
        num_evals = 5;
        dwgt = 12.0;
        break;
      default:
        TEUCHOS_TEST_FOR_EXCEPT(true); // Should not get here!
    }
    if(Gc_prod) *Gc_prod = 0.0;
    if(Gf_prod) *Gf_prod = 0.0;
    for( int eval_i = 1; eval_i <= num_evals; ++eval_i ) {
      // Set the step constant and the weighting constant
      value_type
        uh_i = 0.0,
        wgt_i = 0.0;
      switch(fd_method_order) {
        case FD_ORDER_ONE: {
          // ( -g(xo) + g(xo+h*v) ) / h
          switch(eval_i) {
            case 1:
              uh_i = +0.0;
              wgt_i = -1.0;
              break;
            case 2:
              uh_i = +1.0;
              wgt_i = +1.0;
              break;
          }
          break;
        }
        case FD_ORDER_TWO: {
          // ( -3*g(xo) + 4*g(xo+h*v) - g(xo+2*h*v) ) / (2*h)
          switch(eval_i) {
            case 1:
              uh_i = +0.0;
              wgt_i = -3.0;
              break;
            case 2:
              uh_i = +1.0;
              wgt_i = +4.0;
              break;
            case 3:
              uh_i = +2.0;
              wgt_i = -1.0;
              break;
          }
          break;
        }
        case FD_ORDER_TWO_CENTRAL: {
          // ( -g(xo-h*v) + g(xo+h*v) ) / (2*h)
          switch(eval_i) {
            case 1:
              uh_i = -1.0;
              wgt_i = -1.0;
              break;
            case 2:
              uh_i = +1.0;
              wgt_i = +1.0;
              break;
          }
          break;
        }
        case FD_ORDER_FOUR: {
          // ( -25*g0 + 48*g1 - 36*g2 + 16*g3 - 3*g4 ) / (12*h)
          switch(eval_i) {
            case 1:
              uh_i = +0.0;
              wgt_i = -25.0;
              break;
            case 2:
              uh_i = +1.0;
              wgt_i = +48.0;
              break;
            case 3:
              uh_i = +2.0;
              wgt_i = -36.0;
              break;
            case 4:
              uh_i = +3.0;
              wgt_i = +16.0;
              break;
            case 5:
              uh_i = +4.0;
              wgt_i = -3.0;
              break;
          }
          break;
        }
        case FD_ORDER_FOUR_CENTRAL: {
          // ( g(-2h) - 8*g(-h) + 8*g(+h) - g(+2h) ) / (12*h)
          switch(eval_i) {
            case 1:
              uh_i = -2.0;
              wgt_i = +1.0;
              break;
            case 2:
              uh_i = -1.0;
              wgt_i = -8.0;
              break;
            case 3:
              uh_i = +1.0;
              wgt_i = +8.0;
              break;
            case 4:
              uh_i = +2.0;
              wgt_i = -1.0;
              break;
          }
          break;
        }
      }

      if(out.get()&&dump_all) {
        *out<<"\nxo =\n" << xo;
        *out<<"\nv =\n" << v;
        if(fo) *out << "\nfo = " << *fo << "\n";
        if(co) *out << "\nco =\n" << *co;
      }
      // Compute the weighted term and add it to the sum
      bool new_point = true;
      if(Gc_prod) {
        if( co && uh_i == 0.0 ) {
          // Reuse the supplied base-point constraints instead of
          // re-evaluating the NLP at xo.
          if(out.get()&&trace) *out<<"\nBase c = co ...\n";
          *c = *co;
        }
        else {
          // new_point is always true at this point in the loop body, so
          // x is recomputed for every constraint evaluation.
          if( new_point || uh_c != uh ) {
            *x = xo; Vp_StV( x.get(), uh_i * uh_c, v ); // x = xo + uh_i*uh_c*v
          }
          if(out.get()&&trace) *out<<"\nComputing c = c(xo+"<<(uh_i*uh_c)<<"*v) ...\n";
          if(out.get()&&dump_all) *out<<"\nxo+"<<(uh_i*uh_c)<<"*v =\n" << *x;
          nlp->calc_c(*x,new_point);
        }
        new_point = false;
        if(out.get() && dump_all) *out << "\nc =\n" << *c;
        if(check_nan_inf)
          assert_print_nan_inf(*c,"c(xo+u*v)",true,out.get());
        if(out.get()&&trace) *out<<"\nGc_prod += "<<wgt_i<<"*c ...\n";
        Vp_StV( Gc_prod, wgt_i, *c );
        if(out.get() && dump_all) *out<<"\nGc_prod =\n" << *Gc_prod;
      }

      if(Gf_prod) {
        if( fo && uh_i == 0.0 ) {
          // Reuse the supplied base-point objective value.
          if(out.get()&&trace) *out<<"\nBase f = fo ...\n";
          f = *fo;
        }
        else {
          // Only recompute x if this is a fresh point or the objective
          // step differs from the base step used above.
          if( new_point || uh_f != uh ) {
            *x = xo; Vp_StV( x.get(), uh_i * uh_f, v ); // x = xo + uh_i*uh_f*v
            new_point = true;
          }
          if(out.get()&&trace) *out<<"\nComputing f = f(xo+"<<(uh_i*uh_f)<<"*v) ...\n";
          if(out.get() && dump_all) *out<<"\nxo+"<<(uh_i*uh_f)<<"*v =\n" << *x;
          nlp->calc_f(*x,new_point);
        }
        new_point = false;
        if(out.get() && dump_all) *out<<"\nf = " << f << "\n";
        if(check_nan_inf)
          assert_print_nan_inf(f,"f(xo+u*v)",true,out.get());
        if(out.get()&&trace) *out<<"\nGf_prod += "<<wgt_i<<"*f ...\n";
        *Gf_prod += wgt_i * f;
        if(out.get() && dump_all) *out<<"\nGf_prod = " << *Gf_prod << "\n";
      }

    }

    //
    // Multiply by the scaling factor!
    //
    // Each accumulated sum is divided by dwgt times its own step size
    // (uh_c for constraints, uh_f for the objective).
    //

    if(Gc_prod) {
      if(out.get()&&trace) *out<<"\nGc_prod *= "<<(1.0 / (dwgt * uh_c))<<" ...\n";
      Vt_S( Gc_prod, 1.0 / (dwgt * uh_c) );
      if(out.get() && dump_all)
        *out<<"\nFinal Gc_prod =\n" << *Gc_prod;
    }

    if(Gf_prod) {
      if(out.get()&&trace) *out<<"\nGf_prod *= "<<(1.0 / (dwgt * uh_f))<<" ...\n";
      *Gf_prod *= ( 1.0 / (dwgt * uh_f) );
      if(out.get() && dump_all)
        *out<<"\nFinal Gf_prod = " << *Gf_prod << "\n";
    }

  } // end try
  catch(...) {
    // Restore the NLP's storage pointers and the stream precision before
    // re-throwing so the caller sees the NLP in its original state.
    nlp->set_f( f_saved );
    if(m) nlp->set_c( c_saved );
    if(out.get())
      *out << std::setprecision(p_saved);
    throw;
  }

  // Normal-path restoration of the saved NLP state and stream precision.
  nlp->set_f( f_saved );
  if(m) nlp->set_c( c_saved );
  if(out.get())
    *out << std::setprecision(p_saved);

  return true;
}

} // end namespace NLPInterfacePack
// (Documentation dump footer: generated by Doxygen 1.7.6.1)