// MoochoPack : Framework for Large-Scale Optimization Algorithms
// Version of the Day
#if 0

// @HEADER
// ***********************************************************************
//
// Moocho: Multi-functional Object-Oriented arCHitecture for Optimization
// Copyright (2003) Sandia Corporation
//
// Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
// license for use of this work by or on behalf of the U.S. Government.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Roscoe A. Bartlett (rabartl@sandia.gov)
//
// ***********************************************************************
// @HEADER

#include <ostream>
#include <typeinfo>

#include "MoochoPack_LineSearchWatchDog_Step.hpp"
#include "MoochoPack_MoochoAlgorithmStepNames.hpp"
#include "MoochoPack_moocho_algo_conversion.hpp"
#include "IterationPack_print_algorithm_step.hpp"
#include "ConstrainedOptPack_MeritFuncCalc1DQuadratic.hpp"
#include "ConstrainedOptPack_MeritFuncCalcNLP.hpp"
#include "ConstrainedOptPack_print_vector_change_stats.hpp"
#include "ConstrainedOptPack/src/VectorWithNorms.h"
#include "AbstractLinAlgPack/src/AbstractLinAlgPack_MatrixOp.hpp"
#include "DenseLinAlgPack_DVectorClass.hpp"
#include "DenseLinAlgPack_DVectorOp.hpp"
#include "DenseLinAlgPack_DVectorOut.hpp"
#include "DenseLinAlgPack_LinAlgOpPack.hpp"

namespace {
// Sentinel value for watch_k_ meaning "do an ordinary line search"
// (the watchdog procedure has not been triggered yet, or was reset).
const int NORMAL_LINE_SEARCH = -1;
}

namespace LinAlgOpPack {
using AbstractLinAlgPack::Vp_StMtV;
}

// Constructor: stores the line-search strategy, the merit function, the
// sufficient-decrease parameter eta and the KKT-error thresholds below
// which the watchdog procedure is activated.  Starts in normal
// line-search mode (watch_k_ == NORMAL_LINE_SEARCH).
MoochoPack::LineSearchWatchDog_Step::LineSearchWatchDog_Step(
  const direct_line_search_ptr_t& direct_line_search
  , const merit_func_ptr_t& merit_func
  , value_type eta
  , value_type opt_kkt_err_threshold
  , value_type feas_kkt_err_threshold
  )
  :
  direct_line_search_(direct_line_search)
  , merit_func_(merit_func)
  , eta_(eta)
  , opt_kkt_err_threshold_(opt_kkt_err_threshold)
  , feas_kkt_err_threshold_(feas_kkt_err_threshold)
  , watch_k_(NORMAL_LINE_SEARCH)
{}

// Perform one (possibly watchdog) line-search step.
//
// watch_k_ encodes the stage of the watchdog procedure:
//   NORMAL_LINE_SEARCH : plain line search; switch to watchdog mode
//                        once both KKT errors drop below their thresholds.
//   0 : zeroth watchdog iteration -- accept the full step even if the
//       merit function increases, but remember the base point.
//   1 : first watchdog iteration -- require a reduction; if none is
//       obtained, back up to the saved point and search from there.
//   2 : second watchdog iteration -- do a line search, then reset to 0.
//
// Returns true for normal control flow; returns false (after redirecting
// the algorithm to EvalNewPoint) when the watchdog backtracks.
// Throws LineSearchFailure if d_k is not a descent direction or the
// line search itself fails.
bool MoochoPack::LineSearchWatchDog_Step::do_step(Algorithm& _algo
  , poss_type step_poss, IterationPack::EDoStepType type, poss_type assoc_step_poss)
{
  using DenseLinAlgPack::norm_inf;
  using DenseLinAlgPack::V_VpV;
  using DenseLinAlgPack::Vp_StV;
  using DenseLinAlgPack::Vt_S;

  using LinAlgOpPack::Vp_V;
  using LinAlgOpPack::V_MtV;

  using ConstrainedOptPack::print_vector_change_stats;

  NLPAlgo       &algo = rsqp_algo(_algo);
  NLPAlgoState  &s    = algo.rsqp_state();
  NLP           &nlp  = algo.nlp();

  EJournalOutputLevel olevel = algo.algo_cntr().journal_output_level();
  std::ostream& out = algo.track().journal_out();
  out << std::boolalpha;

  // print step header.
  if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
    using IterationPack::print_algorithm_step;
    print_algorithm_step( algo, step_poss, type, assoc_step_poss, out );
  }

  // /////////////////////////////////////////
  // Set references to iteration quantities
  //
  // Set k+1 first then go back to get k to ensure
  // we have backward storage.

  DVector
    &x_kp1 = s.x().set_k(+1).v();
  value_type
    &f_kp1 = s.f().set_k(+1);
  DVector
    &c_kp1 = s.c().set_k(+1).v();

  const value_type
    &f_k = s.f().get_k(0);
  const DVector
    &c_k = s.c().get_k(0).v();
  const DVector
    &x_k = s.x().get_k(0).v();
  const DVector
    &d_k = s.d().get_k(0).v();
  value_type
    &alpha_k = s.alpha().get_k(0);

  // /////////////////////////////////////
  // Compute Dphi_k, phi_kp1 and phi_k

  // Dphi_k
  const value_type
    Dphi_k = merit_func().deriv();
  if( Dphi_k >= 0 ) {
    // NOTE: message previously named the wrong class
    // (LineSearch2ndOrderCorrect_Step); corrected here.
    throw LineSearchFailure( "LineSearchWatchDog_Step::do_step(...) : "
      "Error, d_k is not a descent direction for the merit function " );
  }

  // ph_kp1
  value_type
    &phi_kp1 = s.phi().set_k(+1) = merit_func().value( f_kp1, c_kp1 );

  // Must compute phi(x) at the base point x_k since the penalty parameter may have changed.
  const value_type
    &phi_k = s.phi().set_k(0) = merit_func().value( f_k, c_k );

  // //////////////////////////////////////
  // Setup the calculation merit function

  // Here f_kp1, and c_kp1 are updated at the same time the
  // line search is being performed.
  nlp.set_f( &f_kp1 );
  nlp.set_c( &c_kp1 );
  MeritFuncCalcNLP
    phi_calc( &merit_func(), &nlp );

  // ////////////////////////////////
  // Use Watchdog near the solution

  if( watch_k_ == NORMAL_LINE_SEARCH ) {
    const value_type
      opt_kkt_err_k  = s.opt_kkt_err().get_k(0),
      feas_kkt_err_k = s.feas_kkt_err().get_k(0);
    if( opt_kkt_err_k <= opt_kkt_err_threshold() && feas_kkt_err_k <= feas_kkt_err_threshold() ) {
      if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
        out << "\nopt_kkt_err_k = " << opt_kkt_err_k << " <= opt_kkt_err_threshold = "
          << opt_kkt_err_threshold() << std::endl
          << "\nfeas_kkt_err_k = " << feas_kkt_err_k << " <= feas_kkt_err_threshold = "
          << feas_kkt_err_threshold() << std::endl
          << "\nSwitching to watchdog linesearch ...\n";
      }
      watch_k_ = 0;
    }
  }

  if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
    out << "\nTrial point:\n"
      << "phi_k   = " << phi_k << std::endl
      << "Dphi_k  = " << Dphi_k << std::endl
      << "phi_kp1 = " << phi_kp1 << std::endl;
  }

  bool ls_success = true,
    step_return = true;

  switch( watch_k_ ) {
    case 0:
    {
      // Take a full step
      const value_type phi_cord = phi_k + eta() * Dphi_k;
      const bool accept_step = phi_kp1 <= phi_cord;

      if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
        out << "\n*** Zeroth watchdog iteration:\n"
          << "\nphi_kp1 = " << phi_kp1 << ( accept_step ? " <= " : " > " )
          << "phi_k + eta * Dphi_k = " << phi_cord << std::endl;
      }

      if( phi_kp1 > phi_cord ) {
        if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
          out << "\nAccept this increase for now but watch out next iteration!\n";
        }
        // Save this initial point
        xo_     = x_k;
        fo_     = f_k;
        nrm_co_ = norm_inf( c_k );
        do_     = d_k;
        phio_   = phi_k;
        Dphio_  = Dphi_k;
        phiop1_ = phi_kp1;
        // Slip the update of the penalty parameter
        const value_type mu_k = s.mu().get_k(0);
        s.mu().set_k(+1) = mu_k;
        // Move on to the next step in the watchdog procedure
        watch_k_ = 1;
      }
      else {
        if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
          out << "\nAll is good!\n";
        }
        // watch_k_ stays 0
      }
      step_return = true;
      break;
    }
    case 1:
    {
      if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
        out << "\n*** First watchdog iteration:\n"
          << "\nDo a line search to determine x_kp1 = x_k + alpha_k * d_k ...\n";
      }
      // Now do a line search but and we require some type of reduction
      const DVectorSlice xd[2] = { x_k(), d_k() };
      MeritFuncCalc1DQuadratic phi_calc_1d( phi_calc, 1, xd, &x_kp1() );
      ls_success = direct_line_search().do_line_search( phi_calc_1d, phi_k
        , &alpha_k, &phi_kp1
        , (int)olevel >= (int)PRINT_ALGORITHM_STEPS ?
          &out : static_cast<std::ostream*>(0) );

      // If the linesearch failed then the rest of the tests will catch this.

      value_type phi_cord = 0;
      // FIX: test2 must be initialized -- when test1 is true the ||
      // short-circuits and test2 was read uninitialized in the diagnostic
      // output below (undefined behavior).
      bool test1 = false, test2 = false;

      if( ( test1 = ( phi_k <= phio_ ) )
        || ( test2 = phi_kp1 <= ( phi_cord = phio_ + eta() * Dphio_ ) ) )
      {
        // We will accept this step and and move on.
        if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
          out
            << "\nphi_k = " << phi_k << ( test1 ? " <= " : " > " )
            << "phi_km1 = " << phio_ << std::endl
            << "phi_kp1 = " << phi_kp1 << ( test2 ? " <= " : " > " )
            << "phi_km1 + eta * Dphi_km1 = " << phi_cord << std::endl
            << "This is a sufficent reduction so reset watchdog.\n";
        }
        watch_k_ = 0;
        step_return = true;
      }
      else if ( ! ( test1 = ( phi_kp1 <= phio_ ) ) ) {
        // Even this reduction is no good!
        // Go back to original point and do a linesearch from there.
        if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
          out
            << "\nphi_kp1 = " << phi_kp1 << " > phi_km1 = " << phio_ << std::endl
            << "This is not good reduction in phi so do linesearch from x_km1\n"
            << "\n* Go back to x_km1: x_kp1 = x_k - alpha_k * d_k ...\n";
        }

        // Go back from x_k to x_km1 for iteration k:
        //
        //   x_kp1 = x_km1
        //   x_kp1 = x_k - alpha_km1 * d_km1
        //
        // A negative sign for alpha is an indication that we are backtracking.
        //
        s.alpha().set_k(0)  = -1.0;
        s.d().set_k(0).v()  = do_;
        s.f().set_k(+1)     = fo_;

        if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
          out << "Output iteration k ...\n"
            << "k = k+1\n";
        }

        // Output these iteration quantities
        algo.track().output_iteration( algo ); // k
        // Transition to iteration k+1
        s.next_iteration();

        // Take the step from x_k = x_km2 to x_kp1 for iteration k (k+1):
        //
        //   x_kp1 = x_km2 + alpha_n * d_km2
        //   x_kp1 = x_k + alpha_n * d_km1
        //   x_kp1 = x_n
        //
        if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
          out << "\n* Take the step from x_k = x_km2 to x_kp1 for iteration k (k+1)\n"
            << "Find: x_kp1 = x_k + alpha_k * d_k = x_km2 + alpha_k * d_km2\n ...\n";
        }

        // alpha_k = 1.0
        // (deliberately shadows the outer alpha_k reference: after
        // next_iteration() this is the alpha for the NEW iteration k)
        value_type &alpha_k = s.alpha().set_k(0) = 1.0;

        // /////////////////////////////////////
        // Compute Dphi_k and phi_k

        // x_k
        const DVector &x_k = xo_;

        // d_k
        const DVector &d_k = s.d().set_k(0).v() = do_;

        // Dphi_k
        const value_type &Dphi_k = Dphio_;

        // phi_k
        const value_type &phi_k = s.phi().set_k(0) = phio_;

        // Here f_kp1, and c_kp1 are updated at the same time the
        // line search is being performed.
        algo.nlp().set_f( &s.f().set_k(+1) );
        algo.nlp().set_c( &s.c().set_k(+1).v() );
        phi_calc.set_nlp( algo.get_nlp() );

        // ////////////////////////////////////////
        // Compute x_xp1 and ph_kp1 for full step

        // x_kp1 = x_k + alpha_k * d_k
        DVector &x_kp1 = s.x().set_k(+1).v();
        V_VpV( &x_kp1, x_k, d_k );

        // phi_kp1
        value_type &phi_kp1 = s.phi().set_k(+1) = phiop1_;

        const DVectorSlice xd[2] = { x_k(), d_k() };
        MeritFuncCalc1DQuadratic phi_calc_1d( phi_calc, 1, xd, &x_kp1() );
        ls_success = direct_line_search().do_line_search(
          phi_calc_1d, phi_k
          , &alpha_k, &phi_kp1
          , (int)olevel >= (int)PRINT_ALGORITHM_STEPS ?
            &out : static_cast<std::ostream*>(0) );

        if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
          out << "\nOutput iteration k (k+1) ...\n"
            << "k = k+1 (k+2)\n"
            << "Reinitialize watchdog algorithm\n";
        }

        // Output these iteration quantities
        algo.track().output_iteration( algo ); // (k+1)
        // Transition to iteration k+1 (k+2)
        s.next_iteration();

        watch_k_ = 0; // Reinitialize the watchdog

        // Any update for k (k+2) should use the last updated value
        // which was for k-2 (k) since there is not much info for k-1 (k+1).
        // Be careful here and make sure this is square with other steps.

        algo.do_step_next( EvalNewPoint_name );
        step_return = false; // Redirect control
      }
      else {
        if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
          out
            << "phi_kp1 = " << phi_kp1 << " <= phi_km1 = " << phio_ << std::endl
            << "\nAccept this step but do a linesearch next iteration!\n";
        }
        // Slip the update of the penalty parameter
        const value_type mu_k = s.mu().get_k(0);
        s.mu().set_k(+1) = mu_k;
        // Do the last stage of the watchdog procedure next iteration.
        watch_k_ = 2;
        step_return = true;
      }
      break;
    }
    case NORMAL_LINE_SEARCH:
    case 2:
    {
      if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
        if( watch_k_ == 2 ) {
          out << "\n*** Second watchdog iteration:\n"
            << "Do a line search to determine x_kp1 = x_k + alpha_k * d_k ...\n";
        }
        else {
          out << "\n*** Normal linesearch:\n"
            << "Do a line search to determine x_kp1 = x_k + alpha_k * d_k ...\n";
        }
      }

      const DVectorSlice xd[2] = { x_k(), d_k() };
      MeritFuncCalc1DQuadratic phi_calc_1d( phi_calc, 1, xd, &x_kp1() );
      ls_success = direct_line_search().do_line_search( phi_calc_1d, phi_k
        , &alpha_k, &phi_kp1
        , (int)olevel >= (int)PRINT_ALGORITHM_STEPS ?
          &out : static_cast<std::ostream*>(0) );

      if( watch_k_ == 2 )
        watch_k_ = 0;

      step_return = true;
      break;
    }
    default:
      TEUCHOS_TEST_FOR_EXCEPT(true); // Only local programming error
  }

  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
    out << "\nalpha = " << s.alpha().get_k(0) << "\n";
    out << "\nphi_kp1 = " << s.phi().get_k(+1) << "\n";
  }

  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_VECTORS) ) {
    out << "\nd_k = \n" << s.d().get_k(0)();
    out << "\nx_kp1 = \n" << s.x().get_k(+1)();
  }

  if( !ls_success )
    throw LineSearchFailure("LineSearchWatchDog_Step::do_step(): Line search failure");

  return step_return;

}

// Print a human-readable pseudo-code description of this step to 'out',
// each line prefixed with the leading-whitespace string L.
void MoochoPack::LineSearchWatchDog_Step::print_step( const Algorithm& algo
  , poss_type step_poss, IterationPack::EDoStepType type, poss_type assoc_step_poss
  , std::ostream& out, const std::string& L ) const
{
  out << L << "*** Use the Watchdog linesearch when near solution.\n"
    << L << "default: opt_kkt_err_threshold = 0.0\n"
    << L << " feas_kkt_err_threshold = 0.0\n"
    << L << " eta = 1.0e-4\n"
    << L << " watch_k = NORMAL_LINE_SEARCH\n"
    << L << "begin definition of NLP merit function phi.value(f(x),c(x)):\n";

  merit_func().print_merit_func( out, L + " " );

  out << L << "end definition\n"
    << L << "Dphi_k = phi.deriv()\n"
    << L << "if Dphi_k >= 0 then\n"
    << L << " throw line_search_failure\n"
    << L << "end\n"
    << L << "phi_kp1 = phi_k.value(f_kp1,c_kp1)\n"
    << L << "phi_k = phi.value(f_k,c_k)\n"
    << L << "if watch_k == NORMAL_LINE_SEARCH then\n"
    << L << " if opt_kkt_err <= opt_kkt_err_threshold\n"
    << L << " and feas_kkt_err <= feas_kkt_err_threshold then\n"
    << L << " *** Start using watchdog from now on!\n"
    << L << " watch_k = 0\n"
    << L << " end\n"
    << L << "end\n"
    << L << "if watch_k == 0 then\n"
    << L << " *** Zeroth watchdog iteration\n"
    << L << " if phi_kp1 >= phi_k + eta * Dphi_k then\n"
    << L << " *** Accept this increase for now but watch out next iteration!\n"
    << L << " *** Save the first point\n"
    << L << " xo = x_k\n"
    << L << " fo = f_k\n"
    << L << " nrm_co = norm_inf_c_k\n"
    << L << " do = d_k\n"
    << L << " phio = phi_k\n"
    << L << " Dphio = Dphi_k\n"
    << L << " phiop1 = phi_kp1\n"
    << L << " *** Skip the update of the penalty parameter next iteration.\n"
    << L << " mu_kp1 = mu_k\n"
    << L << " *** Continue with next step in watchdog\n"
    << L << " watch_k = 1\n"
    << L << " else\n"
    << L << " *** This is a good step so take it!\n"
    << L << " end\n"
    << L << "else if watch_k == 1 then\n"
    << L << " *** First watchdog iteration\n"
    << L << " Do line search for: x_kp1 = x_k + alpha_k + d_k\n"
    << L << " -> alpha_k, x_kp1, f_kp1, c_kp1, phi_kp1\n"
    << L << " if ( phi_k <= phio ) or ( phi_kp1 <= phio + eta * Dphio ) then\n"
    << L << " *** We will accept this step and reinitialize the watchdog\n"
    << L << " watch_k = 0\n"
    << L << " else if ( phi_kp1 > phio ) then\n"
    << L << " *** This reduction is no good!\n"
    << L << " *** Go back from x_k to x_km1 for this iteration (k)\n"
    << L << " alpha_k = -1.0\n"
    << L << " d_k = do\n"
    << L << " f_kp1 = fo\n"
    << L << " Output this iteration (k)\n"
    << L << " k = k+1\n"
    << L << " *** Go from x_k = x_km2 to x_kp1 for this iteration (k+1)\n"
    << L << " alpha_k = 1\n"
    << L << " x_k = xo\n"
    << L << " d_k = do\n"
    << L << " Dphi_k = Dphio\n"
    << L << " phi_k = phio\n"
    << L << " x_kp1 = x_k + d_k\n"
    << L << " phi_kp1 = phiop1\n"
    << L << " Do line search for: x_kp1 = x_k + alpha_k + d_k\n"
    << L << " -> alpha_k, x_kp1, f_kp1, c_kp1, phi_kp1\n"
    << L << " Output this iteration (k+1)\n"
    << L << " k = k+1\n"
    << L << " *** Any updates for k (k+2) should use the last updated value\n"
    << L << " *** which was for k-2 (k) since there is not much info for k-1 (k+1).\n"
    << L << " *** Be careful here and make sure this works with other steps.\n"
    << L << " goto EvalNewPoint\n"
    << L << " else\n"
    << L << " *** Accept this reduction but do a linesearch next iteration!\n"
    << L << " *** Skip the update of the penalty parameter next iteration.\n"
    << L << " mu_kp1 = mu_k\n"
    << L << " *** Continue with next step in watchdog\n"
    << L << " watch_k = 2\n"
    << L << " end\n"
    << L << "else if ( watch_k == 2 ) then\n"
    << L << " *** Second watchdog iteration\n"
    << L << " Do line search for: x_kp1 = x_k + alpha_k + d_k\n"
    << L << " -> alpha_k, x_kp1, f_kp1, c_kp1, phi_kp1\n"
    << L << " *** Reset the watchdog algorithm\n"
    // FIX: do_step() sets watch_k_ = 0 here, not 1; the printed
    // pseudo-code was inconsistent with the implementation.
    << L << " watch_k = 0\n"
    << L << "else if ( watch_k == NORMAL_LINE_SEARCH ) then\n"
    << L << " Do line search for: x_kp1 = x_k + alpha_k + d_k\n"
    << L << " -> alpha_k, x_kp1, f_kp1, c_kp1, phi_kp1\n"
    << L << " begin direct line search : \""
    << typeName(direct_line_search()) << "\"\n";

  direct_line_search().print_algorithm( out, L + " " );

  out
    << L << " end direct line search\n"
    << L << "end\n"
    << L << "if maximum number of linesearch iterations are exceeded then\n"
    << L << " throw line_search_failure\n"
    << L << "end\n";
}

#endif // 0
// Generated by Doxygen 1.7.6.1