MoochoPack : Framework for Large-Scale Optimization Algorithms
Version of the Day
MoochoPack_CheckConvergenceStd_Strategy.cpp
// @HEADER
// ***********************************************************************
//
// Moocho: Multi-functional Object-Oriented arCHitecture for Optimization
// Copyright (2003) Sandia Corporation
//
// Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
// license for use of this work by or on behalf of the U.S. Government.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Roscoe A. Bartlett (rabartl@sandia.gov)
//
// ***********************************************************************
// @HEADER

#include <assert.h>

#include <ostream>
#include <limits>
#include <sstream>

#include "MoochoPack_CheckConvergenceStd_Strategy.hpp"
#include "MoochoPack_NLPAlgoContainer.hpp"
#include "MoochoPack_Exceptions.hpp"
#include "MoochoPack_moocho_algo_conversion.hpp"
#include "IterationPack_print_algorithm_step.hpp"
#include "AbstractLinAlgPack_MatrixOpOut.hpp"
#include "AbstractLinAlgPack_VectorMutable.hpp"
#include "AbstractLinAlgPack_VectorAuxiliaryOps.hpp"
#include "AbstractLinAlgPack_VectorStdOps.hpp"
#include "AbstractLinAlgPack_VectorOut.hpp"
#include "AbstractLinAlgPack_assert_print_nan_inf.hpp"
#include "AbstractLinAlgPack_LinAlgOpPack.hpp"
#include "Teuchos_dyn_cast.hpp"
#include "Teuchos_Assert.hpp"

namespace MoochoPack {

CheckConvergenceStd_Strategy::CheckConvergenceStd_Strategy(
  EOptErrorCheck opt_error_check
  ,EScaleKKTErrorBy scale_opt_error_by
  ,EScaleKKTErrorBy scale_feas_error_by
  ,EScaleKKTErrorBy scale_comp_error_by
  ,bool scale_opt_error_by_Gf
  )
  :
  CheckConvergence_Strategy(
    opt_error_check,
    scale_opt_error_by,
    scale_feas_error_by,
    scale_comp_error_by,
    scale_opt_error_by_Gf
    )
{}

bool CheckConvergenceStd_Strategy::Converged(
  Algorithm& _algo
  )
{
  using AbstractLinAlgPack::assert_print_nan_inf;
  using AbstractLinAlgPack::combined_nu_comp_err;

  NLPAlgo      &algo = rsqp_algo(_algo);
  NLPAlgoState &s    = algo.rsqp_state();
  NLP          &nlp  = algo.nlp();

  EJournalOutputLevel olevel = algo.algo_cntr().journal_output_level();
  std::ostream& out = algo.track().journal_out();

  const size_type
    n  = nlp.n(),
    m  = nlp.m(),
    nb = nlp.num_bounded_x();

  // Get the iteration quantities
  IterQuantityAccess<value_type>
    &opt_kkt_err_iq  = s.opt_kkt_err(),
    &feas_kkt_err_iq = s.feas_kkt_err(),
    &comp_kkt_err_iq = s.comp_kkt_err();

  IterQuantityAccess<VectorMutable>
    &x_iq   = s.x(),
    &d_iq   = s.d(),
    &Gf_iq  = s.Gf(),
    *c_iq   = m     ? &s.c()   : NULL,
    *rGL_iq = n > m ? &s.rGL() : NULL,
    *GL_iq  = n > m ? &s.GL()  : NULL,
    *nu_iq  = n > m ? &s.nu()  : NULL;

  // opt_err = (||rGL||inf or ||GL||) / (||Gf|| + scale_kkt_factor)
  value_type
    norm_inf_Gf_k    = 0.0,
    norm_inf_GLrGL_k = 0.0;

  if( n > m && scale_opt_error_by_Gf() && Gf_iq.updated_k(0) ) {
    assert_print_nan_inf(
      norm_inf_Gf_k = Gf_iq.get_k(0).norm_inf(),
      "||Gf_k||inf",true,&out
      );
  }

  // NOTE:
  // The strategy object CheckConvergenceIP_Strategy assumes
  // that this will always be the gradient of the lagrangian
  // of the original problem, not the gradient of the lagrangian
  // for psi. (don't use augmented nlp info here)
  if( n > m ) {
    if( opt_error_check() == OPT_ERROR_REDUCED_GRADIENT_LAGR ) {
      assert_print_nan_inf( norm_inf_GLrGL_k = rGL_iq->get_k(0).norm_inf(),
        "||rGL_k||inf",true,&out);
    }
    else {
      assert_print_nan_inf( norm_inf_GLrGL_k = GL_iq->get_k(0).norm_inf(),
        "||GL_k||inf",true,&out);
    }
  }

  const value_type
    opt_scale_factor = 1.0 + norm_inf_Gf_k,
    opt_err = norm_inf_GLrGL_k / opt_scale_factor;

  // feas_err
  const value_type feas_err = ( ( m ? c_iq->get_k(0).norm_inf() : 0.0 ) );

  // comp_err
  value_type comp_err = 0.0;
  if ( n > m ) {
    if (nb > 0) {
      comp_err = combined_nu_comp_err(nu_iq->get_k(0), x_iq.get_k(0), nlp.xl(), nlp.xu());
    }
    if(m) {
      assert_print_nan_inf( feas_err,"||c_k||inf",true,&out);
    }
  }

  // scaling factors
  const value_type
    scale_opt_factor  = CalculateScalingFactor(s, scale_opt_error_by()),
    scale_feas_factor = CalculateScalingFactor(s, scale_feas_error_by()),
    scale_comp_factor = CalculateScalingFactor(s, scale_comp_error_by());

  // kkt_err
  const value_type
    opt_kkt_err_k  = opt_err/scale_opt_factor,
    feas_kkt_err_k = feas_err/scale_feas_factor,
    comp_kkt_err_k = comp_err/scale_comp_factor;

  // update the iteration quantities
  if(n > m) opt_kkt_err_iq.set_k(0) = opt_kkt_err_k;
  feas_kkt_err_iq.set_k(0) = feas_kkt_err_k;
  comp_kkt_err_iq.set_k(0) = comp_kkt_err_k;

  // step_err
  value_type step_err = 0.0;
  if( d_iq.updated_k(0) ) {
    step_err = AbstractLinAlgPack::max_rel_step(x_iq.get_k(0),d_iq.get_k(0));
    assert_print_nan_inf( step_err,"max(d(i)/max(1,x(i)),i=1...n)",true,&out);
  }

  const value_type
    opt_tol  = algo.algo_cntr().opt_tol(),
    feas_tol = algo.algo_cntr().feas_tol(),
    comp_tol = algo.algo_cntr().comp_tol(),
    step_tol = algo.algo_cntr().step_tol();

  const bool found_solution =
    opt_kkt_err_k < opt_tol
    && feas_kkt_err_k < feas_tol
    && comp_kkt_err_k < comp_tol
    && step_err < step_tol;

  if( int(olevel) >= int(PRINT_ALGORITHM_STEPS) || (int(olevel) >= int(PRINT_BASIC_ALGORITHM_INFO) && found_solution) )
  {
    out
      << "\nscale_opt_factor = " << scale_opt_factor
      << " (scale_opt_error_by = " << (scale_opt_error_by()==SCALE_BY_ONE ? "SCALE_BY_ONE"
        : (scale_opt_error_by()==SCALE_BY_NORM_INF_X ? "SCALE_BY_NORM_INF_X"
        : "SCALE_BY_NORM_2_X" ) ) << ")"

      << "\nscale_feas_factor = " << scale_feas_factor
      << " (scale_feas_error_by = " << (scale_feas_error_by()==SCALE_BY_ONE ? "SCALE_BY_ONE"
        : (scale_feas_error_by()==SCALE_BY_NORM_INF_X ? "SCALE_BY_NORM_INF_X"
        : "SCALE_BY_NORM_2_X" ) ) << ")"

      << "\nscale_comp_factor = " << scale_comp_factor
      << " (scale_comp_error_by = " << (scale_comp_error_by()==SCALE_BY_ONE ? "SCALE_BY_ONE"
        : (scale_comp_error_by()==SCALE_BY_NORM_INF_X ? "SCALE_BY_NORM_INF_X"
        : "SCALE_BY_NORM_2_X" ) ) << ")"
      << "\nopt_scale_factor = " << opt_scale_factor
      << " (scale_opt_error_by_Gf = " << (scale_opt_error_by_Gf()?"true":"false") << ")"
      << "\nopt_kkt_err_k = " << opt_kkt_err_k << ( opt_kkt_err_k < opt_tol ? " < " : " > " )
      << "opt_tol = " << opt_tol
      << "\nfeas_kkt_err_k = " << feas_kkt_err_k << ( feas_kkt_err_k < feas_tol ? " < " : " > " )
" < " : " > " ) 00220 << "feas_tol = " << feas_tol 00221 << "\ncomp_kkt_err_k = " << comp_kkt_err_k << ( comp_kkt_err_k < comp_tol ? " < " : " > " ) 00222 << "comp_tol = " << comp_tol 00223 << "\nstep_err = " << step_err << ( step_err < step_tol ? " < " : " > " ) 00224 << "step_tol = " << step_tol 00225 << std::endl; 00226 } 00227 00228 return found_solution; 00229 00230 } 00231 00232 void CheckConvergenceStd_Strategy::print_step( const Algorithm& _algo, std::ostream& out, const std::string& L ) const 00233 { 00234 out 00235 << L << "*** Check to see if the KKT error is small enough for convergence\n" 00236 << L << "if scale_(opt|feas|comp)_error_by == SCALE_BY_ONE then\n" 00237 << L << " scale_(opt|feas|comp)_factor = 1.0\n" 00238 << L << "else if scale_(opt|feas|comp)_error_by == SCALE_BY_NORM_2_X then\n" 00239 << L << " scale_(opt|feas|comp)_factor = 1.0 + norm_2(x_k)\n" 00240 << L << "else if scale_(opt|feas|comp)_error_by == SCALE_BY_NORM_INF_X then\n" 00241 << L << " scale_(opt|feas|comp)_factor = 1.0 + norm_inf(x_k)\n" 00242 << L << "end\n" 00243 << L << "if scale_opt_error_by_Gf == true then\n" 00244 << L << " opt_scale_factor = 1.0 + norm_inf(Gf_k)\n" 00245 << L << "else\n" 00246 << L << " opt_scale_factor = 1.0\n" 00247 << L << "end\n"; 00248 if( opt_error_check() == OPT_ERROR_REDUCED_GRADIENT_LAGR ) 00249 { 00250 out 00251 << L << "opt_err = norm_inf(rGL_k)/opt_scale_factor\n"; 00252 } 00253 else 00254 { 00255 out 00256 << L << "opt_err = norm_inf(GL_k)/opt_scale_factor\n"; 00257 } 00258 00259 out 00260 << L << "feas_err = norm_inf(c_k)\n" 00261 << L << "comp_err = max(i, nu(i)*(xu(i)-x(i)), -nu(i)*(x(i)-xl(i)))\n" 00262 << L << "opt_kkt_err_k = opt_err/scale_opt_factor\n" 00263 << L << "feas_kkt_err_k = feas_err/scale_feas_factor\n" 00264 << L << "comp_kkt_err_k = feas_err/scale_comp_factor\n" 00265 << L << "if d_k is updated then\n" 00266 << L << " step_err = max( |d_k(i)|/(1+|x_k(i)|), i=1..n )\n" 00267 << L << "else\n" 00268 << L << " step_err = 0\n" 00269 << L << "end\n" 00270 << L << "if opt_kkt_err_k < opt_tol\n" 00271 << L << " and feas_kkt_err_k < feas_tol\n" 00272 << L << " and step_err < step_tol then\n" 00273 << L << " report optimal x_k, lambda_k and nu_k to the nlp\n" 00274 << L << " terminate, the solution has beed found!\n" 00275 << L << "end\n"; 00276 } 00277 00278 00279 value_type CheckConvergenceStd_Strategy::CalculateScalingFactor( NLPAlgoState& state, EScaleKKTErrorBy scale_by ) const 00280 { 00281 // scale_kkt_factor 00282 value_type scale_factor = 1.0; 00283 switch(scale_by) 00284 { 00285 case SCALE_BY_ONE: 00286 scale_factor = 1.0; 00287 break; 00288 case SCALE_BY_NORM_2_X: 00289 scale_factor = 1.0 + state.x().get_k(0).norm_2(); 00290 break; 00291 case SCALE_BY_NORM_INF_X: 00292 scale_factor = 1.0 + state.x().get_k(0).norm_inf(); 00293 break; 00294 default: 00295 TEUCHOS_TEST_FOR_EXCEPT(true); // Should never be called 00296 } 00297 00298 return scale_factor; 00299 } 00300 00301 } // end namespace MoochoPack 00302