|
ConstrainedOptPack: C++ Tools for Constrained (and Unconstrained) Optimization
Version of the Day
|
00001 // @HEADER 00002 // *********************************************************************** 00003 // 00004 // Moocho: Multi-functional Object-Oriented arCHitecture for Optimization 00005 // Copyright (2003) Sandia Corporation 00006 // 00007 // Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive 00008 // license for use of this work by or on behalf of the U.S. Government. 00009 // 00010 // Redistribution and use in source and binary forms, with or without 00011 // modification, are permitted provided that the following conditions are 00012 // met: 00013 // 00014 // 1. Redistributions of source code must retain the above copyright 00015 // notice, this list of conditions and the following disclaimer. 00016 // 00017 // 2. Redistributions in binary form must reproduce the above copyright 00018 // notice, this list of conditions and the following disclaimer in the 00019 // documentation and/or other materials provided with the distribution. 00020 // 00021 // 3. Neither the name of the Corporation nor the names of the 00022 // contributors may be used to endorse or promote products derived from 00023 // this software without specific prior written permission. 00024 // 00025 // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY 00026 // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 00027 // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 00028 // PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL SANDIA CORPORATION OR THE 00029 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 00030 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 00031 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 00032 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 00033 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 00034 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 00035 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 00036 // 00037 // Questions? Contact Roscoe A. Bartlett (rabartl@sandia.gov) 00038 // 00039 // *********************************************************************** 00040 // @HEADER 00041 00042 // 00043 // 7/4/2002: RAB: I was able to update this class using the 00044 // functions in AbstractLinAlgPack_LinAlgOpPackHack.hpp. This is wastefull in that 00045 // I am creating temporaries every time any operation is performed 00046 // but this was the easiest way to get things going. 00047 // 00048 // 7/4/2002: RAB : ToDo: In the future it would be good to create 00049 // some type of temporary vector server so that I could avoid 00050 // creating all of these temporaries. This will take some thought 00051 // and may not be worth it for now. 
00052 // 00053 00054 #include <assert.h> 00055 00056 #include <ostream> 00057 #include <iomanip> 00058 #include <limits> 00059 00060 #include "ConstrainedOptPack_QPSchur.hpp" 00061 #include "ConstrainedOptPack_ComputeMinMult.hpp" 00062 #include "AbstractLinAlgPack_MatrixSymPosDefCholFactor.hpp" 00063 #include "AbstractLinAlgPack_SpVectorOp.hpp" 00064 #include "AbstractLinAlgPack_GenPermMatrixSliceOp.hpp" 00065 #include "AbstractLinAlgPack_LinAlgOpPackHack.hpp" 00066 #include "AbstractLinAlgPack_GenPermMatrixSliceOut.hpp" 00067 #include "AbstractLinAlgPack_SpVectorOut.hpp" 00068 #include "AbstractLinAlgPack_MatrixOpNonsing.hpp" 00069 #include "AbstractLinAlgPack_MatrixOpOut.hpp" 00070 #include "AbstractLinAlgPack_VectorStdOps.hpp" 00071 #include "AbstractLinAlgPack_EtaVector.hpp" 00072 #include "DenseLinAlgPack_AssertOp.hpp" 00073 #include "DenseLinAlgPack_DVectorClass.hpp" 00074 #include "DenseLinAlgPack_DVectorClassExt.hpp" 00075 #include "DenseLinAlgPack_DVectorOp.hpp" 00076 #include "DenseLinAlgPack_LinAlgOpPack.hpp" 00077 #include "DenseLinAlgPack_DVectorOut.hpp" 00078 #include "DenseLinAlgPack_DMatrixOut.hpp" 00079 #include "Teuchos_Workspace.hpp" 00080 #include "Teuchos_Assert.hpp" 00081 00082 namespace LinAlgOpPack { 00083 using AbstractLinAlgPack::Vp_StV; 00084 using AbstractLinAlgPack::Vp_StMtV; 00085 using AbstractLinAlgPack::Vp_StV; 00086 using AbstractLinAlgPack::Vp_StMtV; 00087 } 00088 00089 namespace { 00090 00091 // Some local helper functions. 00092 00093 template< class T > 00094 inline 00095 T my_max( const T& v1, const T& v2 ) { return v1 > v2 ? 
v1 : v2; } 00096 00097 // 00098 // Print a bnd as a string 00099 // 00100 inline 00101 const char* bnd_str( ConstrainedOptPack::EBounds bnd ) 00102 { 00103 switch(bnd) { 00104 case ConstrainedOptPack::FREE: 00105 return "FREE"; 00106 case ConstrainedOptPack::UPPER: 00107 return "UPPER"; 00108 case ConstrainedOptPack::LOWER: 00109 return "LOWER"; 00110 case ConstrainedOptPack::EQUALITY: 00111 return "EQUALITY"; 00112 } 00113 TEUCHOS_TEST_FOR_EXCEPT(true); // should never be executed 00114 return 0; 00115 } 00116 00117 // 00118 // print a bool 00119 // 00120 inline 00121 const char* bool_str( bool b ) 00122 { 00123 return b ? "true" : "false"; 00124 } 00125 00126 // 00127 // Return a std::string that has a file name, line number and 00128 // error message. 00129 // 00130 std::string error_msg( 00131 const char file_name[], const int line_num, const char err_msg[] 00132 ) 00133 { 00134 std::ostringstream omsg; 00135 omsg 00136 << file_name << ":" << line_num << ":" << err_msg; 00137 return omsg.str(); 00138 } 00139 00140 // 00141 // Deincrement all indices less that k_remove 00142 // 00143 void deincrement_indices( 00144 DenseLinAlgPack::size_type k_remove 00145 ,std::vector<DenseLinAlgPack::size_type> *indice_vector 00146 ,size_t len_vector 00147 ) 00148 { 00149 typedef DenseLinAlgPack::size_type size_type; 00150 typedef std::vector<DenseLinAlgPack::size_type> vec_t; 00151 TEUCHOS_TEST_FOR_EXCEPT( !( len_vector <= indice_vector->size() ) ); 00152 for( vec_t::iterator itr = indice_vector->begin(); itr != indice_vector->begin() + len_vector; ++itr ) { 00153 if( *itr > k_remove ) 00154 --(*itr); 00155 } 00156 } 00157 00158 // 00159 // Insert the element (r_v,c_v) into r[] and c[] sorted by r[] 00160 // 00161 void insert_pair_sorted( 00162 DenseLinAlgPack::size_type r_v 00163 ,DenseLinAlgPack::size_type c_v 00164 ,size_t len_vector // length of the new vector 00165 ,std::vector<DenseLinAlgPack::size_type> *r 00166 ,std::vector<DenseLinAlgPack::size_type> *c 00167 ) 00168 
{ 00169 typedef std::vector<DenseLinAlgPack::size_type> rc_t; 00170 TEUCHOS_TEST_FOR_EXCEPT( !( r->size() >= len_vector && c->size() >= len_vector ) ); 00171 // find the insertion point in r[] 00172 rc_t::iterator 00173 itr = std::lower_bound( r->begin(), r->begin() + len_vector-1, r_v ); 00174 const DenseLinAlgPack::size_type p = itr - r->begin(); 00175 // Shift all of the stuff out of the way to make room for the insert 00176 {for( rc_t::iterator itr_last = r->begin() + len_vector-1; 00177 itr_last > r->begin() + p; --itr_last ) 00178 { 00179 *itr_last = *(itr_last-1); 00180 }} 00181 {for( rc_t::iterator itr_last = c->begin() + len_vector-1; 00182 itr_last > c->begin() + p; --itr_last ) 00183 { 00184 *itr_last = *(itr_last-1); 00185 }} 00186 // Insert the new elements 00187 (*r)[p] = r_v; 00188 (*c)[p] = c_v; 00189 } 00190 00191 // 00192 // z_hat = inv(S_hat) * ( d_hat - U_hat'*vo ) 00193 // 00194 void calc_z( 00195 const AbstractLinAlgPack::MatrixSymOpNonsing &S_hat 00196 ,const DenseLinAlgPack::DVectorSlice &d_hat 00197 ,const AbstractLinAlgPack::MatrixOp &U_hat 00198 ,const DenseLinAlgPack::DVectorSlice *vo // If NULL then assumed zero 00199 ,DenseLinAlgPack::DVectorSlice *z_hat 00200 ) 00201 { 00202 using LinAlgOpPack::Vp_StMtV; 00203 using LinAlgOpPack::V_InvMtV; 00204 using Teuchos::Workspace; 00205 Teuchos::WorkspaceStore* wss = Teuchos::get_default_workspace_store().get(); 00206 00207 Workspace<DenseLinAlgPack::value_type> t_ws(wss,d_hat.dim()); 00208 DenseLinAlgPack::DVectorSlice t(&t_ws[0],t_ws.size()); 00209 t = d_hat; 00210 if(vo) 00211 Vp_StMtV( &t, -1.0, U_hat, BLAS_Cpp::trans, *vo ); 00212 V_InvMtV( z_hat, S_hat, BLAS_Cpp::no_trans, t ); 00213 } 00214 00215 // 00216 // v = inv(Ko) * ( fo - U_hat * z_hat ) 00217 // 00218 void calc_v( 00219 const AbstractLinAlgPack::MatrixSymOpNonsing &Ko 00220 ,const DenseLinAlgPack::DVectorSlice *fo // If NULL then assumed to be zero 00221 ,const AbstractLinAlgPack::MatrixOp &U_hat 00222 ,const 
DenseLinAlgPack::DVectorSlice &z_hat // Only accessed if U_hat.cols() > 0 00223 ,DenseLinAlgPack::DVectorSlice *v 00224 ) 00225 { 00226 using DenseLinAlgPack::norm_inf; 00227 using LinAlgOpPack::Vp_StMtV; 00228 using LinAlgOpPack::V_InvMtV; 00229 using Teuchos::Workspace; 00230 Teuchos::WorkspaceStore* wss = Teuchos::get_default_workspace_store().get(); 00231 00232 Workspace<DenseLinAlgPack::value_type> t_ws(wss,v->dim()); 00233 DenseLinAlgPack::DVectorSlice t(&t_ws[0],t_ws.size()); 00234 if(fo) { 00235 t = *fo; 00236 } 00237 else { 00238 t = 0.0; 00239 } 00240 if( U_hat.cols() ) 00241 Vp_StMtV( &t, -1.0, U_hat, BLAS_Cpp::no_trans, z_hat ); 00242 if( norm_inf(t) > 0.0 ) 00243 V_InvMtV( v, Ko, BLAS_Cpp::no_trans, t ); 00244 else 00245 *v = 0.0; 00246 } 00247 00248 // 00249 // mu_D_hat = 00250 // - Q_XD_hat' * g 00251 // - Q_XD_hat' * G * x 00252 // - Q_XD_hat' * A * v(n_R+1:n_R+m) 00253 // - Q_XD_hat' * A_bar * P_plus_hat * z_hat 00254 // 00255 void calc_mu_D( 00256 const ConstrainedOptPack::QPSchur::ActiveSet &act_set 00257 ,const DenseLinAlgPack::DVectorSlice &x 00258 ,const DenseLinAlgPack::DVectorSlice &v 00259 ,DenseLinAlgPack::DVectorSlice *mu_D 00260 ) 00261 { 00262 using BLAS_Cpp::no_trans; 00263 using BLAS_Cpp::trans; 00264 using LinAlgOpPack::V_MtV; 00265 using LinAlgOpPack::V_StMtV; 00266 using LinAlgOpPack::Vp_StPtMtV; 00267 using AbstractLinAlgPack::V_MtV; 00268 using AbstractLinAlgPack::Vp_MtV; 00269 using Teuchos::Workspace; 00270 Teuchos::WorkspaceStore* wss = Teuchos::get_default_workspace_store().get(); 00271 00272 const ConstrainedOptPack::QPSchurPack::QP 00273 &qp = act_set.qp(); 00274 const DenseLinAlgPack::size_type 00275 n = qp.n(), 00276 n_R = qp.n_R(), 00277 m = qp.m(); 00278 00279 const AbstractLinAlgPack::GenPermMatrixSlice &Q_XD_hat = act_set.Q_XD_hat(); 00280 const DenseLinAlgPack::DVectorSlice g = qp.g(); 00281 const AbstractLinAlgPack::MatrixSymOp &G = qp.G(); 00282 // mu_D_hat = - Q_XD_hat' * g 00283 V_StMtV( mu_D, -1.0, Q_XD_hat, 
trans, g ); 00284 // mu_D_hat += - Q_XD_hat' * G * x 00285 Vp_StPtMtV( mu_D, -1.0, Q_XD_hat, trans, G, no_trans, x ); 00286 // mu_D_hat += - Q_XD_hat' * A * v(n_R+1:n_R+m) 00287 if( m ) { 00288 Vp_StPtMtV( mu_D, -1.0, Q_XD_hat, trans, qp.A(), no_trans, v(n_R+1,n_R+m) ); 00289 } 00290 // p_mu_D_hat += - Q_XD_hat' * A_bar * P_plus_hat * z_hat 00291 if( act_set.q_plus_hat() && act_set.q_hat() ) { 00292 const DenseLinAlgPack::DVectorSlice z_hat = act_set.z_hat(); 00293 AbstractLinAlgPack::SpVector P_plus_hat_z_hat; 00294 V_MtV( &P_plus_hat_z_hat, act_set.P_plus_hat(), no_trans, z_hat ); 00295 Vp_StPtMtV( mu_D, -1.0, Q_XD_hat, trans 00296 , qp.constraints().A_bar(), no_trans, P_plus_hat_z_hat() ); 00297 } 00298 } 00299 00300 // 00301 // p_mu_D_hat = 00302 // - Q_XD_hat' * G * Q_R * p_v(1:n_R) 00303 // - Q_XD_hat' * G * P_XF_hat * p_z_hat 00304 // - Q_XD_hat' * A * p_v(n_R+1:n_R+m) 00305 // - Q_XD_hat' * A_bar * (P_plus_hat * p_z_hat + e(ja)) 00306 // 00307 void calc_p_mu_D( 00308 const ConstrainedOptPack::QPSchur::ActiveSet &act_set 00309 ,const DenseLinAlgPack::DVectorSlice &p_v 00310 ,const DenseLinAlgPack::DVectorSlice &p_z_hat 00311 ,const DenseLinAlgPack::size_type *ja // If != NULL then we will include the term e(ja) 00312 ,DenseLinAlgPack::DVectorSlice *p_mu_D 00313 ) 00314 { 00315 using BLAS_Cpp::no_trans; 00316 using BLAS_Cpp::trans; 00317 using AbstractLinAlgPack::V_MtV; 00318 using AbstractLinAlgPack::Vp_MtV; 00319 using LinAlgOpPack::Vp_StPtMtV; 00320 using Teuchos::Workspace; 00321 Teuchos::WorkspaceStore* wss = Teuchos::get_default_workspace_store().get(); 00322 00323 const ConstrainedOptPack::QPSchurPack::QP 00324 &qp = act_set.qp(); 00325 const ConstrainedOptPack::QPSchurPack::Constraints 00326 &constraints = qp.constraints(); 00327 const DenseLinAlgPack::size_type 00328 n = qp.n(), 00329 n_R = qp.n_R(), 00330 m = qp.m(); 00331 00332 const AbstractLinAlgPack::GenPermMatrixSlice &Q_XD_hat = act_set.Q_XD_hat(); 00333 const AbstractLinAlgPack::MatrixSymOp 
&G = qp.G(); 00334 // p_mu_D_hat = - Q_XD_hat' * G * Q_R * p_v(1:n_R) 00335 { 00336 AbstractLinAlgPack::SpVector Q_R_p_v1; 00337 V_MtV( &Q_R_p_v1, qp.Q_R(), no_trans, p_v(1,n_R) ); 00338 Vp_StPtMtV( p_mu_D, -1.0, Q_XD_hat, trans, G, no_trans, Q_R_p_v1(), 0.0 ); 00339 } 00340 // p_mu_D_hat += - Q_XD_hat' * G * P_XF_hat * p_z_hat 00341 if( act_set.q_F_hat() ) { 00342 AbstractLinAlgPack::SpVector P_XF_hat_p_z_hat; 00343 V_MtV( &P_XF_hat_p_z_hat, act_set.P_XF_hat(), no_trans, p_z_hat ); 00344 Vp_StPtMtV( p_mu_D, -1.0, Q_XD_hat, trans, G, no_trans, P_XF_hat_p_z_hat() ); 00345 } 00346 // p_mu_D_hat += - Q_XD_hat' * A * p_v(n_R+1:n_R+m) 00347 if( m ) { 00348 Vp_StPtMtV( p_mu_D, -1.0, Q_XD_hat, trans, qp.A(), no_trans, p_v(n_R+1,n_R+m) ); 00349 } 00350 // p_mu_D_hat += - Q_XD_hat' * A_bar * ( P_plus_hat * p_z_hat + e(ja) ) 00351 if( act_set.q_plus_hat() || ja ) { 00352 AbstractLinAlgPack::SpVector p_lambda_bar( 00353 n+constraints.m_breve(), act_set.q_plus_hat() + (ja ? 1 : 0) ); 00354 if( act_set.q_plus_hat() ) // p_lambda_bar = P_plus_hat * p_z_hat 00355 Vp_MtV( &p_lambda_bar, act_set.P_plus_hat(), no_trans, p_z_hat ); 00356 if( ja ) // p_lambda_bar += e(ja) (non-duplicate indices?) 
00357 p_lambda_bar.insert_element(AbstractLinAlgPack::SpVector::element_type(*ja,1.0)); 00358 Vp_StPtMtV( p_mu_D, -1.0, Q_XD_hat, trans 00359 , constraints.A_bar(), no_trans, p_lambda_bar() ); 00360 } 00361 } 00362 00363 // 00364 // Calculate the residual of the augmented KKT system: 00365 // 00366 // [ ro ] = [ Ko U_hat ] [ v ] + [ ao * bo ] 00367 // [ ra ] [ U_hat' V_hat ] [ z_hat ] [ aa * ba ] 00368 // 00369 // Expanding this out we have: 00370 // 00371 // ro = Ko * v + U_hat * z_hat + ao * bo 00372 // 00373 // = [ Q_R'*G*Q_R Q_R'*A ] * [ x_R ] + [ Q_R'*G*P_XF_hat + Q_R'*A_bar*P_plus_hat ] * z_hat + [ ao*boR ] 00374 // [ A'*Q_R ] [ lambda ] [ A'*P_XF_hat ] [ ao*bom ] 00375 // 00376 // = [ Q_R'*G*(Q_R*x_R + P_XF_hat*z_hat) + Q_R'*A*lambda + Q_R'*A_bar*P_plus_hat*z_hat + ao*boR ] 00377 // [ A'*(Q_R*x_R + P_XF_hat*z_hat) + ao*bom ] 00378 // 00379 // ra = [ U_hat' * v + V_hat * z_hat + aa*ba 00380 // 00381 // = [ P_XF_hat'*G*Q_R + P_plus_hat'*A_bar'*Q_R , P_XF_hat'*A ] * [ x_R ; lambda ] 00382 // + [ P_XF_hat'*G*P_XF_hat + P_XF_hat'*A_bar*P_plus_hat + P_plus_hat'*A_bar'*P_XF_hat 00383 // + P_F_tilde'*P_C_hat + P_C_hat'*P_F_tilde ] * z_hat + aa*ba 00384 // 00385 // = P_XF_hat'*G*(Q_R*x_R + P_XF_hat*z_hat) + P_plus_hat'*A_bar'*(Q_R*x_R + P_XF_hat*z_hat) 00386 // + P_XF_hat'*A*lambda + P_XF_hat'*A_bar*P_plus_hat*z_hat 00387 // + (P_F_tilde'*P_C_hat + P_C_hat'*P_F_tilde)*z_hat + aa*ba 00388 // 00389 // Given the QP matrices G, A, and A_bar and the active set mapping matrices Q_R, P_XF_hat, 00390 // P_plus_hat and P_FC_hat = P_F_tilde'*P_C_hat, we can compute the residual efficiently 00391 // as follows: 00392 // 00393 // x_free = Q_R*x_R + P_XF_hat*z_hat (sparse) 00394 // 00395 // lambda_bar = P_plus_hat*z_hat (sparse) 00396 // 00397 // t1 = G*x_free 00398 // 00399 // t2 = A*lambda 00400 // 00401 // t3 = A_bar*lambda_bar 00402 // 00403 // roR = Q_R'*t1 + Q_R'*t2 + Q_R'*t3 + ao*boR 00404 // 00405 // rom = A'*t1 + ao*bom 00406 // 00407 // ra = P_XF_hat'*t1 + 
P_plus_hat'*A_bar'*x_free + P_XF_hat'*t2 + P_XF_hat'*t3 00408 // + (P_FC_hat + P_FC_hat')*z_hat + aa*ba 00409 // 00410 // On output we will have set: 00411 // 00412 // roR_scaling = ||Q_R'*t1||inf + ||Q_R'*t2||inf + ||Q_R'*t3||inf + ||ao*boR||inf 00413 // 00414 // rom_scaling = ||A'*t1||inf + ||ao*bom||inf 00415 // 00416 // ra_scaling = ||P_XF_hat'*t1||inf + ||P_plus_hat'*A_bar'*t1||inf + ||P_XF_hat'*t2||inf 00417 // + ||P_XF_hat'*t3||inf + ||(P_FC_hat + P_FC_hat')*z_hat|| + ||aa*ba||inf 00418 // 00419 // Note: In the future we could make this a little more efficent by combining (Q_R + P_XF_hat) into 00420 // an single permulation matrix and then we could leave out the terms for the variables initially 00421 // fixed and still fixed rather than computing the terms then throwing them away. 00422 // 00423 // Also, in the future, this could really be implemented for extended precision data types but 00424 // it would require some real work! 00425 // 00426 template<class val_type> 00427 void calc_resid( 00428 const ConstrainedOptPack::QPSchur::ActiveSet &act_set 00429 ,const DenseLinAlgPack::DVectorSlice &v 00430 ,const DenseLinAlgPack::DVectorSlice &z_hat // Only accessed if q_hat > 0 00431 ,const DenseLinAlgPack::value_type ao // Only accessed if bo != NULL 00432 ,const DenseLinAlgPack::DVectorSlice *bo // If NULL then considered 0 00433 ,DenseLinAlgPack::VectorSliceTmpl<val_type> *ro 00434 ,DenseLinAlgPack::value_type *roR_scaling 00435 ,DenseLinAlgPack::value_type *rom_scaling // Only set if m > 0 00436 ,const DenseLinAlgPack::value_type aa // Only accessed if q_hat > 0 00437 ,const DenseLinAlgPack::DVectorSlice *ba // If NULL then considered 0, Only accessed if q_hat > 0 00438 ,DenseLinAlgPack::VectorSliceTmpl<val_type> *ra // Only set if q_hat > 0 00439 ,DenseLinAlgPack::value_type *ra_scaling // Only set if q_hat > 0 00440 ) 00441 { 00442 using BLAS_Cpp::no_trans; 00443 using BLAS_Cpp::trans; 00444 using DenseLinAlgPack::norm_inf; 00445 using 
DenseLinAlgPack::DVectorSlice; 00446 using DenseLinAlgPack::Vp_StV; 00447 using AbstractLinAlgPack::V_MtV; 00448 using AbstractLinAlgPack::SpVector; 00449 using AbstractLinAlgPack::GenPermMatrixSlice; 00450 using LinAlgOpPack::Vp_V; 00451 using LinAlgOpPack::V_StV; 00452 using LinAlgOpPack::V_MtV; 00453 using LinAlgOpPack::Vp_MtV; 00454 using LinAlgOpPack::Vp_StMtV; 00455 using LinAlgOpPack::Vp_StPtMtV; 00456 namespace COP = ConstrainedOptPack; 00457 using Teuchos::Workspace; 00458 Teuchos::WorkspaceStore* wss = Teuchos::get_default_workspace_store().get(); 00459 00460 const COP::QPSchurPack::QP 00461 &qp = act_set.qp(); 00462 const COP::QPSchurPack::Constraints 00463 &constraints = qp.constraints(); 00464 const GenPermMatrixSlice 00465 &Q_R = qp.Q_R(), 00466 &P_XF_hat = act_set.P_XF_hat(), 00467 &P_plus_hat = act_set.P_plus_hat(); 00468 const DenseLinAlgPack::size_type 00469 n = qp.n(), 00470 n_R = qp.n_R(), 00471 m = qp.m(), 00472 m_breve = constraints.m_breve(), 00473 q_hat = act_set.q_hat(), 00474 q_F_hat = act_set.q_F_hat(), 00475 q_C_hat = act_set.q_C_hat(), 00476 q_plus_hat = act_set.q_plus_hat(); 00477 00478 const DVectorSlice 00479 x_R = v(1,n_R), 00480 lambda = ( m ? v(n_R+1,n_R+m): DVectorSlice() ), 00481 boR = ( bo ? (*bo)(1,n_R) : DVectorSlice() ), 00482 bom = ( bo && m ? (*bo)(n_R+1,n_R+m) : DVectorSlice() ); 00483 00484 DenseLinAlgPack::VectorSliceTmpl<val_type> 00485 roR = (*ro)(1,n_R), 00486 rom = ( m ? 
(*ro)(n_R+1,n_R+m) : DenseLinAlgPack::VectorSliceTmpl<val_type>() ); 00487 00488 Workspace<DenseLinAlgPack::value_type> 00489 x_free_ws(wss,n); 00490 DenseLinAlgPack::DVectorSlice 00491 x_free(&x_free_ws[0],x_free_ws.size()); 00492 Workspace<val_type> 00493 t1_ws(wss,n), 00494 t2_ws(wss,n), 00495 t3_ws(wss,n), 00496 tR_ws(wss,n_R), 00497 tm_ws(wss,m), 00498 ta_ws(wss,q_hat); 00499 DenseLinAlgPack::VectorSliceTmpl<val_type> 00500 t1(&t1_ws[0],t1_ws.size()), 00501 t2(&t2_ws[0],t2_ws.size()), 00502 t3(&t3_ws[0],t3_ws.size()), 00503 tR(&tR_ws[0],tR_ws.size()), 00504 tm(tm_ws.size()?&tm_ws[0]:NULL,tm_ws.size()), 00505 ta(ta_ws.size()?&ta_ws[0]:NULL,ta_ws.size()); 00506 00507 *roR_scaling = 0.0; 00508 if( m ) 00509 *rom_scaling = 0.0; 00510 if( q_hat ) 00511 *ra_scaling = 0.0; 00512 00513 // x_free = Q_R*x_R + P_XF_hat*z_hat (dense for now) 00514 LinAlgOpPack::V_MtV( &x_free, Q_R, no_trans, x_R ); 00515 if( q_F_hat ) 00516 LinAlgOpPack::Vp_MtV( &x_free, P_XF_hat, no_trans, z_hat ); 00517 // lambda_bar = P_plus_hat*z_hat 00518 SpVector lambda_bar; 00519 if( q_plus_hat ) 00520 V_MtV( &lambda_bar, P_plus_hat, no_trans, z_hat ); 00521 // t1 = G*x_free 00522 Vp_StMtV( &t1, 1.0, qp.G(), no_trans, x_free, 0.0 ); 00523 // t2 = A*lambda 00524 if( m ) 00525 Vp_StMtV( &t2, 1.0, qp.A(), no_trans, lambda, 0.0 ); 00526 // t3 = A_bar*lambda_bar 00527 if( q_plus_hat ) 00528 Vp_StMtV( &t3, 1.0, constraints.A_bar(), no_trans, lambda_bar(), 0.0 ); 00529 // roR = Q_R'*t1 + Q_R'*t2 + Q_R'*t3 + ao*boR 00530 LinAlgOpPack::V_MtV( &tR, Q_R, trans, t1 ); // roR = Q_R'*t1 00531 *roR_scaling += norm_inf(tR); 00532 roR = tR; 00533 if( m ) { // roR += Q_R'*t2 00534 LinAlgOpPack::V_MtV( &tR, Q_R, trans, t2 ); 00535 *roR_scaling += norm_inf(tR); 00536 LinAlgOpPack::Vp_V(&roR,tR); 00537 } 00538 if( q_plus_hat ) { // roR += Q_R'*t3 00539 LinAlgOpPack::V_MtV( &tR, Q_R, trans, t3 ); 00540 *roR_scaling += norm_inf(tR); 00541 LinAlgOpPack::Vp_V(&roR,tR); 00542 } 00543 if( bo ) { 00544 LinAlgOpPack::Vp_StV( 
&roR, ao, boR ); // roR += ao*boR 00545 *roR_scaling += std::fabs(ao) * norm_inf(boR); 00546 } 00547 // rom = A'*t1 + ao*bom 00548 if( m ) { 00549 Vp_StMtV( &tm, 1.0, qp.A(), trans, t1, 0.0 ); // A'*t1 00550 *rom_scaling += norm_inf(tm); 00551 LinAlgOpPack::Vp_V(&rom,tm); 00552 if(bo) { 00553 LinAlgOpPack::Vp_StV( &rom, ao, bom ); // rom += ao*bom 00554 *rom_scaling += std::fabs(ao)*norm_inf(bom); 00555 } 00556 } 00557 // ra = P_XF_hat'*t1 + P_plus_hat'*A_bar'*x_free + P_XF_hat'*t2 + P_XF_hat'*t3 00558 // +(P_FC_hat + P_FC_hat')*z_hat + aa*ba 00559 if( q_hat ) { 00560 if(ba) { // ra = aa*ba 00561 V_StV( ra, aa, *ba ); 00562 *ra_scaling += std::fabs(aa) * norm_inf(*ba); 00563 } 00564 else { 00565 *ra = 0.0; 00566 } 00567 if( q_F_hat ) { // ra += P_XF_hat'*t1 00568 LinAlgOpPack::V_MtV( &ta, P_XF_hat, trans, t1 ); 00569 *ra_scaling += norm_inf(ta); 00570 LinAlgOpPack::Vp_V(ra,ta); 00571 } 00572 if( q_plus_hat ) { // ra += P_plus_hat'*A_bar'*x_free 00573 Vp_StPtMtV( &ta, 1.0, P_plus_hat, trans, constraints.A_bar(), trans, x_free, 0.0 ); 00574 *ra_scaling += norm_inf(ta); 00575 LinAlgOpPack::Vp_V(ra,ta); 00576 } 00577 if( q_F_hat && m ) { // ra += P_XF_hat'*t2 00578 LinAlgOpPack::V_MtV( &ta, P_XF_hat, trans, t2 ); 00579 *ra_scaling += norm_inf(ta); 00580 LinAlgOpPack::Vp_V(ra,ta); 00581 } 00582 if( q_F_hat && q_plus_hat ) { // ra += P_XF_hat'*t3 00583 LinAlgOpPack::V_MtV( &ta, P_XF_hat, trans, t3 ); 00584 *ra_scaling += norm_inf(ta); 00585 LinAlgOpPack::Vp_V(ra,ta); 00586 } 00587 if( q_C_hat ) { // ra += (P_FC_hat + P_FC_hat')*z_hat 00588 const GenPermMatrixSlice 00589 &P_FC_hat = act_set.P_FC_hat(); 00590 ta = 0.0; 00591 for( GenPermMatrixSlice::const_iterator itr = P_FC_hat.begin(); itr != P_FC_hat.end(); ++itr ) { 00592 ta(itr->row_i()) = z_hat(itr->col_j()); 00593 ta(itr->col_j()) = z_hat(itr->row_i()); 00594 } 00595 *ra_scaling += norm_inf(ta); 00596 LinAlgOpPack::Vp_V(ra,ta); 00597 } 00598 } 00599 } 00600 00601 // 00602 // Correct a nearly degenerate Lagrange 
multiplier 00603 // 00604 // If < 0 is returned it means that the multiplier could not 00605 // be corrected and this should be considered to be dual infeasible 00606 // In this case the error output is sent to *out if print_dual_infeas 00607 // == true. 00608 // 00609 // If 0 is returned then the multiplier was near degenerate and 00610 // was corrected. In this case a warning message is printed to 00611 // *out. 00612 // 00613 // If > 0 is returned then the multiplier's sign 00614 // is just fine and no corrections were needed (no output). 00615 // 00616 int correct_dual_infeas( 00617 const DenseLinAlgPack::size_type j // for output info only 00618 ,const ConstrainedOptPack::EBounds bnd_j 00619 ,const DenseLinAlgPack::value_type t_P // (> 0) full step length 00620 ,const DenseLinAlgPack::value_type scale // (> 0) scaling value 00621 ,const DenseLinAlgPack::value_type dual_infeas_tol 00622 ,const DenseLinAlgPack::value_type degen_mult_val 00623 ,std::ostream *out // Can be NULL 00624 ,const ConstrainedOptPack::QPSchur::EOutputLevel olevel 00625 ,const bool print_dual_infeas 00626 ,const char nu_j_n[] // Name of nu_j 00627 ,DenseLinAlgPack::value_type *nu_j // required 00628 ,DenseLinAlgPack::value_type *scaled_viol // = scale*nu_j*(bnd_j==UPPER ? 1.0: -1.0 ) (after output) 00629 ,const char p_nu_j_n[] = NULL // Name of p_nu_j (can be NULL if p_nu_j==NULL) 00630 ,DenseLinAlgPack::value_type *p_nu_j = NULL // optional (can be NULL) 00631 ,const char nu_j_plus_n[] = NULL // Name of nu_j_plus (can be NULL if p_nu_j==NULL) 00632 ,DenseLinAlgPack::value_type *nu_j_plus = NULL // optional (can be NULL) 00633 ) 00634 { 00635 typedef DenseLinAlgPack::value_type value_type; 00636 namespace COP = ConstrainedOptPack; 00637 00638 value_type nu_j_max = (*scaled_viol) = scale * (*nu_j) * (bnd_j == COP::UPPER ? +1.0 : -1.0); 00639 if( nu_j_max > 0.0 || bnd_j == COP::EQUALITY ) // Leave any multiplier value with the correct sign alone! 
00640 return +1; // No correction needed 00641 // See if we need to correct the multiplier 00642 nu_j_max = std::fabs(nu_j_max); 00643 if( nu_j_max < dual_infeas_tol ) { 00644 // This is a near degenerate multiplier so adjust it 00645 value_type degen_val = degen_mult_val * ( bnd_j == COP::UPPER ? +1.0 : -1.0 ); 00646 if( out && (int)olevel >= (int)COP::QPSchur::OUTPUT_BASIC_INFO ) { 00647 *out 00648 << "\nWarning, the constriant a(" << j << ") currently at its " 00649 << (bnd_j == COP::UPPER ? "UPPER" : "LOWER") << " bound" 00650 << " has the wrong Lagrange multiplier value but\n" 00651 << "scale*|"<<nu_j_n<<"| = " << scale << " * |" << (*nu_j) 00652 << "| = " << nu_j_max << " < dual_infeas_tol = " << dual_infeas_tol 00653 << "\nTherefore, this is considered a degenerate constraint and this " 00654 << "multiplier is set to " << degen_val << std::endl; 00655 } 00656 if(p_nu_j) { 00657 nu_j_max += std::fabs( t_P * (*p_nu_j) ) * scale; 00658 if( nu_j_max < dual_infeas_tol ) { 00659 // The full step is also degenerate so adjust it also 00660 if( out && (int)olevel >= (int)COP::QPSchur::OUTPUT_BASIC_INFO ) { 00661 *out 00662 << "Also, the maximum full step scale*(|"<<nu_j_n<<"|+|t_P*"<<p_nu_j_n<<"|) = " 00663 << scale << " * (|" << (*nu_j) << "| + |" << t_P << " * " << (*p_nu_j) << "|) = " 00664 << nu_j_max << " < dual_infeas_tol = " << dual_infeas_tol 00665 << "\nTherefore, this is considered degenerate and therefore " 00666 << "seting "<<p_nu_j_n<<" = 0"; 00667 if(nu_j_plus) 00668 *out << " and "<< nu_j_plus_n <<" = " << degen_val; 00669 *out << std::endl; 00670 } 00671 *p_nu_j = 0.0; // Don't let it limit the step length 00672 if(nu_j_plus) { 00673 *nu_j_plus = degen_val; 00674 } 00675 } 00676 } 00677 *nu_j = degen_val; // Now set it 00678 *scaled_viol = scale * (*nu_j) * (bnd_j == COP::UPPER ? +1.0 : -1.0); // Not violated! 
00679 return 0; 00680 } 00681 else { 00682 if( print_dual_infeas && out && (int)olevel >= (int)COP::QPSchur::OUTPUT_BASIC_INFO ) { 00683 *out 00684 << "\nError, the constriant a(" << j << ") currently at its " 00685 << (bnd_j == COP::UPPER ? "UPPER" : "LOWER") << " bound" 00686 << " has the wrong Lagrange multiplier value and\n" 00687 << "scale*|"<<nu_j_n<<"| = " << scale << " * |" << (*nu_j) 00688 << "| = " << nu_j_max << " > dual_infeas_tol = " << dual_infeas_tol 00689 << "\nThis is an indication of instability in the calculations.\n" 00690 << "The QP algorithm is terminated!\n"; 00691 } 00692 return -1; 00693 } 00694 return 0; // Will never be executed! 00695 } 00696 00697 // 00698 // Calculate the QP objective if it has not been already 00699 // 00700 // qp_grad_norm_inf = ||g + G*x||inf 00701 // 00702 // On input, if qp_grad_norm_inf != 0, then it will be assumed 00703 // that this value has already been computed and the computation will 00704 // be skipped. 00705 // 00706 void calc_obj_grad_norm_inf( 00707 const ConstrainedOptPack::QPSchurPack::QP &qp 00708 ,const DenseLinAlgPack::DVectorSlice &x 00709 ,DenseLinAlgPack::value_type *qp_grad_norm_inf 00710 ) 00711 { 00712 TEUCHOS_TEST_FOR_EXCEPT(true); // ToDo: Implement this? 
00713 } 00714 00715 } // end namespace 00716 00717 namespace ConstrainedOptPack { 00718 00719 // public member functions for QPSchur::U_hat_t 00720 00721 QPSchur::U_hat_t::U_hat_t() 00722 :G_(NULL) 00723 ,A_(NULL) 00724 ,A_bar_(NULL) 00725 ,Q_R_(NULL) 00726 ,P_XF_hat_(NULL) 00727 ,P_plus_hat_(NULL) 00728 {} 00729 00730 void QPSchur::U_hat_t::initialize( 00731 const MatrixSymOp *G 00732 ,const MatrixOp *A 00733 ,const MatrixOp *A_bar 00734 ,const GenPermMatrixSlice *Q_R 00735 ,const GenPermMatrixSlice *P_XF_hat 00736 ,const GenPermMatrixSlice *P_plus_hat 00737 ) 00738 { 00739 G_ = G; 00740 A_ = A; 00741 A_bar_ = A_bar; 00742 Q_R_ = Q_R; 00743 P_XF_hat_ = P_XF_hat; 00744 P_plus_hat_ = P_plus_hat; 00745 } 00746 00747 size_type QPSchur::U_hat_t::rows() const 00748 { 00749 return Q_R_->cols() + ( A_ ? A_->cols() : 0 ); 00750 } 00751 00752 size_type QPSchur::U_hat_t::cols() const 00753 { 00754 return P_plus_hat_->cols(); 00755 } 00756 00757 /* 10/25/00: I don't think we need this function! 00758 void QPSchur::U_hat_t::Mp_StM(DMatrixSlice* C, value_type a 00759 , BLAS_Cpp::Transp M_trans ) const 00760 { 00761 using BLAS_Cpp::no_trans; 00762 using BLAS_Cpp::trans; 00763 using LinAlgOpPack::Mp_StMtP; 00764 using LinAlgOpPack::Mp_StPtMtP; 00765 00766 // C += a * op(U_hat) 00767 00768 LinAlgOpPack::Mp_M_assert_sizes( C->rows(), C->cols(), no_trans 00769 , rows(), cols(), M_trans ); 00770 00771 const size_type 00772 n_R = Q_R_->cols(), 00773 m = A() ? A()->cols() : 0; 00774 00775 if( M_trans == no_trans ) { 00776 // 00777 // C += a * op(U_hat) 00778 // 00779 // = a * [ Q_R' * G * P_XF_hat + Q_R' * A_bar * P_plus_hat ] 00780 // [ A' * P_XF_hat ] 00781 // 00782 // C1 += a * Q_R' * G * P_XF_hat + a * Q_R' * A_bar * P_plus_hat 00783 // 00784 // C2 += a * A' * P_XF_hat 00785 // 00786 DMatrixSlice 00787 C1 = (*C)(1,n_R,1,C->cols()), 00788 C2 = m ? 
(*C)(n_R+1,n_R+m,1,C->cols()) : DMatrixSlice(); 00789 // C1 += a * Q_R' * G * P_XF_hat 00790 if( P_XF_hat().nz() ) 00791 Mp_StPtMtP( &C1, a, Q_R(), trans, G(), no_trans, P_XF_hat(), no_trans ); 00792 // C1 += a * Q_R' * A_bar * P_plus_hat 00793 if( P_plus_hat().nz() ) 00794 Mp_StPtMtP( &C1, a, Q_R(), trans, A_bar(), no_trans, P_plus_hat(), no_trans ); 00795 // C2 += a * A' * P_XF_hat 00796 if(m && P_XF_hat().nz()) 00797 Mp_StMtP( &C2, a, *A(), trans, P_plus_hat(), no_trans ); 00798 } 00799 else { 00800 TEUCHOS_TEST_FOR_EXCEPT(true); // Implement this! 00801 } 00802 } 00803 */ 00804 00805 void QPSchur::U_hat_t::Vp_StMtV( 00806 DVectorSlice* y, value_type a, BLAS_Cpp::Transp M_trans 00807 ,const DVectorSlice& x, value_type b 00808 ) const 00809 { 00810 using BLAS_Cpp::no_trans; 00811 using BLAS_Cpp::trans; 00812 using DenseLinAlgPack::Vt_S; 00813 using AbstractLinAlgPack::V_MtV; 00814 using AbstractLinAlgPack::Vp_StMtV; 00815 using LinAlgOpPack::V_MtV; 00816 using LinAlgOpPack::Vp_StMtV; 00817 using LinAlgOpPack::Vp_StPtMtV; 00818 using Teuchos::Workspace; 00819 Teuchos::WorkspaceStore* wss = Teuchos::get_default_workspace_store().get(); 00820 00821 LinAlgOpPack::Vp_MtV_assert_sizes(y->dim(),rows(),cols(),M_trans,x.dim()); 00822 00823 // 00824 // U_hat = [ Q_R' * G * P_XF_hat + Q_R' * A_bar * P_plus_hat ] 00825 // [ A' * P_XF_hat ] 00826 // 00827 00828 const size_type 00829 n_R = Q_R_->cols(), 00830 m = A() ? A()->cols() : 0; 00831 00832 if( M_trans == BLAS_Cpp::no_trans ) { 00833 // 00834 // y = b*y + a * U_hat * x 00835 // 00836 // = b*y + a * [ Q_R' * G * P_XF_hat + Q_R' * A_bar * P_plus_hat ] * x 00837 // [ A' * P_XF_hat ] 00838 // 00839 // => 00840 // 00841 // y1 = b * y1 + a * Q_R' * G * P_XF_hat * x + a * Q_R' * A_bar * P_plus_hat * x 00842 // y2 = b * y2 + a * A' * P_XF_hat * x 00843 // 00844 DVectorSlice 00845 y1 = (*y)(1,n_R), 00846 y2 = m ? 
(*y)(n_R+1,n_R+m) : DVectorSlice(); 00847 SpVector 00848 P_XF_hat_x, 00849 P_plus_hat_x; 00850 // P_XF_hat_x = P_XF_hat * x 00851 if( P_XF_hat().nz() ) 00852 V_MtV( &P_XF_hat_x, P_XF_hat(), no_trans, x ); 00853 // P_plus_hat_x = P_plus_hat * x 00854 if(P_plus_hat().nz()) 00855 V_MtV( &P_plus_hat_x, P_plus_hat(), no_trans, x ); 00856 // y1 = b * y1 00857 if(b==0.0) y1=0.0; 00858 else if(b!=1.0) Vt_S(&y1,b); 00859 // y1 += a * Q_R' * G * P_XF_hat_x 00860 if(P_XF_hat().nz()) 00861 Vp_StPtMtV( &y1, a, Q_R(), trans, G(), no_trans, P_XF_hat_x() ); 00862 // y1 += a * Q_R' * A_bar * P_plus_hat_x 00863 if(P_plus_hat().nz()) 00864 Vp_StPtMtV( &y1, a, Q_R(), trans, A_bar(), no_trans, P_plus_hat_x() ); 00865 if(m) { 00866 // y2 = b * y2 00867 if(b==0.0) y2=0.0; 00868 else if(b!=1.0) Vt_S(&y2,b); 00869 // y2 += a * A' * P_XF_hat_x 00870 if( P_XF_hat().nz() ) 00871 Vp_StMtV( &y2, a, *A(), trans, P_XF_hat_x() ); 00872 } 00873 } 00874 else if( M_trans == BLAS_Cpp::trans ) { 00875 // 00876 // y = b*y + a * U_hat' * x 00877 // 00878 // = b*y + a * [ P_XF_hat' * G * Q_R + P_plus_hat' * A_bar' * Q_R, P_XF_hat' * A ] * [ x1 ] 00879 // [ x2 ] 00880 // => 00881 // 00882 // y = b * y + a * P_XF_hat' * G * Q_R * x1 + a * P_plus_hat' * A_bar' * Q_R * x1 00883 // + a * P_XF_hat' * A * x2 00884 // 00885 const DVectorSlice 00886 x1 = x(1,n_R), 00887 x2 = m ? 
x(n_R+1,n_R+m) : DVectorSlice(); 00888 SpVector 00889 Q_R_x1; 00890 // Q_R_x1 = Q_R * x1 00891 V_MtV( &Q_R_x1, Q_R(), no_trans, x1 ); 00892 // y = b*y 00893 if(b==0.0) *y = 0.0; 00894 else if(b!=1.0) Vt_S( y, b ); 00895 // y += a * P_XF_hat' * G * Q_R_x1 00896 if(P_XF_hat().nz()) 00897 Vp_StPtMtV( y, a, P_XF_hat(), trans, G(), no_trans, Q_R_x1() ); 00898 // y += a * P_plus_hat' * A_bar' * Q_R_x1 00899 if(P_plus_hat().nz()) 00900 Vp_StPtMtV( y, a, P_plus_hat(), trans, A_bar(), trans, Q_R_x1() ); 00901 // y += a * P_XF_hat' * A * x2 00902 if( m && P_XF_hat().nz() ) 00903 Vp_StPtMtV( y, a, P_XF_hat(), trans, *A(), no_trans, x2 ); 00904 } 00905 else { 00906 TEUCHOS_TEST_FOR_EXCEPT(true); // Invalid value for M_trans 00907 } 00908 } 00909 00910 void QPSchur::U_hat_t::Vp_StMtV( 00911 DVectorSlice* y, value_type a, BLAS_Cpp::Transp M_trans 00912 ,const SpVectorSlice& x, value_type b 00913 ) const 00914 { 00915 // // Uncomment to use the default version 00916 // MatrixOp::Vp_StMtV(y,a,M_trans,x,b); return; 00917 00918 using BLAS_Cpp::no_trans; 00919 using BLAS_Cpp::trans; 00920 using DenseLinAlgPack::Vt_S; 00921 using LinAlgOpPack::V_MtV; 00922 using AbstractLinAlgPack::V_MtV; 00923 using AbstractLinAlgPack::Vp_StMtV; 00924 using LinAlgOpPack::V_MtV; 00925 using LinAlgOpPack::Vp_StMtV; 00926 using LinAlgOpPack::Vp_StPtMtV; 00927 using Teuchos::Workspace; 00928 Teuchos::WorkspaceStore* wss = Teuchos::get_default_workspace_store().get(); 00929 00930 LinAlgOpPack::Vp_MtV_assert_sizes(y->dim(),rows(),cols(),M_trans,x.dim()); 00931 00932 // 00933 // U_hat = [ Q_R' * G * P_XF_hat + Q_R' * A_bar * P_plus_hat ] 00934 // [ A' * P_XF_hat ] 00935 // 00936 00937 const size_type 00938 n_R = Q_R_->cols(), 00939 m = A() ? 
A()->cols() : 0; 00940 00941 if( M_trans == BLAS_Cpp::no_trans ) { 00942 // 00943 // y = b*y + a * U_hat * x 00944 // 00945 // = b*y + a * [ Q_R' * G * P_XF_hat + Q_R' * A_bar * P_plus_hat ] * x 00946 // [ A' * P_XF_hat ] 00947 // 00948 // => 00949 // 00950 // y1 = b * y1 + a * Q_R' * G * P_XF_hat * x + a * Q_R' * A_bar * P_plus_hat * x 00951 // y2 = b * y2 + a * A' * P_XF_hat * x 00952 // 00953 DVectorSlice 00954 y1 = (*y)(1,n_R), 00955 y2 = m ? (*y)(n_R+1,n_R+m) : DVectorSlice(); 00956 SpVector 00957 P_XF_hat_x, 00958 P_plus_hat_x; 00959 // P_XF_hat_x = P_XF_hat * x 00960 if( P_XF_hat().nz() ) 00961 V_MtV( &P_XF_hat_x, P_XF_hat(), no_trans, x ); 00962 // P_plus_hat_x = P_plus_hat * x 00963 if(P_plus_hat().nz()) 00964 V_MtV( &P_plus_hat_x, P_plus_hat(), no_trans, x ); 00965 // y1 = b * y1 00966 if(b==0.0) y1=0.0; 00967 else if(b!=1.0) Vt_S(&y1,b); 00968 // y1 += a * Q_R' * G * P_XF_hat_x 00969 if(P_XF_hat().nz()) 00970 Vp_StPtMtV( &y1, a, Q_R(), trans, G(), no_trans, P_XF_hat_x() ); 00971 // y1 += a * Q_R' * A_bar * P_plus_hat_x 00972 if(P_plus_hat().nz()) 00973 Vp_StPtMtV( &y1, a, Q_R(), trans, A_bar(), no_trans, P_plus_hat_x() ); 00974 if(m) { 00975 // y2 = b * y2 00976 if(b==0.0) y2=0.0; 00977 else if(b!=1.0) Vt_S(&y2,b); 00978 // y2 += a * A' * P_XF_hat_x 00979 if(P_XF_hat().nz()) 00980 Vp_StMtV( &y2, a, *A(), trans, P_XF_hat_x() ); 00981 } 00982 } 00983 else if( M_trans == BLAS_Cpp::trans ) { 00984 // 00985 // y = b*y + a * U_hat' * x 00986 // 00987 // = b*y + a * [ P_XF_hat' * G * Q_R + P_plus_hat' * A_bar' * Q_R, P_XF_hat' * A ] * [ x1 ] 00988 // [ x2 ] 00989 // => 00990 // 00991 // y = b * y + a * P_XF_hat' * G * Q_R * x1 + a * P_plus_hat' * A_bar' * Q_R * x1 00992 // + a * P_XF_hat' * A * x2 00993 // 00994 const SpVectorSlice 00995 x1 = x(1,n_R), 00996 x2 = m ? 
x(n_R+1,n_R+m) : SpVectorSlice(NULL,0,0,0); 00997 SpVector 00998 Q_R_x1; 00999 // Q_R_x1 = Q_R * x1 01000 V_MtV( &Q_R_x1, Q_R(), no_trans, x1 ); 01001 // y = b*y 01002 if(b ==0.0) *y = 0.0; 01003 else if(b!=1.0) Vt_S( y, b ); 01004 // y += a * P_XF_hat' * G * Q_R_x1 01005 if(P_XF_hat().nz()) 01006 Vp_StPtMtV( y, a, P_XF_hat(), trans, G(), no_trans, Q_R_x1() ); 01007 // y += a * P_plus_hat' * A_bar' * Q_R_x1 01008 if(P_plus_hat().nz()) 01009 Vp_StPtMtV( y, a, P_plus_hat(), trans, A_bar(), trans, Q_R_x1() ); 01010 // y += a * P_XF_hat' * A * x2 01011 if( m && P_XF_hat().nz() ) 01012 Vp_StPtMtV( y, a, P_XF_hat(), trans, *A(), no_trans, x2 ); 01013 } 01014 else { 01015 TEUCHOS_TEST_FOR_EXCEPT(true); // Invalid value for M_trans 01016 } 01017 } 01018 01019 // public member functions for QPSchur::ActiveSet 01020 01021 QPSchur::ActiveSet::ActiveSet( 01022 const schur_comp_ptr_t& schur_comp 01023 ,MSADU::PivotTolerances pivot_tols 01024 ) 01025 :schur_comp_(schur_comp) 01026 ,pivot_tols_(pivot_tols) 01027 ,initialized_(false) 01028 ,test_(false) 01029 ,qp_(NULL) 01030 ,x_init_(NULL) 01031 ,n_(0) 01032 ,n_R_(0) 01033 ,m_(0) 01034 ,q_plus_hat_(0) 01035 ,q_F_hat_(0) 01036 ,q_C_hat_(0) 01037 {} 01038 01039 void QPSchur::ActiveSet::initialize( 01040 QP& qp, size_type num_act_change, const int ij_act_change[] 01041 ,const EBounds bnds[], bool test, bool salvage_init_schur_comp 01042 ,std::ostream *out, EOutputLevel output_level ) 01043 { 01044 using LinAlgOpPack::V_mV; 01045 using LinAlgOpPack::V_MtV; 01046 using LinAlgOpPack::V_InvMtV; 01047 using LinAlgOpPack::Vp_StPtMtV; 01048 using AbstractLinAlgPack::V_MtV; 01049 using AbstractLinAlgPack::Mp_StPtMtP; 01050 using AbstractLinAlgPack::M_StMtInvMtM; 01051 using DenseLinAlgPack::sym; 01052 typedef MatrixSymAddDelUpdateable MSADU; 01053 namespace GPMSTP = AbstractLinAlgPack::GenPermMatrixSliceIteratorPack; 01054 using Teuchos::Workspace; 01055 Teuchos::WorkspaceStore* wss = Teuchos::get_default_workspace_store().get(); 01056 
01057 const size_type 01058 n = qp.n(), 01059 n_R = qp.n_R(), 01060 n_X = n - n_R, 01061 m = qp.m(); 01062 const QP::x_init_t 01063 &x_init = qp.x_init(); 01064 const QP::l_x_X_map_t 01065 &l_x_X_map = qp.l_x_X_map(); 01066 const QP::i_x_X_map_t 01067 &i_x_X_map = qp.i_x_X_map(); 01068 const DVectorSlice 01069 b_X = qp.b_X(); 01070 const DVectorSlice 01071 g = qp.g(); 01072 const MatrixSymOp 01073 &G = qp.G(); 01074 const QP::Constraints 01075 &constraints = qp.constraints(); 01076 const size_type 01077 m_breve = constraints.m_breve(); 01078 01079 try { 01080 01081 // Count the number of each type of change 01082 size_type 01083 q_plus_hat = 0, 01084 q_F_hat = 0, 01085 q_C_hat = 0; 01086 if( num_act_change ) { 01087 for( size_type k = 1; k <= num_act_change; ++k ) { 01088 const int ij = ij_act_change[k-1]; 01089 const EBounds bnd = bnds[k-1]; 01090 if( ij < 0 ) { 01091 // Initially fixed variable being freed. 01092 if( x_init(-ij) == FREE ) { 01093 std::ostringstream omsg; 01094 omsg 01095 << "QPSchur::ActiveSet::initialize(...) : Error, " 01096 << "The variable x(" << -ij << ") is not initially fixed and can not " 01097 << "be freed by ij_act_change[" << k-1 << "]\n"; 01098 throw std::invalid_argument( omsg.str() ); 01099 } 01100 if( x_init(-ij) == EQUALITY ) { 01101 std::ostringstream omsg; 01102 omsg 01103 << "QPSchur::ActiveSet::initialize(...) 
: Error, " 01104 << "The variable x(" << -ij << ") is equality fixed and therefore can not " 01105 << "be freed by ij_act_change[" << k-1 << "]\n"; 01106 throw std::invalid_argument( omsg.str() ); 01107 } 01108 ++q_F_hat; 01109 } 01110 else { 01111 // Constraint being added to the active-set 01112 if( ij <= n ) { 01113 // Fixing a variable to a bound 01114 EBounds x_init_bnd = x_init(ij); 01115 if( x_init_bnd == FREE ) { 01116 // initially free variable being fixed 01117 ++q_plus_hat; 01118 } 01119 else if ( x_init_bnd == EQUALITY ) { 01120 // ToDo: Throw exception 01121 TEUCHOS_TEST_FOR_EXCEPT(true); 01122 } 01123 else if( x_init_bnd == bnd ) { 01124 // ToDo: Throw exception 01125 TEUCHOS_TEST_FOR_EXCEPT(true); 01126 } 01127 else { 01128 // Initially fixed variable being fixed to another bound 01129 ++q_F_hat; // We must free the variable first 01130 ++q_C_hat; // Now we fix it to a different bound. 01131 } 01132 } 01133 else { 01134 // Adding a general inequality (or equality) constraint 01135 if( ij > n + m_breve ) { 01136 // ToDo: Throw exception 01137 TEUCHOS_TEST_FOR_EXCEPT(true); 01138 } 01139 ++q_plus_hat; 01140 } 01141 } 01142 } 01143 } 01144 01145 const size_type 01146 q_D_hat = (n - n_R) - q_F_hat, 01147 q_D_hat_max = n_X; 01148 01149 // Now let's set stuff up: ij_map, constr_norm, bnds and part of d_hat 01150 const size_type 01151 q_hat = q_plus_hat + q_F_hat + q_C_hat, 01152 q_hat_max = n_X + n, // If all the initially fixed variables where freed 01153 // Then all the degrees of freedom where used up with other constraints. 01154 q_F_hat_max = n_X, 01155 q_plus_hat_max = n; 01156 01157 ij_map_.resize(q_hat_max); 01158 constr_norm_.resize(q_hat_max); 01159 bnds_.resize(q_hat_max); 01160 d_hat_.resize(q_hat_max); // set the terms involving the bounds first. 
01161 01162 if( num_act_change ) { 01163 size_type s = 0; 01164 for( size_type k = 1; k <= num_act_change; ++k ) { 01165 const int ij = ij_act_change[k-1]; 01166 const EBounds bnd = bnds[k-1]; 01167 if( ij < 0 ) { 01168 // Initially fixed variable being freed. 01169 ij_map_[s] = ij; 01170 constr_norm_[s] = 1.0; 01171 bnds_[s] = FREE; 01172 d_hat_[s] = - g(-ij); // - g^X_{l^{(-)}} 01173 ++s; 01174 } 01175 else { 01176 // Constraint being added to the active-set 01177 if( ij <= n ) { 01178 // Fixing a variable to a bound 01179 EBounds x_init_bnd = x_init(ij); 01180 if( x_init_bnd == FREE ) { 01181 // initially free variable being fixed 01182 ij_map_[s] = ij; 01183 constr_norm_[s] = 1.0; 01184 bnds_[s] = bnd; 01185 d_hat_[s] = constraints.get_bnd(ij,bnd); 01186 ++s; 01187 } 01188 else { 01189 // Initially fixed variable being fixed to another bound 01190 // Free the variable first 01191 ij_map_[s] = ij; 01192 constr_norm_[s] = 1.0; 01193 bnds_[s] = FREE; 01194 d_hat_[s] = - g(ij); // - g^X_{l^{(-)}} 01195 ++s; 01196 // Now fix to a different bound 01197 ij_map_[s] = ij; 01198 constr_norm_[s] = 1.0; 01199 bnds_[s] = bnd; 01200 d_hat_[s] = constraints.get_bnd(ij,bnd) - b_X(l_x_X_map(ij)); 01201 ++s; 01202 } 01203 } 01204 else { 01205 // Adding a general inequality (or equality) constraint 01206 ij_map_[s] = ij; 01207 constr_norm_[s] = 1.0; // ToDo: We need to compute this in an efficient way! 
01208 bnds_[s] = bnd; 01209 d_hat_[s] = constraints.get_bnd(ij,bnd); // \bar{b}_{j^{(+)}} 01210 ++s; 01211 } 01212 } 01213 } 01214 TEUCHOS_TEST_FOR_EXCEPT( !( s == q_hat ) ); 01215 } 01216 01217 // Setup P_XF_hat_ and P_plus_hat_ 01218 P_XF_hat_row_.resize(q_F_hat_max); 01219 P_XF_hat_col_.resize(q_F_hat_max); 01220 P_FC_hat_row_.resize(q_F_hat_max); 01221 P_FC_hat_col_.resize(q_F_hat_max); 01222 P_plus_hat_row_.resize(q_plus_hat_max); 01223 P_plus_hat_col_.resize(q_plus_hat_max); 01224 if(q_hat) { 01225 // See ConstrainedOptPack_QPSchur.hpp for description of P_XF_hat and P_plus_hat 01226 size_type 01227 k_XF_hat = 0, // zero based 01228 k_plus_hat = 0; // zero based 01229 ij_map_t::const_iterator 01230 ij_itr = ij_map_.begin(), 01231 ij_itr_end = ij_itr + q_hat; 01232 for( size_type s = 1; ij_itr != ij_itr_end; ++ij_itr, ++s ) { 01233 const int ij = *ij_itr; 01234 EBounds x_init_ij; 01235 if( ij < 0 ) { 01236 const size_type i = -ij; 01237 TEUCHOS_TEST_FOR_EXCEPT( !( i <= n ) ); 01238 // [P_XF_hat](:,s) = e(i) 01239 P_XF_hat_row_[k_XF_hat] = i; 01240 P_XF_hat_col_[k_XF_hat] = s; 01241 ++k_XF_hat; 01242 } 01243 else if( !(ij <= n && (x_init_ij = x_init(ij)) != FREE ) ) { 01244 const size_type j = ij; 01245 TEUCHOS_TEST_FOR_EXCEPT( !( 0 < j && j <= n + m_breve ) ); 01246 // [P_plus_hat](:,s) = e(j) 01247 P_plus_hat_row_[k_plus_hat] = j; 01248 P_plus_hat_col_[k_plus_hat] = s; 01249 ++k_plus_hat; 01250 } 01251 } 01252 TEUCHOS_TEST_FOR_EXCEPT( !( k_XF_hat == q_F_hat ) ); 01253 TEUCHOS_TEST_FOR_EXCEPT( !( k_plus_hat == q_plus_hat ) ); 01254 } 01255 P_XF_hat_.initialize_and_sort( 01256 n,q_hat,q_F_hat,0,0,GPMSTP::BY_ROW 01257 ,q_F_hat ? &P_XF_hat_row_[0] : NULL 01258 ,q_F_hat ? &P_XF_hat_col_[0] : NULL 01259 ,test 01260 ); 01261 P_plus_hat_.initialize_and_sort( 01262 n+m_breve,q_hat,q_plus_hat,0,0,GPMSTP::BY_ROW 01263 ,q_plus_hat ? &P_plus_hat_row_[0] : NULL 01264 ,q_plus_hat ? 
&P_plus_hat_col_[0] : NULL 01265 ,test 01266 ); 01267 01268 // Setup P_FC_hat_ 01269 if( q_C_hat ) { 01270 throw std::logic_error( 01271 error_msg(__FILE__,__LINE__,"QPSchur::ActiveSet::initialize(...) : " 01272 "Error, q_C_hat != 0, now supported yet!")); 01273 // ToDo: We should implement this but it is unlikely to be needed 01274 } 01275 P_FC_hat_.initialize_and_sort( 01276 q_hat,q_hat,q_C_hat,0,0,GPMSTP::BY_ROW 01277 ,q_C_hat ? &P_FC_hat_row_[0] : NULL 01278 ,q_C_hat ? &P_FC_hat_col_[0] : NULL 01279 ,test 01280 ); 01281 01282 // Setup Q_XD_hat_ 01283 Q_XD_hat_row_.resize(q_D_hat_max); 01284 Q_XD_hat_col_.resize(q_D_hat_max); 01285 if(q_D_hat) { 01286 // See ConstrainedOptPack_QPSchur.hpp for description of Q_XD_hat 01287 size_type 01288 k_XD_hat = 0; // zero based 01289 GenPermMatrixSlice::const_iterator 01290 Q_X_itr = qp.Q_X().begin(); // This is sorted by row already! 01291 P_row_t::const_iterator 01292 XF_search = P_XF_hat_row_.begin(), // These are already sorted by row! 01293 XF_search_end = XF_search + q_F_hat; 01294 for( size_type l = 1; l <= n_X; ++l, ++Q_X_itr ) { 01295 const size_type i = Q_X_itr->row_i(); // Already sorted by row 01296 // Look for i in XF 01297 for( ; XF_search != XF_search_end && *XF_search < i; ++XF_search ) ; 01298 if( XF_search == XF_search_end || (XF_search < XF_search_end && *XF_search > i) ) { 01299 // We went right past i and did not find it so 01300 // this variable has not been freed so lets add it! 01301 Q_XD_hat_row_[k_XD_hat] = i; 01302 Q_XD_hat_col_[k_XD_hat] = k_XD_hat + 1; 01303 ++k_XD_hat; 01304 } 01305 } 01306 TEUCHOS_TEST_FOR_EXCEPT( !( k_XD_hat == q_D_hat ) ); 01307 } 01308 Q_XD_hat_.initialize( 01309 n,q_D_hat,q_D_hat,0,0,GPMSTP::BY_ROW // Should already be sorted by row! 01310 , q_D_hat ? &Q_XD_hat_row_[0] : NULL 01311 , q_D_hat ? 
&Q_XD_hat_col_[0] : NULL 01312 ,test 01313 ); 01314 01315 // Setup l_fxfx 01316 l_fxfx_.resize(q_D_hat_max); 01317 if(q_D_hat) { 01318 for( size_type k = 0; k < q_D_hat; ++k ) { 01319 l_fxfx_[k] = l_x_X_map(Q_XD_hat_row_[k]); 01320 TEUCHOS_TEST_FOR_EXCEPT( !( l_fxfx_[k] != 0 ) ); 01321 } 01322 } 01323 01324 // Set the rest of the terms in d_hat involving matrices 01325 // 01326 // d_hat += - P_XF_hat' * G * b_XX - P_plus_hat' * A_bar' * b_XX 01327 // 01328 // where: b_XX = Q_X * b_X 01329 // 01330 if( q_hat ) { 01331 SpVector b_XX; 01332 V_MtV( &b_XX, qp.Q_X(), BLAS_Cpp::no_trans, b_X ); 01333 Vp_StPtMtV( &d_hat_(1,q_hat), -1.0, P_XF_hat_, BLAS_Cpp::trans 01334 , G, BLAS_Cpp::no_trans, b_XX() ); 01335 Vp_StPtMtV( &d_hat_(1,q_hat), -1.0, P_plus_hat_, BLAS_Cpp::trans 01336 , constraints.A_bar(), BLAS_Cpp::trans, b_XX() ); 01337 } 01338 01339 // Setup U_hat 01340 U_hat_.initialize( &G, m ? &qp.A() : NULL, &constraints.A_bar() 01341 , &qp.Q_R(), &P_XF_hat_, &P_plus_hat_ ); 01342 01343 // Set the rest of the members 01344 test_ = test; 01345 qp_ = &qp; 01346 x_init_ = &x_init; 01347 n_ = n; 01348 n_R_ = n_R; 01349 m_ = m; 01350 m_breve_ = m_breve; 01351 q_plus_hat_ = q_plus_hat; 01352 q_F_hat_ = q_F_hat; 01353 q_C_hat_ = q_C_hat; 01354 01355 // Resize storage for z_hat, p_z_hat, mu_D_hat, and p_mu_D_hat and set to zero by default 01356 z_hat_.resize(q_hat_max); 01357 p_z_hat_.resize(q_hat_max); 01358 mu_D_hat_.resize(n_X); 01359 p_mu_D_hat_.resize(n_X); 01360 01361 initialized_ = true; // set to true tenatively so that we can 01362 // print this stuff. 
01363 01364 if( out && (int)output_level >= (int)OUTPUT_ITER_QUANTITIES ) { 01365 *out 01366 << "\nPrint definition of Active-Set before the Schur complement is formed...\n"; 01367 dump_act_set_quantities( *this, *out, false ); 01368 } 01369 01370 // Initialize and factorize the schur complement 01371 if( q_hat ) { 01372 // Temporary storage for S (dense) 01373 DMatrix S_store(q_hat+1,q_hat+1); 01374 DMatrixSliceSym S_sym( S_store(2,q_hat+1,1,q_hat), BLAS_Cpp::lower ); 01375 MatrixSymPosDefCholFactor S(&S_store()); 01376 // S = -1.0 * U_hat' * inv(Ko) * U_hat 01377 M_StMtInvMtM( &S, -1.0, U_hat_, BLAS_Cpp::trans, qp.Ko() 01378 , MatrixSymNonsing::DUMMY_ARG ); 01379 // Now add parts of V_hat 01380 if( q_F_hat ) { 01381 // S += P_XF_hat' * G * P_XF_hat 01382 Mp_StPtMtP( &S, 1.0, MatrixSymOp::DUMMY_ARG, qp_->G(), P_XF_hat_, BLAS_Cpp::no_trans ); 01383 } 01384 if( q_F_hat && q_plus_hat ) { 01385 // S += P_XF_hat' * A_bar * P_plus_hat + P_plus_hat' * A_bar' * P_XF_hat 01386 AbstractLinAlgPack::syr2k( 01387 qp_->constraints().A_bar() 01388 ,BLAS_Cpp::no_trans, 1.0 01389 ,P_XF_hat_, BLAS_Cpp::no_trans 01390 ,P_plus_hat_, BLAS_Cpp::no_trans 01391 ,1.0, &S ); 01392 } 01393 if( q_F_hat && q_C_hat ) { 01394 // S += P_FC_hat + P_FC_hat' 01395 throw std::logic_error( 01396 error_msg(__FILE__,__LINE__,"QPSchur::ActiveSet::initialize(...) : " 01397 "Error, q_C_hat != 0, now supported yet!")); 01398 // ToDo: We should implement this but it is unlikely to be needed 01399 } 01400 01401 if( out && (int)output_level >= (int)OUTPUT_ITER_QUANTITIES ) { 01402 *out 01403 << "\nIninitial Schur Complement before it is nonsingular:\n" 01404 << "\nS_hat =\nLower triangular part (ignore nonzeros above diagonal)\n" 01405 << S_store(); 01406 } 01407 // Initialize and factorize the schur complement! 
01408 try { 01409 schur_comp().update_interface().initialize( 01410 S_sym,q_hat_max,true,MSADU::Inertia( q_plus_hat + q_C_hat, 0, q_F_hat ) 01411 ,pivot_tols() ); 01412 } 01413 catch(const MSADU::WarnNearSingularUpdateException& excpt) { 01414 if( out && (int)output_level >= QPSchur::OUTPUT_BASIC_INFO ) { 01415 *out 01416 << "\nActiveSet::initialize(...) : " << excpt.what() 01417 << std::endl; 01418 } 01419 } 01420 catch(const MSADU::SingularUpdateException& excpt) { 01421 if( out && (int)output_level >= QPSchur::OUTPUT_BASIC_INFO ) { 01422 *out 01423 << "\nActiveSet::initialize(...) : " << excpt.what() 01424 << std::endl; 01425 } 01426 if(salvage_init_schur_comp) { 01427 if( out && (int)output_level >= QPSchur::OUTPUT_BASIC_INFO ) { 01428 *out 01429 << "\nsalvage_init_schur_comp == true\n" 01430 << "We will attempt to add as many rows/cols of the " 01431 << "initial Schur complement as possible ...\n"; 01432 } 01433 // ToDo: We will build the schur complement one row/col at a time 01434 // skipping those updates that cause it to become singular. For each 01435 // update that causes the schur compplement to become singular we 01436 // will remove the corresponding change. 01437 01438 throw; // For now just rethrow the exception! 01439 } 01440 else { 01441 if( out && (int)output_level >= QPSchur::OUTPUT_BASIC_INFO ) { 01442 *out 01443 << "\nsalvage_init_schur_comp == false\n" 01444 << "We will just throw this singularity exception out of here ...\n"; 01445 } 01446 throw; 01447 } 01448 } 01449 if( out && (int)output_level >= (int)OUTPUT_ITER_QUANTITIES ) { 01450 *out 01451 << "\nSchur Complement after factorization:\n" 01452 << "\nS_hat =\n" 01453 << schur_comp().op_interface(); 01454 } 01455 } 01456 else { 01457 schur_comp().update_interface().set_uninitialized(); 01458 } 01459 01460 // Success, we are initialized! 01461 initialized_ = true; 01462 return; 01463 01464 } // end try 01465 catch(...) 
{ 01466 initialized_ = false; 01467 throw; 01468 } 01469 } 01470 01471 void QPSchur::ActiveSet::refactorize_schur_comp() 01472 { 01473 // ToDo: Finish Me 01474 TEUCHOS_TEST_FOR_EXCEPT(true); 01475 } 01476 01477 bool QPSchur::ActiveSet::add_constraint( 01478 size_type ja, EBounds bnd_ja, bool update_steps 01479 ,std::ostream *out, EOutputLevel output_level 01480 ,bool force_refactorization 01481 ,bool allow_any_cond 01482 ) 01483 { 01484 using BLAS_Cpp::no_trans; 01485 using BLAS_Cpp::trans; 01486 using DenseLinAlgPack::dot; 01487 using AbstractLinAlgPack::dot; 01488 using LinAlgOpPack::V_StMtV; 01489 using LinAlgOpPack::Vp_StPtMtV; 01490 using LinAlgOpPack::V_InvMtV; 01491 using Teuchos::Workspace; 01492 Teuchos::WorkspaceStore* wss = Teuchos::get_default_workspace_store().get(); 01493 01494 typedef AbstractLinAlgPack::EtaVector eta_t; 01495 01496 assert_initialized(); 01497 01498 bool wrote_output = false; 01499 01500 const QPSchurPack::QP::Constraints 01501 &constraints = qp_->constraints(); 01502 01503 if( is_init_fixed(ja) && (*x_init_)(ja) == bnd_ja ) { 01504 // 01505 // This is a variable that was initially fixed, then freed and now 01506 // is being fixed back to its original bound. 01507 // 01508 // Here we will shrink the augmented KKT system. 
01509 // 01510 const size_type q_hat = this->q_hat(); 01511 const size_type sd = s_map(-int(ja)); 01512 const size_type la = qp_->l_x_X_map()(ja); 01513 TEUCHOS_TEST_FOR_EXCEPT( !( sd ) ); 01514 TEUCHOS_TEST_FOR_EXCEPT( !( la ) ); 01515 wrote_output = remove_augmented_element( 01516 sd,force_refactorization 01517 ,MatrixSymAddDelUpdateable::EIGEN_VAL_POS 01518 ,out,output_level,allow_any_cond); 01519 // We must remove (ja,sd) from P_XF_hat 01520 P_row_t::iterator 01521 itr = std::lower_bound( P_XF_hat_row_.begin(), P_XF_hat_row_.begin()+q_F_hat_, ja ); 01522 TEUCHOS_TEST_FOR_EXCEPT( !( itr != P_XF_hat_row_.end() ) ); 01523 const size_type p = itr - P_XF_hat_row_.begin(); 01524 std::copy( P_XF_hat_row_.begin() + p + 1, P_XF_hat_row_.begin()+q_F_hat_, 01525 P_XF_hat_row_.begin() + p ); 01526 std::copy( P_XF_hat_col_.begin() + p + 1, P_XF_hat_col_.begin()+q_F_hat_, 01527 P_XF_hat_col_.begin() + p ); 01528 // Deincrement all counters in permutation matrices for removed element 01529 if( q_F_hat_ > 1 ) 01530 deincrement_indices( sd, &P_XF_hat_col_, q_F_hat_-1 ); 01531 if( q_C_hat_ > 0 ) 01532 deincrement_indices( sd, &P_FC_hat_col_, q_C_hat_ ); 01533 if( q_plus_hat_ > 0 ) 01534 deincrement_indices( sd, &P_plus_hat_col_, q_plus_hat_ ); 01535 // 01536 // Add the multiplier for mu_D_hat(...) 01537 // 01538 const size_type q_D_hat = this->q_D_hat(); 01539 // add l_fxfx(q_D_hat+1) = la 01540 l_fxfx_[q_D_hat] = la; 01541 // add mu_D_hat(q_D_hat+1) = 0.0 01542 mu_D_hat_[q_D_hat] = 0.0; 01543 // add p_mu_D_hat(q_D_hat+1) = 1.0 01544 if(update_steps) 01545 p_mu_D_hat_[q_D_hat] = 1.0; 01546 // Insert the pair (ja,q_D_hat+1) into Q_XD_hat(...) 
(sorted by row) 01547 insert_pair_sorted(ja,q_D_hat+1,q_D_hat+1,&Q_XD_hat_row_,&Q_XD_hat_col_); 01548 // 01549 // Update the counts 01550 // 01551 --q_F_hat_; 01552 } 01553 else { 01554 // 01555 // Expand the size of the schur complement to add the new constraint 01556 // 01557 01558 // Compute the terms for the update 01559 01560 value_type d_p = 0.0; 01561 const size_type q_hat = this->q_hat(); 01562 Workspace<value_type> t_hat_ws(wss,q_hat); 01563 DVectorSlice t_hat(t_hat_ws.size()?&t_hat_ws[0]:NULL,t_hat_ws.size()); 01564 value_type alpha_hat = 0.0; 01565 bool changed_bounds = false; 01566 size_type sd = 0; // Only used if changed_bounds == true 01567 01568 if( ja <= n_ && !is_init_fixed(ja) ) { 01569 // 01570 // Fix an initially free variable is being fixed 01571 // 01572 // u_p = [ Q_R' * e(ja) ] <: R^(n_R+m) 01573 // [ 0 ] 01574 // 01575 const size_type 01576 la = qp_->Q_R().lookup_col_j(ja); 01577 TEUCHOS_TEST_FOR_EXCEPT( !( la ) ); 01578 const eta_t u_p = eta_t(la,n_R_+m_); 01579 // r = inv(Ko)*u_p 01580 DVector r; // ToDo: Make this sparse! 01581 V_InvMtV( &r, qp_->Ko(), no_trans, u_p() ); 01582 // t_hat = - U_hat' * r 01583 if(q_hat) 01584 V_StMtV( &t_hat(), -1.0, U_hat_, trans, r() ); 01585 // alpha_hat = - u_p ' * r 01586 alpha_hat = - dot( u_p(), r() ); 01587 // d_p = \bar{b}_{j^{(+)}} 01588 d_p = constraints.get_bnd( ja, bnd_ja ); 01589 01590 changed_bounds = false; 01591 } 01592 else if ( is_init_fixed(ja) ) { 01593 // 01594 // An intially fixed variable was freed and 01595 // is now being fixed to the other bound. 01596 // 01597 // Here we must expand the augmented KKT system for this 01598 // simple change. 
01599 // 01600 // u_p = 0 01601 // 01602 // v_p = e(sd) <: R^(q_hat), where sd = s_map(-ja) 01603 // 01604 sd = s_map(-int(ja)); 01605 TEUCHOS_TEST_FOR_EXCEPT( !( sd ) ); 01606 const size_type la = qp_->l_x_X_map()(ja); 01607 TEUCHOS_TEST_FOR_EXCEPT( !( la ) ); 01608 // t_hat = e(sd) 01609 t_hat = 0.0; 01610 t_hat(sd) = 1.0; 01611 // alpha_hat = 0.0 01612 alpha_hat = 0.0; 01613 // d_p = \bar{b}_{j^{(+)}} - b_X(la) 01614 d_p = constraints.get_bnd( ja, bnd_ja ) - qp_->b_X()(la); 01615 01616 changed_bounds = true; 01617 } 01618 else { // ja > n 01619 // 01620 // Add an extra equality or inequality constraint. 01621 // 01622 // u_p = [ Q_R' * A_bar * e(ja) ] n_R 01623 // [ 0 ] m 01624 const eta_t e_ja = eta_t(ja,n_+m_breve_); 01625 const MatrixOp &A_bar = constraints.A_bar(); 01626 DVector u_p( n_R_ + m_ ); // ToDo: make this sparse 01627 Vp_StPtMtV( &u_p(1,n_R_), 1.0, qp_->Q_R(), trans, A_bar, no_trans, e_ja(), 0.0 ); 01628 if( m_ ) 01629 u_p(n_R_+1,n_R_+m_) = 0.0; 01630 // r = inv(Ko) * u_p 01631 DVector r; // ToDo: Make this sparse! 01632 V_InvMtV( &r, qp_->Ko(), no_trans, u_p() ); 01633 if(q_hat) { 01634 // t_hat = v_p - U_hat' * r 01635 // where: v_p = P_XF_hat' * A_bar * e(ja) 01636 V_StMtV( &t_hat(), -1.0, U_hat_, trans, r() ); 01637 Vp_StPtMtV( &t_hat(), 1.0, P_XF_hat_, trans, A_bar, no_trans, e_ja() ); 01638 } 01639 // alpha_hat = - u_p ' * r 01640 alpha_hat = - dot( u_p(), r() ); 01641 // d_p = \bar{b}_{j^{(+)}} - b_X' * Q_X' * A_bar * e(ja) 01642 // 01643 // d_p = \bar{b}_{j^{(+)}} 01644 d_p = constraints.get_bnd( ja, bnd_ja ); 01645 if(n_ > n_R_) { 01646 // d_p += - b_X' * Q_X' * A_bar * e(ja) 01647 r.resize( n_ - n_R_ ); // reuse storage 01648 Vp_StPtMtV( &r(), 1.0, qp_->Q_X(), trans, A_bar, no_trans, e_ja(), 0.0 ); 01649 d_p += - dot( qp_->b_X(), r() ); 01650 } 01651 01652 changed_bounds = false; 01653 } 01654 01655 // Update the schur complement if nonsingular. 
These 01656 // with throw exceptions if the matrix is singular 01657 // or has the wrong inertia 01658 if(q_hat) { 01659 try { 01660 schur_comp().update_interface().augment_update( 01661 &t_hat(), alpha_hat, force_refactorization 01662 ,MatrixSymAddDelUpdateable::EIGEN_VAL_NEG 01663 ,MSADU::PivotTolerances( 01664 pivot_tols().warning_tol 01665 ,allow_any_cond ? 0.0 : pivot_tols().singular_tol 01666 ,allow_any_cond ? 0.0 : pivot_tols().wrong_inertia_tol ) ); 01667 } 01668 catch(const MSADU::WarnNearSingularUpdateException& excpt) { 01669 if( out && (int)output_level >= QPSchur::OUTPUT_BASIC_INFO ) { 01670 *out 01671 << "\nActiveSet::add_constraint(...) : " << excpt.what() 01672 << std::endl; 01673 wrote_output = true; 01674 } 01675 } 01676 } 01677 else { 01678 schur_comp().update_interface().initialize( 01679 alpha_hat, (n_-n_R_) + n_-m_ ); 01680 } 01681 // Update the rest of the augmented KKT system 01682 if( changed_bounds ) 01683 ++q_C_hat_; 01684 else 01685 ++q_plus_hat_; 01686 const size_type q_hat_new = q_F_hat_ + q_C_hat_ + q_plus_hat_; 01687 // Add ij_map(q_hat) = ja to ij_map(...) 01688 ij_map_[q_hat_new - 1] = ja; 01689 // Add constr_norm(q_hat) to constr_norm(...) 01690 constr_norm_[q_hat_new - 1] = 1.0; // ToDo: Compute this for real! 01691 // Add bnds(q_hat)_ = bnd_ja to bnds(...) 
01692 bnds_[q_hat_new - 1] = bnd_ja; 01693 // Augment d_hat = [ d_hat; d_p ] 01694 d_hat_(q_hat_new) = d_p; 01695 // Augment z_hat with new (zeroed) multiplier value, z_hat = [ z_hat; 0 ] 01696 z_hat_(q_hat_new) = 0.0; 01697 if( update_steps ) { 01698 // Set the step for this multiplier to 1.0, p_z_hat = [ p_z_hat; 1 ] 01699 p_z_hat_(q_hat_new) = 1.0; 01700 } 01701 if( !changed_bounds ) { 01702 // Insert (ja, q_hat_new) into P_plus_hat, sorted by row 01703 insert_pair_sorted(ja,q_hat_new,q_plus_hat_,&P_plus_hat_row_,&P_plus_hat_col_); 01704 } 01705 else { 01706 TEUCHOS_TEST_FOR_EXCEPT( !( sd ) ); 01707 // Insert (sd,q_hat_new) into P_FC_hat, sorted by row) 01708 insert_pair_sorted(sd,q_hat_new,q_C_hat_,&P_FC_hat_row_,&P_FC_hat_col_); 01709 } 01710 } 01711 // Update the permutation matrices and U_hat 01712 reinitialize_matrices(test_); 01713 return wrote_output; 01714 } 01715 01716 bool QPSchur::ActiveSet::drop_constraint( 01717 int jd, std::ostream *out, EOutputLevel output_level 01718 ,bool force_refactorization, bool allow_any_cond 01719 ) 01720 { 01721 using BLAS_Cpp::no_trans; 01722 using BLAS_Cpp::trans; 01723 using DenseLinAlgPack::dot; 01724 using AbstractLinAlgPack::dot; 01725 using LinAlgOpPack::V_StMtV; 01726 using LinAlgOpPack::V_MtV; 01727 using LinAlgOpPack::Vp_MtV; 01728 using LinAlgOpPack::Vp_StPtMtV; 01729 using LinAlgOpPack::V_InvMtV; 01730 using AbstractLinAlgPack::transVtMtV; 01731 using Teuchos::Workspace; 01732 Teuchos::WorkspaceStore* wss = Teuchos::get_default_workspace_store().get(); 01733 01734 typedef AbstractLinAlgPack::EtaVector eta_t; 01735 01736 assert_initialized(); 01737 01738 bool wrote_output = false; 01739 01740 if( jd < 0 ) { 01741 // 01742 // A variable initially fixed is being freed. 01743 // Increase the dimension of the augmented the KKT system! 
01744 // 01745 size_type 01746 q_hat = this->q_hat(), 01747 q_F_hat = this->q_F_hat(), 01748 q_plus_hat = this->q_plus_hat(), 01749 q_D_hat = this->q_D_hat(); 01750 // Get indexes 01751 const size_type id = -jd; 01752 TEUCHOS_TEST_FOR_EXCEPT( !( 1 <= id && id <= n_ ) ); 01753 const size_type ld = qp_->l_x_X_map()(-jd); 01754 TEUCHOS_TEST_FOR_EXCEPT( !( 1 <= ld && ld <= n_ - n_R_ ) ); 01755 size_type kd; // Find kd (this is unsorted) 01756 {for( kd = 1; kd <= q_D_hat; ++kd ) { 01757 if( l_fxfx_[kd-1] == ld ) break; 01758 }} 01759 TEUCHOS_TEST_FOR_EXCEPT( !( kd <= q_D_hat ) ); 01760 // Get references 01761 const MatrixSymOp 01762 &G = qp_->G(); 01763 const DVectorSlice 01764 g = qp_->g(); 01765 const MatrixOp 01766 &A_bar = qp_->constraints().A_bar(); 01767 const MatrixSymOpNonsing 01768 &Ko = qp_->Ko(); 01769 const MatrixOp 01770 &U_hat = this->U_hat(); 01771 const GenPermMatrixSlice 01772 &Q_R = qp_->Q_R(), 01773 &Q_X = qp_->Q_X(), 01774 &P_XF_hat = this->P_XF_hat(), 01775 &P_plus_hat = this->P_plus_hat(); 01776 const DVectorSlice 01777 b_X = qp_->b_X(); 01778 // 01779 // Compute the update quantities to augmented KKT system 01780 // 01781 // e_id 01782 eta_t e_id(id,n_); 01783 // u_p = [ Q_R'*G*e_id ; A'*e_id ] <: R^(n_R+m) 01784 DVector u_p(n_R_+m_); 01785 Vp_StPtMtV( &u_p(1,n_R_), 1.0, Q_R, trans, G, no_trans, e_id(), 0.0 ); 01786 if( m_ ) 01787 V_MtV( &u_p(n_R_+1,n_R_+m_), qp_->A(), trans, e_id() ); 01788 const value_type 01789 nrm_u_p = DenseLinAlgPack::norm_inf( u_p() ); 01790 // sigma = e_id'*G*e_id <: R 01791 const value_type 01792 sigma = transVtMtV( e_id(), G, no_trans, e_id() ); 01793 // d_p = - g(id) - b_X'*(Q_X'*G*e_id) <: R 01794 DVector Q_X_G_e_id(Q_X.cols()); 01795 Vp_StPtMtV( &Q_X_G_e_id(), 1.0, Q_X, trans, G, no_trans, e_id(), 0.0 ); 01796 const value_type 01797 d_p = -g(id) - dot( b_X, Q_X_G_e_id() ); 01798 // r = inv(Ko)*u_p <: R^(n_R+m) 01799 DVector r; 01800 if( nrm_u_p > 0.0 ) 01801 V_InvMtV( &r, Ko, no_trans, u_p() ); 01802 // t_hat = v_p - 
U_hat'*r 01803 // where: v_p = P_XF_hat'*G*e_id + P_plus_hat'*A_bar'*e_id <: R^(q_hat) 01804 Workspace<value_type> 01805 t_hat_ws(wss,q_hat); 01806 DVectorSlice 01807 t_hat(&t_hat_ws[0],q_hat); 01808 if(q_hat) { 01809 t_hat = 0.0; 01810 // t_hat += v_p 01811 if(q_F_hat_) 01812 Vp_StPtMtV( &t_hat(), 1.0, P_XF_hat, trans, G, no_trans, e_id() ); 01813 if(q_plus_hat_) 01814 Vp_StPtMtV( &t_hat(), 1.0, P_plus_hat, trans, A_bar, trans, e_id() ); 01815 // t_hat += U_hat'*r 01816 if( nrm_u_p > 0.0 ) 01817 Vp_MtV( &t_hat(), U_hat, trans, r() ); 01818 } 01819 // alpha_hat = sigma - u_p'*r 01820 const value_type 01821 alpha_hat = sigma - ( nrm_u_p > 0.0 ? dot(u_p(),r()) : 0.0 ); 01822 // 01823 // Update the schur complement if nonsingular. These 01824 // with throw exceptions if the matrix is singular 01825 // or has the wrong inertia 01826 // 01827 if(q_hat) { 01828 try { 01829 schur_comp().update_interface().augment_update( 01830 &t_hat(), alpha_hat, force_refactorization 01831 ,MatrixSymAddDelUpdateable::EIGEN_VAL_POS ); 01832 } 01833 catch(const MSADU::WarnNearSingularUpdateException& excpt) { 01834 if( out && (int)output_level >= QPSchur::OUTPUT_BASIC_INFO ) { 01835 *out 01836 << "\nActiveSet::drop_constraint(...) : " << excpt.what() 01837 << std::endl; 01838 wrote_output = true; 01839 } 01840 } 01841 } 01842 else { 01843 schur_comp().update_interface().initialize( 01844 alpha_hat, (n_-n_R_) + n_-m_ ); 01845 } 01846 // 01847 // Remove multiplier from mu_D_hat(...) 01848 // 01849 // remove l_fxfx(kd) == ld from l_fxfx(...) 01850 std::copy( l_fxfx_.begin() + kd, l_fxfx_.begin() + q_D_hat 01851 , l_fxfx_.begin() + (kd-1) ); 01852 // remove mu_D_hat(kd) from mu_D_hat(...) 01853 std::copy( mu_D_hat_.begin() + kd, mu_D_hat_.begin() + q_D_hat 01854 , mu_D_hat_.begin() + (kd-1) ); 01855 // remove Q_XD_hat(id,ld) from Q_XD_hat(...) 
01856 P_row_t::iterator 01857 itr = std::lower_bound( Q_XD_hat_row_.begin(), Q_XD_hat_row_.begin()+q_D_hat, id ); 01858 TEUCHOS_TEST_FOR_EXCEPT( !( itr != Q_XD_hat_row_.end() ) ); 01859 const size_type p = itr - Q_XD_hat_row_.begin(); 01860 std::copy( Q_XD_hat_row_.begin() + p + 1, Q_XD_hat_row_.begin()+q_D_hat, 01861 Q_XD_hat_row_.begin() + p ); 01862 std::copy( Q_XD_hat_col_.begin() + p + 1, Q_XD_hat_col_.begin()+q_D_hat, 01863 Q_XD_hat_col_.begin() + p ); 01864 if( q_D_hat > 1 ) 01865 deincrement_indices( kd, &Q_XD_hat_col_, q_D_hat-1 ); 01866 // 01867 // Update the counts 01868 // 01869 ++q_F_hat_; 01870 q_hat = this->q_hat(); 01871 // 01872 // Add the elements for newly freed variable 01873 // 01874 // add ij_map(q_hat) == -id to ij_map(...) 01875 ij_map_[q_hat-1] = -id; 01876 // add s_map(-id) == q_hat to s_map(...) 01877 // ToDo: implement s_map(...) 01878 // add bnds(q_hat) == FREE to bnds(...) 01879 bnds_[q_hat-1] = FREE; 01880 // add d_hat(q_hat) == d_p to d_hat(...) 01881 d_hat_[q_hat-1] = d_p; 01882 // add p_X(ld) == 0 to the end of z_hat(...) 01883 z_hat_[q_hat-1] = 0.0; // This is needed so that (z_hat + beta*t_D*p_z_hat)(q_hat) == 0 01884 // Insert (id,q_hat) into P_XF_hat sorted by row 01885 insert_pair_sorted(id,q_hat,q_F_hat_,&P_XF_hat_row_,&P_XF_hat_col_); 01886 } 01887 else { 01888 // 01889 // Shrink the dimension of the augmented KKT system to remove the constraint! 01890 // 01891 const size_type q_hat = this->q_hat(); 01892 const size_type sd = s_map(jd); 01893 TEUCHOS_TEST_FOR_EXCEPT( !( sd ) ); 01894 wrote_output = remove_augmented_element( 01895 sd,force_refactorization 01896 ,MatrixSymAddDelUpdateable::EIGEN_VAL_NEG 01897 ,out,output_level,allow_any_cond 01898 ); 01899 if( is_init_fixed(jd) ) { 01900 // This must be an intially fixed variable, currently fixed at a different bound. 01901 // We must remove this element from P_FC_hat(...) 
01902 const size_type sd1 = s_map(-jd); // This is the position in the schur complement where first freed 01903 TEUCHOS_TEST_FOR_EXCEPT( !( sd1 ) ); 01904 // Remove P_FC_hat(sd1,sd) from P_FC_hat(...) 01905 P_row_t::iterator 01906 itr = std::lower_bound( P_FC_hat_row_.begin(), P_FC_hat_row_.begin()+q_C_hat_, sd1 ); 01907 TEUCHOS_TEST_FOR_EXCEPT( !( itr != P_FC_hat_row_.end() ) ); 01908 const size_type p = itr - P_FC_hat_row_.begin(); 01909 std::copy( P_FC_hat_row_.begin() + p + 1, P_FC_hat_row_.begin()+q_C_hat_, 01910 P_FC_hat_row_.begin() + p ); 01911 std::copy( P_FC_hat_col_.begin() + p + 1, P_FC_hat_col_.begin()+q_C_hat_, 01912 P_FC_hat_col_.begin() + p ); 01913 --q_C_hat_; 01914 } 01915 else { 01916 // We must remove P_plus_hat(jd,sd) from P_plus_hat(...) 01917 P_row_t::iterator 01918 itr = std::lower_bound( P_plus_hat_row_.begin(), P_plus_hat_row_.begin()+q_plus_hat_, jd ); 01919 TEUCHOS_TEST_FOR_EXCEPT( !( itr != P_plus_hat_row_.end() ) ); 01920 const size_type p = itr - P_plus_hat_row_.begin(); 01921 std::copy( P_plus_hat_row_.begin() + p + 1, P_plus_hat_row_.begin()+q_plus_hat_, 01922 P_plus_hat_row_.begin() + p ); 01923 std::copy( P_plus_hat_col_.begin() + p + 1, P_plus_hat_col_.begin()+q_plus_hat_, 01924 P_plus_hat_col_.begin() + p ); 01925 --q_plus_hat_; 01926 } 01927 // Deincrement all counters in permutation matrices for removed element 01928 if( q_F_hat_ > 0 ) 01929 deincrement_indices( sd, &P_XF_hat_col_, q_F_hat_ ); 01930 if( q_C_hat_ > 0 ) 01931 deincrement_indices( sd, &P_FC_hat_col_, q_C_hat_ ); 01932 if( q_plus_hat_ > 0 ) 01933 deincrement_indices( sd, &P_plus_hat_col_, q_plus_hat_ ); 01934 } 01935 // Update the permutation matrices and U_hat 01936 reinitialize_matrices(test_); 01937 return wrote_output; 01938 } 01939 01940 bool QPSchur::ActiveSet::drop_add_constraints( 01941 int jd, size_type ja, EBounds bnd_ja, bool update_steps 01942 ,std::ostream *out, EOutputLevel output_level 01943 ) 01944 { 01945 bool wrote_output = false; 01946 if( 
drop_constraint( jd, out, output_level, false, true ) ) 01947 wrote_output = true; 01948 if( add_constraint( ja, bnd_ja, update_steps, out, output_level, true, true ) ) 01949 wrote_output = true; 01950 return wrote_output; 01951 } 01952 01953 QPSchur::ActiveSet::QP& 01954 QPSchur::ActiveSet::qp() 01955 { 01956 assert_initialized(); 01957 return *qp_; 01958 } 01959 01960 const QPSchur::ActiveSet::QP& 01961 QPSchur::ActiveSet::qp() const 01962 { 01963 assert_initialized(); 01964 return *qp_; 01965 } 01966 01967 size_type QPSchur::ActiveSet::q_hat() const 01968 { 01969 assert_initialized(); 01970 return q_plus_hat_ + q_F_hat_ + q_C_hat_; 01971 } 01972 01973 size_type QPSchur::ActiveSet::q_plus_hat() const 01974 { 01975 assert_initialized(); 01976 return q_plus_hat_; 01977 } 01978 01979 size_type QPSchur::ActiveSet::q_F_hat() const 01980 { 01981 assert_initialized(); 01982 return q_F_hat_; 01983 } 01984 01985 size_type QPSchur::ActiveSet::q_C_hat() const 01986 { 01987 assert_initialized(); 01988 return q_C_hat_; 01989 } 01990 01991 size_type QPSchur::ActiveSet::q_D_hat() const 01992 { 01993 assert_initialized(); 01994 return (n_ - n_R_) - q_F_hat_; // n^{X} - \hat{q}^{F} 01995 } 01996 01997 int QPSchur::ActiveSet::ij_map( size_type s ) const 01998 { 01999 TEUCHOS_TEST_FOR_EXCEPT( !( 1 <= s && s <= this->q_hat() ) ); 02000 return ij_map_[s-1]; 02001 } 02002 02003 size_type QPSchur::ActiveSet::s_map( int ij ) const 02004 { 02005 ij_map_t::const_iterator 02006 begin = ij_map_.begin(), 02007 end = begin + q_hat(), 02008 itr = std::find( begin, end, ij ); 02009 return ( itr != end ? 
(itr - begin) + 1 : 0 ); 02010 } 02011 02012 value_type QPSchur::ActiveSet::constr_norm( size_type s ) const 02013 { 02014 TEUCHOS_TEST_FOR_EXCEPT( !( 1 <= s && s <= this->q_hat() ) ); 02015 return constr_norm_(s); 02016 } 02017 02018 EBounds QPSchur::ActiveSet::bnd( size_type s ) const 02019 { 02020 TEUCHOS_TEST_FOR_EXCEPT( !( 1 <= s && s <= this->q_hat() ) ); 02021 return bnds_[s-1]; 02022 } 02023 02024 size_type QPSchur::ActiveSet::l_fxfx( size_type k ) const 02025 { 02026 TEUCHOS_TEST_FOR_EXCEPT( !( 1 <= k && k <= this->q_D_hat() ) ); 02027 return l_fxfx_[k-1]; 02028 } 02029 02030 const QPSchur::U_hat_t& QPSchur::ActiveSet::U_hat() const 02031 { 02032 assert_initialized(); 02033 return U_hat_; 02034 } 02035 02036 const MatrixSymOpNonsing& QPSchur::ActiveSet::S_hat() const 02037 { 02038 assert_initialized(); 02039 return schur_comp().op_interface(); 02040 } 02041 02042 const GenPermMatrixSlice& QPSchur::ActiveSet::P_XF_hat() const 02043 { 02044 assert_initialized(); 02045 return P_XF_hat_; 02046 } 02047 02048 const GenPermMatrixSlice& QPSchur::ActiveSet::P_FC_hat() const 02049 { 02050 assert_initialized(); 02051 return P_FC_hat_; 02052 } 02053 02054 const GenPermMatrixSlice& QPSchur::ActiveSet::P_plus_hat() const 02055 { 02056 assert_initialized(); 02057 return P_plus_hat_; 02058 } 02059 02060 const GenPermMatrixSlice& QPSchur::ActiveSet::Q_XD_hat() const 02061 { 02062 assert_initialized(); 02063 return Q_XD_hat_; 02064 } 02065 02066 const DVectorSlice QPSchur::ActiveSet::d_hat() const 02067 { 02068 assert_initialized(); 02069 return d_hat_(1,q_hat()); 02070 } 02071 02072 DVectorSlice QPSchur::ActiveSet::z_hat() 02073 { 02074 assert_initialized(); 02075 return z_hat_(1,q_hat()); 02076 } 02077 02078 const DVectorSlice QPSchur::ActiveSet::z_hat() const 02079 { 02080 assert_initialized(); 02081 return z_hat_(1,q_hat()); 02082 } 02083 02084 DVectorSlice QPSchur::ActiveSet::p_z_hat() 02085 { 02086 assert_initialized(); 02087 return p_z_hat_(1,q_hat()); 02088 } 02089 
02090 const DVectorSlice QPSchur::ActiveSet::p_z_hat() const 02091 { 02092 assert_initialized(); 02093 return p_z_hat_(1,q_hat()); 02094 } 02095 02096 DVectorSlice QPSchur::ActiveSet::mu_D_hat() 02097 { 02098 assert_initialized(); 02099 return mu_D_hat_(1,q_D_hat()); 02100 } 02101 02102 const DVectorSlice QPSchur::ActiveSet::mu_D_hat() const 02103 { 02104 assert_initialized(); 02105 return mu_D_hat_(1,q_D_hat()); 02106 } 02107 02108 DVectorSlice QPSchur::ActiveSet::p_mu_D_hat() 02109 { 02110 assert_initialized(); 02111 return p_mu_D_hat_(1,q_D_hat()); 02112 } 02113 02114 const DVectorSlice QPSchur::ActiveSet::p_mu_D_hat() const 02115 { 02116 assert_initialized(); 02117 return p_mu_D_hat_(1,q_D_hat()); 02118 } 02119 02120 bool QPSchur::ActiveSet::is_init_fixed( size_type j ) const 02121 { 02122 assert_initialized(); 02123 return j <= n_ && (*x_init_)(j) != FREE; 02124 } 02125 02126 bool QPSchur::ActiveSet::all_dof_used_up() const 02127 { 02128 return n_ - m_ == (n_ - n_R_) - q_F_hat_ + q_C_hat_ + q_plus_hat_; 02129 } 02130 02131 // private member functions for QPSchur::ActiveSet 02132 02133 void QPSchur::ActiveSet::assert_initialized() const 02134 { 02135 if( !initialized_ ) 02136 throw std::logic_error( 02137 error_msg(__FILE__,__LINE__,"QPSchur::ActiveSet::assert_initialized() : Error, " 02138 "The active set has not been initialized yet!") ); 02139 } 02140 02141 void QPSchur::ActiveSet::assert_s( size_type s) const 02142 { 02143 TEUCHOS_TEST_FOR_EXCEPT( !( s <= q_hat() ) ); // ToDo: Throw an exception 02144 } 02145 02146 void QPSchur::ActiveSet::reinitialize_matrices(bool test) 02147 { 02148 namespace GPMSTP = AbstractLinAlgPack::GenPermMatrixSliceIteratorPack; 02149 02150 const size_type q_hat = this->q_hat(); 02151 const size_type q_D_hat = this->q_D_hat(); 02152 02153 P_XF_hat_.initialize( 02154 n_,q_hat,q_F_hat_,0,0,GPMSTP::BY_ROW 02155 ,q_F_hat_ ? &P_XF_hat_row_[0] : NULL 02156 ,q_F_hat_ ? 
&P_XF_hat_col_[0] : NULL 02157 ,test 02158 ); 02159 P_FC_hat_.initialize( 02160 q_hat,q_hat,q_C_hat_,0,0,GPMSTP::BY_ROW 02161 ,q_C_hat_ ? &P_FC_hat_row_[0] : NULL 02162 ,q_C_hat_ ? &P_FC_hat_col_[0] : NULL 02163 ,test 02164 ); 02165 P_plus_hat_.initialize( 02166 n_+m_breve_,q_hat,q_plus_hat_,0,0,GPMSTP::BY_ROW 02167 ,q_plus_hat_ ? &P_plus_hat_row_[0] : NULL 02168 ,q_plus_hat_ ? &P_plus_hat_col_[0] : NULL 02169 ,test 02170 ); 02171 Q_XD_hat_.initialize( 02172 n_,q_D_hat,q_D_hat,0,0,GPMSTP::BY_ROW 02173 ,q_D_hat ? &Q_XD_hat_row_[0] : NULL 02174 ,q_D_hat ? &Q_XD_hat_col_[0] : NULL 02175 ,test 02176 ); 02177 U_hat_.initialize( 02178 &qp_->G(), m_ ? &qp_->A() : NULL, &qp_->constraints().A_bar() 02179 ,&qp_->Q_R(), &P_XF_hat_, &P_plus_hat_); 02180 } 02181 02182 bool QPSchur::ActiveSet::remove_augmented_element( 02183 size_type sd, bool force_refactorization 02184 ,MatrixSymAddDelUpdateable::EEigenValType eigen_val_drop 02185 ,std::ostream *out, EOutputLevel output_level 02186 ,bool allow_any_cond 02187 ) 02188 { 02189 bool wrote_output = false; 02190 const size_type q_hat = this->q_hat(); 02191 // Delete the sd row and column for S_hat 02192 try { 02193 schur_comp().update_interface().delete_update( 02194 sd,force_refactorization,eigen_val_drop 02195 ,MSADU::PivotTolerances( 02196 pivot_tols().warning_tol 02197 ,allow_any_cond ? 0.0 : pivot_tols().singular_tol 02198 ,allow_any_cond ? 0.0 : pivot_tols().wrong_inertia_tol )); 02199 } 02200 catch(const MSADU::WarnNearSingularUpdateException& excpt) { 02201 if( out && (int)output_level >= QPSchur::OUTPUT_BASIC_INFO ) { 02202 *out 02203 << "\nActiveSet::drop_constraint(...) : " << excpt.what() 02204 << std::endl; 02205 wrote_output = true; 02206 } 02207 } 02208 // Remove the ij_map(s) = jd element from ij_map(...) 02209 std::copy( ij_map_.begin() + sd, ij_map_.begin() + q_hat 02210 , ij_map_.begin() + (sd-1) ); 02211 // Remove the constr_norm(s) elment from constr_norm(...) 
02212 std::copy( constr_norm_.begin() + sd, constr_norm_.begin() + q_hat 02213 , constr_norm_.begin() + (sd-1) ); 02214 // Remove the bnds(s) element from bnds(...) 02215 std::copy( bnds_.begin() + sd, bnds_.begin() + q_hat 02216 , bnds_.begin() + (sd-1) ); 02217 // Remove the d_hat(s) element from d_hat(...) 02218 std::copy( d_hat_.begin() + sd, d_hat_.begin() + q_hat 02219 , d_hat_.begin() + (sd-1) ); 02220 // Remove the z_hat(s) element from z_hat(...) 02221 std::copy( z_hat_.begin() + sd, z_hat_.begin() + q_hat 02222 , z_hat_.begin() + (sd-1) ); 02223 // Remove the p_z_hat(s) element from p_z_hat(...) 02224 std::copy( p_z_hat_.begin() + sd, p_z_hat_.begin() + q_hat 02225 , p_z_hat_.begin() + (sd-1) ); 02226 return wrote_output; 02227 } 02228 02229 // public member functions for QPSchur 02230 02231 value_type QPSchur::DEGENERATE_MULT = std::numeric_limits<value_type>::min(); 02232 02233 void QPSchur::pivot_tols( MSADU::PivotTolerances pivot_tols ) 02234 { 02235 act_set_.pivot_tols(pivot_tols); 02236 } 02237 02238 QPSchur::MSADU::PivotTolerances QPSchur::pivot_tols() const 02239 { 02240 return act_set_.pivot_tols(); 02241 } 02242 02243 QPSchur::QPSchur( 02244 const schur_comp_ptr_t& schur_comp 02245 ,size_type max_iter 02246 ,value_type max_real_runtime 02247 ,value_type feas_tol 02248 ,value_type loose_feas_tol 02249 ,value_type dual_infeas_tol 02250 ,value_type huge_primal_step 02251 ,value_type huge_dual_step 02252 ,value_type warning_tol 02253 ,value_type error_tol 02254 ,size_type iter_refine_min_iter 02255 ,size_type iter_refine_max_iter 02256 ,value_type iter_refine_opt_tol 02257 ,value_type iter_refine_feas_tol 02258 ,bool iter_refine_at_solution 02259 ,bool salvage_init_schur_comp 02260 ,MSADU::PivotTolerances pivot_tols 02261 ) 02262 :schur_comp_(schur_comp) 02263 ,max_iter_(max_iter) 02264 ,max_real_runtime_(max_real_runtime) 02265 ,feas_tol_(feas_tol) 02266 ,loose_feas_tol_(loose_feas_tol) 02267 ,dual_infeas_tol_(dual_infeas_tol) 02268 
,huge_primal_step_(huge_primal_step) 02269 ,huge_dual_step_(huge_dual_step) 02270 ,warning_tol_(warning_tol) 02271 ,error_tol_(error_tol) 02272 ,iter_refine_min_iter_(iter_refine_min_iter) 02273 ,iter_refine_max_iter_(iter_refine_max_iter) 02274 ,iter_refine_opt_tol_(iter_refine_opt_tol) 02275 ,iter_refine_feas_tol_(iter_refine_feas_tol) 02276 ,iter_refine_at_solution_(iter_refine_at_solution) 02277 ,salvage_init_schur_comp_(salvage_init_schur_comp) 02278 ,act_set_(schur_comp,pivot_tols) 02279 {} 02280 02281 QPSchur::ESolveReturn QPSchur::solve_qp( 02282 QP& qp 02283 ,size_type num_act_change, const int ij_act_change[], const EBounds bnds[] 02284 ,std::ostream *out, EOutputLevel output_level, ERunTests test_what 02285 ,DVectorSlice* x, SpVector* mu, DVectorSlice* lambda, SpVector* lambda_breve 02286 ,size_type* iter, size_type* num_adds, size_type* num_drops 02287 ) 02288 { 02289 using std::setw; 02290 using std::endl; 02291 using std::right; 02292 using DenseLinAlgPack::norm_inf; 02293 using AbstractLinAlgPack::norm_inf; 02294 using LinAlgOpPack::V_InvMtV; 02295 using Teuchos::Workspace; 02296 Teuchos::WorkspaceStore* wss = Teuchos::get_default_workspace_store().get(); 02297 using StopWatchPack::stopwatch; 02298 02299 const value_type inf = std::numeric_limits<value_type>::max(); 02300 02301 if( !out ) 02302 output_level = NO_OUTPUT; 02303 02304 const int dbl_min_w = 20; 02305 const int dbl_w = (out ? my_max(dbl_min_w,int(out->precision()+8)) : 20 ); 02306 02307 // Set the schur complement 02308 act_set_.set_schur_comp( schur_comp_ ); 02309 02310 ESolveReturn 02311 solve_return = SUBOPTIMAL_POINT; 02312 02313 // Print QPSchur output header 02314 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 02315 *out 02316 << "\n*** Entering QPSchur::solve_qp(...)\n"; 02317 } 02318 02319 // Start the timer! 02320 stopwatch timer; 02321 timer.reset(); 02322 timer.start(); 02323 02324 // Print the definition of the QP to be solved. 
02325 if( (int)output_level >= (int)OUTPUT_ITER_QUANTITIES ) { 02326 *out 02327 << "\n*** Dump the definition of the QP to be solved ***\n"; 02328 qp.dump_qp(*out); 02329 } 02330 02331 // Print warm start info. 02332 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 02333 *out 02334 << "\n*** Warm start info\n" 02335 << "\nNumber of variables = " << qp.n() 02336 << "\nNumber of initially fixed variables (not in Ko) = " << (qp.n() - qp.n_R()) 02337 << "\nNumber of changes to the initial KKT system (num_act_change) = " << num_act_change << endl; 02338 const size_type 02339 n = qp.n(); 02340 size_type 02341 num_var_fixed = 0, num_var_freed = 0, num_gen_equ = 0, num_gen_inequ = 0; 02342 for( size_type s = 1; s <= num_act_change; ++s ) { 02343 const int ij = ij_act_change[s-1]; 02344 const EBounds bnd = bnds[s-1]; 02345 if( ij < 0 ) 02346 ++num_var_freed; 02347 else if( ij < n ) 02348 ++num_var_fixed; 02349 else if( bnd == EQUALITY ) 02350 ++num_gen_equ; 02351 else 02352 ++num_gen_inequ; 02353 } 02354 *out 02355 << "\n Number of initially fixed variables freed from a bound = " << num_var_freed 02356 << "\n Number of initially free variables fixed to a bound = " << num_var_fixed 02357 << "\n Number of general equality constraints added = " << num_gen_equ 02358 << "\n Number of general inequality constraints added = " << num_gen_inequ << endl; 02359 } 02360 02361 if( num_act_change > 0 && (int)output_level >= (int)OUTPUT_ACT_SET ) { 02362 *out 02363 << endl 02364 << right << setw(5) << "s" 02365 << right << setw(20) << "ij_act_change" 02366 << right << setw(10) << "bnds" << endl 02367 << right << setw(5) << "---" 02368 << right << setw(20) << "-------------" 02369 << right << setw(10) << "--------" << endl; 02370 for( size_type s = 1; s <= num_act_change; ++s ) 02371 *out 02372 << right << setw(5) << s 02373 << right << setw(20) << ij_act_change[s-1] 02374 << right << setw(10) << bnd_str(bnds[s-1]) << endl; 02375 } 02376 02377 // Initialize the active set. 
02378 try { 02379 act_set_.initialize( qp, num_act_change, ij_act_change, bnds 02380 , test_what == RUN_TESTS, salvage_init_schur_comp(), out, output_level ); 02381 // If this throws a WrongInteriaUpdateExecption it will be 02382 // thrown clean out of here! 02383 } 02384 catch( const MatrixSymAddDelUpdateable::SingularUpdateException& excpt ) { 02385 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 02386 *out 02387 << "\n*** Error in initializing schur complement\n" 02388 << excpt.what() << std::endl 02389 << "\nSetting num_act_change = 0 and proceeding with a cold start...\n"; 02390 } 02391 act_set_.initialize( qp, num_act_change = 0, ij_act_change, bnds 02392 , test_what == RUN_TESTS, salvage_init_schur_comp(), out, output_level ); 02393 } 02394 02395 // Compute vo = inv(Ko) * fo 02396 Workspace<value_type> vo_ws(wss,qp.n_R()+qp.m()); 02397 DVectorSlice vo(&vo_ws[0],vo_ws.size()); 02398 V_InvMtV( &vo, qp.Ko(), BLAS_Cpp::no_trans, qp.fo() ); 02399 02400 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 02401 *out 02402 << "\nSolution to the initial KKT system, vo = inv(Ko)*fo:\n\n||vo||inf = " << norm_inf(vo) << std::endl; 02403 } 02404 if( (int)output_level >= (int)OUTPUT_ITER_QUANTITIES ) { 02405 *out 02406 << "\nvo =\n" << vo; 02407 } 02408 02409 // //////////////////////////////////////////////// 02410 // Remove constraints until we are dual feasible. 02411 // 02412 // Here we are essentially performing a primal algorithm where we are only 02413 // removing constraints. If the dual variables are not dual feasible then 02414 // we will remove the one with the largest scaled dual feasibility 02415 // violation then compute the dual variables again. Eventually we 02416 // will come to a point where we have a dual feasible point. If 02417 // we have to, we will remove all of the inequality constraints and then 02418 // this will by default be a dual feasible point (i.e. we picked all the 02419 // wrong inequality constraints). 
02420 // 02421 // The difficulty here is in dealing with near degenerate constraints. 02422 // If a constraint is near degenerate then we would like to not drop 02423 // it since we may have to add it again later. 02424 // 02425 *iter = 0; 02426 *num_adds = 0; 02427 *num_drops = 0; 02428 // Print header for removing constraints 02429 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 02430 *out 02431 << "\n***" 02432 << "\n*** Removing constriants until we are dual feasible" 02433 << "\n***\n" 02434 << "\n*** Start by removing constraints within the Schur complement first\n"; 02435 } 02436 // Print summary header for max_viol and jd. 02437 if( (int)OUTPUT_ITER_SUMMARY <= (int)output_level 02438 && (int)output_level < (int)OUTPUT_ITER_QUANTITIES 02439 && num_act_change > 0 ) 02440 { 02441 *out 02442 << "\nIf max_viol > 0 and jd != 0 then constraint jd will be dropped from the active set\n\n" 02443 << right << setw(dbl_w) << "max_viol" 02444 << right << setw(5) << "sd" 02445 << right << setw(5) << "jd" << endl 02446 << right << setw(dbl_w) << "--------------" 02447 << right << setw(5) << "----" 02448 << right << setw(5) << "----" << endl; 02449 } 02450 for( int k = num_act_change; k > 0; --k, ++(*iter) ) { 02451 // Check runtime 02452 if( timeout_return(&timer,out,output_level) ) 02453 return MAX_RUNTIME_EXEEDED_FAIL; 02454 // Compute z_hat (z_hat = inv(S_hat)*(d_hat - U_hat'*vo)) 02455 DVectorSlice z_hat = act_set_.z_hat(); 02456 calc_z( act_set_.S_hat(), act_set_.d_hat(), act_set_.U_hat(), &vo 02457 , &z_hat ); 02458 // Determine if we are dual feasible. 02459 value_type max_viol = 0.0; // max scaled violation of dual feasability. 02460 size_type jd = 0; // indice of constraint with max scaled violation. 02461 DVectorSlice::iterator 02462 z_itr = z_hat.begin(); 02463 const size_type q_hat = act_set_.q_hat(); // Size of schur complement system. 
02464 // Print header for s, z_hat(s), bnd(s), viol, max_viol and jd 02465 if( (int)output_level >= (int)OUTPUT_ITER_QUANTITIES ) { 02466 *out 02467 << "\nLooking for a constraint with the maximum dual infeasiblity to drop...\n\n" 02468 << right << setw(5) << "s" 02469 << right << setw(dbl_w) << "z_hat" 02470 << right << setw(20) << "bnd" 02471 << right << setw(dbl_w) << "viol" 02472 << right << setw(dbl_w) << "max_viol" 02473 << right << setw(5) << "jd" << endl 02474 << right << setw(5) << "----" 02475 << right << setw(dbl_w) << "--------------" 02476 << right << setw(20) << "--------------" 02477 << right << setw(dbl_w) << "--------------" 02478 << right << setw(dbl_w) << "--------------" 02479 << right << setw(5) << "----" << endl; 02480 } 02481 for( int s = 1; s <= q_hat; ++s, ++z_itr ) { 02482 int j = act_set_.ij_map(s); 02483 if( j > 0 ) { 02484 // This is for an active constraint not initially in Ko so z_hat(s) = mu(j) 02485 EBounds bnd = act_set_.bnd(s); 02486 value_type viol; // Get the amount of violation and fix near degeneracies 02487 const int dual_feas_status 02488 = correct_dual_infeas( 02489 j,bnd,0.0,act_set_.constr_norm(s),dual_infeas_tol(),DEGENERATE_MULT 02490 ,out,output_level,false 02491 ,"z_hat(s)", &(*z_itr), &viol ); 02492 if( dual_feas_status < 0 && viol < max_viol ) { 02493 max_viol = viol; 02494 jd = j; 02495 } 02496 // Print row for s, z_hat(s), bnd(s), viol, max_viol and jd 02497 if( (int)output_level >= (int)OUTPUT_ITER_QUANTITIES ) { 02498 *out 02499 << right << setw(5) << s 02500 << right << setw(dbl_w) << *z_itr 02501 << right << setw(20) << bnd_str(bnd) 02502 << right << setw(dbl_w) << viol 02503 << right << setw(dbl_w) << max_viol 02504 << right << setw(5) << jd << endl; 02505 } 02506 } 02507 } 02508 // Print row of max_viol and jd 02509 if( (int)OUTPUT_ITER_SUMMARY <= (int)output_level 02510 && (int)output_level < (int)OUTPUT_ITER_QUANTITIES ) 02511 { 02512 *out 02513 << right << setw(dbl_w) << max_viol 02514 << right << setw(5) 
<< act_set_.s_map(jd) 02515 << right << setw(5) << jd << endl; 02516 } 02517 if( jd == 0 ) break; // We have a dual feasible point w.r.t. these constraints 02518 // Remove the active constraint with the largest scaled violation. 02519 act_set_.drop_constraint( jd, out, output_level, true, true ); 02520 ++(*iter); 02521 ++(*num_drops); 02522 // Print U_hat, S_hat and d_hat. 02523 if( (int)output_level >= (int)OUTPUT_ITER_QUANTITIES ) { 02524 *out 02525 << "\nPrinting active set after dropping constraint jd = " << jd << " ...\n"; 02526 dump_act_set_quantities( act_set_, *out ); 02527 } 02528 } 02529 02530 // Print how many constraints where removed from the schur complement 02531 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 02532 *out 02533 << "\nThere where " << (*num_drops) 02534 << " constraints dropped from the schur complement from the initial guess of the active set.\n"; 02535 } 02536 02537 // Compute v 02538 Workspace<value_type> v_ws(wss,qp.n_R()+qp.m()); 02539 DVectorSlice v(&v_ws[0],v_ws.size()); 02540 if( act_set_.q_hat() > 0 ) { 02541 calc_v( qp.Ko(), &qp.fo(), act_set_.U_hat(), act_set_.z_hat(), &v ); 02542 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 02543 *out 02544 << "\nSolution to the system; v = inv(Ko)*(fo - U_hat*z_hat):\n" 02545 << "\n||v||inf = " << norm_inf(v) << std::endl; 02546 } 02547 if( (int)output_level >= (int)OUTPUT_ITER_QUANTITIES ) { 02548 *out 02549 << "\nv =\n" << v; 02550 } 02551 } 02552 else { 02553 v = vo; 02554 } 02555 02556 // Set x 02557 set_x( act_set_, v, x ); 02558 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 02559 *out 02560 << "\nCurrent guess for unknowns x:\n\n||x||inf = " << norm_inf(*x) << std::endl; 02561 } 02562 if( (int)output_level >= (int)OUTPUT_ITER_QUANTITIES ) { 02563 *out 02564 << "\nx =\n" << *x; 02565 } 02566 02567 // 02568 // Determine if any initially fixed variables need to be freed by checking mu_D_hat. 
02569 // 02570 if( act_set_.q_D_hat() ) { 02571 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 02572 *out << "\n*** Second, free initially fixed variables not in Ko\n"; 02573 } 02574 const QPSchurPack::QP::i_x_X_map_t& i_x_X_map = act_set_.qp().i_x_X_map(); 02575 const QPSchurPack::QP::x_init_t& x_init = act_set_.qp().x_init(); 02576 // Print summary header for max_viol and jd. 02577 if( (int)OUTPUT_ITER_SUMMARY <= (int)output_level 02578 && (int)output_level < (int)OUTPUT_ITER_QUANTITIES ) 02579 { 02580 *out 02581 << "\nIf max_viol > 0 and id != 0 then the variable x(id) will be freed from its initial bound\n\n" 02582 << right << setw(dbl_w) << "max_viol" 02583 << right << setw(5) << "kd" 02584 << right << setw(5) << "id" << endl 02585 << right << setw(dbl_w) << "--------------" 02586 << right << setw(5) << "----" 02587 << right << setw(5) << "----" << endl; 02588 } 02589 size_type q_D_hat = act_set_.q_D_hat(); // This will be deincremented 02590 while( q_D_hat > 0 ) { 02591 // Check runtime 02592 if( timeout_return(&timer,out,output_level) ) 02593 return MAX_RUNTIME_EXEEDED_FAIL; 02594 // mu_D_hat = ??? 02595 DVectorSlice mu_D_hat = act_set_.mu_D_hat(); 02596 calc_mu_D( act_set_, *x, v, &mu_D_hat ); 02597 // Determine if we are dual feasible. 02598 value_type max_viol = 0.0; // max scaled violation of dual feasability. 02599 int id = 0; // indice of variable with max scaled violation. 
02600 size_type kd = 0; 02601 DVectorSlice::iterator 02602 mu_D_itr = mu_D_hat.begin(); 02603 // Print header for k, mu_D_hat(k), bnd, viol, max_viol and id 02604 if( (int)output_level >= (int)OUTPUT_ITER_QUANTITIES ) { 02605 *out 02606 << "\nLooking for a variable bound with the max dual infeasibility to drop...\n\n" 02607 << right << setw(5) << "k" 02608 << right << setw(dbl_w) << "mu_D_hat" 02609 << right << setw(20) << "bnd" 02610 << right << setw(dbl_w) << "viol" 02611 << right << setw(dbl_w) << "max_viol" 02612 << right << setw(5) << "id" << endl 02613 << right << setw(5) << "----" 02614 << right << setw(dbl_w) << "--------------" 02615 << right << setw(20) << "--------------" 02616 << right << setw(dbl_w) << "--------------" 02617 << right << setw(dbl_w) << "--------------" 02618 << right << setw(5) << "----" << endl; 02619 } 02620 for( int k = 1; k <= q_D_hat; ++k, ++mu_D_itr ) { 02621 int 02622 i = i_x_X_map(act_set_.l_fxfx(k)); 02623 EBounds 02624 bnd = x_init(i); 02625 value_type viol; // Get the amount of violation and fix near degeneracies 02626 const int dual_feas_status 02627 = correct_dual_infeas( 02628 i,bnd,0.0,1.0,dual_infeas_tol(),DEGENERATE_MULT 02629 ,out,output_level,false 02630 ,"mu_D_hat(k)", &(*mu_D_itr), &viol ); 02631 if( dual_feas_status < 0 && viol < max_viol ) { 02632 max_viol = viol; 02633 kd = k; 02634 id = i; 02635 } 02636 // Print row for k, mu_D_hat(k), bnd, viol, max_viol and jd 02637 if( (int)output_level >= (int)OUTPUT_ITER_QUANTITIES ) { 02638 *out 02639 << right << setw(5) << k 02640 << right << setw(dbl_w) << *mu_D_itr 02641 << right << setw(20) << bnd_str(bnd) 02642 << right << setw(dbl_w) << viol 02643 << right << setw(dbl_w) << max_viol 02644 << right << setw(5) << id << endl; 02645 } 02646 } 02647 // Print row of max_viol and id 02648 if( (int)OUTPUT_ITER_SUMMARY <= (int)output_level 02649 && (int)output_level < (int)OUTPUT_ITER_QUANTITIES ) 02650 { 02651 *out 02652 << right << setw(dbl_w) << max_viol 02653 << right << 
setw(5) << kd 02654 << right << setw(5) << id << endl; 02655 } 02656 if( id == 0 ) break; // We have a dual feasible point w.r.t. these variable bounds 02657 // Remove the active constraint with the largest scaled violation. 02658 act_set_.drop_constraint( -id, out, output_level, true, true ); 02659 ++(*iter); 02660 ++(*num_adds); 02661 --q_D_hat; 02662 // Print U_hat, S_hat and d_hat. 02663 if( (int)output_level >= (int)OUTPUT_ITER_QUANTITIES ) { 02664 *out 02665 << "\nPrinting active set after freeing initially fixed variable id = " << id << " ...\n"; 02666 dump_act_set_quantities( act_set_, *out ); 02667 } 02668 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 02669 *out 02670 << "\nSolution to the new KKT system; z_hat = inv(S_hat)*(d_hat - U_hat'*vo), v = inv(Ko)*(fo - U_hat*z_hat):\n"; 02671 } 02672 // Compute z_hat (z_hat = inv(S_hat)*(d_hat - U_hat'*vo)) 02673 calc_z( act_set_.S_hat(), act_set_.d_hat(), act_set_.U_hat(), &vo 02674 , &act_set_.z_hat() ); 02675 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 02676 *out 02677 << "\n||z_hat||inf = " << norm_inf(act_set_.z_hat()) << std::endl; 02678 } 02679 if( (int)output_level >= (int)OUTPUT_ITER_QUANTITIES ) { 02680 *out 02681 << "\nz_hat =\n" << act_set_.z_hat(); 02682 } 02683 // Compute v 02684 calc_v( qp.Ko(), &qp.fo(), act_set_.U_hat(), act_set_.z_hat(), &v ); 02685 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 02686 *out 02687 << "\n||v||inf = " << norm_inf(v) << std::endl; 02688 } 02689 if( (int)output_level >= (int)OUTPUT_ITER_QUANTITIES ) { 02690 *out 02691 << "\nv =\n" << v; 02692 } 02693 // Set x 02694 set_x( act_set_, v, x ); 02695 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 02696 *out 02697 << "\nCurrent guess for unknowns x:\n\n||x||inf = " << norm_inf(*x) << std::endl; 02698 } 02699 if( (int)output_level >= (int)OUTPUT_ITER_QUANTITIES ) { 02700 *out 02701 << "\nx =\n" << *x; 02702 } 02703 } 02704 } 02705 02706 // Print how many initially fixed variables where freed 02707 if( 
(int)output_level >= (int)OUTPUT_BASIC_INFO ) { 02708 *out 02709 << "\nThere where " << (*num_adds) 02710 << " initially fixed variables not in Ko that were freed and added to the schur complement.\n"; 02711 } 02712 02713 // Run the primal dual algorithm 02714 size_type iter_refine_num_resid = 0, iter_refine_num_solves = 0; 02715 solve_return = qp_algo( 02716 PICK_VIOLATED_CONSTRAINT 02717 ,out, output_level, test_what 02718 ,vo, &act_set_, &v 02719 ,x, iter, num_adds, num_drops 02720 ,&iter_refine_num_resid, &iter_refine_num_solves 02721 ,&timer 02722 ); 02723 02724 if( solve_return != OPTIMAL_SOLUTION ) 02725 set_x( act_set_, v, x ); 02726 02727 // Correct the sign of near degenerate multipliers in case it has not been done yet! 02728 if( solve_return != SUBOPTIMAL_POINT && act_set_.q_hat() ) { 02729 const size_type q_hat = act_set_.q_hat(); 02730 DVectorSlice z_hat = act_set_.z_hat(); 02731 for( size_type s = 1; s <= q_hat; ++s ) { 02732 const int j = act_set_.ij_map(s); 02733 value_type viol = 0.0; 02734 const EBounds bnd = act_set_.bnd(s); 02735 if(bnd == FREE) 02736 continue; 02737 const int dual_feas_status 02738 = correct_dual_infeas( 02739 j,bnd,0.0,1.0,dual_infeas_tol(),DEGENERATE_MULT 02740 ,out,output_level,true,"z_hat(s)",&z_hat(s),&viol ); 02741 if( dual_feas_status < 0 ) { 02742 solve_return = SUBOPTIMAL_POINT; 02743 break; 02744 } 02745 } 02746 } 02747 if( solve_return != SUBOPTIMAL_POINT && act_set_.q_D_hat() ) { 02748 const GenPermMatrixSlice& Q_XD_hat = act_set_.Q_XD_hat(); 02749 DVectorSlice mu_D_hat = act_set_.mu_D_hat(); 02750 const QPSchurPack::QP::x_init_t& x_init = qp.x_init(); 02751 for( GenPermMatrixSlice::const_iterator itr = Q_XD_hat.begin(); itr != Q_XD_hat.end(); ++itr ) { 02752 const int i = itr->row_i(); 02753 value_type viol = 0.0; 02754 const EBounds bnd = x_init(i); 02755 TEUCHOS_TEST_FOR_EXCEPT( !( bnd != FREE ) ); 02756 const int dual_feas_status 02757 = correct_dual_infeas( 02758 i,bnd,0.0,1.0,dual_infeas_tol(),DEGENERATE_MULT 
02759 ,out,output_level,true,"mu_D_hat(k)",&mu_D_hat(itr->col_j()),&viol ); 02760 if( dual_feas_status < 0 ) { 02761 solve_return = SUBOPTIMAL_POINT; 02762 break; 02763 } 02764 } 02765 } 02766 02767 set_multipliers( act_set_, v, mu, lambda, lambda_breve ); 02768 02769 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 02770 switch(solve_return) { 02771 case OPTIMAL_SOLUTION: 02772 *out 02773 << "\n*** Solution found!\n"; 02774 break; 02775 case MAX_ITER_EXCEEDED: 02776 *out 02777 << "\n*** Maximum iterations exceeded!\n"; 02778 break; 02779 case MAX_RUNTIME_EXEEDED_DUAL_FEAS: 02780 *out 02781 << "\n*** Maximum runtime exceeded!\n"; 02782 break; 02783 case MAX_ALLOWED_STORAGE_EXCEEDED: 02784 *out 02785 << "\n*** The maxinum size of the schur complement has been exceeded!\n"; 02786 break; 02787 case INFEASIBLE_CONSTRAINTS: 02788 *out 02789 << "\n*** The constraints are infeasible!\n"; 02790 break; 02791 case NONCONVEX_QP: 02792 *out 02793 << "\n*** The QP appears to be nonconvex but will return current point anyway!\n"; 02794 break; 02795 case DUAL_INFEASIBILITY: 02796 *out 02797 << "\n*** The dual variables are infeasible (numerical roundoff?)!\n"; 02798 break; 02799 case SUBOPTIMAL_POINT: 02800 *out 02801 << "\n*** The current point is suboptimal but we will return it anyway!\n"; 02802 break; 02803 default: 02804 TEUCHOS_TEST_FOR_EXCEPT(true); 02805 } 02806 *out << "\nNumber of QP iteratons = " << *iter; 02807 *out << "\nNumber of iterative refinement residual calculations = " << iter_refine_num_resid; 02808 *out << "\nNumber of iterative refinement solves = " << iter_refine_num_solves << endl; 02809 *out << "\n||x||inf = " << norm_inf(*x); 02810 *out << "\nmu.nz() = " << mu->nz(); 02811 *out << "\nmax(|mu(i)|) = " << norm_inf((*mu)()); 02812 *out << "\nmin(|mu(i)|) = " << min_abs((*mu)()); 02813 if(lambda) 02814 *out 02815 << "\nmax(|lambda(i)|) = " << norm_inf(*lambda) 02816 << "\nmin(|lambda(i)|) = " << min_abs(*lambda); 02817 if(lambda_breve) 02818 *out 02819 
<< "\nlambda_breve.nz() = " << lambda_breve->nz() 02820 << "\nmax(|lambda_breve(i)|) = " << norm_inf((*lambda_breve)()) 02821 << "\nmin(|lambda_breve(i)|) = " << min_abs((*lambda_breve)()); 02822 *out << std::endl; 02823 } 02824 // Print Solution x, lambda and mu 02825 if( (int)output_level >= (int)OUTPUT_ITER_QUANTITIES ) { 02826 *out << "\nx =\n" << (*x)(); 02827 *out << "\nmu =\n" << (*mu)(); 02828 if(lambda) 02829 *out << "\nlambda =\n" << (*lambda)(); 02830 if(lambda_breve) 02831 *out << "\nlambda_breve =\n" << (*lambda_breve)(); 02832 } 02833 // Print 'goodbye' header. 02834 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 02835 *out 02836 << "\n*** Leaving QPSchur::solve_qp(...)\n"; 02837 } 02838 02839 return solve_return; 02840 } 02841 /* Trivial const accessor: exposes the solver's internally stored active set (act_set_) read-only; no side effects. */ 02842 const QPSchur::ActiveSet& QPSchur::act_set() const 02843 { 02844 return act_set_; 02845 } 02846 02847 // protected member functions for QPSchur 02848 /* qp_algo(): the primal-dual iteration driver invoked from solve_qp() above ("Run the primal dual algorithm"); its full definition continues beyond this excerpt. */ 02849 QPSchur::ESolveReturn QPSchur::qp_algo( 02850 EPDSteps next_step 02851 , std::ostream *out, EOutputLevel output_level, ERunTests test_what 02852 , const DVectorSlice& vo, ActiveSet* act_set, DVectorSlice* v 02853 , DVectorSlice* x, size_type* iter, size_type* num_adds, size_type* num_drops 02854 , size_type* iter_refine_num_resid, size_type* iter_refine_num_solves 02855 , StopWatchPack::stopwatch* timer 02856 ) 02857 { 02858 using std::setw; 02859 using std::endl; 02860 using std::right; 02861 using BLAS_Cpp::no_trans; 02862 using BLAS_Cpp::trans; 02863 using DenseLinAlgPack::dot; 02864 using DenseLinAlgPack::norm_inf; 02865 using DenseLinAlgPack::Vt_S; 02866 using DenseLinAlgPack::V_mV; 02867 using DenseLinAlgPack::Vp_StV; 02868 using DenseLinAlgPack::V_VmV; 02869 using LinAlgOpPack::Vp_V; 02870 using LinAlgOpPack::V_StV; 02871 using LinAlgOpPack::V_StMtV; 02872 using AbstractLinAlgPack::dot; 02873 using AbstractLinAlgPack::norm_inf; 02874 using AbstractLinAlgPack::V_MtV; 02875 using AbstractLinAlgPack::EtaVector; 02876 using LinAlgOpPack::V_MtV; 02877 using 
LinAlgOpPack::V_InvMtV; 02878 using LinAlgOpPack::Vp_StMtV; 02879 using LinAlgOpPack::Vp_StPtMtV; 02880 using Teuchos::Workspace; 02881 Teuchos::WorkspaceStore* wss = Teuchos::get_default_workspace_store().get(); 02882 02883 // Print header for "Starting Primal-Dual Iterations" 02884 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 02885 *out 02886 << "\n*** Starting Primal-Dual Iterations ***\n"; 02887 } 02888 02889 const int dbl_min_w = 20; 02890 const int dbl_w = (out ? my_max(dbl_min_w,int(out->precision()+8)): 20 ); 02891 02892 try { 02893 02894 QPSchurPack::QP 02895 &qp = act_set->qp(); 02896 const size_type 02897 n = qp.n(), 02898 n_R = qp.n_R(), 02899 m = qp.m(), 02900 m_breve = qp.constraints().m_breve(); 02901 02902 Workspace<value_type> 02903 v_plus_ws(wss,v->dim()), 02904 z_hat_plus_ws(wss,(n-m)+(n-n_R)), 02905 p_v_ws(wss,v->dim()); 02906 DVectorSlice 02907 v_plus(&v_plus_ws[0],v_plus_ws.size()), 02908 z_hat_plus, 02909 p_v(&p_v_ws[0],p_v_ws.size()); 02910 02911 const value_type 02912 inf = std::numeric_limits<value_type>::max(); 02913 size_type itr; // move to somewhere else? 02914 02915 // Put these here because they need to be remembered between iterations if a linearly 02916 // dependent constriant is dropped. 02917 size_type ja = 0; // + indice of violated constraint to add to active set 02918 size_type last_ja = 0; // + last_ja to be added 02919 value_type con_ja_val; // value of violated constraint. 02920 value_type b_a; // value of the violated bound 02921 value_type norm_2_constr; // norm of violated constraint 02922 EBounds bnd_ja; // bound of constraint ja which is violated. 02923 bool can_ignore_ja; // true if we can ignore a constraint if it is LD. 02924 bool assume_lin_dep_ja; 02925 value_type gamma_plus; // used to store the new multipler value for the added 02926 // constraint. 
02927 const int summary_lines_counter_max = 15; 02928 int summary_lines_counter = 0; 02929 long int jd = 0; // + indice of constraint to delete from active set. 02930 // - indice of intially fixed variable to be freed 02931 long int last_jd = 0; // Last jd change to the active set 02932 value_type t_P; // Primal step length (constraint ja made active) 02933 value_type t_D; // Dual step length ( longest step without violating dual 02934 // feasibility of currently active constraints ). 02935 value_type beta; // +1 if multiplier of constraint being added is positive 02936 // -1 if multiplier of constraint being added is negative. 02937 bool warned_degeneracy = false; // Will warn the user if degeneracy 02938 // is detected. 02939 value_type dual_infeas_scale = 1.0; // Scaling for determining if a 02940 // Lagrange multiplier is near degenerate. 02941 bool return_to_init_fixed = false; // True if the constraint being added 02942 // to the active set is a variable returning to its orginally 02943 // fixed variable bound. 02944 bool using_iter_refinement = false; // Will be set to true if instability detected 02945 02946 for( itr = 0; itr <= max_iter_; ++itr, ++(*iter) ) { 02947 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 02948 *out 02949 << "\n************************************************" 02950 << "\n*** qp_iter = " << itr 02951 << "\n*** q_hat = " << act_set->q_hat() << std::endl; 02952 } 02953 bool schur_comp_update_failed = false; 02954 switch( next_step ) { // no break; statements in this switch statement. 02955 case PICK_VIOLATED_CONSTRAINT: { 02956 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 02957 *out 02958 << "\n*** PICK_VIOLATED_CONSTRAINT\n"; 02959 } 02960 // Check runtime 02961 if( timeout_return(timer,out,output_level) ) 02962 return MAX_RUNTIME_EXEEDED_DUAL_FEAS; 02963 // Save the indice of the last constriant to be added! 02964 last_ja = ja; 02965 // Set parts of x that are not currently fixed and may have changed. 
02966 // Also, we want set specifially set those variables that where 02967 // initially free and then latter fixed to their bounds so that 02968 // they will not be seen as violated. 02969 set_x( *act_set, *v, x ); 02970 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 02971 *out 02972 << "\n||x||inf = " << norm_inf(*x) << std::endl; 02973 } 02974 if( (int)output_level >= (int)OUTPUT_ITER_QUANTITIES ) { 02975 *out 02976 << "\nx =\n" << *x; 02977 } 02978 if( test_what == RUN_TESTS ) { 02979 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 02980 *out 02981 << "\nChecking current iteration quantities ...\n"; 02982 } 02983 const char 02984 sep_line[] = "\n--------------------------------------------------------------------------------\n"; 02985 // AbstractLinAlgPack::TestingPack::CompareDenseVectors comp_v; 02986 02987 // 02988 // Check the optimality conditions of the augmented KKT system! 02989 // 02990 02991 // ToDo: Implement 02992 02993 // 02994 // Check mu_D_hat 02995 // 02996 if( act_set->q_D_hat() ) { 02997 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 02998 *out 02999 << sep_line 03000 << "\nComputing: mu_D_hat_calc = -Q_XD_hat'*g - Q_XD_hat'*G*x\n" 03001 << " - Q_XD_hat'*A*v(n_R+1:n_R+m) - Q_XD_hat'*A_bar*P_plus_hat*z_hat ...\n"; 03002 } 03003 Workspace<value_type> mu_D_hat_calc_ws( wss, act_set->q_D_hat() ); 03004 DVectorSlice mu_D_hat_calc( &mu_D_hat_calc_ws[0], mu_D_hat_calc_ws.size() ); 03005 calc_mu_D( *act_set, *x, *v, &mu_D_hat_calc ); 03006 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 03007 *out 03008 << "\n||mu_D_hat_calc||inf = " << norm_inf(mu_D_hat_calc) << std::endl; 03009 } 03010 if( (int)output_level >= (int)OUTPUT_ACT_SET ) { 03011 *out 03012 << "\nmu_D_hat_calc =\n" << mu_D_hat_calc; 03013 } 03014 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 03015 *out 03016 << "\nChecking mu_D_hat_calc == mu_D_hat\n"; 03017 } 03018 DVector mu_D_hat_diff(mu_D_hat_calc.dim()); 03019 LinAlgOpPack::V_VmV( &mu_D_hat_diff(), 
mu_D_hat_calc(), act_set->mu_D_hat() ); 03020 const value_type 03021 mu_D_hat_err = norm_inf(mu_D_hat_diff()) / (1.0 + norm_inf(mu_D_hat_calc())); 03022 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 03023 *out 03024 << "\n||mu_D_hat_calc-mu_D_hat||inf/(1.0+||mu_D_hat_calc||inf) = " 03025 << mu_D_hat_err << std::endl; 03026 } 03027 TEUCHOS_TEST_FOR_EXCEPTION( 03028 mu_D_hat_err >= error_tol(), TestFailed 03029 ,"QPSchur::qp_algo(...) : Error, " 03030 "||mu_D_hat_calc-mu_D_hat||inf/(1.0+||mu_D_hat_calc||inf) = " 03031 << mu_D_hat_err << " >= error_tol = " << error_tol() 03032 ); 03033 if( mu_D_hat_err >= warning_tol() && (int)output_level >= (int)OUTPUT_ACT_SET ) { 03034 *out 03035 << "\nWarning! ||mu_D_hat_calc-mu_D_hat||inf/(1.0+||mu_D_hat_calc||inf) = " 03036 << mu_D_hat_err << " >= warning_tol = " << warning_tol() << std::endl; 03037 } 03038 } 03039 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 03040 *out 03041 << sep_line; 03042 } 03043 } 03044 act_set->qp().constraints().pick_violated( *x, &ja, &con_ja_val 03045 , &b_a, &norm_2_constr, &bnd_ja, &can_ignore_ja ); 03046 assume_lin_dep_ja = false; // Assume this initially. 
03047 if( ja > 0 && act_set->is_init_fixed(ja) && qp.x_init()(ja) == bnd_ja ) 03048 return_to_init_fixed = true; 03049 else 03050 return_to_init_fixed = false; 03051 // Print ja, bnd_ja, can_ignore_ja 03052 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 03053 *out 03054 << "\nja = " << ja << endl; 03055 if(ja) { 03056 *out 03057 << "\ncon_ja_val = " << con_ja_val 03058 << "\nb_a = " << b_a 03059 << "\nnorm_2_constr = " << norm_2_constr 03060 << "\nbnd_ja = " << bnd_str(bnd_ja) 03061 << "\ncan_ignore_ja = " << bool_str(can_ignore_ja) 03062 << "\nreturn_to_init_fixed = " << bool_str(return_to_init_fixed) 03063 << endl 03064 ; 03065 } 03066 } 03067 // Print header for itr, nact, change (ADD, DROP), indice (ja, jb) 03068 //, bound (LOWER, UPPER, EQUALITY), violation (primal or dual), rank (LD,LI) 03069 if( (int)output_level == (int)OUTPUT_ITER_SUMMARY ) { 03070 if( summary_lines_counter <= 0 ) { 03071 summary_lines_counter = summary_lines_counter_max; 03072 *out 03073 << endl 03074 << right << setw(6) << "itr" 03075 << right << setw(6) << "qhat" 03076 << right << setw(6) << "q(+)" 03077 << right << setw(6) << "q_F" 03078 << right << setw(6) << "q_C" 03079 << right << setw(6) << "q_D" 03080 << right << setw(8) << "change" 03081 << right << setw(9) << "type" 03082 << right << setw(6) << "j" 03083 << right << setw(10) << "bnd" 03084 << right << setw(dbl_w) << "viol, p_z(jd)" 03085 << right << setw(6) << "rank" << endl 03086 << right << setw(6) << "----" 03087 << right << setw(6) << "----" 03088 << right << setw(6) << "----" 03089 << right << setw(6) << "----" 03090 << right << setw(6) << "----" 03091 << right << setw(6) << "----" 03092 << right << setw(8) << "------" 03093 << right << setw(9) << "-------" 03094 << right << setw(6) << "----" 03095 << right << setw(10) << "--------" 03096 << right << setw(dbl_w) << "--------------" 03097 << right << setw(6) << "----" << endl; 03098 } 03099 } 03100 // Print first part of row for itr, q_hat, q(+), q_D, q_C, q_F, change, 
type, index, bound, violation 03101 if( (int)output_level == (int)OUTPUT_ITER_SUMMARY ) { 03102 *out 03103 << right << setw(6) << itr // itr 03104 << right << setw(6) << act_set->q_hat() // q_hat 03105 << right << setw(6) << act_set->q_plus_hat() // q(+) 03106 << right << setw(6) << act_set->q_F_hat() // q_F 03107 << right << setw(6) << act_set->q_C_hat() // q_C 03108 << right << setw(6) << act_set->q_D_hat() // q_D 03109 << right << setw(8) << ( ja ? "ADD" : "-" ) // change 03110 << right << setw(9); // type 03111 if( ja == 0 ) { 03112 *out << "-"; 03113 } 03114 else if( act_set->is_init_fixed(ja) ) { 03115 if( bnd_ja == qp.x_init()(ja) ) 03116 *out << "X_F_X"; 03117 else 03118 *out << "X_F_C"; 03119 } 03120 else if( ja <= n ) { 03121 *out << "R_X"; 03122 } 03123 else { 03124 *out << "GEN"; 03125 } 03126 *out 03127 << right << setw(6) << ja // index 03128 << right << setw(10) << ( ja ? bnd_str(bnd_ja) : "-" ) // bound 03129 << right << setw(dbl_w); // violation 03130 if(ja) 03131 *out << (con_ja_val - b_a); 03132 else 03133 *out << "-"; 03134 if(!ja) 03135 *out << right << setw(6) << "-" << endl; // rank for last iteration 03136 } 03137 bool found_solution = false; 03138 const size_type sa = act_set->s_map(ja); 03139 value_type scaled_viol = 0.0; 03140 if( ja == 0 ) { 03141 found_solution = true; 03142 } 03143 else if( sa != 0 || ( act_set->is_init_fixed(ja) && act_set->s_map(-ja) == 0 ) ) { 03144 const bool is_most_violated 03145 = (qp.constraints().pick_violated_policy() == QPSchurPack::Constraints::MOST_VIOLATED); 03146 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 03147 *out 03148 << "\n\nWarning, we have picked the constriant a(" << ja << ") with violation\n" 03149 << "(a(ja)'*x - b_a) = (" << con_ja_val << " - " << b_a << ") = " << (con_ja_val - b_a) 03150 << "\nto add to the active set but it is already part of the active set.\n"; 03151 } 03152 const EBounds act_bnd = ( sa != 0 ? 
act_set->bnd(sa) : qp.x_init()(ja) ); 03153 if( act_bnd != bnd_ja ) { 03154 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 03155 *out 03156 << "However, this is not the same bound as the active bound!\n"; 03157 } 03158 const value_type 03159 act_b_a = qp.constraints().get_bnd(ja,act_bnd); 03160 if( act_bnd == LOWER && act_b_a > b_a || act_bnd == UPPER && act_b_a < b_a ) { 03161 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 03162 *out 03163 << "\nError, c_L_bar(" << ja <<") = " << (act_b_a > b_a ? act_b_a : b_a) 03164 << " > c_U_bar(" << ja << ") = " << (act_b_a < b_a ? act_b_a : b_a) << ".\n"; 03165 } 03166 } 03167 else { 03168 TEUCHOS_TEST_FOR_EXCEPT(true); // Should not happen! 03169 } 03170 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 03171 *out 03172 << "The constraints are infeasible! Terminating the QP algorithm!\n"; 03173 } 03174 return INFEASIBLE_CONSTRAINTS; 03175 } 03176 else { 03177 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 03178 *out 03179 << "This is the active bound so this is an indication of instability\n" 03180 << "in the calculations.\n"; 03181 } 03182 } 03183 summary_lines_counter = 0; 03184 if( !is_most_violated ) { 03185 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 03186 *out 03187 << "\nThis is not the most violated constraint so set\n" 03188 << "pick_violated_policy = MOST_VIOLATED ...\n"; 03189 } 03190 summary_lines_counter = 0; 03191 qp.constraints().pick_violated_policy(QP::Constraints::MOST_VIOLATED); 03192 next_step = PICK_VIOLATED_CONSTRAINT; 03193 continue; // Go back and pick the most violated constraint 03194 } 03195 else if( !using_iter_refinement ) { 03196 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 03197 *out 03198 << "\nThis is the most violated constraint but we are currently not using\n" 03199 << "iterative refinement so let's switch it on ...\n"; 03200 } 03201 summary_lines_counter = 0; 03202 EIterRefineReturn status = iter_refine( 03203 *act_set, out, output_level, -1.0, &qp.fo(), 
-1.0, act_set->q_hat() ? &act_set->d_hat() : NULL 03204 ,v, act_set->q_hat() ? &act_set->z_hat() : NULL 03205 ,iter_refine_num_resid, iter_refine_num_solves 03206 ); 03207 if( status == ITER_REFINE_IMPROVED || status == ITER_REFINE_CONVERGED ) { 03208 using_iter_refinement = true; // Use iterative refinement from now on! 03209 next_step = PICK_VIOLATED_CONSTRAINT; 03210 continue; // Now go back 03211 } 03212 else { 03213 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 03214 *out 03215 << "\nError, iterative refinement was not allowed or failed to improve the residuals.\n" 03216 << "Terminating the QP algorithm ...\n"; 03217 } 03218 return SUBOPTIMAL_POINT; 03219 } 03220 } 03221 else { 03222 scaled_viol = std::fabs(con_ja_val - b_a)/(1.0 + std::fabs(con_ja_val)); 03223 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 03224 *out 03225 << "\n\nThis is the most violated constraint, we are using iterative refinement and\n" 03226 << "|a("<<ja<<")'*x - b_a| / (1 + |a("<<ja<<")'*x|) = |"<<con_ja_val<<" - "<<b_a 03227 << "| / (1 + |"<<con_ja_val<<"|) = "<<scaled_viol<< (scaled_viol<loose_feas_tol()?" < ":" > ") 03228 << "loose_feas_tol = "<<loose_feas_tol(); 03229 } 03230 if( scaled_viol < loose_feas_tol() ) { 03231 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 03232 *out 03233 << "\nTerminating the algorithm with a near optimal solution!\n"; 03234 } 03235 found_solution = true; 03236 } 03237 else { 03238 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 03239 *out 03240 << "\nError! 
The QP algorithm is terminated!\n"; 03241 } 03242 return SUBOPTIMAL_POINT; 03243 } 03244 } 03245 } 03246 else if( act_set->all_dof_used_up() 03247 && (scaled_viol = std::fabs(con_ja_val - b_a)/(1.0 + std::fabs(con_ja_val))) < feas_tol() ) 03248 { 03249 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 03250 *out 03251 << "\nWith all the dof used up, the violated inequality constriant " 03252 << "a("<< ja << ")'*x statisfies:\n" 03253 << "|a("<<ja<<")'*x - b_a| / (1 + |a("<<ja<<")'*x|) = |" << con_ja_val << " - " << b_a 03254 << "| / (1 + |"<<con_ja_val<<"|) = "<<scaled_viol<<" < feas_tol = "<<feas_tol(); 03255 } 03256 if( act_set->qp().constraints().pick_violated_policy() == QP::Constraints::MOST_VIOLATED ) { 03257 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 03258 *out 03259 << "\nThis is the most violated constraint so we will consider this\n" 03260 << "a near degenerate constraint so we are done!\n"; 03261 } 03262 found_solution = true; 03263 } 03264 else { 03265 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 03266 *out 03267 << "\nThis is not the most violated constraint so set pick_violated_policy = " 03268 << "MOST_VIOLATED and pick another violated constraint ....\n"; 03269 } 03270 act_set->qp().constraints().pick_violated_policy(QP::Constraints::MOST_VIOLATED); 03271 next_step = PICK_VIOLATED_CONSTRAINT; 03272 continue; // Go back and pick the most violated constraint 03273 } 03274 } 03275 if(found_solution) { 03276 // 03277 // Solution found! All of the inequality constraints are satisfied! 03278 // 03279 // Use iterative refinement if we need to 03280 if( iter_refine_at_solution() && !using_iter_refinement ) { 03281 // The user has requested iterative refinement at the solution 03282 // and we have not been using iterative refinement up to this point. 
03283 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 03284 *out 03285 << "\nWe think we have found the solution and are not currently using iterative refinement\n" 03286 << "and iter_refine_at_solution==true so perform iterative refinement ...\n"; 03287 } 03288 using_iter_refinement = true; 03289 EIterRefineReturn status = iter_refine( 03290 *act_set, out, output_level, -1.0, &qp.fo(), -1.0, act_set->q_hat() ? &act_set->d_hat() : NULL 03291 ,v, act_set->q_hat() ? &act_set->z_hat() : NULL 03292 ,iter_refine_num_resid, iter_refine_num_solves 03293 ); 03294 switch(status) { 03295 case ITER_REFINE_ONE_STEP: 03296 case ITER_REFINE_IMPROVED: 03297 case ITER_REFINE_CONVERGED: 03298 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 03299 *out 03300 << "\nIterative refinement may have altered the unknowns so go back and look for another violated constraint ...\n"; 03301 } 03302 summary_lines_counter = 0; 03303 next_step = PICK_VIOLATED_CONSTRAINT; 03304 continue; 03305 case ITER_REFINE_NOT_PERFORMED: 03306 case ITER_REFINE_NOT_NEEDED: 03307 case ITER_REFINE_NOT_IMPROVED: 03308 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 03309 *out 03310 << "\nIterative refinement did not alter the unknowns so exit with this solution...\n"; 03311 } 03312 set_x( *act_set, *v, x ); 03313 break; 03314 default: 03315 TEUCHOS_TEST_FOR_EXCEPT(true); // Local programming error only! 03316 } 03317 } 03318 if( iter_refine_at_solution() || using_iter_refinement ) { 03319 // The user has requested iterative refinement at the solution 03320 // or we have been using iterative refinement so we should 03321 // compute the lagrange multipliers for the initially fixed 03322 // variables that are still fixed. 
03323 if( act_set->q_D_hat() ) { 03324 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 03325 *out 03326 << "\nRecomputing final mu_D_hat at the solution ...\n"; 03327 } 03328 calc_mu_D( *act_set, *x, *v, &act_set->mu_D_hat() ); 03329 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 03330 *out 03331 << "\n||mu_D_hat||inf = " << norm_inf(act_set->mu_D_hat()) << std::endl; 03332 } 03333 if( (int)output_level >= (int)OUTPUT_ITER_QUANTITIES ) { 03334 *out 03335 << "\nmu_D_hat =\n" << act_set->mu_D_hat(); 03336 } 03337 } 03338 } 03339 return OPTIMAL_SOLUTION; // current point is optimal. 03340 } 03341 } 03342 case UPDATE_ACTIVE_SET: { 03343 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 03344 *out 03345 << "\n*** UPDATE_ACTIVE_SET\n"; 03346 } 03347 ++(*num_adds); 03348 if( act_set->all_dof_used_up() || act_set->is_init_fixed(ja) ) { 03349 // All of the degrees of freedom are currently used up so we must 03350 // assume that we must remove one of these currently active 03351 // constraints and replace it with the violated constraint. 03352 // In this case we know that this will make the schur 03353 // complement singular so let's just skip the update 03354 // and set this now. We also may be here if we are fixing 03355 // an initially fixed variable to some bound. 
03356 assume_lin_dep_ja = true; 03357 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 03358 if(act_set->all_dof_used_up()) { 03359 *out 03360 << "\nAll of the degrees of freedom are used up so " 03361 << "the constraint ja must be linearly dependent.\n" 03362 << "The augmented KKT system is definitely singular!\n"; 03363 } 03364 else { 03365 *out 03366 << "\nThis is an initially fixed variable that was freed and " 03367 << "now is being fixed again.\n" 03368 << "The augmented KKT system could be singular or nonsingular, " 03369 << "we don't know at this point.\n"; 03370 } 03371 *out 03372 << "\nThe schur complement for the new KKT system will not be " 03373 << "updated yet in case it is singular!\n"; 03374 } 03375 } 03376 else { 03377 assume_lin_dep_ja = false; 03378 try { 03379 if(act_set->add_constraint( ja, bnd_ja, false, out, output_level, true )) 03380 summary_lines_counter = 0; 03381 else { 03382 // Print end of row for rank if the right print level 03383 if( (int)output_level == (int)OUTPUT_ITER_SUMMARY ) { 03384 *out << right << setw(6) << "LI" << endl; 03385 out->flush(); 03386 --summary_lines_counter; 03387 } 03388 } 03389 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 03390 *out << "\nNew KKT system is nonsingular! (linearly independent (LI) constraints)\n"; 03391 } 03392 if( (int)output_level >= (int)OUTPUT_ITER_QUANTITIES ) { 03393 *out 03394 << "\nPrinting active set after adding constraint ja = " << ja 03395 << " ...\n"; 03396 dump_act_set_quantities( *act_set, *out ); 03397 } 03398 } 03399 catch( const MatrixSymAddDelUpdateable::SingularUpdateException& excpt ) { 03400 // Constraint really is linearly dependent. 
03401 if( (int)output_level >= (int)OUTPUT_ITER_SUMMARY ) { 03402 *out 03403 << "\n\nSchur complement update failed:\n" 03404 << "(" << excpt.what() << ")\n" 03405 << "\nConstraint ja = " << ja << " appears to be linearly dependent!\n\n"; 03406 } 03407 summary_lines_counter = 0; 03408 if( !(act_set->q_D_hat() + act_set->q_plus_hat()) ) { 03409 // Print omsg. 03410 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 03411 *out 03412 << "\nQPSchur::qp_algo(...) : " 03413 << "Error, constraint j = "<< ja << " is linearly dependent " 03414 << "and there are no other constraints to drop.\n" 03415 << "The QP must be infeasible\n"; 03416 } 03417 return INFEASIBLE_CONSTRAINTS; 03418 } 03419 assume_lin_dep_ja = true; 03420 schur_comp_update_failed = true; 03421 } 03422 catch( const MatrixSymAddDelUpdateable::WrongInertiaUpdateException& excpt ) { 03423 // Reduced Hessian has the wrong inertia 03424 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 03425 *out 03426 << "\n\nSchur complement appears to have the wrong inertia:\n" 03427 << "(" << excpt.what() << ")\n" 03428 << "\nThe QP appears to be nonconvex.\n" 03429 << "\nWe have no choice but to terminate the primal-dual QP algorithm!\n"; 03430 } 03431 return NONCONVEX_QP; 03432 } 03433 } 03434 if( assume_lin_dep_ja && can_ignore_ja ) { 03435 act_set->qp().constraints().ignore( ja ); 03436 next_step = PICK_VIOLATED_CONSTRAINT; 03437 continue; 03438 } 03439 // Infer the sign of the multiplier for the new constraint being added 03440 beta = ( con_ja_val > b_a ? 
+1.0 : -1.0 ); 03441 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 03442 *out 03443 << "\nbeta = " << beta << endl; 03444 } 03445 } 03446 case COMPUTE_SEARCH_DIRECTION: { 03447 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 03448 *out 03449 << "\n*** COMPUTE_SEARCH_DIRECTION\n"; 03450 } 03451 const EtaVector e_ja( ja, n + m_breve ); 03452 if( assume_lin_dep_ja ) { 03453 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 03454 *out 03455 << "\nThe KKT system for the trial active set is assumed or known to be singular!\n" 03456 << "\nComputing the steps from:" 03457 << "\np_z_hat = inv(S_hat)*(-v_a+U_hat'*inv(Ko)*u_a), p_v = inv(Ko)*(-u_a-U_hat*p_z_hat) ...\n"; 03458 } 03459 // 03460 // The schur complement is not updated so we must compute 03461 // p_z_hat and p_v explicitly. 03462 // 03463 // If all the degrees of freedom are used up then we know that the step for 03464 // the primal variables will be zero. However, if m > 0 then v and p_v also 03465 // contain terms for the Lagrange multipliers for the equality constriants 03466 // but we don't need to compute these during the algorithm. 03467 // Therefore we can just set p_v = 0 and save a solve with Ko. 03468 // If the QP is feasible then a constraint will be dropped, the 03469 // KKT system will be updated and then v_plus will be computed 03470 // at the next iteration and be used to compute v so all is good. 03471 // 03472 const bool all_dof_used_up = act_set->all_dof_used_up(); 03473 if( act_set->is_init_fixed(ja) ) { 03474 // 03475 // Fix a varaible that was fixed and then freed. 
03476 // 03477 // [ Ko U_hat ] [ p_v ] = [ 0 ] 03478 // [ U_hat' V_hat ] [ p_z_hat ] [ -v_a ] 03479 // 03480 // v_a = e(sa) <: R^q_hat : where sa = s_map(-ja) 03481 // 03482 // / 0 : if x_init(ja) == bnd_ja 03483 // d_a = | 03484 // \ b_a - b_X(l_x_X_map(ja)) : otherwise 03485 // 03486 // p_z_hat = -inv(S_hat)*v_a 03487 // 03488 // p_v = inv(Ko)*(-U_hat*p_z_hat) 03489 // 03490 // gamma_plus = (d_a - v_a' * z_hat ) / ( v_a' * p_z_hat ) 03491 // 03492 const size_type 03493 sa = act_set->s_map(-int(ja)), 03494 la = act_set->qp().l_x_X_map()(ja); 03495 TEUCHOS_TEST_FOR_EXCEPT( !( sa ) ); 03496 TEUCHOS_TEST_FOR_EXCEPT( !( la ) ); 03497 // v_a = e(sa) <: R^q_hat 03498 Workspace<value_type> v_a_ws(wss,act_set->q_hat()); 03499 DVectorSlice v_a(&v_a_ws[0],v_a_ws.size()); 03500 v_a = 0.0; 03501 v_a(sa) = 1.0; 03502 // d_a 03503 const value_type 03504 d_a = ( bnd_ja == qp.x_init()(ja) 03505 ? 0.0 03506 : b_a - qp.b_X()(la) ); 03507 // p_z_hat = -inv(S_hat) * v_a 03508 V_InvMtV( &act_set->p_z_hat(), act_set->S_hat(), no_trans, v_a() ); 03509 Vt_S( &act_set->p_z_hat(), -1.0 ); 03510 // p_v = inv(Ko)*(-U_hat*p_z_hat) 03511 if(!all_dof_used_up) { 03512 calc_v( qp.Ko(), NULL, act_set->U_hat(), act_set->p_z_hat(), &p_v() ); 03513 } 03514 else { 03515 p_v = 0.0; 03516 } 03517 // Iterative refinement? 
03518 if( using_iter_refinement ) { 03519 if( (int)output_level >= (int)OUTPUT_ITER_SUMMARY ) { 03520 *out 03521 << "\n\nPerforming iterative refinement on p_v, p_z_hat system ...\n"; 03522 } 03523 summary_lines_counter = 0; 03524 // [ Ko U_hat ] [ p_v ] = [ 0 ] 03525 // [ U_hat' V_hat ] [ p_z_hat ] [ -v_a ] 03526 EIterRefineReturn status = iter_refine( 03527 *act_set, out, output_level, 0.0, NULL, +1.0, &v_a 03528 ,&p_v, &act_set->p_z_hat() 03529 ,iter_refine_num_resid, iter_refine_num_solves 03530 ); 03531 } 03532 // gamma_plus = ( d_a - v_a'*z_hat ) / ( v_a'*p_z_hat ) 03533 if(!all_dof_used_up) 03534 gamma_plus = ( ( d_a - dot(v_a(),act_set->z_hat()) ) 03535 / ( dot(v_a(),act_set->p_z_hat()) ) ); 03536 else 03537 gamma_plus = beta * inf; 03538 } 03539 else { 03540 // 03541 // Add a constraint that is not an initially fixed 03542 // variable bound. 03543 // 03544 // [ Ko U_hat ] [ p_v ] = [ -u_a ] 03545 // [ U_hat' V_hat ] [ p_z_hat ] [ -v_a ] 03546 // 03547 // p_z_hat = inv(S_hat) * ( - v_a + U_hat' * inv(Ko) * u_a ) 03548 // 03549 // p_v = inv(Ko) * ( -u_a - U_hat * p_z_hat ) 03550 // 03551 // gamma_plus = ( d_a - u_a'*v - v_a'*z_hat ) / ( u_a'*p_v + v_a'*p_z_hat ) 03552 // 03553 // ToDo: (9/25/00): Make u_a and v_a both sparse and combine the following code. 
03554 // 03555 if( ja <= n ) { 03556 // Fix an initially free variable 03557 // 03558 // u_a = [ Q_R' * e(ja) ] <: R^(n_R+m) 03559 // [ 0 ] 03560 // 03561 // v_a = 0 <: R^(q_hat) 03562 // 03563 // d_a = b_a <: R 03564 // 03565 const size_type 03566 la = act_set->qp().Q_R().lookup_col_j(ja); 03567 TEUCHOS_TEST_FOR_EXCEPT( !( la ) ); 03568 const EtaVector u_a = EtaVector(la,n_R+m); 03569 const value_type d_a = b_a; 03570 DVector t1; 03571 // t1 = inv(Ko) * u_a 03572 V_InvMtV( &t1, qp.Ko(), no_trans, u_a() ); 03573 if( act_set->q_hat() ) { 03574 // t2 = U_hat'*t1 03575 DVector t2; 03576 V_MtV( &t2, act_set->U_hat(), trans, t1() ); 03577 // p_z_hat = inv(S_hat) * t2 03578 V_InvMtV( &act_set->p_z_hat(), act_set->S_hat(), no_trans, t2() ); 03579 // t1 = - u_a 03580 V_StV( &t1, -1.0, u_a() ); 03581 // t1 += - U_hat * p_z_hat 03582 Vp_StMtV( &t1(), -1.0, act_set->U_hat(), no_trans, act_set->p_z_hat() ); 03583 // p_v = inv(Ko) * t1 03584 if(!all_dof_used_up) 03585 V_InvMtV( &p_v, qp.Ko(), no_trans, t1() ); 03586 else 03587 p_v = 0.0; 03588 } 03589 else { 03590 // p_v = -t1 03591 V_mV( &p_v, t1() ); 03592 } 03593 // Iterative refinement? 03594 if( using_iter_refinement ) { 03595 if( (int)output_level >= (int)OUTPUT_ITER_SUMMARY ) { 03596 *out 03597 << "\n\nPerforming iterative refinement on p_v, p_z_hat system ...\n"; 03598 } 03599 summary_lines_counter = 0; 03600 // [ Ko U_hat ] [ p_v ] = [ -u_a ] 03601 // [ U_hat' V_hat ] [ p_z_hat ] [ 0 ] 03602 Workspace<value_type> dense_u_a_ws(wss,u_a().dim()); 03603 DVectorSlice dense_u_a(&dense_u_a_ws[0],dense_u_a_ws.size()); 03604 dense_u_a = 0.0; // Make a dense copy of u_a! 
03605 dense_u_a(u_a().begin()->index()+u_a().offset()) = 1.0; 03606 EIterRefineReturn status = iter_refine( 03607 *act_set, out, output_level, +1.0, &dense_u_a, 0.0, NULL 03608 ,&p_v, &act_set->p_z_hat() 03609 ,iter_refine_num_resid, iter_refine_num_solves 03610 ); 03611 summary_lines_counter = 0; 03612 } 03613 // gamma_plus = ( d_a - u_a'*v) / ( u_a'*p_v ) 03614 if(!all_dof_used_up) 03615 gamma_plus = ( d_a - dot(u_a(),*v) ) / dot(u_a(),p_v()); 03616 else 03617 gamma_plus = beta * inf; 03618 } 03619 else { 03620 // Add a general inequality (or equality) constraint 03621 // 03622 // u_a = [ Q_R' * A_bar * e(ja) ] <: R^(n_R + m) 03623 // [ 0 ] 03624 // 03625 // v_a = P_XF_hat' * A_bar * e_ja <: R^(q_hat) 03626 // 03627 // d_a = b_a - b_X' * (Q_X' * A_bar * e_ja) <: R 03628 // 03629 Workspace<value_type> u_a_ws( wss, n_R + m ); 03630 DVectorSlice u_a( &u_a_ws[0], u_a_ws.size() ); 03631 Workspace<value_type> v_a_ws( wss, act_set->q_hat() ); 03632 DVectorSlice v_a( &v_a_ws[0], v_a_ws.size() ); 03633 // u_a(1:n_R) = Q_R' * A_bar * e(ja) 03634 Vp_StPtMtV( &u_a(1,n_R), 1.0, qp.Q_R(), trans 03635 , qp.constraints().A_bar(), no_trans, e_ja(), 0.0 ); 03636 // u_a(n_R+1:n_R+m) = 0.0 03637 if(m) 03638 u_a(n_R+1,n_R+m) = 0.0; 03639 // t0 = Q_X' * A_bar * e_ja 03640 Workspace<value_type> t0_ws( wss, n-n_R ); 03641 DVectorSlice t0( &t0_ws[0], t0_ws.size() ); 03642 if( n > n_R ) 03643 Vp_StPtMtV( &t0(), 1.0, qp.Q_X(), trans 03644 , qp.constraints().A_bar(), no_trans, e_ja(), 0.0 ); 03645 // d_a = b_a - b_X'*t0 03646 const value_type 03647 d_a = b_a - ( n > n_R ? 
dot( qp.b_X(), t0() ) : 0.0 ); 03648 // t1 = inv(Ko) * u_a 03649 Workspace<value_type> t1_ws( wss, n_R + m ); 03650 DVectorSlice t1( &t1_ws[0], t1_ws.size() ); 03651 V_InvMtV( &t1, qp.Ko(), no_trans, u_a ); 03652 if( act_set->q_hat() ) { 03653 // t2 = U_hat'*t1 03654 Workspace<value_type> t2_ws( wss, act_set->q_hat() ); 03655 DVectorSlice t2( &t2_ws[0], t2_ws.size() ); 03656 V_MtV( &t2, act_set->U_hat(), trans, t1() ); 03657 // v_a = P_XF_hat' * A_bar * e_ja 03658 Vp_StPtMtV( &v_a(), 1.0, act_set->P_XF_hat(), trans 03659 , qp.constraints().A_bar(), no_trans, e_ja(), 0.0 ); 03660 // t2 += -v_a 03661 Vp_StV( &t2(), -1.0, v_a() ); 03662 // p_z_hat = inv(S_hat) * t2 03663 V_InvMtV( &act_set->p_z_hat(), act_set->S_hat(), no_trans, t2() ); 03664 if(!all_dof_used_up) { 03665 // t1 = - u_a 03666 V_StV( &t1, -1.0, u_a() ); 03667 // t1 += - U_hat * p_z_hat 03668 Vp_StMtV( &t1(), -1.0, act_set->U_hat(), no_trans, act_set->p_z_hat() ); 03669 // p_v = inv(Ko) * t1 03670 V_InvMtV( &p_v, qp.Ko(), no_trans, t1() ); 03671 } 03672 else { 03673 p_v = 0.0; 03674 } 03675 // Iterative refinement? 03676 if( using_iter_refinement ) { 03677 if( (int)output_level >= (int)OUTPUT_ITER_SUMMARY ) { 03678 *out 03679 << "\n\nPerforming iterative refinement on p_v, p_z_hat system ...\n"; 03680 } 03681 summary_lines_counter = 0; 03682 // [ Ko U_hat ] [ p_v ] = [ -u_a ] 03683 // [ U_hat' V_hat ] [ p_z_hat ] [ -v_a ] 03684 EIterRefineReturn status = iter_refine( 03685 *act_set, out, output_level, +1.0, &u_a, +1.0, act_set->q_hat() ? &v_a : NULL 03686 ,&p_v, act_set->q_hat() ? 
&act_set->p_z_hat() : NULL 03687 ,iter_refine_num_resid, iter_refine_num_solves 03688 ); 03689 summary_lines_counter = 0; 03690 } 03691 // gamma_plus = ( d_a - u_a'*v - v_a'*z_hat ) / ( u_a'*p_v + v_a'*p_z_hat ) 03692 if(!all_dof_used_up) 03693 gamma_plus = ( ( d_a - dot(u_a,*v) - dot(v_a(),act_set->z_hat()) ) 03694 / ( dot(u_a,p_v()) + dot(v_a(),act_set->p_z_hat()) ) ); 03695 else 03696 gamma_plus = beta * inf; 03697 } 03698 else { 03699 // p_v = -t1 03700 if(!all_dof_used_up) 03701 V_mV( &p_v, t1() ); 03702 else 03703 p_v = 0.0; 03704 // gamma_plus = ( d_a - u_a'*v) / ( u_a'*p_v ) 03705 if(!all_dof_used_up) 03706 gamma_plus = ( d_a - dot(u_a,*v) ) / dot(u_a,p_v()); 03707 else 03708 gamma_plus = beta * inf; 03709 } 03710 } 03711 } 03712 if( schur_comp_update_failed && gamma_plus * beta < 0 ) { 03713 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 03714 *out 03715 << "\nThe schur complement update failed and gamma_plus = " << gamma_plus << " is the wrong sign" 03716 << "\nso we will assume the sign error for (...)/+-0 was due to arbitrary roundoff" 03717 << "\nand therefore we will set gamma_plus = -gamma_plus\n"; 03718 } 03719 gamma_plus = -gamma_plus; 03720 } 03721 // Print the steps p_v and p_z_hat 03722 if(act_set->q_hat()) { 03723 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 03724 *out 03725 << "\n||p_z_hat||inf = " << norm_inf(act_set->p_z_hat()) << endl; 03726 } 03727 if( (int)output_level >= (int)OUTPUT_ITER_QUANTITIES ) { 03728 *out << "\np_z_hat =\n" << act_set->p_z_hat(); 03729 } 03730 } 03731 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 03732 *out 03733 << "\n||p_v||inf = " << norm_inf(p_v()) << endl; 03734 } 03735 if( (int)output_level >= (int)OUTPUT_ITER_QUANTITIES ) { 03736 *out << "\np_v =\n" << p_v(); 03737 } 03738 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 03739 *out 03740 << "\ngamma_plus = " << gamma_plus << endl; 03741 } 03742 // Compute step for mu_D_hat 03743 if( act_set->q_D_hat() ) { 03744 // Compute for steps of 
all the constraints in the current active set 03745 calc_p_mu_D( *act_set, p_v(), act_set->p_z_hat(), &ja, &act_set->p_mu_D_hat() ); 03746 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 03747 *out 03748 << "\n||p_mu_D_hat||inf = " << norm_inf(act_set->p_mu_D_hat()) << std::endl; 03749 } 03750 if( (int)output_level >= (int)OUTPUT_ITER_QUANTITIES ) { 03751 *out 03752 << "\np_mu_D_hat =\n" << act_set->p_mu_D_hat(); 03753 } 03754 } 03755 } 03756 else { 03757 // The new schur complement is already updated so compute 03758 // the solution outright. 03759 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 03760 *out 03761 << "\nThe KKT system for the new active set is or known to be nonsingular!\n" 03762 << "\nComputing the steps from:" 03763 << "\nz_hat_plus = inv(S_hat)*(d_hat-U_hat'vo), v_plus = inv(Ko)*(fo-U_hat*z_hat_plus) ...\n"; 03764 } 03765 03766 // Compute z_hat_plus, v_plus 03767 03768 // z_hat_plus = inv(S_hat) * ( d_hat - U_hat' * vo ) 03769 const size_type q_hat = act_set->q_hat(); 03770 z_hat_plus.bind(DVectorSlice(&z_hat_plus_ws[0],q_hat)); 03771 calc_z( act_set->S_hat(), act_set->d_hat(), act_set->U_hat(), &vo 03772 , &z_hat_plus ); 03773 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 03774 *out 03775 << "\n||z_hat_plus||inf = " << norm_inf(z_hat_plus()) << std::endl; 03776 } 03777 if( (int)output_level >= (int)OUTPUT_ITER_QUANTITIES ) { 03778 *out 03779 << "\nz_hat_plus =\n" << z_hat_plus(); 03780 } 03781 // v_plus = inv(Ko) * (fo - U_hat * z_hat_plus) 03782 calc_v( qp.Ko(), &qp.fo(), act_set->U_hat(), z_hat_plus 03783 , &v_plus ); 03784 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 03785 *out 03786 << "\n||v_plus||inf = " << norm_inf(v_plus()) << std::endl; 03787 } 03788 if( (int)output_level >= (int)OUTPUT_ITER_QUANTITIES ) { 03789 *out 03790 << "\nv_plus =\n" << v_plus(); 03791 } 03792 if( using_iter_refinement ) { 03793 if( (int)output_level >= (int)OUTPUT_ITER_SUMMARY ) { 03794 *out 03795 << "\n\nPerforming iterative refinement on 
v_plus, z_hat_plus system ...\n"; 03796 } 03797 summary_lines_counter = 0; 03798 // [ Ko U_hat ] [ v_plus ] = [ fo ] 03799 // [ U_hat' V_hat ] [ z_hat_plus ] [ d_hat ] 03800 EIterRefineReturn status = iter_refine( 03801 *act_set, out, output_level, -1.0, &qp.fo(), -1.0, &act_set->d_hat() 03802 ,&v_plus, &z_hat_plus 03803 ,iter_refine_num_resid, iter_refine_num_solves 03804 ); 03805 } 03806 // Compute p_z_hat (change in z_hat w.r.t newly added constriant multiplier) 03807 DVectorSlice p_z_hat = act_set->p_z_hat(); 03808 // p_z_hat = z_hat_plus - z_hat 03809 V_VmV( &p_z_hat(), z_hat_plus(), act_set->z_hat() ); 03810 // p_v = v_plus - v 03811 V_VmV( &p_v(), v_plus(), *v ); 03812 // p_mu_D_hat 03813 if( act_set->q_D_hat() ) 03814 calc_p_mu_D( *act_set, p_v(), p_z_hat(), NULL, &act_set->p_mu_D_hat() ); 03815 // gamma_plus 03816 const size_type sa = act_set->s_map(ja); 03817 if(sa) { 03818 // This is not an initially fixed variable that returned to its 03819 // initial. The multiplier for this constriant may not be the 03820 // last element if an ADD/DROP was performed on the last iteration 03821 // in order to get here where the DROP was an initially fixed variable 03822 // that was freed and therefore the KKT system was augmented so this 03823 // multiplier is not the last element of z_hat(...). 03824 gamma_plus = z_hat_plus(sa); 03825 } 03826 else { 03827 // This must be an initially fixed variable that returned to its 03828 // initial bound. This will be the last element even if an ADD/DROP 03829 // was just performed since a drop would only remove elements from 03830 // p_mu_D_hat, not add them. 
03831 gamma_plus = act_set->p_mu_D_hat()(act_set->q_D_hat()); 03832 } 03833 // p_z_hat = p_z_hat / gamma_plus 03834 Vt_S( &p_z_hat(), 1.0 / gamma_plus ); 03835 // p_v = p_v / gamma_plus 03836 Vt_S( &p_v(), 1.0 / gamma_plus ); 03837 // Print gama_plus, p_z_hat, p_v and p_mu_D_hat 03838 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 03839 *out 03840 << "\ngamma_plus = " << gamma_plus << std::endl; 03841 } 03842 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 03843 *out 03844 << "\n||p_z_hat||inf = " << norm_inf(p_z_hat()) << std::endl; 03845 } 03846 if( (int)output_level >= (int)OUTPUT_ITER_QUANTITIES ) { 03847 *out 03848 << "\np_z_hat =\n" << p_z_hat(); 03849 } 03850 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 03851 *out 03852 << "\n||p_v||inf = " << norm_inf(p_v()) << std::endl; 03853 } 03854 if( (int)output_level >= (int)OUTPUT_ITER_QUANTITIES ) { 03855 *out 03856 << "\np_v =\n" << p_v(); 03857 } 03858 if( act_set->q_D_hat() ) { 03859 // p_mu_D_hat = p_mu_D_hat / gamma_plus 03860 Vt_S( &act_set->p_mu_D_hat(), 1.0 / gamma_plus ); 03861 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 03862 *out 03863 << "\n||p_mu_D_hat||inf =\n" << norm_inf(act_set->p_mu_D_hat()) << std::endl; 03864 } 03865 if( (int)output_level >= (int)OUTPUT_ITER_QUANTITIES ) { 03866 *out 03867 << "\np_mu_D_hat =\n" << act_set->p_mu_D_hat(); 03868 } 03869 } 03870 } 03871 } 03872 case COMPUTE_STEP_LENGTHS: { 03873 03874 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 03875 *out 03876 << "\n*** COMPUTE_STEP_LENGTHS\n"; 03877 } 03878 // Compute the dual infeasibility scaling 03879 const size_type q_hat = act_set->q_hat(); 03880 dual_infeas_scale = 1.0; 03881 // if( q_hat ) 03882 // dual_infeas_scale = my_max( dual_infeas_scale, norm_inf( act_set->z_hat() ) ); 03883 // if( m ) 03884 // dual_infeas_scale = my_max( dual_infeas_scale, norm_inf( (*v)(n_R+1,n_R+m) ) ); 03885 // if( act_set->q_D_hat() ) 03886 // dual_infeas_scale = my_max( dual_infeas_scale, norm_inf( 
act_set->mu_D_hat() ) ); 03887 03888 // Primal step length, t_P = beta * gamma_plus, z_plus = [ z_hat_plus; gama_plus ]. 03889 // Or constraint ja is linearly dependent in which case p_x is zero so 03890 // t_P is infinite. 03891 t_P = beta * gamma_plus; // Could be < 0 03892 if( t_P < 0.0 ) { 03893 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 03894 *out 03895 << "\nWarning, A near degenerate inequality constraint ja = " << ja 03896 << " is being added that has the wrong sign with:\n" 03897 << " t_P = " << t_P << std::endl 03898 << " dual_infeas_scale = " << dual_infeas_scale << std::endl 03899 << " norm_2_constr = " << norm_2_constr << std::endl 03900 << " |t_P/(norm_2_constr*dual_infeas_scale)| = " 03901 << std::fabs(t_P/(norm_2_constr*dual_infeas_scale)) 03902 << " <= dual_infeas_tol = " << dual_infeas_tol() << std::endl; 03903 } 03904 summary_lines_counter = 0; 03905 if( !using_iter_refinement ) { 03906 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 03907 *out << "We are not using iterative refinement yet so turn it on" 03908 << "\nthen recompute the steps ...\n"; 03909 } 03910 using_iter_refinement = true; 03911 next_step = COMPUTE_SEARCH_DIRECTION; 03912 continue; 03913 } 03914 else { 03915 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 03916 *out 03917 << "We are already using iterative refinement so the QP algorithm is terminated!\n"; 03918 } 03919 return DUAL_INFEASIBILITY; 03920 } 03921 } 03922 t_P = beta * gamma_plus; // Now guaranteed to be > 0 03923 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 03924 *out 03925 << "\nt_P = " << t_P << endl; 03926 } 03927 03929 // Dual step length. Largest step t that does not cause violation in 03930 // dual feasibility (i.e. lagrange multipliers for inequalities are 03931 // dual feasible, or primal optimal ). 03932 // lambda_hat_new = lambda_hat + beta * t_D * p_lambda_hat must be dual feasible. 
03933 t_D = inf; 03934 jd = 0; 03935 value_type max_feas_viol = 0.0; // Remember the amount of violation. 03936 int j_degen = 0; // remember which (if any) constraint was near 03937 // degenerate and had an incorrect sign. 03938 EBounds bnd_jd; // The bound of the constraint to be dropped. 03939 03940 // Search through Lagrange multipliers in z_hat 03941 if( act_set->q_hat() ) { 03942 DVectorSlice z_hat = act_set->z_hat(); 03943 DVectorSlice p_z_hat = act_set->p_z_hat(); 03944 DVectorSlice::iterator 03945 z_itr = z_hat.begin(), 03946 p_z_itr = p_z_hat.begin(); 03947 const size_type 03948 qq = assume_lin_dep_ja || (!assume_lin_dep_ja && return_to_init_fixed) 03949 ? q_hat : q_hat - 1; 03950 // Print header for s, j, z_hat(s), p_z_hat(s), bnds(s), t, t_D, jd 03951 if( qq > 0 && (int)output_level >= (int)OUTPUT_ACT_SET ) { 03952 *out 03953 << "\nComputing the maximum step for multiplers for dual feasibility\n\n" 03954 << right << setw(5) << "s" 03955 << right << setw(5) << "j" 03956 << right << setw(dbl_w) << "z_hat" 03957 << right << setw(dbl_w) << "p_z_hat" 03958 << right << setw(20) << "bnd" 03959 << right << setw(dbl_w) << "t" 03960 << right << setw(dbl_w) << "t_D" 03961 << right << setw(5) << "jd" << endl 03962 << right << setw(5) << "----" 03963 << right << setw(5) << "----" 03964 << right << setw(dbl_w) << "--------------" 03965 << right << setw(dbl_w) << "--------------" 03966 << right << setw(20) << "--------------" 03967 << right << setw(dbl_w) << "--------------" 03968 << right << setw(dbl_w) << "--------------" 03969 << right << setw(5) << "----" << endl; 03970 } 03971 for( int s = 1; s <= qq; ++s, ++z_itr, ++p_z_itr) { 03972 int j = act_set->ij_map(s); 03973 if( j > 0 ) { 03974 namespace ns = QPSchurPack; 03975 EBounds bnd = act_set->bnd(s); 03976 // Print first part of row for s, j, z_hat(s), p_z_hat(s), bnds(s) .... 
03977 if( (int)output_level >= (int)OUTPUT_ACT_SET ) { 03978 *out 03979 << right << setw(5) << s 03980 << right << setw(5) << j 03981 << right << setw(dbl_w) << *z_itr 03982 << right << setw(dbl_w) << *p_z_itr 03983 << right << setw(20) << bnd_str(bnd); 03984 } 03985 value_type t = inf; 03986 // Lookout for degeneracy. 03987 bool j_is_degen = false; 03988 value_type viol; 03989 const int dual_feas_status 03990 = correct_dual_infeas( 03991 j,bnd,t_P,1.0,dual_infeas_tol(),DEGENERATE_MULT 03992 ,out,output_level,true,"z_hat(s)",&(*z_itr),&viol 03993 ,"p_z_hat(s)",&(*p_z_itr),"z_hat_plus(s)" 03994 , (assume_lin_dep_ja ? NULL: &z_hat_plus(s) ) ); 03995 if( dual_feas_status < 0 ) { 03996 if( !using_iter_refinement ) { 03997 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) 03998 *out << "We are not using iterative refinement yet so turn it on" 03999 << "\nthen recompute the steps ...\n"; 04000 using_iter_refinement = true; 04001 next_step = COMPUTE_SEARCH_DIRECTION; 04002 continue; 04003 } 04004 else { 04005 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) 04006 *out << "We are already using iterative refinement so the QP algorithm is terminated!\n"; 04007 return DUAL_INFEASIBILITY; 04008 } 04009 } 04010 else if( dual_feas_status == 0 ) { 04011 j_is_degen = true; 04012 } 04013 // If we get here either the dual variable was feasible or it 04014 // was near degenerate and was corrected! 04015 const value_type feas_viol = beta*(*p_z_itr); 04016 if( bnd == EQUALITY ) 04017 ; // We don't care 04018 else if( bnd == LOWER && feas_viol <= 0.0 ) 04019 ; // dual feasible for all t > 0 04020 else if( bnd == UPPER && feas_viol >= 0.0 ) 04021 ; // dual feasible for all t > 0 04022 else { 04023 // finite t. 04024 t = -beta*(*z_itr)/(*p_z_itr); 04025 if( t < t_D ) { // remember minimum step length 04026 t_D = t; 04027 jd = j; 04028 if(j_is_degen) j_degen = j; 04029 max_feas_viol = feas_viol; 04030 bnd_jd = bnd; 04031 } 04032 } 04033 // Print rest of row for ... 
t, t_D, jd 04034 if( (int)output_level >= (int)OUTPUT_ACT_SET ) { 04035 *out 04036 << right << setw(dbl_w) << t 04037 << right << setw(dbl_w) << t_D 04038 << right << setw(5) << jd << endl; 04039 } 04040 } 04041 } 04042 } 04043 04044 // Search through Lagrange multipliers in mu_D_hat 04045 if( act_set->q_D_hat() ) { 04046 const QPSchurPack::QP::x_init_t &x_init = qp.x_init(); 04047 const QPSchurPack::QP::i_x_X_map_t &i_x_X_map = qp.i_x_X_map(); 04048 const size_type q_D_hat = act_set->q_D_hat(); 04049 DVectorSlice mu_D_hat = act_set->mu_D_hat(); 04050 DVectorSlice p_mu_D_hat = act_set->p_mu_D_hat(); 04051 const size_type 04052 qD = assume_lin_dep_ja && return_to_init_fixed ? q_D_hat-1 : q_D_hat; 04053 // Print header for k, i, mu_D_hat(k), p_mu_D_hat(k), x_init(k), t, t_D, jd 04054 if( qD > 0 && (int)output_level >= (int)OUTPUT_ACT_SET ) { 04055 *out 04056 << "\nComputing the maximum step for multiplers for dual feasibility\n\n" 04057 << right << setw(5) << "k" 04058 << right << setw(5) << "i" 04059 << right << setw(dbl_w) << "mu_D_hat" 04060 << right << setw(dbl_w) << "p_mu_D_hat" 04061 << right << setw(20) << "x_init" 04062 << right << setw(dbl_w) << "t" 04063 << right << setw(dbl_w) << "t_D" 04064 << right << setw(5) << "jd" << endl 04065 << right << setw(5) << "----" 04066 << right << setw(5) << "----" 04067 << right << setw(dbl_w) << "--------------" 04068 << right << setw(dbl_w) << "--------------" 04069 << right << setw(20) << "--------------" 04070 << right << setw(dbl_w) << "--------------" 04071 << right << setw(dbl_w) << "--------------" 04072 << right << setw(5) << "----" << endl; 04073 } 04074 GenPermMatrixSlice::const_iterator 04075 Q_XD_itr = act_set->Q_XD_hat().begin(), 04076 Q_XD_end = Q_XD_itr + qD; 04077 for( ; Q_XD_itr != Q_XD_end; ++Q_XD_itr ) { 04078 const size_type k = Q_XD_itr->col_j(); 04079 const size_type i = Q_XD_itr->row_i(); 04080 DVectorSlice::iterator 04081 mu_D_itr = mu_D_hat.begin() + (k-1), 04082 p_mu_D_itr = p_mu_D_hat.begin() + 
(k-1); 04083 const size_type l = act_set->l_fxfx(k); 04084 EBounds bnd = qp.x_init()(i); 04085 // Print first part of row for s, j, z_hat(s), p_z_hat(s), bnds(s) .... 04086 if( (int)output_level >= (int)OUTPUT_ACT_SET ) { 04087 *out 04088 << right << setw(5) << k 04089 << right << setw(5) << i 04090 << right << setw(dbl_w) << *mu_D_itr 04091 << right << setw(dbl_w) << *p_mu_D_itr 04092 << right << setw(20) << bnd_str(bnd); 04093 } 04094 value_type t = inf; 04095 // Lookout for degeneracy. 04096 bool j_is_degen = false; 04097 value_type viol; 04098 const int dual_feas_status 04099 = correct_dual_infeas( 04100 i,bnd,t_P,1.0,dual_infeas_tol(),DEGENERATE_MULT 04101 ,out,output_level,true,"mu_D_hat(k)",&(*mu_D_itr),&viol 04102 ,"p_mu_D_hat(k)",&(*p_mu_D_itr) ); 04103 if( dual_feas_status < 0 ) { 04104 if( !using_iter_refinement ) { 04105 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) 04106 *out << "We are not using iterative refinement yet so turn it on" 04107 << "\nthen recompute the steps ...\n"; 04108 using_iter_refinement = true; 04109 next_step = COMPUTE_SEARCH_DIRECTION; 04110 continue; 04111 } 04112 else { 04113 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) 04114 *out << "We are already using iterative refinement so the QP algorithm is terminated!\n"; 04115 return DUAL_INFEASIBILITY; 04116 } 04117 } 04118 else if( dual_feas_status == 0 ) { 04119 j_is_degen = true; 04120 } 04121 // If we get here either the dual variable was feasible or it 04122 // was near degenerate and was corrected! 04123 const value_type feas_viol = beta*(*p_mu_D_itr); 04124 if( bnd == EQUALITY ) 04125 ; // We don't care 04126 else if( bnd == LOWER && feas_viol <= 0.0 ) 04127 ; // dual feasible for all t > 0 04128 else if( bnd == UPPER && feas_viol >= 0.0 ) 04129 ; // dual feasible for all t > 0 04130 else { 04131 // finite t. 
04132 t = -beta*(*mu_D_itr)/(*p_mu_D_itr); 04133 if( t < t_D ) { // remember minimum step length 04134 t_D = t; 04135 jd = -i; 04136 if(j_is_degen) j_degen = jd; 04137 max_feas_viol = feas_viol; 04138 bnd_jd = bnd; 04139 } 04140 } 04141 // Print rest of row for ... t, t_D, jd 04142 if( (int)output_level >= (int)OUTPUT_ACT_SET ) { 04143 *out 04144 << right << setw(dbl_w) << t 04145 << right << setw(dbl_w) << t_D 04146 << right << setw(5) << jd << endl; 04147 } 04148 } 04149 } 04150 // Print t_D, jd 04151 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 04152 *out 04153 << "\nt_D = " << t_D << endl 04154 << "jd = " << jd << endl; 04155 } 04156 if( jd == j_degen && jd != 0 && t_D < t_P ) { 04157 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 04158 *out 04159 << "\nWarning, the near degenerate constraint j = " 04160 << jd << " which had the incorrect sign\nand was adjusted " 04161 << "was selected to be dropped from the active set.\n"; 04162 } 04163 } 04164 // Print end of row for rank if the right print level 04165 if( assume_lin_dep_ja && !schur_comp_update_failed && (int)output_level == (int)OUTPUT_ITER_SUMMARY ) { 04166 if( t_P < huge_primal_step() ) 04167 *out << right << setw(6) << "LI" << endl; 04168 else 04169 *out << right << setw(6) << "LD" << endl; 04170 out->flush(); 04171 --summary_lines_counter; 04172 } 04173 // Print start of row for itr, q_hat, q(+), q_D, q_C, q_F, change, type, index, bound, violation 04174 if( t_D < t_P && (int)output_level == (int)OUTPUT_ITER_SUMMARY ) { 04175 *out 04176 << right << setw(6) << itr // itr 04177 << right << setw(6) << act_set->q_hat() // q_hat 04178 << right << setw(6) << act_set->q_plus_hat() // q(+) 04179 << right << setw(6) << act_set->q_F_hat() // q_F 04180 << right << setw(6) << act_set->q_C_hat() // q_C 04181 << right << setw(6) << act_set->q_D_hat() // q_D 04182 << right << setw(8) << "DROP" // change 04183 << right << setw(9); // type 04184 if( jd < 0 ) { 04185 *out << "X_F"; 04186 } 04187 else if( jd 
<= n ) { 04188 if( bnd_jd == qp.x_init()(jd) ) 04189 *out << "X_F_C_F"; 04190 else 04191 *out << "R_X_R"; 04192 } 04193 else { 04194 *out << "GEN"; 04195 } 04196 *out 04197 << right << setw(6) << jd // index 04198 << right << setw(10) << bnd_str(bnd_jd) // bound 04199 << right << setw(dbl_w) << max_feas_viol // violation 04200 << right << setw(6) << "LI" << endl; // rank (this should be true!) 04201 } 04202 } 04203 case TAKE_STEP: { 04204 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 04205 *out 04206 << "\n*** TAKE_STEP\n"; 04207 } 04208 if( t_P >= huge_primal_step() && t_D >= huge_dual_step() ) { 04209 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 04210 *out 04211 << "Error, QP is infeasible, inconsistent constraint a("<<ja<<") detected\n"; 04212 } 04213 if( using_iter_refinement ) { 04214 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 04215 *out 04216 << "We are already using iterative refinement so the QP algorithm is terminated!\n"; 04217 } 04218 return INFEASIBLE_CONSTRAINTS; 04219 } 04220 else { 04221 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 04222 *out << "We are not using iterative refinement yet so turn it on"; 04223 if(assume_lin_dep_ja) 04224 *out << "\nthen pick another violated constriant to add ... \n"; 04225 else 04226 *out << "\nthen recompute the steps ...\n"; 04227 } 04228 summary_lines_counter = 0; 04229 last_jd = 0; // erase this memory! 04230 last_ja = 0; // .. 04231 using_iter_refinement = true; 04232 if(assume_lin_dep_ja) { 04233 EIterRefineReturn status = iter_refine( 04234 *act_set, out, output_level, -1.0, &qp.fo(), -1.0, act_set->q_hat() ? &act_set->d_hat() : NULL 04235 ,v, act_set->q_hat() ? 
&act_set->z_hat() : NULL 04236 ,iter_refine_num_resid, iter_refine_num_solves 04237 ); 04238 next_step = PICK_VIOLATED_CONSTRAINT; 04239 } 04240 else { 04241 // Iterative refinement will be performed there 04242 next_step = COMPUTE_SEARCH_DIRECTION; 04243 } 04244 continue; 04245 } 04246 } 04247 else if( t_P > t_D ) { 04248 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 04249 if( t_P >= huge_primal_step() ) { 04250 *out 04251 << "\n*** (b) Dual Step (t_P = " << t_P << " >= huge_primal_step = " 04252 << huge_primal_step() << ")\n"; 04253 } 04254 else { 04255 *out 04256 << "\n*** (b) Partial Primal-Dual Step\n"; 04257 } 04258 } 04259 // Check for cycling 04260 if( ja == last_jd && jd == last_ja ) { 04261 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 04262 *out 04263 << "\n\nQPSchur::qp_algo(...) : Error, the constraint " 04264 << "a(" << ja << ") with violation\n" 04265 << "(a(ja)'*x - b_a) = (" << con_ja_val 04266 << " - " << b_a << ") = " << (con_ja_val - b_a) << "\n" 04267 << "we are adding to the active set and the constraint constriant\n" 04268 << "a(" << jd << ") we are dropping were just dropped and added respectively!\n" 04269 << "The algorithm is cycling!\n"; 04270 } 04271 if( using_iter_refinement ) { 04272 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 04273 *out 04274 << "We are already using iterative refinement so the QP algorithm is terminated!\n"; 04275 } 04276 return SUBOPTIMAL_POINT; 04277 } 04278 else { 04279 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 04280 *out << "We are not using iterative refinement yet so turn it on"; 04281 if(assume_lin_dep_ja) 04282 *out << "\nthen pick another violated constriant to add ... \n"; 04283 else 04284 *out << "\nthen recompute the steps ...\n"; 04285 } 04286 summary_lines_counter = 0; 04287 last_jd = 0; // erase this memory! 04288 last_ja = 0; // .. 
04289 using_iter_refinement = true; 04290 if(assume_lin_dep_ja) { 04291 EIterRefineReturn status = iter_refine( 04292 *act_set, out, output_level, -1.0, &qp.fo(), -1.0, act_set->q_hat() ? &act_set->d_hat() : NULL 04293 ,v, act_set->q_hat() ? &act_set->z_hat() : NULL 04294 ,iter_refine_num_resid, iter_refine_num_solves 04295 ); 04296 next_step = PICK_VIOLATED_CONSTRAINT; 04297 } 04298 else { 04299 // Iterative refinement will be performed there 04300 next_step = COMPUTE_SEARCH_DIRECTION; 04301 } 04302 continue; 04303 } 04304 } 04305 // Update the augmented KKT system 04306 try { 04307 if( assume_lin_dep_ja ) { 04308 if(act_set->drop_add_constraints( jd, ja, bnd_ja, true, out, output_level )) 04309 summary_lines_counter = 0; 04310 } 04311 else { 04312 if(act_set->drop_constraint( jd, out, output_level, true, true )) 04313 summary_lines_counter = 0; 04314 } 04315 } 04316 catch( const MatrixSymAddDelUpdateable::SingularUpdateException& excpt ) { 04317 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 04318 *out 04319 << "\n\nSchur complement appears to be singular and should not be:\n" 04320 << excpt.what() 04321 << "\nThe QP appears to be nonconvex and we therefore terminate the primal-dual QP algorithm!\n"; 04322 } 04323 return NONCONVEX_QP; 04324 } 04325 catch( const MatrixSymAddDelUpdateable::WrongInertiaUpdateException& excpt ) { 04326 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 04327 *out 04328 << "\n\nSchur complement appears to have the wrong inertia:\n" 04329 << excpt.what() 04330 << "\nThe QP appears to be nonconvex and we therefore terminate the primal-dual QP algorithm!\n"; 04331 } 04332 return NONCONVEX_QP; 04333 } 04334 // z_hat = z_hat + beta * t_D * p_z_hat 04335 if(act_set->q_hat()) 04336 Vp_StV( &act_set->z_hat(), beta * t_D, act_set->p_z_hat() ); 04337 // v = v + beta * t_D * p_v 04338 Vp_StV( v, beta * t_D, p_v() ); 04339 // mu_D_hat = mu_D_hat + beta * t_D * p_mu_D_hat 04340 if(act_set->q_D_hat()) 04341 Vp_StV( &act_set->mu_D_hat(), beta 
* t_D, act_set->p_mu_D_hat() ); 04342 04343 ++(*num_drops); 04344 04345 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) 04346 { 04347 *out 04348 << "\nUpdated primal and dual variables:\n" 04349 << "\n||v||inf = " << norm_inf(*v) << endl; 04350 if(act_set->q_hat()) { 04351 *out 04352 << "||z_hat||inf = " << norm_inf(act_set->z_hat()) << endl; 04353 } 04354 if(act_set->q_D_hat()) { 04355 *out 04356 << "max(|mu_D_hat(i)|) = " << norm_inf(act_set->mu_D_hat()) << endl 04357 << "min(|mu_D_hat(i)|) = " << min_abs(act_set->mu_D_hat()) << endl; 04358 } 04359 } 04360 if( (int)output_level >= (int)OUTPUT_ITER_QUANTITIES ) 04361 { 04362 *out << "\nv = \n" << *v << endl; 04363 if(assume_lin_dep_ja) { 04364 *out 04365 << "\nPrinting active set after dropping constraint jd = " << jd 04366 << " and adding constraint ja = " << ja << " ...\n"; 04367 } 04368 else { 04369 *out 04370 << "\nPrinting active set after dropping constraint jd = " << jd 04371 << " ...\n"; 04372 } 04373 dump_act_set_quantities( *act_set, *out ); 04374 } 04375 last_jd = jd; 04376 assume_lin_dep_ja = false; // If we get here then we know these are true! 
04377 next_step = COMPUTE_SEARCH_DIRECTION; 04378 continue; 04379 } 04380 else { // t_P < t_D 04381 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) { 04382 *out 04383 << "\n*** (c) Full Primal-Dual Step\n"; 04384 } 04385 ++(*num_adds); 04386 if( !assume_lin_dep_ja ) { 04387 act_set->z_hat() = z_hat_plus; 04388 *v = v_plus; 04389 } 04390 else { 04391 bool threw_exception = false; 04392 try { 04393 if(act_set->add_constraint( ja, bnd_ja, true, out, output_level, true, true )) 04394 summary_lines_counter = 0; 04395 } 04396 catch( const MatrixSymAddDelUpdateable::SingularUpdateException& excpt ) { 04397 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 04398 *out 04399 << "\n\nSchur complement appears to be singular and should not be:\n" 04400 << excpt.what() << std::endl; 04401 } 04402 threw_exception = true; 04403 } 04404 catch( const MatrixSymAddDelUpdateable::WrongInertiaUpdateException& excpt ) { 04405 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 04406 *out 04407 << "\n\nSchur complement appears to have the wrong inertia:\n" 04408 << excpt.what() << std::endl; 04409 } 04410 threw_exception = true; 04411 } 04412 if( threw_exception ) { 04413 if( !using_iter_refinement ) { 04414 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 04415 *out << "We are not using iterative refinement yet so turn it on and\n" 04416 << "go back and pick a new violated constraint to add to the active set ...\n"; 04417 } 04418 using_iter_refinement = true; 04419 if( (int)output_level >= (int)OUTPUT_ITER_SUMMARY ) { 04420 *out 04421 << "\n\nPerforming iterative refinement on v, z_hat system ...\n"; 04422 } 04423 summary_lines_counter = 0; 04424 // [ Ko U_hat ] [ v ] = [ fo ] 04425 // [ U_hat' V_hat ] [ z_hat ] [ d_hat ] 04426 EIterRefineReturn status = iter_refine( 04427 *act_set, out, output_level, -1.0, &qp.fo(), -1.0, &act_set->d_hat() 04428 ,v, &act_set->z_hat() 04429 ,iter_refine_num_resid, iter_refine_num_solves 04430 ); 04431 next_step = PICK_VIOLATED_CONSTRAINT; 04432 
continue; 04433 } 04434 else { 04435 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 04436 *out << "Darn, we are already using iterative refinement!" 04437 << "\nThe QP appears to be nonconvex and we therefore terminate the primal-dual QP algorithm!\n"; 04438 } 04439 return NONCONVEX_QP; 04440 } 04441 } 04442 // z_hat = z_hat + beta * t_P * p_z_hat 04443 if(act_set->q_hat()) 04444 Vp_StV( &act_set->z_hat(), beta * t_P, act_set->p_z_hat() ); 04445 // v = v + beta * t_P * p_v 04446 Vp_StV( v, beta * t_P, p_v() ); 04447 } 04448 // mu_D_hat = mu_D_hat + beta * t_P * p_mu_D_hat 04449 if(act_set->q_D_hat()) 04450 Vp_StV( &act_set->mu_D_hat(), beta * t_P, act_set->p_mu_D_hat() ); 04451 04452 04453 if( (int)output_level >= (int)OUTPUT_ITER_STEPS ) 04454 { 04455 *out 04456 << "\nUpdated primal and dual variables:\n" 04457 << "\n||v||inf = " << norm_inf(*v) << endl; 04458 if(act_set->q_hat()) { 04459 *out 04460 << "||z_hat||inf = " << norm_inf(act_set->z_hat()) << endl; 04461 } 04462 if(act_set->q_D_hat()) { 04463 *out 04464 << "max(|mu_D_hat(i)|) = " << norm_inf(act_set->mu_D_hat()) << endl 04465 << "min(|mu_D_hat(i)|) = " << min_abs(act_set->mu_D_hat()) << endl; 04466 } 04467 } 04468 if( (int)output_level >= (int)OUTPUT_ITER_QUANTITIES ) 04469 { 04470 *out << "\nv = \n" << *v << endl; 04471 if( assume_lin_dep_ja ) { 04472 *out 04473 << "\nPrinting active set after adding constraint ja = " << ja 04474 << " ...\n"; 04475 dump_act_set_quantities( *act_set, *out ); 04476 } 04477 else { 04478 if(act_set->q_hat()) 04479 *out << "\nz_hat =\n" << act_set->z_hat(); 04480 if(act_set->q_D_hat()) 04481 *out << "\nmu_D_hat =\n" << act_set->mu_D_hat(); 04482 } 04483 } 04484 assume_lin_dep_ja = false; // If we get here then we know these are true! 
04485 next_step = PICK_VIOLATED_CONSTRAINT; 04486 continue; 04487 } 04488 } 04489 default: 04490 TEUCHOS_TEST_FOR_EXCEPT(true); // only a local programming error 04491 } 04492 } 04493 04494 } // end try 04495 catch( std::exception& excpt ) { 04496 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 04497 *out 04498 << "\n\n*** Caught a standard exception :\n" 04499 << excpt.what() 04500 << "\n*** Rethrowing the exception ...\n"; 04501 } 04502 throw; 04503 } 04504 catch(...) { 04505 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 04506 *out 04507 << "\n\n*** Caught an unknown exception. Rethrowing the exception ...\n"; 04508 } 04509 throw; 04510 } 04511 // If you get here then the maximum number of QP iterations has been exceeded 04512 return MAX_ITER_EXCEEDED; 04513 } 04514 04515 void QPSchur::set_x( const ActiveSet& act_set, const DVectorSlice& v, DVectorSlice* x ) 04516 { 04517 using BLAS_Cpp::no_trans; 04518 using LinAlgOpPack::V_MtV; 04519 using LinAlgOpPack::Vp_MtV; 04520 04521 // x = Q_R * v(1:n_R) + Q_X * b_X + P_XF_hat * z_hat 04522 V_MtV( x, act_set.qp().Q_R(), no_trans, v(1,act_set.qp().n_R()) ); 04523 if( act_set.qp().n() > act_set.qp().n_R() ) 04524 Vp_MtV( x, act_set.qp().Q_X(), no_trans, act_set.qp().b_X() ); 04525 if( act_set.q_F_hat() ) 04526 Vp_MtV( x, act_set.P_XF_hat(), no_trans, act_set.z_hat() ); 04527 } 04528 04529 void QPSchur::set_multipliers( 04530 const ActiveSet& act_set, const DVectorSlice& v 04531 ,SpVector* mu, DVectorSlice* lambda, SpVector* lambda_breve 04532 ) 04533 { 04534 using BLAS_Cpp::no_trans; 04535 using LinAlgOpPack::V_MtV; 04536 using LinAlgOpPack::Vp_MtV; 04537 using LinAlgOpPack::Vp_StMtV; 04538 using LinAlgOpPack::V_MtV; 04539 using LinAlgOpPack::Vp_MtV; 04540 using AbstractLinAlgPack::V_MtV; 04541 using AbstractLinAlgPack::Vp_MtV; 04542 namespace GPMSTP = AbstractLinAlgPack::GenPermMatrixSliceIteratorPack; 04543 using Teuchos::Workspace; 04544 Teuchos::WorkspaceStore* wss = 
Teuchos::get_default_workspace_store().get(); 04545 04546 const size_type 04547 n = act_set.qp().n(), 04548 n_R = act_set.qp().n_R(), 04549 m = act_set.qp().m(), 04550 m_breve = act_set.qp().constraints().m_breve(), 04551 q_hat = act_set.q_hat(); 04552 const QPSchurPack::QP::x_init_t 04553 &x_init = act_set.qp().x_init(); 04554 04555 // 04556 // mu = P_plus_hat(1:n,:) * z_hat + Q_XD_hat * mu_D + (steps for initially fixed 04557 // variables fixed to the other bounds) 04558 // 04559 // lambda_breve = P_plus_hat(n+1:n+m_breve,:) * z_hat 04560 // 04561 typedef SpVector::element_type ele_t; 04562 mu->resize( n, n-m ); // Resize for the maxinum number 04563 lambda_breve->resize( m_breve, n-m ); // of active constraints possible. 04564 // mu += Q_XD_hat * mu_D_hat 04565 if( act_set.q_D_hat() ) 04566 Vp_MtV( mu, act_set.Q_XD_hat(), no_trans, act_set.mu_D_hat() ); 04567 // Set all the multipliers in z_hat 04568 if(q_hat){ 04569 const DVectorSlice 04570 z_hat = act_set.z_hat(); 04571 for( size_type s = 1; s <= q_hat; ++s ) { 04572 const int ij = act_set.ij_map(s); 04573 if(ij > 0) { 04574 const size_type j = ij; 04575 if( j <= n ) 04576 mu->add_element(ele_t(j,z_hat(s))); 04577 else 04578 lambda_breve->add_element(ele_t(j-n,z_hat(s))); 04579 } 04580 } 04581 } 04582 mu->sort(); 04583 lambda_breve->sort(); 04584 // lambda = v(n_R+1,n_R+m) 04585 if( m ) { 04586 *lambda = v(n_R+1,n_R+m); 04587 } 04588 } 04589 04590 bool QPSchur::timeout_return( StopWatchPack::stopwatch* timer, std::ostream *out, EOutputLevel output_level ) const 04591 { 04592 const value_type minutes = timer->read() / 60; 04593 if( minutes >= max_real_runtime() ) { 04594 if( (int)output_level >= (int)OUTPUT_BASIC_INFO ) { 04595 *out 04596 << "\n*** Runtime = " << minutes << " min >= max_real_runtime = " << max_real_runtime() << " min!\n" 04597 << "Must terminite the algorithm!\n"; 04598 } 04599 return true; 04600 } 04601 return false; 04602 } 04603 04604 QPSchur::EIterRefineReturn 04605 QPSchur::iter_refine( 
04606 const ActiveSet &act_set 04607 ,std::ostream *out 04608 ,EOutputLevel output_level 04609 ,const value_type ao 04610 ,const DVectorSlice *bo 04611 ,const value_type aa 04612 ,const DVectorSlice *ba 04613 ,DVectorSlice *v 04614 ,DVectorSlice *z 04615 ,size_type *iter_refine_num_resid 04616 ,size_type *iter_refine_num_solves 04617 ) 04618 { 04619 using std::endl; 04620 using std::setw; 04621 using std::left; 04622 using std::right; 04623 using BLAS_Cpp::no_trans; 04624 using BLAS_Cpp::trans; 04625 using DenseLinAlgPack::norm_inf; 04626 using DenseLinAlgPack::Vp_StV; 04627 using LinAlgOpPack::Vp_V; 04628 using LinAlgOpPack::Vp_StMtV; 04629 using LinAlgOpPack::V_InvMtV; 04630 using Teuchos::Workspace; 04631 Teuchos::WorkspaceStore* wss = Teuchos::get_default_workspace_store().get(); 04632 04633 typedef DenseLinAlgPack::value_type extra_value_type; 04634 04635 const value_type small_num = std::numeric_limits<value_type>::min(); 04636 04637 const int int_w = 8; 04638 const char int_ul[] = "------"; 04639 const int dbl_min_w = 20; 04640 const int dbl_w = ( out ? my_max(dbl_min_w,int(out->precision()+8)): 20 ); 04641 const char dbl_ul[] = "------------------"; 04642 04643 const QPSchurPack::QP 04644 &qp = act_set.qp(); 04645 const MatrixSymOpNonsing 04646 &Ko = qp.Ko(), 04647 &S_hat = act_set.S_hat(); 04648 const MatrixOp 04649 &U_hat = act_set.U_hat(); 04650 const DenseLinAlgPack::size_type 04651 n = qp.n(), 04652 n_R = qp.n_R(), 04653 m = qp.m(), 04654 q_hat = act_set.q_hat(); 04655 const DVectorSlice 04656 fo = qp.fo(), 04657 d_hat = (q_hat ? 
act_set.d_hat() : DVectorSlice()); 04658 04659 Workspace<extra_value_type> 04660 ext_ro_ws(wss,n_R+m), 04661 ext_ra_ws(wss,q_hat); 04662 DenseLinAlgPack::VectorSliceTmpl<extra_value_type> 04663 ext_ro(&ext_ro_ws[0],ext_ro_ws.size()), 04664 ext_ra(ext_ra_ws.size()?&ext_ra_ws[0]:NULL,ext_ra_ws.size()); 04665 Workspace<value_type> 04666 ro_ws(wss,n_R+m), 04667 ra_ws(wss,q_hat), 04668 t1_ws(wss,n_R+m), 04669 del_v_ws(wss,n_R+m), 04670 del_z_ws(wss,q_hat), 04671 v_itr_ws(wss,n_R+m), 04672 z_itr_ws(wss,q_hat); 04673 DVectorSlice 04674 ro(&ro_ws[0],ro_ws.size()), 04675 ra(ra_ws.size()?&ra_ws[0]:NULL,ra_ws.size()), 04676 t1(&t1_ws[0],t1_ws.size()), 04677 del_v(&del_v_ws[0],del_v_ws.size()), 04678 del_z(del_z_ws.size()?&del_z_ws[0]:NULL,del_z_ws.size()), 04679 v_itr(&v_itr_ws[0],v_itr_ws.size()), 04680 z_itr(z_itr_ws.size()?&z_itr_ws[0]:NULL,z_itr_ws.size()); 04681 04682 // Accumulate into temporary variables 04683 v_itr = *v; 04684 if(q_hat) 04685 z_itr = *z; 04686 04687 // Print summary header 04688 if( out && (int)output_level >= (int)OUTPUT_ITER_SUMMARY ) { 04689 *out 04690 << "\nBeginning iterative refinement ..." 
04691 << "\niter_refine_opt_tol = " << iter_refine_opt_tol() 04692 << ", iter_refine_feas_tol = " << iter_refine_feas_tol() 04693 << "\niter_refine_min_iter = " << iter_refine_min_iter() 04694 << ", iter_refine_max_iter = " << iter_refine_max_iter() << "\n\n"; 04695 // 04696 *out 04697 << right << setw(int_w) << "ir_itr" 04698 << right << setw(dbl_w) << "roR_scaling" 04699 << right << setw(dbl_w) << "||roR||s" 04700 << left << setw(1) << " "; 04701 if(m) { 04702 *out 04703 << right << setw(dbl_w) << "rom_scaling" 04704 << right << setw(dbl_w) << "||rom||s" 04705 << left << setw(1) << " "; 04706 } 04707 if(q_hat) { 04708 *out 04709 << right << setw(dbl_w) << "ra_scaling" 04710 << right << setw(dbl_w) << "||ra||s" 04711 << left << setw(1) << " "; 04712 } 04713 *out 04714 << right << setw(dbl_w) << "||del_v||/||v||inf" 04715 << right << setw(dbl_w) << "||del_z||/||z||inf" 04716 << endl; 04717 // 04718 *out 04719 << right << setw(int_w) << int_ul 04720 << right << setw(dbl_w) << dbl_ul 04721 << right << setw(dbl_w) << dbl_ul 04722 << left << setw(1) << " "; 04723 if(m) { 04724 *out 04725 << right << setw(dbl_w) << dbl_ul 04726 << right << setw(dbl_w) << dbl_ul 04727 << left << setw(1) << " "; 04728 } 04729 if(q_hat) { 04730 *out 04731 << right << setw(dbl_w) << dbl_ul 04732 << right << setw(dbl_w) << dbl_ul 04733 << left << setw(1) << " "; 04734 } 04735 *out 04736 << right << setw(dbl_w) << dbl_ul 04737 << right << setw(dbl_w) << dbl_ul 04738 << endl; 04739 } 04740 // 04741 // Perform iterative refinement iterations 04742 // 04743 EIterRefineReturn return_status = ITER_REFINE_NOT_PERFORMED; 04744 value_type 04745 roR_nrm_o, rom_nrm_o, ra_nrm_o, 04746 roR_nrm, rom_nrm, ra_nrm; 04747 for( size_type iter_refine_k = 0; 04748 iter_refine_k < iter_refine_max_iter() && return_status != ITER_REFINE_CONVERGED; 04749 ++iter_refine_k) 04750 { 04751 // 04752 // Compute the residual (in extended precision?) 
04753 // 04754 // [ ro ] = [ Ko U_hat ] [ v ] + [ ao*bo ] 04755 // [ ra ] [ U_hat' V_hat ] [ z ] [ aa*ba ] 04756 // 04757 value_type 04758 roR_scaling = 0.0, 04759 rom_scaling = 0.0, 04760 ra_scaling = 0.0; 04761 ++(*iter_refine_num_resid); 04762 calc_resid( 04763 act_set 04764 ,v_itr, z_itr 04765 ,ao 04766 ,bo 04767 ,&ext_ro 04768 ,&roR_scaling 04769 ,m ? &rom_scaling : NULL 04770 ,aa 04771 ,ba 04772 ,q_hat ? &ext_ra : NULL 04773 ,q_hat ? &ra_scaling : NULL 04774 ); 04775 std::copy(ext_ro.begin(),ext_ro.end(),ro.begin()); // Convert back to standard precision 04776 if(q_hat) std::copy(ext_ra.begin(),ext_ra.end(),ra.begin()); 04777 // 04778 // Calcuate convergence criteria 04779 // 04780 roR_nrm = norm_inf(ro(1,n_R)); 04781 rom_nrm = (m ? norm_inf(ro(n_R+1,n_R+m)) : 0.0); 04782 ra_nrm = (q_hat ? norm_inf(ra) : 0.0); 04783 if( iter_refine_k == 0 ) { 04784 roR_nrm_o = roR_nrm; 04785 rom_nrm_o = rom_nrm; 04786 ra_nrm_o = rom_nrm; 04787 } 04788 const bool 04789 is_roR_conv = roR_nrm / (1.0 + roR_scaling) < iter_refine_opt_tol(), 04790 is_rom_conv = (m ? rom_nrm / (1.0 + rom_scaling) < iter_refine_feas_tol() : true ), 04791 is_ra_conv = (q_hat ? ra_nrm / (1.0 + ra_scaling) < iter_refine_feas_tol() : true ), 04792 is_conv = is_roR_conv && is_rom_conv && is_ra_conv; 04793 // 04794 // Print beginning of summary line for residuals 04795 // 04796 if( out && (int)output_level >= (int)OUTPUT_ITER_SUMMARY ) { 04797 *out 04798 << right << setw(int_w) << iter_refine_k 04799 << right << setw(dbl_w) << roR_scaling 04800 << right << setw(dbl_w) << (roR_nrm / (1.0 + roR_scaling)) 04801 << left << setw(1) << (is_roR_conv ? "*" : " "); 04802 if(m) { 04803 *out 04804 << right << setw(dbl_w) << rom_scaling 04805 << right << setw(dbl_w) << (rom_nrm /(1.0 + rom_scaling)) 04806 << left << setw(1) << (is_rom_conv ? 
"*" : " "); 04807 } 04808 if(q_hat) { 04809 *out 04810 << right << setw(dbl_w) << ra_scaling 04811 << right << setw(dbl_w) << (ra_nrm /(1.0 + ra_scaling)) 04812 << left << setw(1) << (is_ra_conv ? "*" : " "); 04813 } 04814 } 04815 // 04816 // Check for convergence 04817 // 04818 if( iter_refine_k + 1 < iter_refine_min_iter() ) { 04819 // Keep on going even if we have converged to desired tolerances! 04820 } 04821 else if( is_conv ) { 04822 if( out && (int)output_level >= (int)OUTPUT_ITER_SUMMARY ) { 04823 *out 04824 << right << setw(dbl_w) << "-" 04825 << right << setw(dbl_w) << "-" 04826 << endl; 04827 } 04828 if( iter_refine_k == 0 ) 04829 return_status = ITER_REFINE_NOT_NEEDED; 04830 else 04831 return_status = ITER_REFINE_CONVERGED; 04832 break; 04833 } 04834 // Make sure we have made progress 04835 if( roR_nrm_o < roR_nrm && rom_nrm_o < rom_nrm && ra_nrm_o < rom_nrm ) { 04836 return_status = ITER_REFINE_NOT_IMPROVED; 04837 break; // No progress was make in converging the equations! 04838 } 04839 // 04840 // Solve for the steps 04841 // 04842 // [ Ko U_hat ] [ del_v ] = [ ro ] 04843 // [ U_hat' V_hat ] [ del_z ] [ ra ] 04844 // 04845 ++(*iter_refine_num_solves); 04846 if( q_hat ) { 04847 // del_z = inv(S_hat)*(ra - U_hat'*inv(Ko)*ro) 04848 V_InvMtV( &t1, Ko, no_trans, ro ); 04849 calc_z( act_set.S_hat(), ra, act_set.U_hat(), &t1, &del_z ); 04850 } 04851 calc_v( Ko, &ro, U_hat, del_z, &del_v ); 04852 // 04853 // Compute steps: 04854 // 04855 // v += -del_v 04856 // z += -del_z 04857 // 04858 Vp_StV( &v_itr, -1.0, del_v ); 04859 if( q_hat ) 04860 Vp_StV( &z_itr, -1.0, del_z ); 04861 // 04862 // Print rest of summary line for steps 04863 // 04864 if( out && (int)output_level >= (int)OUTPUT_ITER_SUMMARY ) { 04865 *out 04866 << right << setw(dbl_w) << norm_inf(del_v) / (norm_inf(v_itr) + small_num) 04867 << right << setw(dbl_w) << norm_inf(del_z) / (norm_inf(z_itr) + small_num) 04868 << endl; 04869 } 04870 } 04871 if( iter_refine_max_iter() == 0 ) { 04872 if( 
(int)output_level >= (int)OUTPUT_ITER_SUMMARY ) { 04873 *out 04874 << "\nWarning, iter_refine_max_iter == 0. Iterative refinement was not performed." 04875 << "\nLeaving the original solution intact ...\n"; 04876 } 04877 return_status = ITER_REFINE_NOT_PERFORMED; 04878 } 04879 else { 04880 if( iter_refine_max_iter() == 1 ) { 04881 if( (int)output_level >= (int)OUTPUT_ITER_SUMMARY ) { 04882 *out 04883 << "\nWarning, iter_refine_max_iter == 1. Only one step of iterative refinement" 04884 << "was performed and the step is taken which out checking the residual ...\n"; 04885 } 04886 *v = v_itr; 04887 if(q_hat) 04888 *z = z_itr; 04889 return_status = ITER_REFINE_ONE_STEP; 04890 } 04891 else if( roR_nrm_o < roR_nrm && rom_nrm_o < rom_nrm && ra_nrm_o < rom_nrm ) { 04892 if( (int)output_level >= (int)OUTPUT_ITER_SUMMARY ) { 04893 *out 04894 << "\nNo progress was made in reducing the residuals." 04895 << "\nLeaving the original solution intact ...\n"; 04896 } 04897 return_status = ITER_REFINE_NOT_IMPROVED; 04898 } 04899 else { 04900 // The residuals were at least not increased so let's take the new solution 04901 *v = v_itr; 04902 if(q_hat) 04903 *z = z_itr; 04904 if( return_status != ITER_REFINE_CONVERGED && return_status != ITER_REFINE_NOT_NEEDED ) { 04905 if( (int)output_level >= (int)OUTPUT_ITER_SUMMARY ) { 04906 *out 04907 << "\nThe residuals were not converged but they were not increased either." 
04908 << "\nTake the new point anyway ...\n"; 04909 } 04910 return_status = ITER_REFINE_IMPROVED; 04911 } 04912 } 04913 } 04914 return return_status; 04915 } 04916 04917 // private static member functions for QPSchur 04918 04919 void QPSchur::dump_act_set_quantities( 04920 const ActiveSet& act_set, std::ostream& out 04921 ,bool print_S_hat 04922 ) 04923 { 04924 using std::endl; 04925 using std::setw; 04926 using std::left; 04927 using std::right; 04928 04929 const QPSchurPack::QP 04930 &qp = act_set.qp(); 04931 const QPSchurPack::Constraints 04932 &constraints = qp.constraints(); 04933 04934 const int int_w = 10; 04935 const char int_ul[] = "--------"; 04936 const int dbl_min_w = 20; 04937 const int dbl_w = my_max(dbl_min_w,int(out.precision()+8)); 04938 const char dbl_ul[] = "------------------"; 04939 04940 out << "\n*** Dumping the current active set ***\n" 04941 << "\nDimensions of the current active set:\n" 04942 << "\nn = " << right << setw(int_w) << qp.n() << " (Number of unknowns)" 04943 << "\nn_R = " << right << setw(int_w) << qp.n_R() << " (Number of initially free variables in Ko)" 04944 << "\nm = " << right << setw(int_w) << qp.m() << " (Number of initially fixed variables not in Ko)" 04945 << "\nm_breve = " << right << setw(int_w) << constraints.m_breve() << " (Number of extra general equality/inequality constriants)" 04946 << "\nq_hat = " << right << setw(int_w) << act_set.q_hat() << " (Number of augmentations to the initial KKT system Ko)" 04947 << "\nq_plus_hat = " << right << setw(int_w) << act_set.q_plus_hat() << " (Number of added variable bounds and general constraints)" 04948 << "\nq_F_hat = " << right << setw(int_w) << act_set.q_F_hat() << " (Number of initially fixed variables not at their initial bound)" 04949 << "\nq_C_hat = " << right << setw(int_w) << act_set.q_C_hat() << " (Number of initially fixed variables at the other bound)" 04950 << "\nq_D_hat = " << right << setw(int_w) << act_set.q_D_hat() << " (Number of initially fixed 
variables still fixed at initial bound)" 04951 << endl; 04952 04953 // Print table of quantities in augmented KKT system 04954 out << "\nQuantities for augmentations to the initial KKT system:\n"; 04955 const size_type q_hat = act_set.q_hat(); 04956 out << endl 04957 << right << setw(int_w) << "s" 04958 << right << setw(int_w) << "ij_map(s)" 04959 << right << setw(int_w) << "bnd(s)" 04960 << right << setw(dbl_w) << "constr_norm(s)" 04961 << right << setw(dbl_w) << "d_hat(s)" 04962 << right << setw(dbl_w) << "z_hat(s)" 04963 << right << setw(dbl_w) << "p_z_hat(s)" 04964 << endl; 04965 out << right << setw(int_w) << int_ul 04966 << right << setw(int_w) << int_ul 04967 << right << setw(int_w) << int_ul 04968 << right << setw(dbl_w) << dbl_ul 04969 << right << setw(dbl_w) << dbl_ul 04970 << right << setw(dbl_w) << dbl_ul 04971 << right << setw(dbl_w) << dbl_ul 04972 << endl; 04973 {for( size_type s = 1; s <= q_hat; ++s ) { 04974 out << right << setw(int_w) << s 04975 << right << setw(int_w) << act_set.ij_map(s) 04976 << right << setw(int_w) << bnd_str(act_set.bnd(s)) 04977 << right << setw(dbl_w) << act_set.constr_norm(s) 04978 << right << setw(dbl_w) << act_set.d_hat()(s) 04979 << right << setw(dbl_w) << act_set.z_hat()(s) 04980 << right << setw(dbl_w) << act_set.p_z_hat()(s) 04981 << endl; 04982 }} 04983 04984 // Print P_XF_hat, P_FC_hat, P_plus_hat, U_hat and S_hat 04985 out << "\nP_XF_hat =\n" << act_set.P_XF_hat(); 04986 out << "\nP_FC_hat =\n" << act_set.P_FC_hat(); 04987 out << "\nP_plus_hat =\n" << act_set.P_plus_hat(); 04988 out << "\nU_hat =\n" << act_set.U_hat(); 04989 if(print_S_hat) 04990 out << "\nS_hat =\n" << act_set.S_hat(); 04991 04992 // Print table of multipliers for q_D_hat 04993 out << "\nQuantities for initially fixed variables which are still fixed at their initial bound:\n"; 04994 const size_type q_D_hat = act_set.q_D_hat(); 04995 out << endl 04996 << right << setw(int_w) << "k" 04997 << right << setw(int_w) << "l_fxfx(k)" 04998 << right << 
setw(dbl_w) << "mu_D_hat(k)" 04999 << right << setw(dbl_w) << "p_mu_D_hat(s)" 05000 << endl; 05001 out << right << setw(int_w) << int_ul 05002 << right << setw(int_w) << int_ul 05003 << right << setw(dbl_w) << dbl_ul 05004 << right << setw(dbl_w) << dbl_ul 05005 << endl; 05006 {for( size_type k = 1; k <= q_D_hat; ++k ) { 05007 out << right << setw(int_w) << k 05008 << right << setw(int_w) << act_set.l_fxfx(k) 05009 << right << setw(dbl_w) << act_set.mu_D_hat()(k) 05010 << right << setw(dbl_w) << act_set.p_mu_D_hat()(k) 05011 << endl; 05012 }} 05013 05014 // Print Q_XD_hat 05015 out << "\nQ_XD_hat =\n" << act_set.Q_XD_hat(); 05016 05017 out << "\n*** End dump of current active set ***\n"; 05018 } 05019 05020 // QPSchurPack::QP 05021 05022 void QPSchurPack::QP::dump_qp( std::ostream& out ) 05023 { 05024 using std::endl; 05025 using std::setw; 05026 using std::left; 05027 using std::right; 05028 using Teuchos::Workspace; 05029 Teuchos::WorkspaceStore* wss = Teuchos::get_default_workspace_store().get(); 05030 05031 const Constraints 05032 &constraints = this->constraints(); 05033 05034 const size_type 05035 n = this->n(), 05036 n_R = this->n_R(), 05037 m = this->m(), 05038 m_breve = constraints.m_breve(); 05039 05040 out << "\n*** Original QP ***\n" 05041 << "\nn = " << n 05042 << "\nm = " << m 05043 << "\nm_breve = " << m_breve 05044 << endl; 05045 out << "\ng =\n" << g(); 05046 out << "\nG =\n" << G(); 05047 if(m) { 05048 out << "\nA =\n" << A(); 05049 // Le'ts recover c from fo(n_R+1:n_R+m) = c - A' * Q_X * b_x 05050 throw std::logic_error( 05051 error_msg(__FILE__,__LINE__,"QPSchurPack::QP::dump_qp(...) : Error, " 05052 "m != not supported yet!")); 05053 // ToDo: Implement this when needed! 
05054 } 05055 out << "\nA_bar =\n" << constraints.A_bar(); 05056 // Get c_L_bar and c_U_bar 05057 DVector c_L_bar(n+m_breve), c_U_bar(n+m_breve); 05058 {for( size_type j = 1; j <= n+m_breve; ++j ){ 05059 c_L_bar(j) = constraints.get_bnd(j,LOWER); 05060 c_U_bar(j) = constraints.get_bnd(j,UPPER); 05061 }} 05062 out << "\nc_L_bar =\n" << c_L_bar(); 05063 out << "\nc_U_bar =\n" << c_U_bar(); 05064 05065 out << "\n*** Initial KKT system (fixed and free variables) ***\n" 05066 << "\nn_R = " << n_R 05067 << endl; 05068 out << "\nb_X =\n" << b_X(); 05069 out << "\nQ_R =\n" << Q_R(); 05070 out << "\nQ_X =\n" << Q_X(); 05071 out << "\nKo =\n" << Ko(); 05072 out << "\nfo =\n" << fo(); 05073 } 05074 05075 } // end namespace ConstrainedOptPack
1.7.6.1