// NLPInterfacePack: C++ Interfaces and Implementation for Non-Linear Programs
// Version of the Day
00001 // @HEADER 00002 // *********************************************************************** 00003 // 00004 // Moocho: Multi-functional Object-Oriented arCHitecture for Optimization 00005 // Copyright (2003) Sandia Corporation 00006 // 00007 // Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive 00008 // license for use of this work by or on behalf of the U.S. Government. 00009 // 00010 // Redistribution and use in source and binary forms, with or without 00011 // modification, are permitted provided that the following conditions are 00012 // met: 00013 // 00014 // 1. Redistributions of source code must retain the above copyright 00015 // notice, this list of conditions and the following disclaimer. 00016 // 00017 // 2. Redistributions in binary form must reproduce the above copyright 00018 // notice, this list of conditions and the following disclaimer in the 00019 // documentation and/or other materials provided with the distribution. 00020 // 00021 // 3. Neither the name of the Corporation nor the names of the 00022 // contributors may be used to endorse or promote products derived from 00023 // this software without specific prior written permission. 00024 // 00025 // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY 00026 // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 00027 // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 00028 // PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL SANDIA CORPORATION OR THE 00029 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 00030 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 00031 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 00032 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 00033 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 00034 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 00035 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 00036 // 00037 // Questions? Contact Roscoe A. Bartlett (rabartl@sandia.gov) 00038 // 00039 // *********************************************************************** 00040 // @HEADER 00041 00042 #include <assert.h> 00043 00044 #include <iostream> // Debug only 00045 #include "DenseLinAlgPack_PermOut.hpp" 00046 00047 #include <algorithm> 00048 #include <sstream> 00049 #include <limits> 00050 #include <stdio.h> 00051 #include <fstream> 00052 00053 #include "NLPInterfacePack_NLPSerialPreprocess.hpp" 00054 #include "AbstractLinAlgPack_SpVectorOp.hpp" 00055 #include "AbstractLinAlgPack_PermutationSerial.hpp" 00056 #include "AbstractLinAlgPack_VectorDenseEncap.hpp" 00057 #include "AbstractLinAlgPack_VectorStdOps.hpp" 00058 #include "AbstractLinAlgPack_VectorAuxiliaryOps.hpp" 00059 #include "DenseLinAlgPack_DVectorOp.hpp" 00060 #include "DenseLinAlgPack_IVector.hpp" 00061 #include "DenseLinAlgPack_PermVecMat.hpp" 00062 #include "DenseLinAlgPack_LinAlgOpPack.hpp" 00063 #include "Teuchos_Assert.hpp" 00064 #include "Teuchos_AbstractFactoryStd.hpp" 00065 #include "Teuchos_dyn_cast.hpp" 00066 00067 namespace LinAlgOpPack { 00068 using AbstractLinAlgPack::Vp_StV; 00069 } 00070 00071 namespace NLPInterfacePack { 00072 00073 // NLPSerialPreprocess 00074 00075 // Static public members 00076 00077 value_type 00078 NLPSerialPreprocess::fixed_var_mult() 00079 { 00080 return 
std::numeric_limits<DenseLinAlgPack::DVector::value_type>::max()-100; // Don't know what to use? 00081 } 00082 00083 // Constructors / nitializers 00084 00085 NLPSerialPreprocess::NLPSerialPreprocess( 00086 ) 00087 :initialized_(false) 00088 ,force_xinit_in_bounds_(true) 00089 ,scale_f_(1.0) 00090 ,basis_selection_num_(0) 00091 00092 {} 00093 00094 // Overridden public members from NLP 00095 00096 void NLPSerialPreprocess::force_xinit_in_bounds(bool force_xinit_in_bounds) 00097 { 00098 force_xinit_in_bounds_ = force_xinit_in_bounds; 00099 } 00100 00101 bool NLPSerialPreprocess::force_xinit_in_bounds() const 00102 { 00103 return force_xinit_in_bounds_; 00104 } 00105 00106 void NLPSerialPreprocess::initialize(bool test_setup) 00107 { 00108 namespace mmp = MemMngPack; 00109 00110 const value_type inf_bnd = NLP::infinite_bound(); 00111 00112 basis_selection_num_ = 0; 00113 00114 if( initialized_ && !imp_nlp_has_changed() ) { 00115 // The subclass NLP has not changed so we can just 00116 // slip this preprocessing. 00117 NLPObjGrad::initialize(test_setup); 00118 return; 00119 } 00120 00121 // Get the dimensions of the original problem 00122 00123 n_orig_ = imp_n_orig(); 00124 m_orig_ = imp_m_orig(); // This may be zero! 00125 mI_orig_ = imp_mI_orig(); // This may be zero! 
00126 00127 // Get the dimensions of the full problem 00128 00129 n_full_ = n_orig_ + mI_orig_; 00130 m_full_ = m_orig_ + mI_orig_; 00131 00132 // Initialize the storage for the intermediate quanities 00133 00134 xinit_full_.resize(n_full_); 00135 xl_full_.resize(n_full_); 00136 xu_full_.resize(n_full_); 00137 x_full_.resize(n_full_); 00138 c_orig_.resize(m_orig_); 00139 h_orig_.resize(mI_orig_); 00140 Gf_full_.resize(n_full_); 00141 var_full_to_fixed_.resize(n_full_); 00142 equ_perm_.resize(m_full_); 00143 inv_equ_perm_.resize(m_full_); 00144 space_c_.initialize(m_full_); 00145 space_c_breve_.initialize(m_orig_); 00146 space_h_breve_.initialize(mI_orig_); 00147 factory_P_var_ = Teuchos::rcp( new Teuchos::AbstractFactoryStd<Permutation,PermutationSerial>() ); 00148 factory_P_equ_ = Teuchos::rcp( new Teuchos::AbstractFactoryStd<Permutation,PermutationSerial>() ); 00149 00150 // Intialize xinit_full_, xl_full_ and xu_full_ for the initial point which will set the 00151 // fixed elements which will not change during the optimization. 00152 xinit_full_(1,n_orig_) = imp_xinit_orig(); 00153 xl_full_(1,n_orig_) = imp_xl_orig(); 00154 xu_full_(1,n_orig_) = imp_xu_orig(); 00155 if( n_full_ > n_orig_ ) { // Include slack varaibles 00156 xinit_full_(n_orig_+1,n_full_) = 0.0; 00157 xl_full_(n_orig_+1,n_full_) = imp_hl_orig(); 00158 xu_full_(n_orig_+1,n_full_) = imp_hu_orig(); 00159 } 00160 00161 const bool has_var_bounds = imp_has_var_bounds() || n_full_ > n_orig_; 00162 00163 // Force the initial point in bounds if it is not. 00164 if( force_xinit_in_bounds() && has_var_bounds ) { 00165 AbstractLinAlgPack::force_in_bounds( 00166 VectorMutableDense( xl_full_(), Teuchos::null ) 00167 ,VectorMutableDense( xu_full_(), Teuchos::null ) 00168 ,&VectorMutableDense( x_full_(), Teuchos::null ) 00169 ); 00170 } 00171 00172 // Determine which variables are fixed by bounds! 
00173 size_type 00174 xl_nz = 0, 00175 xu_nz = 0, 00176 num_bnd_x = 0; 00177 if( has_var_bounds ) { 00178 // Determine which variables are fixed by bounds and 00179 // adjust the bounds if needed. 00180 DVector::iterator 00181 xl_full = xl_full_.begin(), 00182 xu_full = xu_full_.begin(); 00183 n_ = 0; 00184 size_type num_fixed = 0; 00185 for(int i = 1; i <= n_full_; ++i, ++xl_full, ++xu_full) { 00186 TEUCHOS_TEST_FOR_EXCEPTION( 00187 *xl_full > *xu_full, InconsistantBounds 00188 ,"NLPSerialPreprocess::initialize() : Error, Inconsistant bounds: xl_full(" 00189 << i << ") > xu_full(" << i << ")" ); 00190 if(*xl_full == *xu_full) { 00191 // 00192 // Fixed between bounds 00193 // 00194 var_full_to_fixed_(n_full_ - num_fixed) = i; 00195 num_fixed++; 00196 } 00197 else { 00198 // 00199 // Not Fixed between bounds 00200 // 00201 // Adjust the bounds if needed 00202 *xl_full = *xl_full < -inf_bnd ? -inf_bnd : *xl_full; 00203 *xu_full = *xu_full > +inf_bnd ? +inf_bnd : *xu_full; 00204 // 00205 n_++; 00206 var_full_to_fixed_(n_) = i; 00207 // Check if xl is bounded 00208 if( *xl_full != -inf_bnd ) 00209 ++xl_nz; 00210 // Check if xu is bounded 00211 if( *xu_full != inf_bnd ) 00212 ++xu_nz; 00213 if( *xl_full != -inf_bnd || *xu_full != inf_bnd ) 00214 ++num_bnd_x; 00215 } 00216 } 00217 } 00218 else { 00219 // None of the variables are fixed by bounds because there are no bounds 00220 n_ = n_full_; 00221 DenseLinAlgPack::identity_perm( &var_full_to_fixed_ ); 00222 } 00223 00224 // std::cerr << "n_ =" << n_ << std::endl; 00225 // std::cerr << "var_full_to_fixed_ =\n" << var_full_to_fixed_; 00226 00227 num_bounded_x_ = num_bnd_x; 00228 00229 // Validate that we still have a solvable problem 00230 TEUCHOS_TEST_FOR_EXCEPTION( 00231 n_ < m_full_, InvalidInitialization 00232 ,"NLPSerialPreprocess::initialize() : Error, after removing fixed " 00233 << "variables, n = " << n_ << " < m = " << m_full_ 00234 << ", and the NLP is over determined and can not be solved!" 
); 00235 00236 // Initialize inverse of var_full_to_fixed_ 00237 DenseLinAlgPack::inv_perm( var_full_to_fixed_, &inv_var_full_to_fixed_ ); 00238 00239 // std::cerr << "inv_var_full_to_fixed_ =\n" << inv_var_full_to_fixed_; 00240 00241 var_perm_.resize(n_); 00242 space_x_.initialize(n_); 00243 00244 // Resize xinit, xl, xu, hl and hu 00245 xinit_.initialize(n_); 00246 xl_.initialize(n_); 00247 xu_.initialize(n_); 00248 if(mI_orig_) { 00249 hl_breve_.initialize(mI_orig_); 00250 hu_breve_.initialize(mI_orig_); 00251 } 00252 00253 if( m_full_ ) { 00254 // Get the first basis 00255 if( !nlp_selects_basis() ) { 00256 // The NLP is not selecting the first basis so set to the initial basis to 00257 // the indentity permutations and assume full column rank for Gc. 00258 DenseLinAlgPack::identity_perm(&var_perm_); 00259 // std::cerr << "var_perm_ =\n" << var_perm_; 00260 DenseLinAlgPack::identity_perm(&equ_perm_); 00261 // std::cerr << "equ_perm_ =\n" << equ_perm_; 00262 DenseLinAlgPack::identity_perm(&inv_equ_perm_); 00263 // std::cerr << "inv_equ_perm_ =\n" << inv_equ_perm_; 00264 r_ = m_full_; 00265 var_from_full( xinit_full_().begin(), xinit_.set_vec().begin() ); 00266 if(has_var_bounds) { 00267 var_from_full( xl_full_().begin(), xl_.set_vec().begin() ); 00268 var_from_full( xu_full_().begin(), xu_.set_vec().begin() ); 00269 do_force_xinit_in_bounds(); 00270 } 00271 else { 00272 xl_ = -inf_bnd; 00273 xu_ = +inf_bnd; 00274 } 00275 } 00276 else { 00277 // The nlp subclass is selecting the first basis. 00278 00279 // make intialized_ true temporaraly so you can call get_next_basis() 00280 // and assert_initialized() called in it will not throw an exception. 
00281 initialized_ = true; 00282 00283 try { 00284 size_type rank; 00285 const bool 00286 get_next_basis_return = get_next_basis_remove_fixed( &var_perm_, &equ_perm_, &rank ); 00287 TEUCHOS_TEST_FOR_EXCEPTION( 00288 !get_next_basis_return, std::logic_error 00289 ,"NLPSerialPreprocess::initialize(): " 00290 " If nlp_selects_basis() is true then imp_get_next_basis() " 00291 " must return true for the first call" ); 00292 assert_and_set_basis( var_perm_, equ_perm_, rank ); 00293 // std::cerr << "var_perm_ =\n" << var_perm_; 00294 // std::cerr << "equ_perm_ =\n" << equ_perm_; 00295 } 00296 catch(...) { 00297 // In case an exception was thrown I don't want to leave #this# 00298 // in an inconsistant state. 00299 initialized_ = false; 00300 throw; 00301 } 00302 00303 initialized_ = false; // resize to false to continue initialization 00304 } 00305 } 00306 else { 00307 DenseLinAlgPack::identity_perm(&var_perm_); 00308 r_ = 0; 00309 var_from_full( xinit_full_().begin(), xinit_.set_vec().begin() ); 00310 if(has_var_bounds) { 00311 var_from_full( xl_full_().begin(), xl_.set_vec().begin() ); 00312 var_from_full( xu_full_().begin(), xu_.set_vec().begin() ); 00313 do_force_xinit_in_bounds(); 00314 } 00315 else { 00316 xl_ = -inf_bnd; 00317 xu_ = +inf_bnd; 00318 } 00319 } 00320 00321 // std::cerr << "n_full_ = " << n_full_ << std::endl; 00322 // std::cerr << "n_ = " << n_ << std::endl; 00323 // std::cerr << "var_full_to_fixed_ =\n" << var_full_to_fixed_; 00324 // std::cerr << "inv_var_full_to_fixed_ =\n" << inv_var_full_to_fixed_; 00325 // std::cerr << "var_perm_ =\n" << var_perm_; 00326 // std::cerr << "equ_perm_ =\n" << equ_perm_; 00327 00328 // If you get here then the initialization went Ok. 
00329 NLPObjGrad::initialize(test_setup); 00330 initialized_ = true; 00331 } 00332 00333 bool NLPSerialPreprocess::is_initialized() const 00334 { 00335 return initialized_; 00336 } 00337 00338 size_type NLPSerialPreprocess::n() const 00339 { 00340 assert_initialized(); 00341 return n_; 00342 } 00343 00344 size_type NLPSerialPreprocess::m() const 00345 { 00346 assert_initialized(); 00347 return m_full_; 00348 } 00349 00350 NLP::vec_space_ptr_t NLPSerialPreprocess::space_x() const 00351 { 00352 namespace mmp = MemMngPack; 00353 return Teuchos::rcp(&space_x_,false); 00354 } 00355 00356 NLP::vec_space_ptr_t NLPSerialPreprocess::space_c() const 00357 { 00358 namespace mmp = MemMngPack; 00359 return ( m_full_ ? Teuchos::rcp(&space_c_,false) : Teuchos::null ); 00360 } 00361 00362 size_type NLPSerialPreprocess::num_bounded_x() const 00363 { 00364 return num_bounded_x_; 00365 } 00366 00367 const Vector& NLPSerialPreprocess::xl() const 00368 { 00369 assert_initialized(); 00370 return xl_; 00371 } 00372 00373 const Vector& NLPSerialPreprocess::xu() const 00374 { 00375 assert_initialized(); 00376 return xu_; 00377 } 00378 00379 const Vector& NLPSerialPreprocess::xinit() const 00380 { 00381 assert_initialized(); 00382 return xinit_; 00383 } 00384 00385 void NLPSerialPreprocess::get_init_lagrange_mult( 00386 VectorMutable* lambda 00387 ,VectorMutable* nu 00388 ) const 00389 { 00390 assert_initialized(); 00391 // ToDo: Get subclass to define what these are! 
00392 if(lambda) 00393 *lambda = 0.0; 00394 if(nu) 00395 *nu = 0.0; 00396 } 00397 00398 void NLPSerialPreprocess::scale_f( value_type scale_f ) 00399 { 00400 assert_initialized(); 00401 scale_f_ = scale_f; 00402 } 00403 00404 value_type NLPSerialPreprocess::scale_f() const 00405 { 00406 assert_initialized(); 00407 return scale_f_; 00408 } 00409 00410 void NLPSerialPreprocess::report_final_solution( 00411 const Vector& x 00412 ,const Vector* lambda 00413 ,const Vector* nu 00414 ,bool is_optimal 00415 ) 00416 { 00417 assert_initialized(); 00418 // set x_full 00419 VectorDenseEncap x_d(x); 00420 DVector x_full(n_full_); 00421 x_full = x_full_; // set any fixed components (as well as the others at first) 00422 var_to_full( x_d().begin(), x_full().begin() ); // set the nonfixed components 00423 // set lambda_full 00424 DVector lambda_full; 00425 if( lambda ) { 00426 VectorDenseEncap lambda_d(*lambda); 00427 DVectorSlice lambda = lambda_d(); 00428 lambda_full.resize(m_full_); 00429 for(size_type j = 1; j <= m_full_; j++) 00430 lambda_full(equ_perm_(j)) = lambda(j); 00431 } 00432 // set nu_full 00433 DVector nu_full(n_full_); 00434 if(nu) { 00435 nu_full = 0.0; // We don't give lagrange multipliers for fixed varaibles! 00436 // ToDo: Define a special constrant for multiplier values for fixed variables 00437 VectorDenseEncap nu_d(*nu); 00438 var_to_full( nu_d().begin(), nu_full().begin() ); // set the nonfixed components 00439 } 00440 // Report the final solution 00441 DVectorSlice 00442 lambda_orig = lambda && m_orig_ ? lambda_full(1,m_orig_) : DVectorSlice(), 00443 lambdaI_orig = ( lambda && m_full_ > m_orig_ 00444 ? lambda_full(m_orig_+1,m_full_) 00445 : DVectorSlice() ), 00446 nu_orig = nu ? nu_full(1,n_orig_) : DVectorSlice(); 00447 imp_report_orig_final_solution( 00448 x_full() 00449 ,lambda_orig.dim() ? &lambda_orig : NULL 00450 ,lambdaI_orig.dim() ? &lambdaI_orig : NULL 00451 ,nu_orig.dim() ? 
&nu_orig : NULL 00452 ,is_optimal 00453 ); 00454 } 00455 00456 size_type NLPSerialPreprocess::ns() const 00457 { 00458 assert_initialized(); 00459 return mI_orig_; 00460 } 00461 00462 NLP::vec_space_ptr_t 00463 NLPSerialPreprocess::space_c_breve() const 00464 { 00465 namespace mmp = MemMngPack; 00466 assert_initialized(); 00467 return ( m_orig_ ? Teuchos::rcp(&space_c_breve_,false) : Teuchos::null ); 00468 } 00469 NLP::vec_space_ptr_t 00470 NLPSerialPreprocess::space_h_breve() const 00471 { 00472 namespace mmp = MemMngPack; 00473 assert_initialized(); 00474 return ( mI_orig_ ? Teuchos::rcp(&space_h_breve_,false) : Teuchos::null ); 00475 } 00476 00477 const Vector& NLPSerialPreprocess::hl_breve() const 00478 { 00479 assert_initialized(); 00480 return hl_breve_; 00481 } 00482 00483 const Vector& NLPSerialPreprocess::hu_breve() const 00484 { 00485 assert_initialized(); 00486 return hu_breve_; 00487 } 00488 00489 const Permutation& NLPSerialPreprocess::P_var() const 00490 { 00491 assert_initialized(); 00492 return P_var_; 00493 } 00494 00495 const Permutation& NLPSerialPreprocess::P_equ() const 00496 { 00497 assert_initialized(); 00498 return P_equ_; 00499 } 00500 00501 // Overridden public members from NLPVarReductPerm 00502 00503 const NLPVarReductPerm::perm_fcty_ptr_t 00504 NLPSerialPreprocess::factory_P_var() const 00505 { 00506 return factory_P_var_; 00507 } 00508 00509 const NLPVarReductPerm::perm_fcty_ptr_t 00510 NLPSerialPreprocess::factory_P_equ() const 00511 { 00512 return factory_P_equ_; 00513 } 00514 00515 Range1D NLPSerialPreprocess::var_dep() const 00516 { 00517 assert_initialized(); 00518 return r_ ? Range1D(1,r_) : Range1D::Invalid; 00519 } 00520 00521 Range1D NLPSerialPreprocess::var_indep() const 00522 { 00523 assert_initialized(); 00524 return Range1D(r_+1,n_); 00525 } 00526 00527 Range1D NLPSerialPreprocess::equ_decomp() const 00528 { 00529 assert_initialized(); 00530 return r_ ? 
Range1D(1,r_) : Range1D::Invalid; 00531 } 00532 00533 Range1D NLPSerialPreprocess::equ_undecomp() const 00534 { 00535 assert_initialized(); 00536 return r_ < m_full_ ? Range1D(r_+1,m_full_) : Range1D::Invalid; 00537 } 00538 00539 bool NLPSerialPreprocess::nlp_selects_basis() const 00540 { 00541 // Check if the user has supplied a basis from a file 00542 char ind[17]; 00543 sprintf(ind, "%d", basis_selection_num_); 00544 std::string fname = "basis_"; 00545 fname += ind; 00546 fname += ".sel"; 00547 00548 std::ifstream basis_file(fname.c_str()); 00549 if (basis_file) 00550 { 00551 return true; 00552 } 00553 00554 return false; 00555 } 00556 00557 bool NLPSerialPreprocess::get_next_basis( 00558 Permutation* P_var, Range1D* var_dep 00559 ,Permutation* P_equ, Range1D* equ_decomp 00560 ) 00561 { 00562 assert_initialized(); 00563 size_type rank = 0; 00564 const bool 00565 get_next_basis_return = get_next_basis_remove_fixed( &var_perm_, &equ_perm_, &rank ); 00566 if(get_next_basis_return) 00567 assert_and_set_basis( var_perm_, equ_perm_, rank ); 00568 else 00569 return false; // The NLP subclass did not have a new basis to give us! 00570 this->get_basis(P_var,var_dep,P_equ,equ_decomp); 00571 return true; 00572 } 00573 00574 void NLPSerialPreprocess::set_basis( 00575 const Permutation &P_var, const Range1D &var_dep 00576 ,const Permutation *P_equ, const Range1D *equ_decomp 00577 ) 00578 { 00579 namespace mmp = MemMngPack; 00580 using Teuchos::dyn_cast; 00581 TEUCHOS_TEST_FOR_EXCEPTION( 00582 (m_full_ > 0 && (P_equ == NULL || equ_decomp == NULL)) 00583 ,std::invalid_argument 00584 ,"NLPSerialPreprocess::set_basis(...) : Error!" ); 00585 TEUCHOS_TEST_FOR_EXCEPTION( 00586 m_full_ > 0 && var_dep.size() != equ_decomp->size() 00587 ,InvalidBasis 00588 ,"NLPSerialPreprocess::set_basis(...) : Error!" ); 00589 // Get the concrete types 00590 const PermutationSerial 00591 &P_var_s = dyn_cast<const PermutationSerial>(P_var), 00592 *P_equ_s = m_full_ ? 
&dyn_cast<const PermutationSerial>(*P_equ) : NULL; 00593 // Get the underlying permutation vectors 00594 Teuchos::RCP<IVector> 00595 var_perm = Teuchos::rcp_const_cast<IVector>(P_var_s.perm()), 00596 equ_perm = ( m_full_ 00597 ? Teuchos::rcp_const_cast<IVector>(P_equ_s->perm()) 00598 : Teuchos::null ); 00599 TEUCHOS_TEST_FOR_EXCEPTION( 00600 (m_full_ > 0 && equ_perm.get() == NULL) 00601 ,std::invalid_argument 00602 ,"NLPSerialPreprocess::set_basis(...) : Error, P_equ is not initialized properly!" ); 00603 // Set the basis 00604 assert_and_set_basis( *var_perm, *equ_perm, var_dep.size() ); 00605 } 00606 00607 void NLPSerialPreprocess::get_basis( 00608 Permutation* P_var, Range1D* var_dep 00609 ,Permutation* P_equ, Range1D* equ_decomp 00610 ) const 00611 { 00612 namespace mmp = MemMngPack; 00613 using Teuchos::dyn_cast; 00614 assert_initialized(); 00615 TEUCHOS_TEST_FOR_EXCEPTION( 00616 P_var == NULL || var_dep == NULL 00617 || (m_full_ > 0 && (P_equ == NULL || equ_decomp == NULL)) 00618 ,std::invalid_argument 00619 ,"NLPSerialPreprocess::get_basis(...) : Error!" ); 00620 // Get the concrete types 00621 PermutationSerial 00622 &P_var_s = dyn_cast<PermutationSerial>(*P_var), 00623 *P_equ_s = m_full_ ? &dyn_cast<PermutationSerial>(*P_equ) : NULL; 00624 // Get the underlying permutation vectors 00625 Teuchos::RCP<IVector> 00626 var_perm = Teuchos::rcp_const_cast<IVector>(P_var_s.perm()), 00627 equ_perm = ( m_full_ 00628 ? 
Teuchos::rcp_const_cast<IVector>(P_equ_s->perm()) 00629 : Teuchos::null ); 00630 // Allocate permutation vectors if none allocated yet or someone else has reference to them 00631 if( var_perm.get() == NULL || var_perm.total_count() > 2 ) // P_var reference and my reference 00632 var_perm = Teuchos::rcp( new IVector(n_) ); 00633 if( m_full_ && ( equ_perm.get() == NULL || equ_perm.total_count() > 2 ) ) // P_equ reference and my reference 00634 equ_perm = Teuchos::rcp( new IVector(m_full_) ); 00635 // Copy the basis selection 00636 (*var_perm) = var_perm_; 00637 (*var_dep) = Range1D(1,r_); 00638 if(m_full_) { 00639 (*equ_perm) = equ_perm_; 00640 (*equ_decomp) = Range1D(1,r_); 00641 } 00642 // Reinitialize the Permutation arguments. 00643 P_var_s.initialize( var_perm, Teuchos::null, true ); // Allocate the inverse permuation as well! 00644 if(m_full_) 00645 P_equ_s->initialize( equ_perm, Teuchos::null, true ); 00646 } 00647 00648 // Overridden protected members from NLP 00649 00650 void NLPSerialPreprocess::imp_calc_f( 00651 const Vector &x 00652 ,bool newx 00653 ,const ZeroOrderInfo &zero_order_info 00654 ) const 00655 { 00656 assert_initialized(); 00657 VectorDenseEncap x_d(x); 00658 set_x_full( x_d(), newx, &x_full_() ); 00659 imp_calc_f_orig( x_full(), newx, zero_order_orig_info() ); 00660 *zero_order_info.f = scale_f_ * f_orig_; 00661 } 00662 00663 void NLPSerialPreprocess::imp_calc_c( 00664 const Vector &x 00665 ,bool newx 00666 ,const ZeroOrderInfo &zero_order_info 00667 ) const 00668 { 00669 assert_initialized(); 00670 VectorDenseEncap x_d(x); 00671 set_x_full( x_d(), newx, &x_full_() ); 00672 if( m_orig_ ) 00673 imp_calc_c_orig( x_full(), newx, zero_order_orig_info() ); 00674 if( mI_orig_ ) 00675 imp_calc_h_orig( x_full(), newx, zero_order_orig_info() ); 00676 VectorDenseMutableEncap c_d(*zero_order_info.c); 00677 equ_from_full( 00678 m_orig_ ? c_orig_() : DVectorSlice() 00679 ,mI_orig_ ? h_orig_() : DVectorSlice() 00680 ,mI_orig_ ? 
x_full()(n_orig_+1,n_full_) : DVectorSlice() // s_orig 00681 ,&c_d() 00682 ); 00683 } 00684 00685 void NLPSerialPreprocess::imp_calc_c_breve( 00686 const Vector &x 00687 ,bool newx 00688 ,const ZeroOrderInfo &zero_order_info_breve 00689 ) const 00690 { 00691 assert_initialized(); 00692 VectorDenseEncap x_d(x); 00693 set_x_full( x_d(), newx, &x_full_() ); 00694 if( m_orig_ ) 00695 imp_calc_c_orig( x_full(), newx, zero_order_orig_info() ); 00696 if( mI_orig_ ) 00697 imp_calc_h_orig( x_full(), newx, zero_order_orig_info() ); 00698 VectorDenseMutableEncap c_breve_d(*zero_order_info_breve.c); 00699 c_breve_d() = c_orig_(); 00700 } 00701 00702 void NLPSerialPreprocess::imp_calc_h_breve( 00703 const Vector &x 00704 ,bool newx 00705 ,const ZeroOrderInfo &zero_order_info_breve 00706 ) const 00707 { 00708 // If this function gets called then this->mI() > 0 must be true 00709 // which means that convert_inequ_to_equ must be false! 00710 assert_initialized(); 00711 VectorDenseEncap x_d(x); 00712 set_x_full( x_d(), newx, &x_full_() ); 00713 imp_calc_h_orig( x_full(), newx, zero_order_orig_info() ); 00714 VectorDenseMutableEncap h_breve_d(*zero_order_info_breve.h); 00715 h_breve_d() = h_orig_(); // Nothing fancy right now 00716 } 00717 00718 // Overridden protected members from NLPObjGrad 00719 00720 void NLPSerialPreprocess::imp_calc_Gf( 00721 const Vector &x 00722 ,bool newx 00723 ,const ObjGradInfo &obj_grad_info 00724 ) const 00725 { 00726 using DenseLinAlgPack::Vt_S; 00727 assert_initialized(); 00728 VectorDenseEncap x_d(x); 00729 set_x_full( x_d(), newx, &x_full_() ); 00730 if( n_full_ > n_orig_ ) Gf_full_(n_orig_+1,n_full_) = 0.0; // Initialize terms for slacks to zero! 
00731 imp_calc_Gf_orig( x_full(), newx, obj_grad_orig_info() ); 00732 VectorDenseMutableEncap Gf_d(*obj_grad_info.Gf); 00733 var_from_full( Gf_full_.begin(), Gf_d().begin() ); 00734 Vt_S( &Gf_d(), scale_f_ ); 00735 } 00736 00737 // protected members 00738 00739 bool NLPSerialPreprocess::imp_get_next_basis( 00740 IVector *var_perm_full 00741 ,IVector *equ_perm_full 00742 ,size_type *rank_full 00743 ,size_type *rank 00744 ) 00745 { 00746 return false; // default is that the subclass does not select the basis 00747 } 00748 00749 void NLPSerialPreprocess::assert_initialized() const 00750 { 00751 TEUCHOS_TEST_FOR_EXCEPTION( 00752 !initialized_, UnInitialized 00753 ,"NLPSerialPreprocess : The nlp has not been initialized yet" ); 00754 } 00755 00756 void NLPSerialPreprocess::set_x_full( 00757 const DVectorSlice& x, bool newx 00758 ,DVectorSlice* x_full 00759 ) const 00760 { 00761 DenseLinAlgPack::assert_vs_sizes(x.dim(),n_); 00762 if(newx) 00763 var_to_full(x.begin(), x_full->begin()); 00764 } 00765 00766 void NLPSerialPreprocess::var_from_full( 00767 DVectorSlice::const_iterator vec_full 00768 ,DVectorSlice::iterator vec 00769 ) const 00770 { 00771 // std::cout << "\nvar_from_full(...) 
: "; 00772 for(size_type i = 1; i <= n_; i++) { 00773 *vec++ = vec_full[var_full_to_fixed_(var_perm_(i)) - 1]; 00774 // std::cout 00775 // << "\ni = " << i 00776 // << "\nvar_perm_(i) = " << var_perm_(i) 00777 // << "\nvar_full_to_fixed_(var_perm_(i)) = " << var_full_to_fixed_(var_perm_(i)) 00778 // << "\nvec_full[var_full_to_fixed_(var_perm_(i)) - 1] = " << vec_full[var_full_to_fixed_(var_perm_(i)) - 1] 00779 // << "\nvec[i] = " << *(vec-1) << "\n\n"; 00780 } 00781 } 00782 00783 void NLPSerialPreprocess::var_to_full( DVectorSlice::const_iterator vec, DVectorSlice::iterator vec_full ) const 00784 { 00785 for(size_type i = 1; i <= n_; i++) 00786 vec_full[var_full_to_fixed_(var_perm_(i)) - 1] = *vec++; 00787 } 00788 00789 void NLPSerialPreprocess::equ_from_full( 00790 const DVectorSlice &c_orig 00791 ,const DVectorSlice &h_orig 00792 ,const DVectorSlice &s_orig 00793 ,DVectorSlice *c_full 00794 ) const 00795 { 00796 size_type i; 00797 // c_full = [ c_orig; h_orig - s_orig ] 00798 for(i = 1; i <= m_orig_; ++i) 00799 (*c_full)(inv_equ_perm_(i)) = c_orig(i); 00800 for(i = 1; i <= mI_orig_; ++i) 00801 (*c_full)(inv_equ_perm_(m_orig_+i)) = h_orig(i) - s_orig(i); 00802 } 00803 00804 // private members 00805 00806 bool NLPSerialPreprocess::get_next_basis_remove_fixed( 00807 IVector* var_perm, IVector* equ_perm, size_type* rank 00808 ) 00809 { 00810 IVector var_perm_full(n_full_); 00811 equ_perm->resize(m_full_); 00812 size_type rank_full, rank_fixed_removed; 00813 if( imp_get_next_basis( &var_perm_full, equ_perm, &rank_full, &rank_fixed_removed ) ) { 00814 // std::cerr << "var_perm_full =\n" << var_perm_full; 00815 // std::cerr << "equ_perm =\n" << *equ_perm; 00816 // std::cerr << "rank_full = " << rank_full << std::endl; 00817 // 00818 // The NLP subclass has another basis to select 00819 // 00820 // Translate the basis by removing variables fixed by bounds. 
00821 // This is where it is important that var_perm_full is 00822 // sorted in assending order for basis and non-basis variables 00823 // 00824 // This is a little bit of a tricky algorithm. We have to 00825 // essentially loop through the set of basic and non-basic 00826 // variables, remove fixed variables and adjust the indexes 00827 // of the remaining variables. Since the set of indexes in 00828 // the basic and non-basis sets are sorted, this will not 00829 // be too bad of an algorithm. 00830 00831 // Iterator for the fixed variables that we are to remove 00832 IVector::const_iterator fixed_itr = var_full_to_fixed_.begin() + n_; 00833 IVector::const_iterator fixed_end = var_full_to_fixed_.end(); 00834 00835 // Iterator for the basis variables 00836 IVector::iterator basic_itr = var_perm_full.begin(); 00837 IVector::iterator basic_end = basic_itr + rank_full; 00838 00839 // Iterator for the non-basis variables 00840 IVector::iterator nonbasic_itr = basic_end; 00841 IVector::iterator nonbasic_end = var_perm_full.end(); 00842 00843 // Count the number of fixed basic and non-basic variables 00844 index_type 00845 count_fixed = 0, 00846 count_basic_fixed = 0, 00847 count_nonbasic_fixed = 0; 00848 00849 // Loop through all of the fixed variables and remove and compact 00850 for( ; fixed_itr != fixed_end; ++fixed_itr ) { 00851 const index_type 00852 next_fixed = ( fixed_itr +1 != fixed_end ? 
*(fixed_itr+1) : n_full_+1); 00853 // Bring the basic and nonbasic variables up to this fixed variable 00854 for( ; *basic_itr < *fixed_itr; ++basic_itr ) 00855 *(basic_itr - count_basic_fixed) = *basic_itr - count_fixed; 00856 for( ; *nonbasic_itr < *fixed_itr; ++nonbasic_itr ) 00857 *(nonbasic_itr - count_nonbasic_fixed) = *nonbasic_itr - count_fixed; 00858 // Update the count of the fixed variables 00859 if( *basic_itr == *fixed_itr ) { 00860 ++count_basic_fixed; 00861 ++basic_itr; 00862 } 00863 else { 00864 TEUCHOS_TEST_FOR_EXCEPT( !( *nonbasic_itr == *fixed_itr ) ); // If basic was not fixed then nonbasic better be! 00865 ++count_nonbasic_fixed; 00866 ++nonbasic_itr; 00867 00868 } 00869 ++count_fixed; 00870 // Now update the indexes until the next fixed variable 00871 for( ; *basic_itr < next_fixed; ++basic_itr ) 00872 *(basic_itr - count_basic_fixed) = *basic_itr - count_fixed; 00873 for( ; *nonbasic_itr < next_fixed; ++nonbasic_itr ) 00874 *(nonbasic_itr - count_nonbasic_fixed) = *nonbasic_itr - count_fixed; 00875 } 00876 TEUCHOS_TEST_FOR_EXCEPT( !( count_fixed == n_full_ - n_ ) ); // Basic check 00877 00878 var_perm->resize(n_); 00879 std::copy( 00880 var_perm_full.begin() 00881 ,var_perm_full.begin() + rank_fixed_removed 00882 ,var_perm->begin() 00883 ); 00884 std::copy( 00885 var_perm_full.begin() + rank_full 00886 ,var_perm_full.begin() + rank_full + (n_-rank_fixed_removed) 00887 ,var_perm->begin() + rank_fixed_removed 00888 ); 00889 *rank = rank_fixed_removed; 00890 return true; 00891 } 00892 else { 00893 00894 // try to find the file giving the basis... 
00895 char ind[17]; 00896 sprintf(ind, "%d", basis_selection_num_); 00897 std::string fname = "basis_"; 00898 fname += ind; 00899 fname += ".sel"; 00900 basis_selection_num_++; 00901 00902 std::ifstream basis_file(fname.c_str()); 00903 if (basis_file) 00904 { 00905 // try to read the basis file 00906 std::string tags; 00907 00908 int n; 00909 basis_file >> tags; 00910 TEUCHOS_TEST_FOR_EXCEPTION( 00911 tags != "n", std::logic_error 00912 ,"Incorrect basis file format - \"n\" expected, \"" << tags << "\" found."); 00913 basis_file >> n; 00914 TEUCHOS_TEST_FOR_EXCEPTION( 00915 n <= 0, std::logic_error 00916 , "Incorrect basis file format - n > 0 \"" << n << "\" found."); 00917 00918 int m; 00919 basis_file >> tags; 00920 TEUCHOS_TEST_FOR_EXCEPTION( 00921 tags != "m", std::logic_error 00922 ,"Incorrect basis file format - \"m\" expected, \"" << tags << "\" found."); 00923 basis_file >> m; 00924 TEUCHOS_TEST_FOR_EXCEPTION( 00925 m > n , std::logic_error 00926 ,"Incorrect basis file format - 0 < m <= n expected, \"" << m << "\" found."); 00927 00928 int r; 00929 basis_file >> tags; 00930 TEUCHOS_TEST_FOR_EXCEPTION( 00931 tags != "rank", std::logic_error 00932 ,"Incorrect basis file format - \"rank\" expected, \"" << tags << "\" found."); 00933 basis_file >> r; 00934 TEUCHOS_TEST_FOR_EXCEPTION( 00935 r > m, std::logic_error 00936 ,"Incorrect basis file format - 0 < rank <= m expected, \"" << r << "\" found."); 00937 if (rank) 00938 { *rank = r; } 00939 00940 // var_permutation 00941 basis_file >> tags; 00942 TEUCHOS_TEST_FOR_EXCEPTION( 00943 tags != "var_perm", std::logic_error 00944 ,"Incorrect basis file format -\"var_perm\" expected, \"" << tags << "\" found."); 00945 var_perm->resize(n); 00946 {for (int i=0; i < n; i++) 00947 { 00948 int var_index; 00949 basis_file >> var_index; 00950 TEUCHOS_TEST_FOR_EXCEPTION( 00951 var_index < 1 || var_index > n, std::logic_error 00952 ,"Incorrect basis file format for var_perm: 1 <= indice <= n expected, \"" << n << "\" found."); 
00953 (*var_perm)[i] = var_index; 00954 }} 00955 00956 // eqn_permutation 00957 basis_file >> tags; 00958 TEUCHOS_TEST_FOR_EXCEPTION( 00959 tags != "equ_perm", std::logic_error 00960 ,"Incorrect basis file format -\"equ_perm\" expected, \"" << tags << "\" found."); 00961 equ_perm->resize(r); 00962 {for (int i=0; i < r; i++) 00963 { 00964 int equ_index; 00965 basis_file >> equ_index; 00966 TEUCHOS_TEST_FOR_EXCEPTION( 00967 equ_index < 1 || equ_index > m, std::logic_error 00968 ,"Incorrect basis file format for equ_perm: 1 <= indice <= m expected, \"" << m << "\" found."); 00969 (*equ_perm)[i] = equ_index; 00970 }} 00971 00972 return true; 00973 } 00974 } 00975 00976 return false; 00977 } 00978 00979 void NLPSerialPreprocess::assert_and_set_basis( 00980 const IVector& var_perm, const IVector& equ_perm, size_type rank 00981 ) 00982 { 00983 namespace mmp = MemMngPack; 00984 00985 // Assert that this is a valid basis and set the internal basis. Also repivot 'xinit', 00986 // 'xl', and 'xu'. 00987 00988 const value_type inf_bnd = NLPSerialPreprocess::infinite_bound(); 00989 00990 // Assert the preconditions 00991 TEUCHOS_TEST_FOR_EXCEPTION( 00992 var_perm.size() != n_ || equ_perm.size() != m_full_, std::length_error 00993 ,"NLPSerialPreprocess::set_basis(): The sizes " 00994 "of the permutation vectors does not match the size of the NLP" ); 00995 TEUCHOS_TEST_FOR_EXCEPTION( 00996 rank > m_full_, InvalidBasis 00997 ,"NLPSerialPreprocess::set_basis(): The rank " 00998 "of the basis can not be greater that the number of constraints" ); 00999 01000 // Set the basis 01001 r_ = rank; 01002 if( &var_perm_ != &var_perm ) 01003 var_perm_ = var_perm; 01004 if( &equ_perm_ != &equ_perm ) 01005 equ_perm_ = equ_perm; 01006 DenseLinAlgPack::inv_perm( equ_perm_, &inv_equ_perm_ ); 01007 01008 var_from_full( xinit_full_().begin(), xinit_.set_vec().begin() ); 01009 if(num_bounded_x_) { 01010 var_from_full( xl_full_().begin(), xl_.set_vec().begin() ); 01011 var_from_full( 
xu_full_().begin(), xu_.set_vec().begin() ); 01012 do_force_xinit_in_bounds(); 01013 } 01014 else { 01015 xl_ = -NLP::infinite_bound(); 01016 xu_ = +NLP::infinite_bound(); 01017 } 01018 P_var_.initialize(Teuchos::rcp(new IVector(var_perm)),Teuchos::null); 01019 P_equ_.initialize(Teuchos::rcp(new IVector(equ_perm)),Teuchos::null); 01020 } 01021 01022 void NLPSerialPreprocess::assert_bounds_on_variables() const 01023 { 01024 TEUCHOS_TEST_FOR_EXCEPTION( 01025 !(imp_has_var_bounds() || n_full_ > n_orig_), NLP::NoBounds 01026 ,"There are no bounds on the variables for this NLP" ); 01027 } 01028 01029 void NLPSerialPreprocess::do_force_xinit_in_bounds() 01030 { 01031 AbstractLinAlgPack::force_in_bounds( xl_, xu_, &xinit_ ); 01032 } 01033 01034 } // end namespace NLPInterfacePack
// (doxygen 1.7.6.1 -- documentation-generator version tag from the extracted listing, not code)