proxsuite-nlp 0.11.0
A primal-dual augmented Lagrangian-type solver for nonlinear programming on manifolds.
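For context, a minimal sketch of the classical equality-constrained augmented Lagrangian that this file generalizes (the solver below adds constraint-set projections, a primal-dual variant, and a proximal term in x):

\[
  \mathcal{L}_\mu(x; \lambda_k) = f(x) + \lambda_k^\top c(x) + \frac{1}{2\mu}\|c(x)\|^2,
  \qquad
  \lambda^+ = \lambda_k + \frac{1}{\mu} c(x),
\]

which matches the shifted, projected multiplier update computed in computeMultipliers below when the projection is the identity.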
 
prox-solver.hxx
#pragma once

#include "proxsuite-nlp/prox-solver.hpp"
#include "proxsuite-nlp/linesearch-armijo.hpp"

#include <fmt/ostream.h>
#include <fmt/color.h>

namespace proxsuite {
namespace nlp {
template <typename Scalar>
ProxNLPSolverTpl<Scalar>::ProxNLPSolverTpl(
    Problem &prob, const Scalar tol, const Scalar mu_init,
    const Scalar rho_init, const VerboseLevel verbose, const Scalar mu_lower,
    const Scalar prim_alpha, const Scalar prim_beta, const Scalar dual_alpha,
    const Scalar dual_beta, LDLTChoice ldlt_choice,
    const LinesearchOptions ls_options)
    : problem_(&prob), merit_fun(*problem_, pdal_beta_),
      prox_penalty(prob.manifold_, manifold().neutral(),
                   rho_init *
                       MatrixXs::Identity(manifold().ndx(), manifold().ndx())),
      verbose(verbose), ldlt_choice_(ldlt_choice), rho_init_(rho_init),
      mu_init_(mu_init), mu_lower_(mu_lower),
      bcl_params{prim_alpha, prim_beta, dual_alpha, dual_beta},
      ls_options(ls_options), target_tol(tol) {}

template <typename Scalar>
ConvergenceFlag
ProxNLPSolverTpl<Scalar>::solve(const ConstVectorRef &x0,
                                const std::vector<VectorRef> &lams0) {
  VectorXs new_lam(problem_->getTotalConstraintDim());
  new_lam.setZero();
  int nr = 0;
  const std::size_t numc = problem_->getNumConstraints();
  if (numc != lams0.size()) {
    PROXSUITE_NLP_RUNTIME_ERROR(
        "Specified number of constraints is not the same "
        "as the provided number of multipliers!");
  }
  for (std::size_t i = 0; i < numc; i++) {
    nr = problem_->getConstraintDim(i);
    new_lam.segment(problem_->getIndex(i), nr) = lams0[i];
  }
  return solve(x0, new_lam);
}

template <typename Scalar>
ConvergenceFlag ProxNLPSolverTpl<Scalar>::solve(const ConstVectorRef &x0,
                                                const ConstVectorRef &lams0) {
  if (verbose == 0)
    logger.active = false;

  if ((results_ == nullptr) || (workspace_ == nullptr)) {
    PROXSUITE_NLP_RUNTIME_ERROR(
        "Either Results or Workspace is uninitialized. Call setup() first.");
  }

  auto &results = *results_;
  auto &workspace = *workspace_;

  setPenalty(mu_init_);
  setProxParameter(rho_init_);

  // init variables
  results.x_opt = x0;
  workspace.x_prev = x0;
  if (lams0.size() == workspace.numdual) {
    results.data_lams_opt = lams0;
    workspace.data_lams_prev = lams0;
  }

  // initialize the BCL subproblem tolerances
  updateToleranceFailure();

  results.converged = ConvergenceFlag::UNINIT;

  std::size_t &i = results.num_iters;
  std::size_t &al_iter = results.al_iters;
  i = 0;
  al_iter = 0;
  logger.start();
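  // Outer (BCL-type augmented Lagrangian) loop: each pass runs the inner loop
  // on the current subproblem, then either accepts the multiplier estimates
  // (good primal feasibility) or decreases the penalty parameter mu, and stops
  // once both primal and dual infeasibility fall below target_tol.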
  while ((i < max_iters) && (al_iter < max_al_iters)) {
    results.mu = mu_;
    results.rho = rho_;
    innerLoop(workspace, results);

    // accept new primal iterate
    workspace.x_prev = results.x_opt;
    prox_penalty.updateTarget(workspace.x_prev);

    if (results.prim_infeas < prim_tol_) {
      // accept the multiplier estimates and tighten subproblem tolerances
      acceptMultipliers(results, workspace);
      updateToleranceSuccess();
    } else {
      // insufficient primal feasibility: decrease the penalty parameter
      updatePenalty();
      updateToleranceFailure();
    }
    if (std::max(results.prim_infeas, results.dual_infeas) < target_tol) {
      results.converged = ConvergenceFlag::SUCCESS;
      break;
    }
    setProxParameter(rho_ * bcl_params.rho_update_factor);

    al_iter++;
  }

  if (results.converged == SUCCESS)
    fmt::print(fmt::fg(fmt::color::dodger_blue),
               "Solver successfully converged");

  switch (results.converged) {
  case MAX_ITERS_REACHED:
    fmt::print(fmt::fg(fmt::color::orange_red),
               "Max number of iterations reached.");
    break;
  default:
    break;
  }
  fmt::print("\n");

  invokeCallbacks(workspace, results);

  return results.converged;
}

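// The KKT matrix of the (regularized) subproblem is expected to have exactly
// ndx positive, numc negative and no zero eigenvalues; any other signature
// triggers primal regularization in the inner loop below.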
InertiaFlag checkInertia(const int ndx, const int numc,
                         const Eigen::VectorXi &signature) {
  auto inertiaTuple = computeInertiaTuple(signature);
  int numpos = inertiaTuple[0];
  int numneg = inertiaTuple[1];
  int numzer = inertiaTuple[2];
  InertiaFlag flag = INERTIA_OK;
  bool pos_ok = numpos == ndx;
  bool neg_ok = numneg == numc;
  bool zer_ok = numzer == 0;
  if (!(pos_ok && neg_ok && zer_ok)) {
    if (!zer_ok)
      flag = INERTIA_HAS_ZEROS;
    else
      flag = INERTIA_BAD;
  } else {
    flag = INERTIA_OK;
  }
  return flag;
}

template <typename Scalar>
void ProxNLPSolverTpl<Scalar>::acceptMultipliers(Results &results,
                                                 Workspace &workspace) const {
  switch (mul_update_mode) {
  case MultiplierUpdateMode::NEWTON:
    workspace.data_lams_prev = results.data_lams_opt;
    break;
  case MultiplierUpdateMode::PRIMAL:
    workspace.data_lams_prev = workspace.data_lams_plus;
    break;
  case MultiplierUpdateMode::PRIMAL_DUAL:
    workspace.data_lams_prev = workspace.data_lams_pdal;
    break;
  default:
    break;
  }
  results.data_lams_opt = workspace.data_lams_prev;
}

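// First-order multiplier estimate used by the augmented Lagrangian:
//   lams_plus = (1/mu) * proj_N(c(x) + mu * lams_prev),
// where proj_N is the projection onto the normal cone of each constraint set.
// The primal-dual (PDAL) estimate applies the same projection to a shifted
// argument that also involves the current inner multipliers.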
template <typename Scalar>
void ProxNLPSolverTpl<Scalar>::computeMultipliers(
    const ConstVectorRef &inner_lams_data, Workspace &workspace) const {
  workspace.data_shift_cstr_values =
      workspace.data_cstr_values + mu_ * workspace.data_lams_prev;
  // project multiplier estimate
  for (std::size_t i = 0; i < problem_->getNumConstraints(); i++) {
    const ConstraintSet &cstr_set = *problem_->getConstraint(i).set_;
    // apply proximal op to shifted constraint
    cstr_set.normalConeProjection(workspace.shift_cstr_values[i],
                                  workspace.lams_plus[i]);
  }
  workspace.data_lams_plus = mu_inv_ * workspace.data_lams_plus;
  // compute primal-dual multiplier estimates:
  // normalConeProj(w), w = c(x) + mu(lambda_k - (beta-1)lambda)
  workspace.data_shift_cstr_pdal =
      workspace.data_shift_cstr_values - 0.5 * mu_ * inner_lams_data;
  for (std::size_t i = 0; i < problem_->getNumConstraints(); i++) {
    const ConstraintSet &cstr_set = *problem_->getConstraint(i).set_;
    cstr_set.normalConeProjection(workspace.shift_cstr_pdal[i],
                                  workspace.lams_pdal[i]);
  }
  workspace.data_lams_pdal *= mu_inv_ / pdal_beta_;

  workspace.data_lams_plus_reproj = workspace.data_lams_plus;
  workspace.data_lams_pdal_reproj = workspace.data_lams_pdal;
  for (std::size_t i = 0; i < problem_->getNumConstraints(); i++) {
    const ConstraintSet &cstr_set = *problem_->getConstraint(i).set_;
    // reapply the prox operator Jacobian to multiplier estimate
    cstr_set.applyProjectionJacobian(workspace.shift_cstr_values[i],
                                     workspace.lams_plus_reproj[i]);
    cstr_set.applyProjectionJacobian(workspace.shift_cstr_pdal[i],
                                     workspace.lams_pdal_reproj[i]);
  }
}

template <typename Scalar>
void ProxNLPSolverTpl<Scalar>::computeProblemDerivatives(
    const ConstVectorRef &x, Workspace &workspace, boost::mpl::false_) const {
  problem_->computeDerivatives(x, workspace);

  workspace.data_jacobians_proj = workspace.data_jacobians;
  for (std::size_t i = 0; i < problem_->getNumConstraints(); i++) {
    const ConstraintSet &cstr_set = *problem_->getConstraint(i).set_;
    switch (kkt_system_) {
    case KKT_CLASSIC:
      cstr_set.applyNormalConeProjectionJacobian(
          workspace.shift_cstr_values[i], workspace.cstr_jacobians_proj[i]);
      break;
    case KKT_PRIMAL_DUAL:
      cstr_set.applyNormalConeProjectionJacobian(
          workspace.shift_cstr_pdal[i], workspace.cstr_jacobians_proj[i]);
      break;
    }
  }
}

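// Tag-dispatched overloads: boost::mpl::false_ (above) computes first-order
// derivatives and projected constraint Jacobians only; boost::mpl::true_
// (below) additionally refreshes the Hessian approximation according to
// hess_approx.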
template <typename Scalar>
void ProxNLPSolverTpl<Scalar>::computeProblemDerivatives(
    const ConstVectorRef &x, Workspace &workspace, boost::mpl::true_) const {
  this->computeProblemDerivatives(x, workspace, boost::mpl::false_());

  switch (hess_approx) {
  case HessianApprox::BFGS:
    bfgs_strategy_.update(x, workspace.objective_gradient,
                          workspace.objective_hessian);
    break;

  case HessianApprox::IDENTITY:
    workspace.objective_hessian.setIdentity();
    break;

  default: // handle both EXACT and GAUSS_NEWTON
    problem_->computeHessians(x, workspace,
                              hess_approx == HessianApprox::EXACT);
    break;
  }
}

template <typename Scalar>
void ProxNLPSolverTpl<Scalar>::computePrimalResiduals(Workspace &workspace,
                                                      Results &results) const {
  workspace.data_shift_cstr_values =
      workspace.data_cstr_values + mu_ * results.data_lams_opt;

  for (std::size_t i = 0; i < problem_->getNumConstraints(); i++) {
    const ConstraintSet &cstr_set = *problem_->getConstraint(i).set_;
    auto displ_cstr = workspace.shift_cstr_values[i];
    // apply proximal operator
    cstr_set.projection(displ_cstr, displ_cstr);

    auto cstr_prox_err = workspace.cstr_values[i] - displ_cstr;
    results.constraint_violations(long(i)) = math::infty_norm(cstr_prox_err);
  }
  results.prim_infeas = math::infty_norm(results.constraint_violations);
}

template <typename Scalar> void ProxNLPSolverTpl<Scalar>::updatePenalty() {
  if (mu_ == mu_lower_) {
    setPenalty(mu_init_);
  } else {
    setPenalty(std::max(mu_ * bcl_params.mu_update_factor, mu_lower_));
  }
}

template <typename Scalar>
void ProxNLPSolverTpl<Scalar>::innerLoop(Workspace &workspace,
                                         Results &results) {
  const int ndx = manifold().ndx();
  const long ntot = workspace.kkt_rhs.size();
  const long ndual = ntot - ndx;
  const std::size_t num_c = problem_->getNumConstraints();

  Scalar delta_last = 0.;
  Scalar delta = delta_last;
  Scalar phi_new = 0.;

  // lambda for evaluating the merit function
  auto phi_eval = [&](const Scalar alpha) {
    tryStep(workspace, results, alpha);
    problem_->evaluate(workspace.x_trial, workspace);
    computeMultipliers(workspace.data_lams_trial, workspace);
    return merit_fun.evaluate(workspace.x_trial, workspace.lams_trial,
                              workspace) +
           prox_penalty.call(workspace.x_trial);
  };

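  // Inner loop: a Newton-type iteration on the primal-dual merit function.
  // Each pass evaluates the problem, forms the KKT right-hand side and matrix,
  // corrects the inertia by primal regularization, solves the linear system
  // with iterative refinement, and backtracks along the step with an Armijo
  // linesearch before accepting the trial point.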
  while (true) {

    problem_->evaluate(results.x_opt, workspace);
    computeMultipliers(results.data_lams_opt, workspace);
    computeProblemDerivatives(results.x_opt, workspace, boost::mpl::true_());

    for (std::size_t i = 0; i < num_c; i++) {
      const ConstraintSet &cstr_set = *problem_->getConstraint(i).set_;
      cstr_set.computeActiveSet(workspace.shift_cstr_values[i],
                                results.active_set[i]);
    }

    results.value = workspace.objective_value;
    results.merit =
        merit_fun.evaluate(results.x_opt, results.lams_opt, workspace);

    if (rho_ > 0.) {
      results.merit += prox_penalty.call(results.x_opt);
      prox_penalty.computeGradient(results.x_opt, workspace.prox_grad);
      prox_penalty.computeHessian(results.x_opt, workspace.prox_hess);
    }

    workspace.kkt_rhs.setZero();
    workspace.kkt_rhs_corr.setZero();

    // add jacobian-vector products to gradients
    workspace.kkt_rhs.head(ndx) = workspace.objective_gradient;
    workspace.kkt_rhs.head(ndx).noalias() +=
        workspace.data_jacobians.transpose() * results.data_lams_opt;

    switch (kkt_system_) {
    case KKT_CLASSIC:
      workspace.kkt_rhs.tail(ndual) =
          mu_ * (workspace.data_lams_plus - results.data_lams_opt);
      break;
    case KKT_PRIMAL_DUAL:
      workspace.kkt_rhs.tail(ndual) =
          0.5 * mu_ * (workspace.data_lams_pdal - results.data_lams_opt);
      break;
    }

    merit_fun.computeGradient(results.lams_opt, workspace);
    // add proximal penalty terms
    if (rho_ > 0.) {
      workspace.kkt_rhs.head(ndx) += workspace.prox_grad;
      workspace.merit_gradient += workspace.prox_grad;
    }

    PROXSUITE_NLP_RAISE_IF_NAN_NAME(workspace.kkt_rhs, "kkt_rhs");
    PROXSUITE_NLP_RAISE_IF_NAN_NAME(workspace.kkt_matrix, "kkt_matrix");

    computePrimalResiduals(workspace, results);

    // compute dual residual
    workspace.dual_residual = workspace.objective_gradient;
    workspace.dual_residual.noalias() +=
        workspace.data_jacobians.transpose() * results.data_lams_opt;
    results.dual_infeas = math::infty_norm(workspace.dual_residual);
    Scalar inner_crit = math::infty_norm(workspace.kkt_rhs);
    Scalar outer_crit = std::max(results.prim_infeas, results.dual_infeas);

    bool inner_cond = inner_crit <= inner_tol_;
    bool outer_cond = outer_crit <= target_tol; // allows early stopping
    if (inner_cond || outer_cond) {
      return;
    }

    // If not optimal: compute the step

    // correct the rhs for the symmetric system
    workspace.kkt_rhs_corr.head(ndx).noalias() -=
        workspace.data_jacobians.transpose() * results.data_lams_opt;
    workspace.kkt_rhs_corr.head(ndx).noalias() +=
        workspace.data_jacobians_proj.transpose() * results.data_lams_opt;
    switch (kkt_system_) {
    case KKT_CLASSIC:
      workspace.kkt_rhs_corr.head(ndx).noalias() +=
          workspace.data_jacobians.transpose() *
          workspace.data_lams_plus_reproj;
      break;
    case KKT_PRIMAL_DUAL:
      workspace.kkt_rhs_corr.head(ndx).noalias() +=
          workspace.data_jacobians.transpose() *
          workspace.data_lams_pdal_reproj;
      break;
    }
    // apply correction
    workspace.kkt_rhs += workspace.kkt_rhs_corr;

    // fill in KKT matrix
    assembleKktMatrix(workspace);

    // choose regularisation level

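    // Inertia correction: add a primal regularization term delta * I to the
    // top-left block and refactorize until the LDLT signature has the expected
    // (ndx, ndual, 0) inertia, growing delta geometrically on failure.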
    delta = DELTA_INIT;
    InertiaFlag is_inertia_correct = INERTIA_BAD;

    while (!(is_inertia_correct == INERTIA_OK) && delta <= DELTA_MAX) {
      if (delta > 0.)
        workspace.kkt_matrix.diagonal().head(ndx).array() += delta;

      boost::apply_visitor(
          [&](auto &&fac) { fac.compute(workspace.kkt_matrix); },
          workspace.ldlt_);
      boost::apply_visitor(ComputeSignatureVisitor{workspace.signature},
                           workspace.ldlt_);
      workspace.kkt_matrix.diagonal().head(ndx).array() -= delta;
      is_inertia_correct =
          checkInertia(manifold().ndx(), problem_->getTotalConstraintDim(),
                       workspace.signature);

      if (is_inertia_correct == INERTIA_OK) {
        delta_last = delta;
        break;
      } else if (delta == 0.) {
        // check if previous was zero
        if (delta_last == 0.)
          delta = DELTA_NONZERO_INIT; // try a set nonzero value
        else
          delta = std::max(DELTA_MIN, del_dec_k * delta_last);
      } else {
        // check previous; decide increase factor
        if (delta_last == 0.)
          delta *= del_inc_big;
        else
          delta *= del_inc_k;
      }
    }

    iterativeRefinement(workspace);

    PROXSUITE_NLP_RAISE_IF_NAN_NAME(workspace.pd_step, "pd_step");

    // Take the step

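    // Directional derivative of the merit function along the primal-dual step;
    // this is the dphi(0) fed to the Armijo backtracking linesearch below.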
    workspace.dmerit_dir =
        workspace.merit_gradient.dot(workspace.prim_step) +
        workspace.merit_dual_gradient.dot(workspace.dual_step);

    Scalar phi0 = results.merit;
    Scalar dphi0 = workspace.dmerit_dir;
    switch (ls_strat) {
    case LinesearchStrategy::ARMIJO: {
      phi_new = ArmijoLinesearch<Scalar>(ls_options)
                    .run(phi_eval, results.merit, dphi0, workspace.alpha_opt);
      break;
    }
    default:
      PROXSUITE_NLP_RUNTIME_ERROR("Unrecognized linesearch alternative.\n");
      break;
    }

    tryStep(workspace, results, workspace.alpha_opt);

    PROXSUITE_NLP_RAISE_IF_NAN_NAME(workspace.alpha_opt, "alpha_opt");
    PROXSUITE_NLP_RAISE_IF_NAN_NAME(workspace.x_trial, "x_trial");
    PROXSUITE_NLP_RAISE_IF_NAN_NAME(workspace.data_lams_trial, "lams_trial");
    results.x_opt = workspace.x_trial;
    results.data_lams_opt = workspace.data_lams_trial;
    results.merit = phi_new;
    PROXSUITE_NLP_RAISE_IF_NAN_NAME(results.merit, "merit");

    invokeCallbacks(workspace, results);

    LogRecord record{results.num_iters + 1,
                     workspace.alpha_opt,
                     inner_crit,
                     results.prim_infeas,
                     results.dual_infeas,
                     delta,
                     dphi0,
                     results.merit,
                     phi_new - phi0,
                     results.al_iters + 1};

    logger.log(record);

    results.num_iters++;
    if (results.num_iters >= max_iters) {
      results.converged = ConvergenceFlag::MAX_ITERS_REACHED;
      break;
    }
  }

  return;
}

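// assembleKktMatrix builds the symmetric saddle-point matrix
//   [ H_L + H_prox     J_proj^T ]
//   [   J_proj          -mu * I ]
// where H_L is the (approximate) Lagrangian Hessian, H_prox the Hessian of the
// proximal penalty (when rho > 0), J_proj the projected constraint Jacobians,
// and the lower-right block receives an extra diagonal correction in the
// primal-dual KKT formulation.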
template <typename Scalar>
void ProxNLPSolverTpl<Scalar>::assembleKktMatrix(Workspace &workspace) {
  const long ndx = (long)manifold().ndx();
  const long ndual = workspace.numdual;
  workspace.kkt_matrix.setZero();
  workspace.kkt_matrix.topLeftCorner(ndx, ndx) = workspace.objective_hessian;
  workspace.kkt_matrix.topRightCorner(ndx, ndual) =
      workspace.data_jacobians_proj.transpose();
  workspace.kkt_matrix.bottomLeftCorner(ndual, ndx) =
      workspace.data_jacobians_proj;
  auto lower_right_block = workspace.kkt_matrix.bottomRightCorner(ndual, ndual);
  lower_right_block.diagonal().setConstant(-mu_);

  if (rho_ > 0.) {
    workspace.kkt_matrix.topLeftCorner(ndx, ndx) += workspace.prox_hess;
  }
  for (std::size_t i = 0; i < workspace.numblocks; i++) {
    const ConstraintSet &cstr_set = *problem_->getConstraint(i).set_;
    bool use_vhp =
        !cstr_set.disableGaussNewton() || (hess_approx == HessianApprox::EXACT);
    if (use_vhp) {
      workspace.kkt_matrix.topLeftCorner(ndx, ndx) +=
          workspace.cstr_vector_hessian_prod[i];
    }
    if (kkt_system_ == KKT_PRIMAL_DUAL) {
      // correct lower right corner in primal-dual case
      int idx = problem_->getIndex(i);
      int nr = problem_->getConstraintDim(i);
      auto d_sub = lower_right_block.diagonal().segment(idx, nr);
      VectorXs d_sub2(d_sub);
      // apply normal cone jacobian op
      cstr_set.applyNormalConeProjectionJacobian(workspace.shift_cstr_pdal[i],
                                                 d_sub2);
      d_sub = 0.5 * (d_sub + d_sub2);
    }
  }
}

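// Solve K * pd_step = -kkt_rhs with the current LDLT factorization, then apply
// a few rounds of iterative refinement: compute the residual of the linear
// system, solve for a correction, and add it until the residual drops below
// kkt_tolerance_ (returns false if the budget of refinement steps is spent).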
template <typename Scalar>
bool ProxNLPSolverTpl<Scalar>::iterativeRefinement(Workspace &workspace) const {
  workspace.pd_step = -workspace.kkt_rhs;
  boost::apply_visitor([&](auto &&fac) { fac.solveInPlace(workspace.pd_step); },
                       workspace.ldlt_);
  for (std::size_t n = 0; n < max_refinement_steps_; n++) {
    workspace.kkt_err = -workspace.kkt_rhs;
    workspace.kkt_err.noalias() -= workspace.kkt_matrix * workspace.pd_step;
    if (math::infty_norm(workspace.kkt_err) < kkt_tolerance_)
      return true;
    boost::apply_visitor(
        [&](auto &&fac) { fac.solveInPlace(workspace.kkt_err); },
        workspace.ldlt_);
    workspace.pd_step += workspace.kkt_err;
  }
  return false;
}

template <typename Scalar>
void ProxNLPSolverTpl<Scalar>::setPenalty(const Scalar &new_mu) noexcept {
  mu_ = new_mu;
  mu_inv_ = 1. / mu_;
  for (std::size_t i = 0; i < problem_->getNumConstraints(); i++) {
    const ConstraintObject &cstr = problem_->getConstraint(i);
    cstr.set_->setProxParameter(mu_);
  }
}

template <typename Scalar>
void ProxNLPSolverTpl<Scalar>::setProxParameter(const Scalar &new_rho) noexcept {
  rho_ = new_rho;
  prox_penalty.weights_.setZero();
  prox_penalty.weights_.diagonal().setConstant(rho_);
}

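// BCL-style tolerance schedule: on failure the subproblem tolerances are reset
// from their initial values using the alpha exponents, while on success they
// are tightened multiplicatively using the beta exponents, then clamped in
// tolerancePostUpdate().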
template <typename Scalar>
void ProxNLPSolverTpl<Scalar>::updateToleranceFailure() noexcept {
  prim_tol_ = prim_tol0 * std::pow(mu_, bcl_params.prim_alpha);
  inner_tol_ = inner_tol0 * std::pow(mu_, bcl_params.dual_alpha);
  tolerancePostUpdate();
}

template <typename Scalar>
void ProxNLPSolverTpl<Scalar>::updateToleranceSuccess() noexcept {
  prim_tol_ = prim_tol_ * std::pow(mu_ / mu_upper_, bcl_params.prim_beta);
  inner_tol_ = inner_tol_ * std::pow(mu_ / mu_upper_, bcl_params.dual_beta);
  tolerancePostUpdate();
}

template <typename Scalar>
void ProxNLPSolverTpl<Scalar>::tolerancePostUpdate() noexcept {
  inner_tol_ = std::max(inner_tol_, inner_tol_min);
  prim_tol_ = std::max(prim_tol_, target_tol);
}

template <typename Scalar>
void ProxNLPSolverTpl<Scalar>::tryStep(Workspace &workspace,
                                       const Results &results, Scalar alpha) {
  workspace.tmp_dx_scaled = alpha * workspace.prim_step;
  manifold().integrate(results.x_opt, workspace.tmp_dx_scaled,
                       workspace.x_trial);
  workspace.data_lams_trial =
      results.data_lams_opt + alpha * workspace.dual_step;
}
} // namespace nlp
} // namespace proxsuite
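To see how the pieces above fit together, here is a minimal, hypothetical usage sketch. The problem type name ProblemTpl, the availability of default values for the trailing constructor arguments, and the helper run_solver are assumptions for illustration; only the constructor, setup() and solve() calls correspond to code in this file.

#include "proxsuite-nlp/prox-solver.hpp"

using namespace proxsuite::nlp;

// `prob`, `x0` and `lams0` are assumed to be built elsewhere.
void run_solver(ProblemTpl<double> &prob, const Eigen::VectorXd &x0,
                const Eigen::VectorXd &lams0) {
  // Tolerance and initial ALM penalty; the remaining constructor parameters
  // are assumed to take their defaults from the declaration in the header.
  ProxNLPSolverTpl<double> solver(prob, /*tol=*/1e-6, /*mu_init=*/0.01);
  solver.setup();                        // allocates Results and Workspace
  ConvergenceFlag flag = solver.solve(x0, lams0);
  (void)flag; // SUCCESS, MAX_ITERS_REACHED, ... (see the printout logic above)
}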
Referenced symbols (brief documentation):

void tryStep(Workspace &workspace, const Results &results, Scalar alpha)
Scalar target_tol
Target tolerance for the problem.
void updateToleranceSuccess() noexcept
Update primal-dual subproblem tolerances upon successful outer-loop iterate (good primal feasibility)
void setProxParameter(const Scalar &new_rho) noexcept
Set proximal penalty parameter.
void computeMultipliers(const ConstVectorRef &inner_lams_data, Workspace &workspace) const
void acceptMultipliers(Results &results, Workspace &workspace) const
Accept Lagrange multiplier estimates.
void updateToleranceFailure() noexcept
Update primal-dual subproblem tolerances upon failure (insufficient primal feasibility)
void computePrimalResiduals(Workspace &workspace, Results &results) const
void setPenalty(const Scalar &new_mu) noexcept
Set penalty parameter, its inverse and update the merit function.
Problem * problem_
General nonlinear program to solve.
PROXSUITE_NLP_INLINE bool iterativeRefinement(Workspace &workspace) const
Iterative refinement of the KKT linear system.
BCLParamsTpl< Scalar > bcl_params
BCL strategy parameters.
VerboseLevel verbose
Level of verbosity of the solver.
ConvergenceFlag solve(const ConstVectorRef &x0, const std::vector< VectorRef > &lams0)
Solve the problem.
QuadraticDistanceCostTpl< Scalar > prox_penalty
Proximal regularization penalty.
void computeProblemDerivatives(const ConstVectorRef &x, Workspace &workspace, boost::mpl::false_) const
HessianApprox hess_approx
Use a Gauss-Newton approximation for the Lagrangian Hessian.
std::size_t max_refinement_steps_
Maximum number of iterative refinement steps.
VerboseLevel
Verbosity level.
Definition fwd.hpp:126
ArmijoLinesearch: Implements a basic Armijo back-tracking strategy.
#define PROXSUITE_NLP_NOMALLOC_END
Exiting performance-critical code.
Definition macros.hpp:23
#define PROXSUITE_NLP_NOMALLOC_BEGIN
Entering performance-critical code.
Definition macros.hpp:20
Main package namespace.
Definition bcl-params.hpp:5
InertiaFlag checkInertia(const int ndx, const int nc, const Eigen::VectorXi &signature)
Check the matrix has the desired inertia.
HessianApprox::BFGS
BFGS approximation.
HessianApprox::EXACT
Exact Hessian construction from provided function Hessians.
virtual bool disableGaussNewton() const
virtual void applyProjectionJacobian(const ConstVectorRef &z, MatrixRef Jout) const
Apply a jacobian of the projection/proximal operator to a matrix.
virtual void computeActiveSet(const ConstVectorRef &z, Eigen::Ref< ActiveType > out) const =0
virtual void projection(const ConstVectorRef &z, VectorRef zout) const =0
Compute projection of variable z onto the constraint set.
virtual void applyNormalConeProjectionJacobian(const ConstVectorRef &z, MatrixRef Jout) const
Apply the jacobian of the projection on the normal cone.
virtual void normalConeProjection(const ConstVectorRef &z, VectorRef zout) const =0
Compute projection of z onto the normal cone to the set.
Scalar prim_infeas
Primal infeasibility error.
Definition results.hpp:38
VectorXs constraint_violations
Violations for each constraint.
Definition results.hpp:40
std::vector< VectorRef > lams_plus
First-order multiplier estimates.
VectorXs pd_step
Primal-dual step (dx, dlambda).
Definition workspace.hpp:48
std::vector< VectorRef > cstr_values
Values of each constraint.
Definition workspace.hpp:76
MatrixXs kkt_matrix
KKT iteration matrix.
Definition workspace.hpp:40
VectorXs kkt_rhs
KKT iteration right-hand side.
Definition workspace.hpp:42
std::vector< VectorRef > lams_plus_reproj
Product of the projector Jacobians with the first-order multipliers.
VectorXs objective_gradient
Objective function gradient.
Definition workspace.hpp:81
std::vector< VectorRef > lams_pdal
Primal-dual multiplier estimates (from the pdBCL algorithm)
MatrixXs objective_hessian
Objective function Hessian.
Definition workspace.hpp:83
VectorXs kkt_err
KKT linear system error (for refinement)
Definition workspace.hpp:46
std::vector< VectorRef > shift_cstr_values
Buffer for shifted constraints.
LDLTVariant< Scalar > ldlt_
LDLT storage.
Definition workspace.hpp:55