5#ifdef ALIGATOR_WITH_CHOLMOD
6#include <Eigen/CholmodSupport>
16template <
bool Update,
typename InType,
typename OutScalar>
17void sparseAssignDenseBlock(Eigen::Index i0, Eigen::Index j0,
18 const Eigen::DenseBase<InType> &input,
19 Eigen::SparseMatrix<OutScalar> &out) {
20 assert(i0 + input.rows() <= out.rows() &&
"Inconsistent rows");
21 assert(j0 + input.cols() <= out.cols() &&
"Inconsistent cols");
23 for (Index i = 0; i < input.rows(); i++) {
24 for (Index j = 0; j < input.cols(); j++) {
25 if constexpr (Update) {
26 out.coeffRef(i0 + i, j0 + j) = input(i, j);
28 out.insert(i0 + i, j0 + j) = input(i, j);
35template <
bool Update,
typename Scalar>
36void lqrCreateSparseMatrix(
const LQRProblemTpl<Scalar> &problem,
37 const Scalar mudyn,
const Scalar mueq,
38 Eigen::SparseMatrix<Scalar> &mat,
39 Eigen::Matrix<Scalar, -1, 1> &rhs) {
42 using knot_t = LQRKnotTpl<Scalar>;
43 const auto &knots = problem.stages;
45 if constexpr (!Update) {
46 mat.conservativeResize(nrows, nrows);
50 rhs.conservativeResize(nrows);
52 const size_t N = size_t(problem.horizon());
55 uint nc0 = problem.nc0();
56 rhs.head(nc0) = problem.g0;
57 helpers::sparseAssignDenseBlock<Update>(0, nc0, problem.G0, mat);
58 helpers::sparseAssignDenseBlock<Update>(nc0, 0, problem.G0.transpose(),
60 for (Index kk = 0; kk < nc0; kk++) {
61 if constexpr (Update) {
62 mat.coeffRef(kk, kk) = -mudyn;
64 mat.insert(kk, kk) = -mudyn;
70 for (
size_t t = 0; t <= N; t++) {
71 const knot_t &model = knots[t];
72 const uint n = model.nx + model.nu + model.nc;
74 auto rhsblk = rhs.segment(idx, n);
76 rhsblk.head(model.nx) = model.q;
77 rhsblk.segment(model.nx, model.nu) = model.r;
78 rhsblk.tail(model.nc) = model.d;
81 helpers::sparseAssignDenseBlock<Update>(idx, idx, model.Q, mat);
82 const Index i0 = idx + model.nx;
84 helpers::sparseAssignDenseBlock<Update>(i0, idx, model.S.transpose(), mat);
85 helpers::sparseAssignDenseBlock<Update>(idx, i0, model.S, mat);
87 helpers::sparseAssignDenseBlock<Update>(i0, i0, model.R, mat);
89 const Index i1 = i0 + model.nu;
91 helpers::sparseAssignDenseBlock<Update>(i1, idx, model.C, mat);
92 helpers::sparseAssignDenseBlock<Update>(idx, i1, model.C.transpose(), mat);
94 helpers::sparseAssignDenseBlock<Update>(i1, i0, model.D, mat);
95 helpers::sparseAssignDenseBlock<Update>(i0, i1, model.D.transpose(), mat);
97 const Index i2 = i1 + model.nc;
99 for (Index kk = i1; kk < i2; kk++) {
100 if constexpr (Update) {
101 mat.coeffRef(kk, kk) = -mueq;
103 mat.insert(kk, kk) = -mueq;
108 rhs.segment(idx + n, model.nx2) = model.f;
111 helpers::sparseAssignDenseBlock<Update>(i2, idx, model.A, mat);
112 helpers::sparseAssignDenseBlock<Update>(idx, i2, model.A.transpose(),
115 helpers::sparseAssignDenseBlock<Update>(i2, i0, model.B, mat);
116 helpers::sparseAssignDenseBlock<Update>(i0, i2, model.B.transpose(), mat);
118 const Index i3 = i2 + model.nx2;
119 helpers::sparseAssignDenseBlock<Update>(i2, i3, model.E, mat);
120 helpers::sparseAssignDenseBlock<Update>(i3, i2, model.E.transpose(), mat);
122 for (Index kk = i2; kk < i3; kk++) {
123 if constexpr (Update) {
124 mat.coeffRef(kk, kk) = -mudyn;
126 mat.insert(kk, kk) = -mudyn;
130 idx += n + model.nx2;
136template <
typename _Scalar>
class CholmodLqSolver {
138 using Scalar = _Scalar;
140 using Problem = LQRProblemTpl<Scalar>;
141 using SparseType = Eigen::SparseMatrix<Scalar>;
142 using Triplet = Eigen::Triplet<Scalar>;
144 explicit CholmodLqSolver(
const Problem &problem, uint numRefinementSteps = 1);
146 bool backward(
const Scalar mudyn,
const Scalar mueq) {
148 lqrCreateSparseMatrix<true>(*problem_, mudyn, mueq, kktMatrix, kktRhs);
149 cholmod.factorize(kktMatrix);
150 return cholmod.info() == Eigen::Success;
153 bool forward(std::vector<VectorXs> &xs, std::vector<VectorXs> &us,
154 std::vector<VectorXs> &vs, std::vector<VectorXs> &lbdas)
const {
155 kktSol = cholmod.solve(-kktRhs);
156 kktResidual = kktRhs;
157 for (uint i = 0; i < numRefinementSteps; i++) {
158 kktResidual.noalias() += kktMatrix * kktSol;
159 kktSol += cholmod.solve(-kktResidual);
165 Scalar computeSparseResidual()
const {
166 kktResidual = kktRhs;
167 kktResidual.noalias() += kktMatrix * kktSol;
168 return math::infty_norm(kktResidual);
172 SparseType kktMatrix;
176 mutable VectorXs kktResidual;
178 mutable VectorXs kktSol;
179 Eigen::CholmodSimplicialLDLT<SparseType> cholmod;
181 uint numRefinementSteps;
184 const Problem *problem_;
187template <
typename Scalar>
188CholmodLqSolver<Scalar>::CholmodLqSolver(
const Problem &problem,
189 uint numRefinementSteps)
190 : kktMatrix(), kktRhs(), cholmod(), numRefinementSteps(numRefinementSteps),
192 lqrCreateSparseMatrix<false>(problem, 1., 1., kktMatrix, kktRhs);
193 assert(kktMatrix.cols() == kktRhs.rows());
194 kktSol.resize(kktRhs.rows());
195 kktResidual.resize(kktRhs.rows());
196 cholmod.analyzePattern(kktMatrix);
199#ifdef ALIGATOR_ENABLE_TEMPLATE_INSTANTIATION
200extern template class CholmodLqSolver<context::Scalar>;
#define ALIGATOR_DYNAMIC_TYPEDEFS(Scalar)
void lqrDenseSolutionToTraj(const LQRProblemTpl< Scalar > &problem, const typename math_types< Scalar >::ConstVectorRef solution, std::vector< typename math_types< Scalar >::VectorXs > &xs, std::vector< typename math_types< Scalar >::VectorXs > &us, std::vector< typename math_types< Scalar >::VectorXs > &vs, std::vector< typename math_types< Scalar >::VectorXs > &lbdas)
Convert a dense KKT right-hand-side solution vector into its trajectory form [x, u, v, lambda].
uint lqrNumRows(const LQRProblemTpl< Scalar > &problem)
Compute the number of rows in the problem matrix.
LQRKnotTpl< context::Scalar > knot_t