proxsuite 0.7.2
The Advanced Proximal Optimization Toolbox
 
qplayer.py
import os
import scipy.sparse as spa
import numpy as np
import torch

import proxsuite

from torch.autograd import Function
from .utils import expandParam, extract_nBatch, bger

def QPFunction(
    eps=1e-9,
    maxIter=1000,
    eps_backward=1.0e-4,
    rho_backward=1.0e-6,
    mu_backward=1.0e-6,
    omp_parallel=False,
    structural_feasibility=True,
):
21 """!
22 Solve a batch of Quadratic Programming (QP) problems.
23
24 This function solves QP problems of the form:
25
26 $$
27 \begin{align}
28 \min_{z} & ~\frac{1}{2}z^{T} Q (\theta) z +p(\theta)^{T}z \\\
29 \text{s.t.} & ~A(\theta) z = b(\theta) \\\
30 & ~l(\theta) \leq G(\theta) z \leq u(\theta)
31 \end{align}
32 $$
33
34 The QP can be infeasible - in this case the solver will return a solution to
35 the closest feasible QP.
36
37 \param[in] eps Tolerance for the primal infeasibility. Defaults to 1e-9.
38 \param[in] maxIter Maximum number of iterations. Defaults to 1000.
39 \param[in] eps_backward Tolerance for the backward pass. Defaults to 1e-4.
40 \param[in] rho_backward The new value for the primal proximal parameter. Defaults to 1e-6.
41 \param[in] mu_backward The new dual proximal parameter used for both equality and inequality. Defaults to 1e-6.
42 \param[in] omp_parallel Whether to solve the QP in parallel. Requires that proxsuite is compiled with openmp support. Defaults to False.
43 \param[in] structural_feasibility Whether to solve the QP with structural feasibility. Defaults to True.
44
45 \returns QPFunctionFn or QPFunctionFn_infeas: A callable object that represents the QP problem solver.
46 We disinguish two cases:
47 1. The QP is feasible. In this case, we solve the QP problem.
48 2. The QP is infeasible. In this case, we solve the closest feasible QP problem.
49
50 The callable object has two main methods:
51
52 \section qpfunction-forward Forward method
53
54 Solve the QP problem.
55
56 \param[in] Q Batch of quadratic cost matrices of size (nBatch, n, n) or (n, n).
57 \param[in] p Batch of linear cost vectors of size (nBatch, n) or (n).
58 \param[in] A Optional batch of eq. constraint matrices of size (nBatch, p, n) or (p, n).
59 \param[in] b Optional batch of eq. constraint vectors of size (nBatch, p) or (p).
60 \param[in] G Batch of ineq. constraint matrices of size (nBatch, m, n) or (m, n).
61 \param[in] l Batch of ineq. lower bound vectors of size (nBatch, m) or (m).
62 \param[in] u Batch of ineq. upper bound vectors of size (nBatch, m) or (m).
63
64 \returns \p zhats Batch of optimal primal solutions of size (nBatch, n).
65 \returns \p lams Batch of dual variables for eq. constraint of size (nBatch, m).
66 \returns \p nus Batch of dual variables for ineq. constraints of size (nBatch, p).
67 \returns \p s_e Only returned in the infeasible case: batch of slack variables for eq. constraints of size (nBatch, m).
68 \returns \p s_i Only returned in the infeasible case: batch of slack variables for ineq. constraints of size (nBatch, p).
69
70 \section qpfunction-backward Backward method
71
72 Compute the gradients of the QP problem with respect to its parameters.
73
74 \param[in] dl_dzhat Batch of gradients of size (nBatch, n).
75 \param[in] dl_dlams Optional batch of gradients of size (nBatch, p).
76 \param[in] dl_dnus Optional batch of gradients of size (nBatch, m).
77 \param[in] dl_ds_e Only applicable in the infeasible case: optional batch of gradients of size (nBatch, m).
78 \param[in] dl_ds_i Only applicable in the infeasible case: optional batch of gradients of size (nBatch, m).
79
80 \returns \p dQs Batch of gradients of size (nBatch, n, n).
81 \returns \p dps Batch of gradients of size (nBatch, n).
82 \returns \p dAs Batch of gradients of size (nBatch, p, n).
83 \returns \p dbs Batch of gradients of size (nBatch, p).
84 \returns \p dGs Batch of gradients of size (nBatch, m, n).
85 \returns \p dls Batch of gradients of size (nBatch, m).
86 \returns \p dus Batch of gradients of size (nBatch, m).
87 """
    global proxqp_parallel
    proxqp_parallel = omp_parallel

    class QPFunctionFn(Function):
        @staticmethod
        def forward(ctx, Q_, p_, A_, b_, G_, l_, u_):
            nBatch = extract_nBatch(Q_, p_, A_, b_, G_, l_, u_)
            Q, _ = expandParam(Q_, nBatch, 3)
            p, _ = expandParam(p_, nBatch, 2)
            G, _ = expandParam(G_, nBatch, 3)
            u, _ = expandParam(u_, nBatch, 2)
            l, _ = expandParam(l_, nBatch, 2)
            A, _ = expandParam(A_, nBatch, 3)
            b, _ = expandParam(b_, nBatch, 2)

            ctx.vector_of_qps = proxsuite.proxqp.dense.BatchQP()

            ctx.nBatch = nBatch

            _, nineq, nz = G.size()
            neq = A.size(1) if A.nelement() > 0 else 0
            assert neq > 0 or nineq > 0
            ctx.neq, ctx.nineq, ctx.nz = neq, nineq, nz

            ctx.cpu = os.cpu_count()
            if ctx.cpu is not None:
                ctx.cpu = max(1, int(ctx.cpu / 2))

            zhats = torch.empty((nBatch, ctx.nz), dtype=Q.dtype)
            lams = torch.empty((nBatch, ctx.neq), dtype=Q.dtype)
            nus = torch.empty((nBatch, ctx.nineq), dtype=Q.dtype)

            for i in range(nBatch):
                qp = ctx.vector_of_qps.init_qp_in_place(ctx.nz, ctx.neq, ctx.nineq)
                qp.settings.primal_infeasibility_solving = False
                qp.settings.max_iter = maxIter
                qp.settings.max_iter_in = 100
                default_rho = 5.0e-5
                qp.settings.default_rho = default_rho
                qp.settings.refactor_rho_threshold = default_rho  # no refactorization
                qp.settings.eps_abs = eps
                Ai, bi = (A[i], b[i]) if neq > 0 else (None, None)
                H__ = None
                if Q[i] is not None:
                    H__ = Q[i].cpu().numpy()
                p__ = None
                if p[i] is not None:
                    p__ = p[i].cpu().numpy()
                G__ = None
                if G[i] is not None:
                    G__ = G[i].cpu().numpy()
                u__ = None
                if u[i] is not None:
                    u__ = u[i].cpu().numpy()
                l__ = None
                if l[i] is not None:
                    l__ = l[i].cpu().numpy()
                A__ = None
                if Ai is not None:
                    A__ = Ai.cpu().numpy()
                b__ = None
                if bi is not None:
                    b__ = bi.cpu().numpy()

                qp.init(
                    H=H__, g=p__, A=A__, b=b__, C=G__, l=l__, u=u__, rho=default_rho
                )

            if proxqp_parallel:
                proxsuite.proxqp.dense.solve_in_parallel(
                    num_threads=ctx.cpu, qps=ctx.vector_of_qps
                )
            else:
                for i in range(ctx.vector_of_qps.size()):
                    ctx.vector_of_qps.get(i).solve()

            for i in range(nBatch):
                zhats[i] = torch.tensor(ctx.vector_of_qps.get(i).results.x)
                lams[i] = torch.tensor(ctx.vector_of_qps.get(i).results.y)
                nus[i] = torch.tensor(ctx.vector_of_qps.get(i).results.z)

            return zhats, lams, nus

        @staticmethod
        def backward(ctx, dl_dzhat, dl_dlams, dl_dnus):
            device = dl_dzhat.device
            nBatch, dim, neq, nineq = ctx.nBatch, ctx.nz, ctx.neq, ctx.nineq
            dQs = torch.empty(nBatch, ctx.nz, ctx.nz, device=device)
            dps = torch.empty(nBatch, ctx.nz, device=device)
            dGs = torch.empty(nBatch, ctx.nineq, ctx.nz, device=device)
            dus = torch.empty(nBatch, ctx.nineq, device=device)
            dls = torch.empty(nBatch, ctx.nineq, device=device)
            dAs = torch.empty(nBatch, ctx.neq, ctx.nz, device=device)
            dbs = torch.empty(nBatch, ctx.neq, device=device)

            ctx.cpu = os.cpu_count()
            if ctx.cpu is not None:
                ctx.cpu = max(1, int(ctx.cpu / 2))

            n_tot = dim + neq + nineq

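            # Stack the incoming loss gradients into one right-hand side per QP:
            # rhs = [dl_dzhat; dl_dlams; dl_dnus], matching the
            # n_tot = dim + neq + nineq layout consumed by the backward routines below.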
            if proxqp_parallel:
                vector_of_loss_derivatives = (
                    proxsuite.proxqp.dense.VectorLossDerivatives()
                )

                for i in range(nBatch):
                    rhs = np.zeros(n_tot)
                    rhs[:dim] = dl_dzhat[i]
                    if dl_dlams is not None:
                        rhs[dim : dim + neq] = dl_dlams[i]
                    if dl_dnus is not None:
                        rhs[dim + neq :] = dl_dnus[i]
                    vector_of_loss_derivatives.append(rhs)

                proxsuite.proxqp.dense.solve_backward_in_parallel(
                    num_threads=ctx.cpu,
                    qps=ctx.vector_of_qps,
                    loss_derivatives=vector_of_loss_derivatives,
                    eps=eps_backward,
                    rho_backward=rho_backward,
                    mu_backward=mu_backward,
                )  # try with systematic fwd bwd
            else:
                for i in range(nBatch):
                    rhs = np.zeros(n_tot)
                    rhs[:dim] = dl_dzhat[i].cpu()
                    if dl_dlams is not None:
                        rhs[dim : dim + neq] = dl_dlams[i].cpu()
                    if dl_dnus is not None:
                        rhs[dim + neq :] = dl_dnus[i].cpu()
                    qpi = ctx.vector_of_qps.get(i)
                    proxsuite.proxqp.dense.compute_backward(
                        qp=qpi,
                        loss_derivative=rhs,
                        eps=eps_backward,
                        rho_backward=rho_backward,
                        mu_backward=mu_backward,
                    )

            for i in range(nBatch):
                dQs[i] = torch.tensor(
                    ctx.vector_of_qps.get(i).model.backward_data.dL_dH
                )
                dps[i] = torch.tensor(
                    ctx.vector_of_qps.get(i).model.backward_data.dL_dg
                )
                dGs[i] = torch.tensor(
                    ctx.vector_of_qps.get(i).model.backward_data.dL_dC
                )
                dus[i] = torch.tensor(
                    ctx.vector_of_qps.get(i).model.backward_data.dL_du
                )
                dls[i] = torch.tensor(
                    ctx.vector_of_qps.get(i).model.backward_data.dL_dl
                )
                dAs[i] = torch.tensor(
                    ctx.vector_of_qps.get(i).model.backward_data.dL_dA
                )
                dbs[i] = torch.tensor(
                    ctx.vector_of_qps.get(i).model.backward_data.dL_db
                )

            grads = (dQs, dps, dAs, dbs, dGs, dls, dus)

            return grads

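    # Variant used when structural_feasibility=False: the double-sided inequality
    # l <= G z <= u is rewritten in the stacked single-sided form
    # [-G; G] z <= [-l; u], and the slack variables s_e, s_i expose the shift to
    # the closest feasible QP.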
    class QPFunctionFn_infeas(Function):
        @staticmethod
        def forward(ctx, Q_, p_, A_, b_, G_, l_, u_):
            n_in, nz = G_.size()  # true double-sided inequality size
            nBatch = extract_nBatch(Q_, p_, A_, b_, G_, l_, u_)

            Q, _ = expandParam(Q_, nBatch, 3)
            p, _ = expandParam(p_, nBatch, 2)
            G, _ = expandParam(G_, nBatch, 3)
            u, _ = expandParam(u_, nBatch, 2)
            l, _ = expandParam(l_, nBatch, 2)
            A, _ = expandParam(A_, nBatch, 3)
            b, _ = expandParam(b_, nBatch, 2)

            h = torch.cat((-l, u), axis=1)  # single-sided inequality
            G = torch.cat((-G, G), axis=1)  # single-sided inequality

            _, nineq, nz = G.size()
            neq = A.size(1) if A.nelement() > 0 else 0
            assert neq > 0 or nineq > 0
            ctx.neq, ctx.nineq, ctx.nz = neq, nineq, nz

            zhats = torch.empty((nBatch, ctx.nz), dtype=Q.dtype)
            nus = torch.empty((nBatch, ctx.nineq), dtype=Q.dtype)
            nus_sol = torch.empty(
                (nBatch, n_in), dtype=Q.dtype
            )  # double-sided inequality multiplier
            lams = (
                torch.empty(nBatch, ctx.neq, dtype=Q.dtype)
                if ctx.neq > 0
                else torch.empty(0)  # empty placeholder when there are no eq. constraints
            )
            s_e = (
                torch.empty(nBatch, ctx.neq, dtype=Q.dtype)
                if ctx.neq > 0
                else torch.empty(0)  # empty placeholder when there are no eq. constraints
            )
            slacks = torch.empty((nBatch, ctx.nineq), dtype=Q.dtype)
            s_i = torch.empty(
                (nBatch, n_in), dtype=Q.dtype
            )  # sized to the original number of double-sided inequalities, n_in

            vector_of_qps = proxsuite.proxqp.dense.BatchQP()

            ctx.cpu = os.cpu_count()
            if ctx.cpu is not None:
                ctx.cpu = max(1, int(ctx.cpu / 2))
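            # Only upper bounds are used in the stacked single-sided form; the
            # lower bound is set to a very large negative value (effectively -inf).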
            l = -np.ones(ctx.nineq) * 1.0e20

            for i in range(nBatch):
                qp = vector_of_qps.init_qp_in_place(ctx.nz, ctx.neq, ctx.nineq)
                qp.settings.primal_infeasibility_solving = True
                qp.settings.max_iter = maxIter
                qp.settings.max_iter_in = 100
                default_rho = 5.0e-5
                qp.settings.default_rho = default_rho
                qp.settings.refactor_rho_threshold = default_rho  # no refactorization
                qp.settings.eps_abs = eps
                Ai, bi = (A[i], b[i]) if neq > 0 else (None, None)
                H__ = None
                if Q[i] is not None:
                    H__ = Q[i].cpu().numpy()
                p__ = None
                if p[i] is not None:
                    p__ = p[i].cpu().numpy()
                G__ = None
                if G[i] is not None:
                    G__ = G[i].cpu().numpy()
                u__ = None
                if h[i] is not None:
                    u__ = h[i].cpu().numpy()
                # l__ = None
                # if (l[i] is not None):
                #     l__ = l[i].cpu().numpy()
                A__ = None
                if Ai is not None:
                    A__ = Ai.cpu().numpy()
                b__ = None
                if bi is not None:
                    b__ = bi.cpu().numpy()

                qp.init(H=H__, g=p__, A=A__, b=b__, C=G__, l=l, u=u__, rho=default_rho)

            if proxqp_parallel:
                proxsuite.proxqp.dense.solve_in_parallel(
                    num_threads=ctx.cpu, qps=vector_of_qps
                )
            else:
                for i in range(vector_of_qps.size()):
                    vector_of_qps.get(i).solve()

            for i in range(nBatch):
                zhats[i] = torch.tensor(vector_of_qps.get(i).results.x)
                if nineq > 0:
                    # convert the solution back to the double-sided inequality form
                    slack = -h[i] + G[i] @ vector_of_qps.get(i).results.x
                    nus_sol[i] = torch.Tensor(
                        -vector_of_qps.get(i).results.z[:n_in]
                        + vector_of_qps.get(i).results.z[n_in:]
                    )  # de-projecting here may lose information when the solution is inexact
                    nus[i] = torch.tensor(vector_of_qps.get(i).results.z)
                    slacks[i] = slack.clone().detach()
                    s_i[i] = torch.tensor(
                        -vector_of_qps.get(i).results.si[:n_in]
                        + vector_of_qps.get(i).results.si[n_in:]
                    )
                if neq > 0:
                    lams[i] = torch.tensor(vector_of_qps.get(i).results.y)
                    s_e[i] = torch.tensor(vector_of_qps.get(i).results.se)

            ctx.lams = lams
            ctx.nus = nus
            ctx.slacks = slacks
            ctx.save_for_backward(zhats, s_e, Q_, p_, G_, l_, u_, A_, b_)
            return zhats, lams, nus_sol, s_e, s_i

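        # The backward pass differentiates through the closest-feasible solution
        # by assembling an extended KKT system per batch element and solving it
        # as an auxiliary ProxQP problem; slices of that solution recover dx,
        # dlam, dnu and the auxiliary terms b_5, b_6 used in the gradient assembly.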
        @staticmethod
        def backward(ctx, dl_dzhat, dl_dlams, dl_dnus, dl_ds_e, dl_ds_i):
            zhats, s_e, Q, p, G, l, u, A, b = ctx.saved_tensors
            nBatch = extract_nBatch(Q, p, A, b, G, l, u)

            Q, Q_e = expandParam(Q, nBatch, 3)
            p, p_e = expandParam(p, nBatch, 2)
            G, G_e = expandParam(G, nBatch, 3)
            _, u_e = expandParam(u, nBatch, 2)
            _, l_e = expandParam(l, nBatch, 2)
            A, A_e = expandParam(A, nBatch, 3)
            b, b_e = expandParam(b, nBatch, 2)

            h_e = l_e or u_e
            G = torch.cat((-G, G), axis=1)

            neq, nineq = ctx.neq, ctx.nineq
            # true size
            n_in_sol = int(nineq / 2)
            dx = torch.zeros((nBatch, Q.shape[1]))
            dnu = None
            b_5 = None
            dlam = None
            if nineq > 0:
                dnu = torch.zeros((nBatch, nineq))
            if neq > 0:
                dlam = torch.zeros((nBatch, neq))
                b_5 = torch.zeros((nBatch, Q.shape[1]))

            b_6 = torch.zeros((nBatch, Q.shape[1]))
            P_2_c_s_i = torch.zeros((nBatch, nineq))

            n_row = Q.shape[1] + 2 * nineq
            n_col = 2 * Q.shape[1] + 2 * nineq
            if neq > 0:
                n_col += neq + Q.shape[1]
                n_row += 2 * neq
            ctx.cpu = os.cpu_count()
            if ctx.cpu is not None:
                ctx.cpu = max(1, int(ctx.cpu / 2))

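            # kkt collects the differentiated KKT optimality conditions of the
            # single-sided QP; rhs (filled per batch below) carries the negated
            # incoming gradients, and solving the system yields dx, dlam, dnu
            # plus the auxiliary terms b_5 and b_6.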
            kkt = np.zeros((n_row, n_col))
            vector_of_qps = proxsuite.proxqp.sparse.BatchQP()

            for i in range(nBatch):
                Q_i = Q[i].numpy()
                C_i = G[i].numpy()
                A_i = None
                if A is not None:
                    if A.shape[0] != 0:
                        A_i = A[i].numpy()
                z_i = ctx.nus[i]
                s_i = ctx.slacks[i]  # G @ z - h = slacks

                dim = Q_i.shape[0]
                n_eq = neq
                n_in = nineq

                P_1 = np.minimum(s_i, 0.0) + z_i >= 0.0
                P_2 = s_i <= 0.0
                P_2_c_s_i[i] = np.maximum(
                    s_i, 0.0
                )  # keep only (1 - P_2) * s_i for the backward computation afterward

                kkt[:dim, :dim] = Q_i
                if neq > 0:
                    kkt[:dim, dim : dim + n_eq] = A_i.transpose()
                    kkt[dim : dim + n_eq, :dim] = A_i
                    kkt[
                        dim + n_eq + n_in : dim + 2 * n_eq + n_in, dim : dim + n_eq
                    ] = -np.eye(n_eq)
                    kkt[
                        dim + n_eq + n_in : dim + 2 * n_eq + n_in,
                        dim + n_eq + 2 * n_in : 2 * dim + n_eq + 2 * n_in,
                    ] = A_i

                kkt[:dim, dim + n_eq : dim + n_eq + n_in] = C_i.transpose()
                kkt[dim + n_eq : dim + n_eq + n_in, :dim] = C_i

                D_1_c = np.eye(n_in)  # represents [s_i]_- + z_i < 0
                D_1_c[P_1, P_1] = 0.0
                D_1 = np.eye(n_in) - D_1_c  # represents [s_i]_- + z_i >= 0
                D_2_c = np.eye(n_in)  # represents s_i > 0
                D_2_c[P_2, P_2] = 0.0
                D_2 = np.eye(n_in) - D_2_c  # represents s_i <= 0
                kkt[dim + 2 * n_eq + n_in :, dim + n_eq : dim + n_eq + n_in] = -np.eye(
                    n_in
                )
                kkt[
                    dim + n_eq : dim + n_eq + n_in,
                    dim + n_eq + n_in : dim + n_eq + 2 * n_in,
                ] = D_1_c
                kkt[
                    dim + 2 * n_eq + n_in :, dim + n_eq + n_in : dim + n_eq + 2 * n_in
                ] = -np.multiply(np.diag(D_1)[:, None], D_2)
                dim_ = 0
                if n_eq > 0:
                    dim_ += dim
                kkt[dim + 2 * n_eq + n_in :, dim + n_eq + 2 * n_in + dim_ :] = (
                    np.multiply(np.diag(D_2_c)[:, None], C_i)
                )

                rhs = np.zeros(kkt.shape[0])
                rhs[:dim] = -dl_dzhat[i]
                if dl_dlams is not None:
                    if n_eq != 0:
                        rhs[dim : dim + n_eq] = -dl_dlams[i]
                active_set = None
                if n_in != 0:
                    active_set = -z_i[:n_in_sol] + z_i[n_in_sol:] >= 0
                if dl_dnus is not None:
                    if n_in != 0:
                        # convert dl_dnus to the single-sided convention by
                        # reconstructing the active set
                        rhs[dim + n_eq : dim + n_eq + n_in_sol][~active_set] = dl_dnus[
                            i
                        ][~active_set]
                        rhs[dim + n_eq + n_in_sol : dim + n_eq + n_in][
                            active_set
                        ] = -dl_dnus[i][active_set]
                if dl_ds_e is not None:
                    if dl_ds_e.shape[0] != 0:
                        rhs[dim + n_eq + n_in : dim + 2 * n_eq + n_in] = -dl_ds_e[i]
                if dl_ds_i is not None:
                    if dl_ds_i.shape[0] != 0:
                        # likewise, convert dl_ds_i to the single-sided convention
                        # using the reconstructed active set
                        rhs[dim + 2 * n_eq + n_in : dim + 2 * n_eq + n_in + n_in_sol][
                            ~active_set
                        ] = dl_ds_i[i][~active_set]
                        rhs[dim + 2 * n_eq + n_in + n_in_sol :][active_set] = -dl_ds_i[
                            i
                        ][active_set]

                # Auxiliary QP with zero objective, no inequalities, and equality
                # constraints kkt @ x = rhs; primal infeasibility solving handles
                # the case where the system cannot be satisfied exactly.
                l = np.zeros(0)
                u = np.zeros(0)

                C = spa.csc_matrix((0, n_col))
                H = spa.csc_matrix((n_col, n_col))
                g = np.zeros(n_col)

                qp = vector_of_qps.init_qp_in_place(
                    H.shape[0], kkt.shape[0], C.shape[0]
                )

                qp.settings.primal_infeasibility_solving = True
                qp.settings.eps_abs = eps_backward
                qp.settings.max_iter = 10
                qp.settings.default_rho = 1.0e-3
                qp.settings.refactor_rho_threshold = 1.0e-3
                qp.init(
                    H,
                    g,
                    spa.csc_matrix(kkt),
                    rhs,
                    C,
                    l,
                    u,
                )

            if proxqp_parallel:
                proxsuite.proxqp.sparse.solve_in_parallel(
                    num_threads=ctx.cpu, qps=vector_of_qps
                )
            else:
                for i in range(vector_of_qps.size()):
                    vector_of_qps.get(i).solve()

            for i in range(nBatch):
                dx[i] = torch.from_numpy(
                    np.float64(vector_of_qps.get(i).results.x[:dim])
                )
                if n_eq > 0:
                    dlam[i] = torch.from_numpy(
                        np.float64(vector_of_qps.get(i).results.x[dim : dim + n_eq])
                    )
                dnu[i] = torch.from_numpy(
                    np.float64(
                        vector_of_qps.get(i).results.x[dim + n_eq : dim + n_eq + n_in]
                    )
                )
                dim_ = 0
                if n_eq > 0:
                    b_5[i] = torch.from_numpy(
                        np.float64(
                            vector_of_qps.get(i).results.x[
                                dim + n_eq + 2 * n_in : 2 * dim + n_eq + 2 * n_in
                            ]
                        )
                    )
                    dim_ += dim
                b_6[i] = torch.from_numpy(
                    np.float64(
                        vector_of_qps.get(i).results.x[dim + n_eq + 2 * n_in + dim_ :]
                    )
                )

            dps = dx
            dGs = (
                bger(dnu.double(), zhats.double())
                + bger(ctx.nus.double(), dx.double())
                + bger(P_2_c_s_i.double(), b_6.double())
            )
            if G_e:
                dGs = dGs.mean(0)
            dhs = -dnu
            if h_e:
                dhs = dhs.mean(0)
            if neq > 0:
                dAs = (
                    bger(dlam.double(), zhats.double())
                    + bger(ctx.lams.double(), dx.double())
                    + bger(s_e.double(), b_5.double())
                )
                dbs = -dlam
                if A_e:
                    dAs = dAs.mean(0)
                if b_e:
                    dbs = dbs.mean(0)
            else:
                dAs, dbs = None, None
            dQs = 0.5 * (
                bger(dx.double(), zhats.double()) + bger(zhats.double(), dx.double())
            )
            if Q_e:
                dQs = dQs.mean(0)
            if p_e:
                dps = dps.mean(0)

            grads = (
                dQs,
                dps,
                dAs,
                dbs,
                dGs[n_in_sol:, :],
                -dhs[:n_in_sol],
                dhs[n_in_sol:],
            )

            return grads

    if structural_feasibility:
        return QPFunctionFn.apply
    else:
        return QPFunctionFn_infeas.apply
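
# Illustrative call-site sketch (hypothetical tensors Q, p, A, b, G, l, u):
# QPFunction returns the `apply` method of one of the two Function classes above.
#
#   layer = QPFunction(structural_feasibility=True)  # -> QPFunctionFn.apply
#   zhats, lams, nus = layer(Q, p, A, b, G, l, u)
#
#   layer_infeas = QPFunction(structural_feasibility=False)  # -> QPFunctionFn_infeas.apply
#   zhats, lams, nus_sol, s_e, s_i = layer_infeas(Q, p, A, b, G, l, u)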