// Global numerical tolerance for the QP solver; per the original
// author's comment this corresponds to roughly 1/10 mm in the
// problem's units.
6 const Real TOL=1e-2; // roughly 1/10 mm
// Build a human-readable dump of the current constraint partition:
// the indices in `active` followed by the indices in `inactive`
// (both are indices into opt->cons).
9 Active_constraints::status() const
11 String s("Active|Inactive [");
12 for (int i=0; i< active.size(); i++) {
13 s += String(active[i]) + " ";
17 for (int i=0; i< inactive.size(); i++) {
18 s += String(inactive[i]) + " ";
// Assert-based invariant check: `active` and `inactive` must exactly
// partition the constraint set of `opt`, H must match the problem
// dimension, and A must carry one row per active constraint.
26 Active_constraints::OK()
31 assert(active.size() +inactive.size() == opt->cons.size());
32 assert(H.dim() == opt->dim());
33 assert(active.size() == A.rows());
36 for (int i=0; i < opt->cons.size(); i++)
38 for (int i=0; i < active.size(); i++) {
42 for (int i=0; i < inactive.size(); i++) {
// Each constraint index must have been tallied exactly once across
// the active and inactive sets (the tally loops are elided in this
// listing -- presumably allcons[] counts occurrences; verify).
46 for (int i=0; i < allcons.size(); i++)
47 assert(allcons[i] == 1);
// Compute the Lagrange multipliers of the active constraints for the
// given objective gradient. (Body elided in this listing; the
// contract is inferred from the KKT check in
// Ineq_constrained_qp::solve -- verify against the full source.)
52 Active_constraints::get_lagrange(Vector gradient)
// Activate constraint k (an index into the `inactive` list): move it
// into the active set and apply rank-one updates to the projection
// matrix H and the active-constraint matrix A.
60 Active_constraints::add(int k)
// Swap entry k to the back of `inactive` so it can be removed cheaply.
66 inactive.swap(k,inactive.size()-1);
// The normal vector of the constraint being activated.
69 Vector a( opt->cons[cidx] );
73 Vector addrow(Ha.dim());
76 a != 0, so if Ha = O(EPS), then
77 Ha * aH / aHa = O(EPS^2/EPS)
79 if H*a == 0, the constraints are dependent.
// Rank-one downdate: project H onto the null space of a.
81 H -= Matrix(Ha/aHa , Ha);
85 sorry, don't know how to justify this. ..
89 A -= Matrix(A*a, addrow);
90 A.insert_row(addrow,A.rows());
// A (numerically) zero update row means the new constraint is linearly
// dependent on the already-active ones.
92 WARN << "degenerate constraints";
// Deactivate active constraint k: return it to the inactive set and
// undo its contribution to H and A via rank-one updates.
96 Active_constraints::drop(int k)
// Index of the last active row -- the row that will be removed from A.
98 int q=active.size()-1;
101 inactive.push(active[k]);
// Only restore H/A when the constraint normal is numerically nonzero.
107 if (a.norm() > EPS) {
// Curvature of the quadratic form along a.
// NOTE(review): this `Real q` shadows the row index `int q` declared
// above -- legal (inner scope) but confusing; consider renaming.
111 Real q = a*opt->quad*a;
113 A -= A*opt->quad*Matrix(a,a/q);
115 WARN << "degenerate constraints";
// Sanity: after the downdate the row being removed must be ~zero.
117 Vector rem_row(A.row(q));
118 assert(rem_row.norm() < EPS);
// Construct the bookkeeping for an empty active set: every constraint
// of `op` starts out inactive, and H is initialised via a Choleski
// decomposition of the quadratic form (H = Q^-1 -- see the
// literature note at the bottom of this file).
125 Active_constraints::Active_constraints(Ineq_constrained_qp const *op)
130 for (int i=0; i < op->cons.size(); i++)
132 Choleski_decomposition chol(op->quad);
136 /* Find the optimum which is in the planes generated by the active
constraints, i.e. the step toward the minimiser restricted to the
currently active set. (Body elided in this listing.) */
140 Active_constraints::find_active_optimum(Vector g)
145 /****************************************************************/
// Linear scan for the index of the smallest element of v; idx stays -1
// (and m stays INFTY) when v is empty.
148 min_elt_index(Vector v)
150 Real m=INFTY; int idx=-1;
151 for (int i = 0; i < v.dim(); i++){
// Sanity: no entry may exceed the INFTY sentinel.
156 assert(v(i) <= INFTY);
161 /// The numerical solver.
// Solve the inequality-constrained quadratic program (objective
// 0.5*x'*quad*x + lin'*x) with an active-set / projected-gradient
// method, starting from the point `start`. See the literature note at
// the bottom of this file.
163 Ineq_constrained_qp::solve(Vector start) const
165 Active_constraints act(this);
// Gradient of the objective at x: quad*x + lin.
172 Vector gradient=quad*x+lin;
175 Vector last_gradient(gradient);
178 while (iterations++ < MAXITER) {
// Search direction: step toward the optimum within the planes of the
// currently active constraints.
179 Vector direction= - act.find_active_optimum(gradient);
181 mtor << "gradient "<< gradient<< "\ndirection " << direction<<"\n";
183 if (direction.norm() > EPS) {
184 mtor << act.status() << '\n';
188 Inactive_iter minidx(act);
192 we know the optimum on this "hyperplane". Check if we
193 bump into the edges of the simplex
// Ratio test over the inactive constraints: find the largest feasible
// step along `direction` before some constraint boundary is hit.
196 for (Inactive_iter ia(act); ia.ok(); ia++) {
// Constraints we move away from (a . direction >= 0) cannot become
// violated by this step; skip them.
198 if (ia.vec() * direction >= 0)
200 Real alfa= - (ia.vec()*x - ia.rhs())/
201 (ia.vec()*direction);
// A full step toward the restricted optimum has length 1.
208 Real unbounded_alfa = 1.0;
209 Real optimal_step = min(minalf, unbounded_alfa);
211 Vector deltax=direction * optimal_step;
// NOTE(review): deltax is already scaled by optimal_step, so scaling
// quad*deltax by optimal_step again looks like a double scaling; for
// an update x += deltax the exact gradient update is
// gradient += quad*deltax. The x-update line is elided here -- verify
// against the full source.
213 gradient += optimal_step * (quad * deltax);
215 mtor << "step = " << optimal_step<< " (|dx| = " <<
216 deltax.norm() << ")\n";
218 if (minalf < unbounded_alfa) {
219 /* bumped into an edge. try again, in smaller space. */
220 act.add(minidx.idx());
221 mtor << "adding cons "<< minidx.idx()<<'\n';
224 /*ASSERT: we are at optimal solution for this "plane"*/
// KKT check: if the smallest Lagrange multiplier is positive, no
// active constraint wants to be released -- optimum reached.
229 Vector lagrange_mult=act.get_lagrange(gradient);
230 int m= min_elt_index(lagrange_mult);
232 if (m>=0 && lagrange_mult(m) > 0) {
233 break; // optimal sol.
// Presumably the no-multipliers branch (m < 0): the unconstrained
// optimum must then have been reached -- verify (guard elided here).
235 assert(gradient.norm() < EPS) ;
// Otherwise release the constraint with the most negative multiplier
// and iterate again in the enlarged subspace.
240 mtor << "dropping cons " << m<<'\n';
243 if (iterations >= MAXITER)
244 WARN<<"didn't converge!\n";
246 mtor << ": found " << x<<" in " << iterations <<" iterations\n";
251 /** Mordecai Avriel, Nonlinear Programming: Analysis and Methods (1976)
256 This is a "projected gradient" algorithm. Starting from a point x,
257 the next point is found in a direction determined by projecting
258 the gradient onto the active constraints. (Well, not really the
259 gradient: the optimal solution obeying the active constraints is
260 tried. This is why H = Q^-1 in the initialisation.)