// ---------------------------------------------------------------------------
// NOTE(review): this chunk is a sparse extraction of a larger header — the
// numbers embedded at the start of each line (30, 31, 54, ...) are the
// original file's line numbers, and many intermediate lines are elided.
// Comments below describe only what the visible fragments show.
// ---------------------------------------------------------------------------
// Working-set selection functor, maximum-violating-pair (MVP) style:
// scan the active variables, tracking the largest gradient among variables
// not at their upper bound and the smallest gradient among variables not at
// their lower bound.
30 #ifndef SHARK_ALGORITHMS_QP_SVMPROBLEMS_H 31 #define SHARK_ALGORITHMS_QP_SVMPROBLEMS_H 54 template<
class Problem>
// Selects a working pair; i and j are out-parameters (their assignment lies
// in the elided lines — TODO confirm against the full source).
55 double operator()(Problem& problem, std::size_t& i, std::size_t& j)
// Sentinels: any real gradient value beats these initial extremes.
57 double largestUp = -1e100;
58 double smallestDown = 1e100;
60 for (std::size_t a=0; a < problem.active(); a++)
62 double ga = problem.gradient(a);
// Variables not at the upper bound can still move up ("up" set)...
63 if (!problem.isUpperBound(a))
// ...and variables not at the lower bound can still move down ("down" set).
71 if (!problem.isLowerBound(a))
73 if (ga < smallestDown)
// Returned value is the KKT-violation measure largestUp - smallestDown;
// presumably compared against a stopping tolerance by the caller — TODO
// confirm.
82 return largestUp - smallestDown;
// Second-order working-set selection: a first pass picks the maximally
// violating "up" variable i (that selection code is elided), then a second
// pass evaluates the quadratic-model gain of pairing i with each candidate,
// using the kernel row of i.
102 template<
class Problem>
103 double operator()(Problem& problem, std::size_t& i, std::size_t& j)
108 double smallestDown = 1e100;
109 double largestUp = -1e100;
// First pass over the active set.
111 for (std::size_t a=0; a < problem.active(); a++)
113 double ga = problem.gradient(a);
115 if (!problem.isUpperBound(a))
// No free "up" variable found: nothing can improve, report zero violation.
124 if (largestUp == -1e100)
return 0.0;
// Kernel row of the chosen variable i over the active part of the problem.
127 typename Problem::QpFloatType* q = problem.quadratic().row(i, 0, problem.active());
// Second pass: track smallestDown and evaluate the pairwise gain.
129 for (std::size_t a = 0; a < problem.active(); a++){
130 double ga = problem.gradient(a);
131 if (!problem.isLowerBound(a))
133 smallestDown=std::min(smallestDown,ga);
// Gain of an optimal step on the 2D line through the pair (helper's
// argument list is elided).
135 double gain = detail::maximumGainQuadratic2DOnLine(
// No pair yields any progress — report optimality.
149 if (best == 0.0)
return 0.0;
152 return largestUp - smallestDown;
// Hybrid selection: fall back to the LibSVM-style criterion for small
// problems, when explicitly requested, or when the previous working pair
// sits in a corner of its box; otherwise use maximum-gain (MG) selection
// seeded with the last working pair (last_i, last_j).
168 template<
class Problem>
169 double operator()(Problem& problem, std::size_t& i, std::size_t& j)
171 if (smallProblem || useLibSVM || isInCorner(problem))
// Once the squared active size fits into the kernel cache the problem is
// treated as "small" — the branch body is elided; TODO confirm.
174 if(!smallProblem &&
sqr(problem.active()) < problem.quadratic().getMaxCacheSize())
177 double value = libSVMSelection(problem,i, j);
// MG selection: evaluate candidate partners for both previous variables.
183 MGStep besti = selectMGVariable(problem,last_i);
// Zero violation from the first seed: problem looks optimal (early-out
// body elided).
184 if(besti.violation == 0.0)
186 MGStep bestj = selectMGVariable(problem,last_j);
// Keep whichever seed produced the pair with larger expected gain.
188 if(bestj.gain > besti.gain){
// Presumably orders/swaps (i,j) by gradient — a convention the step
// routine relies on; TODO confirm against the elided body.
195 if (problem.gradient(i) < problem.gradient(j))
199 return besti.violation;
// Fragment of a different member (original line 204): resets the
// small-problem flag; surrounding context elided.
204 smallProblem =
false;
// Returns true when BOTH variables of the last working pair lie within a
// relative tolerance (1e-8 of the box width) of one of their box
// boundaries — i.e. the pair sits in a "corner" of its feasible box.
208 template<
class Problem>
209 bool isInCorner(Problem
const& problem)
const{
210 double Li = problem.boxMin(last_i);
211 double Ui = problem.boxMax(last_i);
212 double Lj = problem.boxMin(last_j);
213 double Uj = problem.boxMax(last_j);
// Tolerances scale with each variable's box width.
214 double eps_i = 1e-8 * (Ui - Li);
215 double eps_j = 1e-8 * (Uj - Lj);
// Both i and j must be (near) a bound for the pair to count as cornered.
217 if ((problem.alpha(last_i) <= Li + eps_i || problem.alpha(last_i) >= Ui - eps_i)
218 && ((problem.alpha(last_j) <= Lj + eps_j || problem.alpha(last_j) >= Uj - eps_j)))
// Maximum-gain partner search: for a fixed variable i, scan every active
// variable a and compute the gain of an optimal SMO step on the pair
// (a, i), clipped to the box constraints of both variables.  Also tracks
// the MVP violation over the active set as a stopping measure.
229 template<
class Problem>
230 MGStep selectMGVariable(Problem& problem,std::size_t i)
const{
233 std::size_t bestIndex = 0;
236 double largestUp = -1e100;
237 double smallestDown = 1e100;
// Cached quantities of the fixed variable i ("b" suffix) and its kernel row.
240 typename Problem::QpFloatType* q = problem.quadratic().row(i, 0, problem.active());
241 double ab = problem.alpha(i);
242 double db = problem.diagonal(i);
243 double Lb = problem.boxMin(i);
244 double Ub = problem.boxMax(i);
245 double gb = problem.gradient(i);
246 for (std::size_t a = 0; a < problem.active(); a++)
248 double ga = problem.gradient(a);
// MVP bookkeeping over the whole active set.
250 if (!problem.isUpperBound(a))
251 largestUp = std::max(largestUp,ga);
252 if (!problem.isLowerBound(a))
253 smallestDown = std::min(smallestDown,ga);
// A variable cannot be paired with itself.
255 if (a == i)
continue;
// Curvature of the objective along the pair direction.
// NOTE(review): no guard against denominator <= 0 is visible before the
// division below — compare with the std::max(denominator,1.e-12) clamp
// used elsewhere in this file.  The elided lines 259-264 may contain a
// guard; TODO confirm.
257 double denominator = (problem.diagonal(a) + db - 2.0 * q[a]);
// Unconstrained Newton-optimal step size along the pair direction.
258 double mu_max = (ga - gb) / denominator;
265 double aa = problem.alpha(a);
266 double La = problem.boxMin(a);
267 double Ua = problem.boxMax(a);
// Clip the step so that alpha(a) (moved by +mu) and alpha(i) (moved by
// -mu) both remain inside their boxes.
268 double mu_star = mu_max;
269 if (aa + mu_star < La) mu_star = La - aa;
270 else if (mu_star + aa > Ua) mu_star = Ua - aa;
271 if (ab - mu_star < Lb) mu_star = ab - Lb;
272 else if (ab - mu_star > Ub) mu_star = ab - Ub;
// Gain of the clipped step under the quadratic model along the line.
274 double gain = mu_star * (2.0 * mu_max - mu_star) * denominator;
// Best-gain tracking between original lines 274 and 284 is elided;
// the resulting step carries the violation and the chosen index.
284 step.violation= largestUp-smallestDown;
285 step.index = bestIndex;
// Constructor fragment: the gradient is initialised with the linear term
// and then corrected by -Q*alpha, one kernel row per variable.  `v` is
// presumably alpha(i), read in the elided lines (rows with v == 0 are
// likely skipped there) — TODO confirm.
299 template<
class Problem>
308 , m_gradient(problem.linear)
309 , m_active(problem.dimensions())
310 , m_alphaStatus(problem.dimensions(),
AlphaFree){
312 for (std::size_t i=0; i != dimensions(); i++){
// Kernel row of variable i over the full problem.
315 QpFloatType* q = quadratic().row(i, 0, dimensions());
316 for (std::size_t a=0; a < dimensions(); a++)
317 m_gradient(a) -= q[a] * v;
// Classify variable i as free / at lower bound / at upper bound.
319 updateAlphaStatus(i);
// One-line accessors forwarding to the wrapped problem.  Each `return`
// below is the body of a separate getter whose signature line is elided
// (dimensions, quadratic, linear, alpha, diagonal, gradient, permutation).
323 return m_problem.dimensions();
345 return m_problem.quadratic;
349 return m_problem.linear(i);
353 return m_problem.alpha(i);
357 return m_problem.diagonal(i);
361 return m_gradient(i);
365 return m_problem.permutation[i];
// getUnpermutedAlpha: maps the internally permuted alpha vector back to
// the caller's original variable ordering.
369 RealVector alpha(dimensions());
370 for (std::size_t i=0; i<dimensions(); i++)
371 alpha(m_problem.permutation[i]) = m_problem.alpha(i);
// SMO pair-update fragment: compute the (clamped) Newton step for the pair
// (i,j) and apply it to the alphas and to the cached gradient.
380 QpFloatType* qi = quadratic().row(i, 0, active());
383 double numerator = gradient(i) - gradient(j);
384 double denominator = diagonal(i) + diagonal(j) - 2.0 * qi[j];
// Guard against non-positive curvature / division by zero.
385 denominator = std::max(denominator,1.e-12);
386 double step = numerator/denominator;
390 double Ui = boxMax(i);
391 double Lj = boxMin(j);
// Old values kept to detect a no-op step below.
392 double aiOld = m_problem.alpha(i);
393 double ajOld = m_problem.alpha(j);
394 double& ai = m_problem.alpha(i);
395 double& aj = m_problem.alpha(j);
// Step would leave the feasible box: clip to whichever bound is hit first
// (branch bodies elided).
396 if (step >= std::min(Ui - ai, aj - Lj))
398 if (Ui - ai > aj - Lj)
404 else if (Ui - ai < aj - Lj)
// Nothing moved — the cached gradient is still valid, so return early.
423 if(ai == aiOld && aj == ajOld)
return;
// Rank-two gradient update: g -= step * (Q_i - Q_j).
426 QpFloatType* qj = quadratic().row(j, 0, active());
427 for (std::size_t a = 0; a < active(); a++)
428 m_gradient(a) -= step * qi[a] - step * qj[a];
431 updateAlphaStatus(i);
432 updateAlphaStatus(j);
// functionValue (separate member, original line 437): dual objective
// 0.5 * (g + c)^T alpha, using the cached gradient and linear term.
437 return 0.5*inner_prod(m_gradient+m_problem.linear,m_problem.alpha);
// setInitialSolution(alpha, gradient): copy an externally computed solution
// and its matching gradient into the internal (permuted) variable ordering.
451 std::size_t n = dimensions();
454 for (std::size_t i=0; i<n; i++)
// j is the external index of internal variable i.
456 std::size_t j = permutation(i);
// The supplied solution must be box-feasible.
457 SHARK_ASSERT(alpha(j) >= boxMin(j) && alpha(j) <= boxMax(j));
458 m_problem.alpha(i) = alpha(j);
459 m_gradient(i) = gradient(j);
460 updateAlphaStatus(i);
// setInitialSolution(alpha): recompute the gradient g = c - Q*alpha from
// scratch, one kernel row per nonzero coefficient, then delegate to the
// two-argument overload above.
472 std::size_t n = dimensions();
474 RealVector gradient = m_problem.linear;
// Reusable buffer for one kernel row.
475 blas::vector<QpFloatType> q(n);
476 for (std::size_t i=0; i<n; i++)
// `a` is presumably the coefficient of variable i, read in the elided
// lines 477-478 — TODO confirm.  Zero coefficients contribute nothing.
479 if (a == 0.0)
continue;
480 m_problem.quadratic.row(i, 0, n, q.storage());
481 noalias(gradient) -= a * q;
483 setInitialSolution(alpha, gradient);
// Fragment of a routine that drives alpha(i) to zero through repeated SMO
// steps against partner variables j (the enclosing signature is elided —
// TODO confirm which member this belongs to).
494 for (std::size_t j=0; j<dimensions(); j++){
497 applyStep(i,j, -alpha(i));
// Stop as soon as the full reduction has been absorbed.
498 if(alpha(i) == 0.0)
break;
506 updateAlphaStatus(i);
// flipCoordinates fragment: exchange variables i and j in the wrapped
// problem and keep the locally cached gradient and status arrays in sync.
516 m_problem.flipCoordinates(i, j);
517 std::swap( m_gradient[i], m_gradient[j]);
518 std::swap( m_alphaStatus[i], m_alphaStatus[j]);
// scaleBoxConstraints fragment: rescale the boxes in the wrapped problem,
// then rescale the cached gradient about the linear term — only the
// -Q*alpha part of g = c - Q*alpha scales with the variables, so the
// linear term is subtracted, the remainder scaled, and the term re-added.
523 m_problem.scaleBoxConstraints(factor,variableScalingFactor);
524 for(std::size_t i = 0; i != this->dimensions(); ++i){
527 m_gradient(i) -= linear(i);
528 m_gradient(i) *= variableScalingFactor;
529 m_gradient(i) += linear(i);
530 updateAlphaStatus(i);
// setLinear fragment: adjust the cached gradient for a changed linear term
// (g += newValue - oldLinear) before storing the new value, so the
// invariant g = c - Q*alpha is preserved without a full recomputation.
536 m_gradient(i) -= linear(i);
537 m_gradient(i) += newValue;
538 m_problem.linear(i) = newValue;
// KKT-violation measure over the active set: largestUp - smallestDown,
// the same maximum-violating-pair criterion used by the selection functors
// earlier in this file.
542 double largestUp = -1e100;
543 double smallestDown = 1e100;
544 for (std::size_t a = 0; a < active(); a++){
// "Down" candidates: not pinned at the lower bound.
545 if (!isLowerBound(a))
546 smallestDown = std::min(smallestDown,gradient(a));
// "Up" candidates: not pinned at the upper bound.
547 if (!isUpperBound(a))
548 largestUp = std::max(largestUp,gradient(a));
550 return largestUp - smallestDown;
// applyStep: virtual SMO pair update — move alpha(i) by +step and alpha(j)
// by -step, clipped to the box constraints, then keep the cached gradient
// consistent via a rank-two update.
572 virtual void applyStep(std::size_t i, std::size_t j,
double step){
576 double Ui = boxMax(i);
577 double Lj = boxMin(j);
// Old values kept to detect a no-op step below.
578 double aiOld = m_problem.alpha(i);
579 double ajOld = m_problem.alpha(j);
580 double& ai = m_problem.alpha(i);
581 double& aj = m_problem.alpha(j);
// Step exceeds the feasible interval: clip to the nearer bound (branch
// bodies elided).
582 if (step >= std::min(Ui - ai, aj - Lj))
584 if (Ui - ai > aj - Lj)
590 else if (Ui - ai < aj - Lj)
// Nothing changed — gradient still valid, return early.
609 if(ai == aiOld && aj == ajOld)
return;
// Rank-two gradient update: g -= step * (Q_i - Q_j).
// NOTE(review): for this to be correct, `step` must hold the clipped
// value at this point; the clipping code is elided so this cannot be
// checked here — TODO confirm against the full source.
612 QpFloatType* qi = quadratic().row(i, 0, active());
613 QpFloatType* qj = quadratic().row(j, 0, active());
614 for (std::size_t a = 0; a < active(); a++)
615 m_gradient(a) -= step * qi[a] - step * qj[a];
618 updateAlphaStatus(i);
619 updateAlphaStatus(j);
// updateAlphaStatus fragment: classify variable i by exact comparison of
// alpha against the box bounds (the status assignments are elided).
// NOTE(review): exact floating-point equality with the bounds appears to
// be intentional here (alphas are clipped exactly onto bounds by the step
// routines) — TODO confirm.
626 if(m_problem.alpha(i) == boxMax(i))
628 if(m_problem.alpha(i) == boxMin(i))
// Shrinking-test fragment (original lines 634-635): a variable is a
// shrink candidate when it is pinned at a bound AND its gradient cannot
// reduce the current violation interval.
634 ( isLowerBound(a) && gradient(a) < smallestDown)
635 || ( isUpperBound(a) && gradient(a) >largestUp)
645 template<
class Problem>