1 /* cobyla : constrained optimization by linear approximation */
4 * Copyright (c) 1992, Michael J. D. Powell (M.J.D.Powell@damtp.cam.ac.uk)
5 * Copyright (c) 2004, Jean-Sebastien Roy (js@jeannot.org)
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sublicense, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice shall be included
16 * in all copies or substantial portions of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
22 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28 * This software is a C version of COBYLA2, a constrained optimization by linear
29 * approximation package developed by Michael J. D. Powell in Fortran.
31 * The original source code can be found at :
32 * http://plato.la.asu.edu/topics/problems/nlores.html
35 static char const rcsid[] =
36 "@(#) $Jeannot: cobyla.c,v 1.11 2004/04/18 09:51:36 js Exp $";
44 /* SGJ, 2008: modified COBYLA code to take explicit account of bound
45 constraints. Since bound constraints are linear, these should
46 already be handled exactly when COBYLA optimizes it linear model.
47 However, they were not handled properly when COBYLA created its
48 initial simplex, or when COBYLA updated unacceptable simplices.
49 Since NLopt guarantees that the objective will not be evaluated
50 outside the bound constraints, this required us to handle such
51 points by putting a slope discontinuity into the objective &
52 constraints (below), which slows convergence considerably for
53 smooth functions. Instead, handling them explicitly prevents this
55 #define ENFORCE_BOUNDS 1
57 #define min(a,b) ((a) <= (b) ? (a) : (b))
58 #define max(a,b) ((a) >= (b) ? (a) : (b))
59 #define abs(x) ((x) >= 0 ? (x) : -(x))
61 /**************************************************************************/
62 /* SGJ, 2008: NLopt-style interface function: */
70 const double *lb, *ub;
73 static int func_wrap(int n, int m, double *x, double *f, double *con,
77 double *xtmp = s->xtmp;
78 const double *lb = s->lb, *ub = s->ub;
80 /* in nlopt, we guarante that the function is never evaluated outside
81 the lb and ub bounds, so we need force this with xtmp ... note
82 that this leads to discontinuity in the first derivative, which
83 slows convergence if we don't enable the ENFORCE_BOUNDS feature
85 for (j = 0; j < n; ++j) {
86 if (x[j] < lb[j]) xtmp[j] = lb[j];
87 else if (x[j] > ub[j]) xtmp[j] = ub[j];
91 *f = s->f(n, xtmp, NULL, s->f_data);
92 for (i = 0; i < s->m_orig; ++i)
93 con[i] = -s->fc[i].f(n, xtmp, NULL, s->fc[i].f_data);
94 for (j = 0; j < n; ++j) {
95 if (!nlopt_isinf(lb[j]))
96 con[i++] = x[j] - lb[j];
97 if (!nlopt_isinf(ub[j]))
98 con[i++] = ub[j] - x[j];
100 if (m != i) return 1; /* ... bug?? */
108 COBYLA_MSG_NONE = 0, /* No messages */
109 COBYLA_MSG_EXIT = 1, /* Exit reasons */
110 COBYLA_MSG_ITER = 2, /* Rho and Sigma changes */
111 COBYLA_MSG_INFO = 3 /* Informational messages */
115 * A function as required by cobyla
116 * state is a void pointer provided to the function at each call
118 * n : the number of variables
119 * m : the number of constraints
120 * x : on input, the vector of variables (should not be modified)
121 * f : on output, the value of the function
122 * con : on output, the value of the constraints (vector of size m)
123 * state : on input, the value of the state variable as provided to cobyla
125 * COBYLA will try to make all the values of the constraints positive.
126 * So if you want to input a constraint j such as x[i] <= MAX, set:
127 * con[j] = MAX - x[i]
128 * The function must return 0 if no error occurs or 1 to immediately end the
132 typedef int cobyla_function(int n, int m, double *x, double *f, double *con,
133 func_wrap_state *state);
136 * cobyla : minimize a function subject to constraints
138 * n : number of variables (>=0)
139 * m : number of constraints (>=0)
140 * x : on input, initial estimate ; on output, the solution
141 * minf : on output, minimum objective function
142 * rhobeg : a reasonable initial change to the variables
143 * stop : the NLopt stopping criteria
144 * lb, ub : lower and upper bounds on x
145 * message : see the cobyla_message enum
146 * calcfc : the function to minimize (see cobyla_function)
147 * state : used by function (see cobyla_function)
149 * The cobyla function returns the usual nlopt_result codes.
152 extern nlopt_result cobyla(int n, int m, double *x, double *minf, double rhobeg, nlopt_stopping *stop, const double *lb, const double *ub,
153 int message, cobyla_function *calcfc, func_wrap_state *state);
155 nlopt_result cobyla_minimize(int n, nlopt_func f, void *f_data,
156 int m, nlopt_constraint *fc,
157 const double *lb, const double *ub, /* bounds */
158 double *x, /* in: initial guess, out: minimizer */
160 nlopt_stopping *stop,
167 s.f = f; s.f_data = f_data;
170 s.lb = lb; s.ub = ub;
171 s.xtmp = (double *) malloc(sizeof(double) * n);
172 if (!s.xtmp) return NLOPT_OUT_OF_MEMORY;
174 /* add constraints for lower/upper bounds (if any) */
175 for (j = 0; j < n; ++j) {
176 if (!nlopt_isinf(lb[j]))
178 if (!nlopt_isinf(ub[j]))
182 ret = cobyla(n, m, x, minf, rhobegin, stop, lb, ub, COBYLA_MSG_NONE,
185 /* make sure e.g. rounding errors didn't push us slightly out of bounds */
186 for (j = 0; j < n; ++j) {
187 if (x[j] < lb[j]) x[j] = lb[j];
188 if (x[j] > ub[j]) x[j] = ub[j];
195 /**************************************************************************/
197 /* SGJ, 2010: I find that it seems to increase robustness of the algorithm
198 if, on "simplex" steps (which are intended to improve the linear
199 independence of the simplex, not improve the objective), we multiply
200 the steps by pseudorandom numbers in [0,1). Intuitively, pseudorandom
201 steps are likely to avoid any accidental dependency in the simplex
202 vertices. However, since I still want COBYLA to be a deterministic
203 algorithm, and I don't care all that much about the quality of the
204 randomness here, I implement a simple linear congruential generator
205 rather than use nlopt_urand, and set the initial seed deterministically. */
207 #if defined(HAVE_STDINT_H)
211 #ifndef HAVE_UINT32_T
212 # if SIZEOF_UNSIGNED_LONG == 4
213 typedef unsigned long uint32_t;
214 # elif SIZEOF_UNSIGNED_INT == 4
215 typedef unsigned int uint32_t;
217 # error No 32-bit unsigned integer type
221 /* a simple linear congruential generator */
223 static uint32_t lcg_rand(uint32_t *seed)
225 return (*seed = *seed * 1103515245 + 12345);
228 static double lcg_urand(uint32_t *seed, double a, double b)
230 return a + lcg_rand(seed) * (b - a) / ((uint32_t) -1);
233 /**************************************************************************/
235 static nlopt_result cobylb(int *n, int *m, int *mpp, double *x, double *minf, double *rhobeg,
236 nlopt_stopping *stop, const double *lb, const double *ub, int *iprint, double *con, double *sim,
237 double *simi, double *datmat, double *a, double *vsig, double *veta,
238 double *sigbar, double *dx, double *w, int *iact, cobyla_function *calcfc,
239 func_wrap_state *state);
240 static void trstlp(int *n, int *m, double *a, double *b, double *rho,
241 double *dx, int *ifull, int *iact, double *z__, double *zdota, double *vmultc,
242 double *sdirn, double *dxnew, double *vmultd);
244 /* ------------------------------------------------------------------------ */
246 nlopt_result cobyla(int n, int m, double *x, double *minf, double rhobeg, nlopt_stopping *stop, const double *lb, const double *ub, int iprint,
247 cobyla_function *calcfc, func_wrap_state *state)
249 int icon, isim, isigb, idatm, iveta, isimi, ivsig, iwork, ia, idx, mpp;
255 * This subroutine minimizes an objective function F(X) subject to M
256 * inequality constraints on X, where X is a vector of variables that has
257 * N components. The algorithm employs linear approximations to the
258 * objective and constraint functions, the approximations being formed by
259 * linear interpolation at N+1 points in the space of the variables.
260 * We regard these interpolation points as vertices of a simplex. The
261 * parameter RHO controls the size of the simplex and it is reduced
262 * automatically from RHOBEG to RHOEND. For each RHO the subroutine tries
263 * to achieve a good vector of variables for the current size, and then
264 * RHO is reduced until the value RHOEND is reached. Therefore RHOBEG and
265 * RHOEND should be set to reasonable initial changes to and the required
266 * accuracy in the variables respectively, but this accuracy should be
267 * viewed as a subject for experimentation because it is not guaranteed.
268 * The subroutine has an advantage over many of its competitors, however,
269 * which is that it treats each constraint individually when calculating
270 * a change to the variables, instead of lumping the constraints together
271 * into a single penalty function. The name of the subroutine is derived
272 * from the phrase Constrained Optimization BY Linear Approximations.
274 * The user must set the values of N, M, RHOBEG and RHOEND, and must
275 * provide an initial vector of variables in X. Further, the value of
276 * IPRINT should be set to 0, 1, 2 or 3, which controls the amount of
277 * printing during the calculation. Specifically, there is no output if
278 * IPRINT=0 and there is output only at the end of the calculation if
279 * IPRINT=1. Otherwise each new value of RHO and SIGMA is printed.
280 * Further, the vector of variables and some function information are
281 * given either when RHO is reduced or when each new value of F(X) is
282 * computed in the cases IPRINT=2 or IPRINT=3 respectively. Here SIGMA
283 * is a penalty parameter, it being assumed that a change to X is an
284 * improvement if it reduces the merit function
285 * F(X)+SIGMA*MAX(0.0,-C1(X),-C2(X),...,-CM(X)),
286 * where C1,C2,...,CM denote the constraint functions that should become
287 * nonnegative eventually, at least to the precision of RHOEND. In the
288 * printed output the displayed term that is multiplied by SIGMA is
289 * called MAXCV, which stands for 'MAXimum Constraint Violation'. The
290 * argument MAXFUN is an int variable that must be set by the user to a
291 * limit on the number of calls of CALCFC, the purpose of this routine being
292 * given below. The value of MAXFUN will be altered to the number of calls
293 * of CALCFC that are made. The arguments W and IACT provide real and
294 * int arrays that are used as working space. Their lengths must be at
295 * least N*(3*N+2*M+11)+4*M+6 and M+1 respectively.
297 * In order to define the objective and constraint functions, we require
298 * a subroutine that has the name and arguments
299 * SUBROUTINE CALCFC (N,M,X,F,CON)
300 * DIMENSION X(*),CON(*) .
301 * The values of N and M are fixed and have been defined already, while
302 * X is now the current vector of variables. The subroutine should return
303 * the objective and constraint functions at X in F and CON(1),CON(2),
304 * ...,CON(M). Note that we are trying to adjust X so that F(X) is as
305 * small as possible subject to the constraint functions being nonnegative.
307 * Partition the working space array W to provide the storage that is needed
308 * for the main calculation.
315 if (iprint>=1) fprintf(stderr, "cobyla: N==0.\n");
316 return NLOPT_SUCCESS;
321 if (iprint>=1) fprintf(stderr, "cobyla: N<0 or M<0.\n");
322 return NLOPT_INVALID_ARGS;
325 /* workspace allocation */
326 w = (double*) malloc((n*(3*n+2*m+11)+4*m+6)*sizeof(*w));
329 if (iprint>=1) fprintf(stderr, "cobyla: memory allocation error.\n");
330 return NLOPT_OUT_OF_MEMORY;
332 iact = (int*)malloc((m+1)*sizeof(*iact));
335 if (iprint>=1) fprintf(stderr, "cobyla: memory allocation error.\n");
337 return NLOPT_OUT_OF_MEMORY;
340 /* Parameter adjustments */
350 isimi = isim + n * n + n;
351 idatm = isimi + n * n;
352 ia = idatm + n * mpp + mpp;
353 ivsig = ia + m * n + n;
358 rc = cobylb(&n, &m, &mpp, &x[1], minf, &rhobeg, stop, &lb[1], &ub[1], &iprint,
359 &w[icon], &w[isim], &w[isimi], &w[idatm], &w[ia], &w[ivsig], &w[iveta],
360 &w[isigb], &w[idx], &w[iwork], &iact[1], calcfc, state);
362 /* Parameter adjustments (reverse) */
372 /* ------------------------------------------------------------------------- */
373 static nlopt_result cobylb(int *n, int *m, int *mpp,
374 double *x, double *minf, double *rhobeg,
375 nlopt_stopping *stop, const double *lb, const double *ub,
376 int *iprint, double *con, double *sim, double *simi,
377 double *datmat, double *a, double *vsig, double *veta,
378 double *sigbar, double *dx, double *w, int *iact, cobyla_function *calcfc,
379 func_wrap_state *state)
381 /* System generated locals */
382 int sim_dim1, sim_offset, simi_dim1, simi_offset, datmat_dim1,
383 datmat_offset, a_dim1, a_offset, i__1, i__2, i__3;
386 /* Local variables */
387 double alpha, delta, denom, tempa, barmu;
388 double beta, cmin = 0.0, cmax = 0.0;
389 double cvmaxm, dxsign, prerem = 0.0;
390 double edgmax, pareta, prerec = 0.0, phimin, parsig = 0.0;
392 double phi, rho, sum = 0.0;
393 double ratio, vmold, parmu, error, vmnew;
394 double resmax, cvmaxp;
395 double resnew, trured;
396 double temp, wsig, f;
405 int mp, np, iz, ibrnch;
406 int nbest, ifull, iptem, jdrop;
407 nlopt_result rc = NLOPT_SUCCESS;
409 uint32_t seed = (uint32_t) (*n + *m); /* arbitrary deterministic LCG seed */
412 /* SGJ, 2008: compute rhoend from NLopt stop info */
413 rhoend = stop->xtol_rel * (*rhobeg);
414 for (j = 0; j < *n; ++j)
415 if (rhoend < stop->xtol_abs[j])
416 rhoend = stop->xtol_abs[j];
418 /* SGJ, 2008: added code to keep track of minimum feasible function val */
421 /* Set the initial values of some parameters. The last column of SIM holds */
422 /* the optimal vertex of the current simplex, and the preceding N columns */
423 /* hold the displacements from the optimal vertex to the other vertices. */
424 /* Further, SIMI holds the inverse of the matrix that is contained in the */
425 /* first N columns of SIM. */
427 /* Parameter adjustments */
429 a_offset = 1 + a_dim1 * 1;
432 simi_offset = 1 + simi_dim1 * 1;
435 sim_offset = 1 + sim_dim1 * 1;
438 datmat_offset = 1 + datmat_dim1 * 1;
439 datmat -= datmat_offset;
463 "cobyla: the initial value of RHO is %12.6E and PARMU is set to zero.\n",
468 for (i__ = 1; i__ <= i__1; ++i__) {
470 sim[i__ + np * sim_dim1] = x[i__];
472 for (j = 1; j <= i__2; ++j) {
473 sim[i__ + j * sim_dim1] = 0.;
474 simi[i__ + j * simi_dim1] = 0.;
478 /* SGJ: make sure step rhocur stays inside [lb,ub] */
479 if (x[i__] + rhocur > ub[i__]) {
480 if (x[i__] - rhocur >= lb[i__])
482 else if (ub[i__] - x[i__] > x[i__] - lb[i__])
483 rhocur = 0.5 * (ub[i__] - x[i__]);
485 rhocur = 0.5 * (x[i__] - lb[i__]);
488 sim[i__ + i__ * sim_dim1] = rhocur;
489 simi[i__ + i__ * simi_dim1] = 1.0 / rhocur;
494 /* Make the next call of the user-supplied subroutine CALCFC. These */
495 /* instructions are also used for calling CALCFC during the iterations of */
498 /* SGJ comment: why the hell couldn't he just use a damn subroutine?
499 #*&!%*@ Fortran-66 spaghetti code */
502 if (nlopt_stop_forced(stop)) rc = NLOPT_FORCE_STOP;
503 else if (nlopt_stop_evals(stop)) rc = NLOPT_MAXEVAL_REACHED;
504 else if (nlopt_stop_time(stop)) rc = NLOPT_MAXTIME_REACHED;
505 if (rc != NLOPT_SUCCESS) goto L600;
508 if (calcfc(*n, *m, &x[1], &f, &con[1], state))
511 fprintf(stderr, "cobyla: user requested end of minimization.\n");
518 feasible = 1; /* SGJ, 2010 */
521 for (k = 1; k <= i__1; ++k) {
522 d__1 = resmax, d__2 = -con[k];
523 resmax = max(d__1,d__2);
524 if (d__2 > (k <= state->m_orig ? state->fc[k-1].tol : 0))
525 feasible = 0; /* SGJ, 2010 */
529 /* SGJ, 2008: check for minf_max reached by feasible point */
530 if (f < stop->minf_max && feasible) {
531 rc = NLOPT_MINF_MAX_REACHED;
532 goto L620; /* not L600 because we want to use current x, f, resmax */
535 if (stop->nevals == *iprint - 1 || *iprint == 3) {
536 fprintf(stderr, "cobyla: NFVALS = %4d, F =%13.6E, MAXCV =%13.6E\n",
537 stop->nevals, f, resmax);
539 fprintf(stderr, "cobyla: X =");
540 for (i__ = 1; i__ <= i__1; ++i__) {
541 if (i__>1) fprintf(stderr, " ");
542 fprintf(stderr, "%13.6E", x[i__]);
546 for (i__ = iptemp; i__ <= i__1; ++i__) {
547 if (!((i__-1) % 4)) fprintf(stderr, "\ncobyla: ");
548 fprintf(stderr, "%15.6E", x[i__]);
551 fprintf(stderr, "\n");
559 /* Set the recently calculated function values in a column of DATMAT. This */
560 /* array has a column for each vertex of the current simplex, the entries of */
561 /* each column being the values of the constraint functions (if any) */
562 /* followed by the objective function and the greatest constraint violation */
566 for (k = 1; k <= i__1; ++k) {
567 datmat[k + jdrop * datmat_dim1] = con[k];
569 if (stop->nevals > np) {
573 /* Exchange the new vertex of the initial simplex with the optimal vertex if */
574 /* necessary. Then, if the initial simplex is not complete, pick its next */
575 /* vertex and calculate the function values there. */
578 if (datmat[mp + np * datmat_dim1] <= f) {
579 x[jdrop] = sim[jdrop + np * sim_dim1];
580 } else { /* improvement in function val */
581 double rhocur = x[jdrop] - sim[jdrop + np * sim_dim1];
582 /* SGJ: use rhocur instead of rho. In original code, rhocur == rho
583 always, but I want to change this to ensure that simplex points
584 fall within [lb,ub]. */
585 sim[jdrop + np * sim_dim1] = x[jdrop];
587 for (k = 1; k <= i__1; ++k) {
588 datmat[k + jdrop * datmat_dim1] = datmat[k + np * datmat_dim1]
590 datmat[k + np * datmat_dim1] = con[k];
593 for (k = 1; k <= i__1; ++k) {
594 sim[jdrop + k * sim_dim1] = -rhocur;
597 for (i__ = k; i__ <= i__2; ++i__) {
598 temp -= simi[i__ + k * simi_dim1];
600 simi[jdrop + k * simi_dim1] = temp;
604 if (stop->nevals <= *n) { /* evaluating initial simplex */
605 jdrop = stop->nevals;
606 /* SGJ: was += rho, but using sim[jdrop,jdrop] enforces consistency
607 if we change the stepsize above to stay in [lb,ub]. */
608 x[jdrop] += sim[jdrop + jdrop * sim_dim1];
614 /* Identify the optimal vertex of the current simplex. */
617 phimin = datmat[mp + np * datmat_dim1] + parmu * datmat[*mpp + np *
621 for (j = 1; j <= i__1; ++j) {
622 temp = datmat[mp + j * datmat_dim1] + parmu * datmat[*mpp + j *
627 } else if (temp == phimin && parmu == 0.) {
628 if (datmat[*mpp + j * datmat_dim1] < datmat[*mpp + nbest *
635 /* Switch the best vertex into pole position if it is not there already, */
636 /* and also update SIM, SIMI and DATMAT. */
640 for (i__ = 1; i__ <= i__1; ++i__) {
641 temp = datmat[i__ + np * datmat_dim1];
642 datmat[i__ + np * datmat_dim1] = datmat[i__ + nbest * datmat_dim1]
644 datmat[i__ + nbest * datmat_dim1] = temp;
647 for (i__ = 1; i__ <= i__1; ++i__) {
648 temp = sim[i__ + nbest * sim_dim1];
649 sim[i__ + nbest * sim_dim1] = 0.;
650 sim[i__ + np * sim_dim1] += temp;
653 for (k = 1; k <= i__2; ++k) {
654 sim[i__ + k * sim_dim1] -= temp;
655 tempa -= simi[k + i__ * simi_dim1];
657 simi[nbest + i__ * simi_dim1] = tempa;
661 /* Make an error return if SIGI is a poor approximation to the inverse of */
662 /* the leading N by N submatrix of SIG. */
666 for (i__ = 1; i__ <= i__1; ++i__) {
668 for (j = 1; j <= i__2; ++j) {
674 for (k = 1; k <= i__3; ++k) if (sim[k + j * sim_dim1] != 0) {
675 temp += simi[i__ + k * simi_dim1] * sim[k + j * sim_dim1];
677 d__1 = error, d__2 = abs(temp);
678 error = max(d__1,d__2);
683 fprintf(stderr, "cobyla: rounding errors are becoming damaging.\n");
689 /* Calculate the coefficients of the linear approximations to the objective */
690 /* and constraint functions, placing minus the objective function gradient */
691 /* after the constraint gradients in the array A. The vector W is used for */
695 for (k = 1; k <= i__2; ++k) {
696 con[k] = -datmat[k + np * datmat_dim1];
698 for (j = 1; j <= i__1; ++j) {
699 w[j] = datmat[k + j * datmat_dim1] + con[k];
702 for (i__ = 1; i__ <= i__1; ++i__) {
705 for (j = 1; j <= i__3; ++j) {
706 temp += w[j] * simi[j + i__ * simi_dim1];
711 a[i__ + k * a_dim1] = temp;
715 /* Calculate the values of sigma and eta, and set IFLAG=0 if the current */
716 /* simplex is not acceptable. */
719 parsig = alpha * rho;
722 for (j = 1; j <= i__1; ++j) {
726 for (i__ = 1; i__ <= i__2; ++i__) {
727 d__1 = simi[j + i__ * simi_dim1];
729 d__1 = sim[i__ + j * sim_dim1];
732 vsig[j] = 1. / sqrt(wsig);
733 veta[j] = sqrt(weta);
734 if (vsig[j] < parsig || veta[j] > pareta) {
739 /* If a new vertex is needed to improve acceptability, then decide which */
740 /* vertex to drop from the simplex. */
742 if (ibrnch == 1 || iflag == 1) {
748 for (j = 1; j <= i__1; ++j) {
749 if (veta[j] > temp) {
756 for (j = 1; j <= i__1; ++j) {
757 if (vsig[j] < temp) {
764 /* Calculate the step to the new vertex and its sign. */
766 temp = gamma_ * rho * vsig[jdrop];
768 for (i__ = 1; i__ <= i__1; ++i__) {
769 dx[i__] = temp * simi[jdrop + i__ * simi_dim1];
774 for (k = 1; k <= i__1; ++k) {
777 for (i__ = 1; i__ <= i__2; ++i__) {
778 sum += a[i__ + k * a_dim1] * dx[i__];
781 temp = datmat[k + np * datmat_dim1];
782 d__1 = cvmaxp, d__2 = -sum - temp;
783 cvmaxp = max(d__1,d__2);
784 d__1 = cvmaxm, d__2 = sum - temp;
785 cvmaxm = max(d__1,d__2);
789 if (parmu * (cvmaxp - cvmaxm) > sum + sum) {
793 /* Update the elements of SIM and SIMI, and set the next X. */
797 for (i__ = 1; i__ <= i__1; ++i__) {
798 /* SGJ, 2010: pseudo-randomize simplex steps (see LCG comments above) */
799 dx[i__] = dxsign * dx[i__] * lcg_urand(&seed, 0.01, 1);
800 /* SGJ: make sure dx step says in [lb,ub] */
803 double xi = sim[i__ + np * sim_dim1];
805 if (xi + dx[i__] > ub[i__])
807 if (xi + dx[i__] < lb[i__]) {
808 if (xi - dx[i__] <= ub[i__])
810 else { /* try again with halved step */
817 sim[i__ + jdrop * sim_dim1] = dx[i__];
818 temp += simi[jdrop + i__ * simi_dim1] * dx[i__];
821 for (i__ = 1; i__ <= i__1; ++i__) {
822 simi[jdrop + i__ * simi_dim1] /= temp;
825 for (j = 1; j <= i__1; ++j) {
829 for (i__ = 1; i__ <= i__2; ++i__) {
830 temp += simi[j + i__ * simi_dim1] * dx[i__];
833 for (i__ = 1; i__ <= i__2; ++i__) {
834 simi[j + i__ * simi_dim1] -= temp * simi[jdrop + i__ *
838 x[j] = sim[j + np * sim_dim1] + dx[j];
842 /* Calculate DX=x(*)-x(0). Branch if the length of DX is less than 0.5*RHO. */
846 izdota = iz + *n * *n;
849 idxnew = isdirn + *n;
851 trstlp(n, m, &a[a_offset], &con[1], &rho, &dx[1], &ifull, &iact[1], &w[
852 iz], &w[izdota], &w[ivmc], &w[isdirn], &w[idxnew], &w[ivmd]);
854 /* SGJ: since the bound constraints are linear, we should never get
855 a dx that lies outside the [lb,ub] constraints here, but we'll
856 enforce this anyway just to be paranoid */
858 for (i__ = 1; i__ <= i__1; ++i__) {
859 double xi = sim[i__ + np * sim_dim1];
860 if (xi + dx[i__] > ub[i__]) dx[i__] = ub[i__] - xi;
861 if (xi + dx[i__] < lb[i__]) dx[i__] = xi - lb[i__];
867 for (i__ = 1; i__ <= i__1; ++i__) {
871 if (temp < rho * .25 * rho) {
877 /* Predict the change to F and the new maximum constraint violation if the */
878 /* variables are altered from x(0) to x(0)+DX. */
883 for (k = 1; k <= i__1; ++k) {
886 for (i__ = 1; i__ <= i__2; ++i__) {
887 sum -= a[i__ + k * a_dim1] * dx[i__];
890 resnew = max(resnew,sum);
894 /* Increase PARMU if necessary and branch back if this change alters the */
895 /* optimal vertex. Otherwise PREREM and PREREC will be set to the predicted */
896 /* reductions in the merit function and the maximum constraint violation */
900 prerec = datmat[*mpp + np * datmat_dim1] - resnew;
902 barmu = sum / prerec;
904 if (parmu < barmu * 1.5) {
907 fprintf(stderr, "cobyla: increase in PARMU to %12.6E\n", parmu);
909 phi = datmat[mp + np * datmat_dim1] + parmu * datmat[*mpp + np *
912 for (j = 1; j <= i__1; ++j) {
913 temp = datmat[mp + j * datmat_dim1] + parmu * datmat[*mpp + j *
918 if (temp == phi && parmu == 0.f) {
919 if (datmat[*mpp + j * datmat_dim1] < datmat[*mpp + np *
926 prerem = parmu * prerec - sum;
928 /* Calculate the constraint and objective functions at x(*). Then find the */
929 /* actual reduction in the merit function. */
932 for (i__ = 1; i__ <= i__1; ++i__) {
933 x[i__] = sim[i__ + np * sim_dim1] + dx[i__];
938 vmold = datmat[mp + np * datmat_dim1] + parmu * datmat[*mpp + np *
940 vmnew = f + parmu * resmax;
941 trured = vmold - vmnew;
942 if (parmu == 0. && f == datmat[mp + np * datmat_dim1]) {
944 trured = datmat[*mpp + np * datmat_dim1] - resmax;
947 /* Begin the operations that decide whether x(*) should replace one of the */
948 /* vertices of the current simplex, the change being mandatory if TRURED is */
949 /* positive. Firstly, JDROP is set to the index of the vertex that is to be */
958 for (j = 1; j <= i__1; ++j) {
961 for (i__ = 1; i__ <= i__2; ++i__) {
962 temp += simi[j + i__ * simi_dim1] * dx[i__];
969 sigbar[j] = temp * vsig[j];
972 /* Calculate the value of ell. */
974 edgmax = delta * rho;
977 for (j = 1; j <= i__1; ++j) {
978 if (sigbar[j] >= parsig || sigbar[j] >= vsig[j]) {
983 for (i__ = 1; i__ <= i__2; ++i__) {
984 d__1 = dx[i__] - sim[i__ + j * sim_dim1];
1002 /* Revise the simplex by updating the elements of SIM, SIMI and DATMAT. */
1006 for (i__ = 1; i__ <= i__1; ++i__) {
1007 sim[i__ + jdrop * sim_dim1] = dx[i__];
1008 temp += simi[jdrop + i__ * simi_dim1] * dx[i__];
1011 for (i__ = 1; i__ <= i__1; ++i__) {
1012 simi[jdrop + i__ * simi_dim1] /= temp;
1015 for (j = 1; j <= i__1; ++j) {
1019 for (i__ = 1; i__ <= i__2; ++i__) {
1020 temp += simi[j + i__ * simi_dim1] * dx[i__];
1023 for (i__ = 1; i__ <= i__2; ++i__) {
1024 simi[j + i__ * simi_dim1] -= temp * simi[jdrop + i__ *
1030 for (k = 1; k <= i__1; ++k) {
1031 datmat[k + jdrop * datmat_dim1] = con[k];
1034 /* Branch back for further iterations with the current RHO. */
1036 if (trured > 0. && trured >= prerem * .1) {
1037 /* SGJ, 2010: following a suggestion in the SAS manual (which
1038 mentions a similar modification to COBYLA, although they didn't
1039 publish their source code), increase rho if predicted reduction
1040 is sufficiently close to the actual reduction. Otherwise,
1041 COBLYA seems to easily get stuck making very small steps.
1042 Also require iflag != 0 (i.e., acceptable simplex), again
1043 following SAS suggestion (otherwise I get convergence failure
1045 if (trured >= prerem * 0.9 && trured <= prerem * 1.1 && iflag) {
1056 /* SGJ, 2008: convergence tests for function vals; note that current
1057 best val is stored in datmat[mp + np * datmat_dim1], or in f if
1058 ifull == 1, and previous best is in *minf. This seems like a
1059 sensible place to put the convergence test, as it is the same
1060 place that Powell checks the x tolerance (rho > rhoend). */
1062 double fbest = ifull == 1 ? f : datmat[mp + np * datmat_dim1];
1063 if (fbest < *minf && nlopt_stop_ftol(stop, fbest, *minf)) {
1064 rc = NLOPT_FTOL_REACHED;
1070 /* Otherwise reduce RHO if it is not at its least value and reset PARMU. */
1074 if (rho <= rhoend * 1.5) {
1080 for (k = 1; k <= i__1; ++k) {
1081 cmin = datmat[k + np * datmat_dim1];
1084 for (i__ = 1; i__ <= i__2; ++i__) {
1085 d__1 = cmin, d__2 = datmat[k + i__ * datmat_dim1];
1086 cmin = min(d__1,d__2);
1087 d__1 = cmax, d__2 = datmat[k + i__ * datmat_dim1];
1088 cmax = max(d__1,d__2);
1090 if (k <= *m && cmin < cmax * .5) {
1091 temp = max(cmax,0.) - cmin;
1095 denom = min(denom,temp);
1101 } else if (cmax - cmin < parmu * denom) {
1102 parmu = (cmax - cmin) / denom;
1106 fprintf(stderr, "cobyla: reduction in RHO to %12.6E and PARMU =%13.6E\n",
1110 fprintf(stderr, "cobyla: NFVALS = %4d, F =%13.6E, MAXCV =%13.6E\n",
1111 stop->nevals, datmat[mp + np * datmat_dim1], datmat[*mpp + np * datmat_dim1]);
1113 fprintf(stderr, "cobyla: X =");
1115 for (i__ = 1; i__ <= i__1; ++i__) {
1116 if (i__>1) fprintf(stderr, " ");
1117 fprintf(stderr, "%13.6E", sim[i__ + np * sim_dim1]);
1121 for (i__ = iptemp; i__ <= i__1; ++i__) {
1122 if (!((i__-1) % 4)) fprintf(stderr, "\ncobyla: ");
1123 fprintf(stderr, "%15.6E", x[i__]);
1126 fprintf(stderr, "\n");
1131 rc = NLOPT_XTOL_REACHED;
1133 /* Return the best calculated values of the variables. */
1136 fprintf(stderr, "cobyla: normal return.\n");
1143 for (i__ = 1; i__ <= i__1; ++i__) {
1144 x[i__] = sim[i__ + np * sim_dim1];
1146 f = datmat[mp + np * datmat_dim1];
1147 resmax = datmat[*mpp + np * datmat_dim1];
1151 fprintf(stderr, "cobyla: NFVALS = %4d, F =%13.6E, MAXCV =%13.6E\n",
1152 stop->nevals, f, resmax);
1154 fprintf(stderr, "cobyla: X =");
1155 for (i__ = 1; i__ <= i__1; ++i__) {
1156 if (i__>1) fprintf(stderr, " ");
1157 fprintf(stderr, "%13.6E", x[i__]);
1161 for (i__ = iptemp; i__ <= i__1; ++i__) {
1162 if (!((i__-1) % 4)) fprintf(stderr, "\ncobyla: ");
1163 fprintf(stderr, "%15.6E", x[i__]);
1166 fprintf(stderr, "\n");
1171 /* ------------------------------------------------------------------------- */
1172 static void trstlp(int *n, int *m, double *a,
1173 double *b, double *rho, double *dx, int *ifull,
1174 int *iact, double *z__, double *zdota, double *vmultc,
1175 double *sdirn, double *dxnew, double *vmultd)
1177 /* System generated locals */
1178 int a_dim1, a_offset, z_dim1, z_offset, i__1, i__2;
1181 /* Local variables */
1182 double alpha, tempa;
1184 double optnew, stpful, sum, tot, acca, accb;
1185 double ratio, vsave, zdotv, zdotw, dd;
1187 double sp, ss, resold = 0.0, zdvabs, zdwabs, sumabs, resmax, optold;
1191 int iout, i__, j, k;
1195 int nact, icon = 0, mcon;
1199 /* This subroutine calculates an N-component vector DX by applying the */
1200 /* following two stages. In the first stage, DX is set to the shortest */
1201 /* vector that minimizes the greatest violation of the constraints */
1202 /* A(1,K)*DX(1)+A(2,K)*DX(2)+...+A(N,K)*DX(N) .GE. B(K), K=2,3,...,M, */
1203 /* subject to the Euclidean length of DX being at most RHO. If its length is */
1204 /* strictly less than RHO, then we use the resultant freedom in DX to */
1205 /* minimize the objective function */
1206 /* -A(1,M+1)*DX(1)-A(2,M+1)*DX(2)-...-A(N,M+1)*DX(N) */
1207 /* subject to no increase in any greatest constraint violation. This */
1208 /* notation allows the gradient of the objective function to be regarded as */
1209 /* the gradient of a constraint. Therefore the two stages are distinguished */
1210 /* by MCON .EQ. M and MCON .GT. M respectively. It is possible that a */
1211 /* degeneracy may prevent DX from attaining the target length RHO. Then the */
1212 /* value IFULL=0 would be set, but usually IFULL=1 on return. */
1214 /* In general NACT is the number of constraints in the active set and */
1215 /* IACT(1),...,IACT(NACT) are their indices, while the remainder of IACT */
1216 /* contains a permutation of the remaining constraint indices. Further, Z is */
1217 /* an orthogonal matrix whose first NACT columns can be regarded as the */
1218 /* result of Gram-Schmidt applied to the active constraint gradients. For */
1219 /* J=1,2,...,NACT, the number ZDOTA(J) is the scalar product of the J-th */
1220 /* column of Z with the gradient of the J-th active constraint. DX is the */
1221 /* current vector of variables and here the residuals of the active */
1222 /* constraints should be zero. Further, the active constraints have */
1223 /* nonnegative Lagrange multipliers that are held at the beginning of */
1224 /* VMULTC. The remainder of this vector holds the residuals of the inactive */
1225 /* constraints at DX, the ordering of the components of VMULTC being in */
1226 /* agreement with the permutation of the indices of the constraints that is */
1227 /* in IACT. All these residuals are nonnegative, which is achieved by the */
1228 /* shift RESMAX that makes the least residual zero. */
1230 /* Initialize Z and some other variables. The value of RESMAX will be */
1231 /* appropriate to DX=0, while ICON will be the index of a most violated */
1232 /* constraint if RESMAX is positive. Usually during the first stage the */
1233 /* vector SDIRN gives a search direction that reduces all the active */
1234 /* constraint violations by one simultaneously. */
1236 /* Parameter adjustments */
1238 z_offset = 1 + z_dim1 * 1;
1241 a_offset = 1 + a_dim1 * 1;
1258 for (i__ = 1; i__ <= i__1; ++i__) {
1260 for (j = 1; j <= i__2; ++j) {
1261 z__[i__ + j * z_dim1] = 0.;
1263 z__[i__ + i__ * z_dim1] = 1.;
1268 for (k = 1; k <= i__1; ++k) {
1269 if (b[k] > resmax) {
1275 for (k = 1; k <= i__1; ++k) {
1277 vmultc[k] = resmax - b[k];
1284 for (i__ = 1; i__ <= i__1; ++i__) {
1288 /* End the current stage of the calculation if 3 consecutive iterations */
1289 /* have either failed to reduce the best calculated value of the objective */
1290 /* function or to increase the number of active constraints since the best */
1291 /* value was calculated. This strategy prevents cycling, but there is a */
1292 /* remote possibility that it will cause premature termination. */
1303 for (i__ = 1; i__ <= i__1; ++i__) {
1304 optnew -= dx[i__] * a[i__ + mcon * a_dim1];
1307 if (icount == 0 || optnew < optold) {
1311 } else if (nact > nactx) {
1321 /* If ICON exceeds NACT, then we add the constraint with index IACT(ICON) to */
1322 /* the active set. Apply Givens rotations so that the last N-NACT-1 columns */
1323 /* of Z are orthogonal to the gradient of the new constraint, a scalar */
1324 /* product being set to zero if its nonzero value could be due to computer */
1325 /* rounding errors. The array DXNEW is used for working space. */
1332 for (i__ = 1; i__ <= i__1; ++i__) {
1333 dxnew[i__] = a[i__ + kk * a_dim1];
1342 for (i__ = 1; i__ <= i__1; ++i__) {
1343 temp = z__[i__ + k * z_dim1] * dxnew[i__];
1347 acca = spabs + abs(sp) * .1;
1348 accb = spabs + abs(sp) * .2;
1349 if (spabs >= acca || acca >= accb) {
1356 temp = sqrt(sp * sp + tot * tot);
1361 for (i__ = 1; i__ <= i__1; ++i__) {
1362 temp = alpha * z__[i__ + k * z_dim1] + beta * z__[i__ + kp *
1364 z__[i__ + kp * z_dim1] = alpha * z__[i__ + kp * z_dim1] -
1365 beta * z__[i__ + k * z_dim1];
1366 z__[i__ + k * z_dim1] = temp;
1373 /* Add the new constraint if this can be done without a deletion from the */
1379 vmultc[icon] = vmultc[nact];
1384 /* The next instruction is reached if a deletion has to be made from the */
1385 /* active set in order to make room for the new active constraint, because */
1386 /* the new constraint gradient is a linear combination of the gradients of */
1387 /* the old active constraints. Set the elements of VMULTD to the multipliers */
1388 /* of the linear combination. Further, set IOUT to the index of the */
1389 /* constraint to be deleted, but branch if no suitable index can be found. */
1397 for (i__ = 1; i__ <= i__1; ++i__) {
1398 temp = z__[i__ + k * z_dim1] * dxnew[i__];
1400 zdvabs += abs(temp);
1402 acca = zdvabs + abs(zdotv) * .1;
1403 accb = zdvabs + abs(zdotv) * .2;
1404 if (zdvabs < acca && acca < accb) {
1405 temp = zdotv / zdota[k];
1406 if (temp > 0. && iact[k] <= *m) {
1407 tempa = vmultc[k] / temp;
1408 if (ratio < 0. || tempa < ratio) {
1416 for (i__ = 1; i__ <= i__1; ++i__) {
1417 dxnew[i__] -= temp * a[i__ + kw * a_dim1];
1432 /* Revise the Lagrange multipliers and reorder the active constraints so */
1433 /* that the one to be replaced is at the end of the list. Also calculate the */
1434 /* new value of ZDOTA(NACT) and branch if it is not acceptable. */
1437 for (k = 1; k <= i__1; ++k) {
1438 d__1 = 0., d__2 = vmultc[k] - ratio * vmultd[k];
1439 vmultc[k] = max(d__1,d__2);
1443 vsave = vmultc[icon];
1450 for (i__ = 1; i__ <= i__1; ++i__) {
1451 sp += z__[i__ + k * z_dim1] * a[i__ + kw * a_dim1];
1454 temp = sqrt(sp * sp + d__1 * d__1);
1455 alpha = zdota[kp] / temp;
1457 zdota[kp] = alpha * zdota[k];
1460 for (i__ = 1; i__ <= i__1; ++i__) {
1461 temp = alpha * z__[i__ + kp * z_dim1] + beta * z__[i__ + k *
1463 z__[i__ + kp * z_dim1] = alpha * z__[i__ + k * z_dim1] - beta *
1464 z__[i__ + kp * z_dim1];
1465 z__[i__ + k * z_dim1] = temp;
1468 vmultc[k] = vmultc[kp];
1478 for (i__ = 1; i__ <= i__1; ++i__) {
1479 temp += z__[i__ + nact * z_dim1] * a[i__ + kk * a_dim1];
1486 vmultc[nact] = ratio;
1488 /* Update IACT and ensure that the objective function continues to be */
1489 /* treated as the last active constraint when MCON>M. */
1492 iact[icon] = iact[nact];
1494 if (mcon > *m && kk != mcon) {
1498 for (i__ = 1; i__ <= i__1; ++i__) {
1499 sp += z__[i__ + k * z_dim1] * a[i__ + kk * a_dim1];
1502 temp = sqrt(sp * sp + d__1 * d__1);
1503 alpha = zdota[nact] / temp;
1505 zdota[nact] = alpha * zdota[k];
1508 for (i__ = 1; i__ <= i__1; ++i__) {
1509 temp = alpha * z__[i__ + nact * z_dim1] + beta * z__[i__ + k *
1511 z__[i__ + nact * z_dim1] = alpha * z__[i__ + k * z_dim1] - beta *
1512 z__[i__ + nact * z_dim1];
1513 z__[i__ + k * z_dim1] = temp;
1515 iact[nact] = iact[k];
1518 vmultc[k] = vmultc[nact];
1519 vmultc[nact] = temp;
1522 /* If stage one is in progress, then set SDIRN to the direction of the next */
1523 /* change to the current vector of variables. */
1531 for (i__ = 1; i__ <= i__1; ++i__) {
1532 temp += sdirn[i__] * a[i__ + kk * a_dim1];
1535 temp /= zdota[nact];
1537 for (i__ = 1; i__ <= i__1; ++i__) {
1538 sdirn[i__] -= temp * z__[i__ + nact * z_dim1];
1542 /* Delete the constraint that has the index IACT(ICON) from the active set. */
1547 vsave = vmultc[icon];
1554 for (i__ = 1; i__ <= i__1; ++i__) {
1555 sp += z__[i__ + k * z_dim1] * a[i__ + kk * a_dim1];
1558 temp = sqrt(sp * sp + d__1 * d__1);
1559 alpha = zdota[kp] / temp;
1561 zdota[kp] = alpha * zdota[k];
1564 for (i__ = 1; i__ <= i__1; ++i__) {
1565 temp = alpha * z__[i__ + kp * z_dim1] + beta * z__[i__ + k *
1567 z__[i__ + kp * z_dim1] = alpha * z__[i__ + k * z_dim1] - beta *
1568 z__[i__ + kp * z_dim1];
1569 z__[i__ + k * z_dim1] = temp;
1572 vmultc[k] = vmultc[kp];
1582 /* If stage one is in progress, then set SDIRN to the direction of the next */
1583 /* change to the current vector of variables. */
1590 for (i__ = 1; i__ <= i__1; ++i__) {
1591 temp += sdirn[i__] * z__[i__ + (nact + 1) * z_dim1];
1594 for (i__ = 1; i__ <= i__1; ++i__) {
1595 sdirn[i__] -= temp * z__[i__ + (nact + 1) * z_dim1];
1599 /* Pick the next search direction of stage two. */
1602 temp = 1. / zdota[nact];
1604 for (i__ = 1; i__ <= i__1; ++i__) {
1605 sdirn[i__] = temp * z__[i__ + nact * z_dim1];
1608 /* Calculate the step to the boundary of the trust region or take the step */
1609 /* that reduces RESMAX to zero. The two statements below that include the */
1610 /* factor 1.0E-6 prevent some harmless underflows that occurred in a test */
1611 /* calculation. Further, we skip the step if it could be zero within a */
1612 /* reasonable tolerance for computer rounding errors. */
1619 for (i__ = 1; i__ <= i__1; ++i__) {
1620 if ((d__1 = dx[i__], abs(d__1)) >= *rho * 1e-6f) {
1624 sd += dx[i__] * sdirn[i__];
1631 temp = sqrt(ss * dd);
1632 if (abs(sd) >= temp * 1e-6f) {
1633 temp = sqrt(ss * dd + sd * sd);
1635 stpful = dd / (temp + sd);
1638 acca = step + resmax * .1;
1639 accb = step + resmax * .2;
1640 if (step >= acca || acca >= accb) {
1643 step = min(step,resmax);
1646 /* Set DXNEW to the new variables if STEP is the steplength, and reduce */
1647 /* RESMAX to the corresponding maximum residual if stage one is being done. */
1648 /* Because DXNEW will be changed during the calculation of some Lagrange */
1649 /* multipliers, it will be restored to the following value later. */
1652 for (i__ = 1; i__ <= i__1; ++i__) {
1653 dxnew[i__] = dx[i__] + step * sdirn[i__];
1659 for (k = 1; k <= i__1; ++k) {
1663 for (i__ = 1; i__ <= i__2; ++i__) {
1664 temp -= a[i__ + kk * a_dim1] * dxnew[i__];
1666 resmax = max(resmax,temp);
1670 /* Set VMULTD to the VMULTC vector that would occur if DX became DXNEW. A */
1671 /* device is included to force VMULTD(K)=0.0 if deviations from this value */
1672 /* can be attributed to computer rounding errors. First calculate the new */
1673 /* Lagrange multipliers. */
1680 for (i__ = 1; i__ <= i__1; ++i__) {
1681 temp = z__[i__ + k * z_dim1] * dxnew[i__];
1683 zdwabs += abs(temp);
1685 acca = zdwabs + abs(zdotw) * .1;
1686 accb = zdwabs + abs(zdotw) * .2;
1687 if (zdwabs >= acca || acca >= accb) {
1690 vmultd[k] = zdotw / zdota[k];
1694 for (i__ = 1; i__ <= i__1; ++i__) {
1695 dxnew[i__] -= vmultd[k] * a[i__ + kk * a_dim1];
1701 d__1 = 0., d__2 = vmultd[nact];
1702 vmultd[nact] = max(d__1,d__2);
1705 /* Complete VMULTC by finding the new constraint residuals. */
1708 for (i__ = 1; i__ <= i__1; ++i__) {
1709 dxnew[i__] = dx[i__] + step * sdirn[i__];
1714 for (k = kl; k <= i__1; ++k) {
1716 sum = resmax - b[kk];
1717 sumabs = resmax + (d__1 = b[kk], abs(d__1));
1719 for (i__ = 1; i__ <= i__2; ++i__) {
1720 temp = a[i__ + kk * a_dim1] * dxnew[i__];
1722 sumabs += abs(temp);
1724 acca = sumabs + abs(sum) * .1f;
1725 accb = sumabs + abs(sum) * .2f;
1726 if (sumabs >= acca || acca >= accb) {
1733 /* Calculate the fraction of the step from DX to DXNEW that will be taken. */
1738 for (k = 1; k <= i__1; ++k) {
1739 if (vmultd[k] < 0.) {
1740 temp = vmultc[k] / (vmultc[k] - vmultd[k]);
1748 /* Update DX, VMULTC and RESMAX. */
1752 for (i__ = 1; i__ <= i__1; ++i__) {
1753 dx[i__] = temp * dx[i__] + ratio * dxnew[i__];
1756 for (k = 1; k <= i__1; ++k) {
1757 d__1 = 0., d__2 = temp * vmultc[k] + ratio * vmultd[k];
1758 vmultc[k] = max(d__1,d__2);
1761 resmax = resold + ratio * (resmax - resold);
1764 /* If the full step is not acceptable then begin another iteration. */
1765 /* Otherwise switch to stage two or end the calculation. */
1770 if (step == stpful) {
1780 /* We employ any freedom that may be available to reduce the objective */
1781 /* function before returning a DX whose length is less than RHO. */