CXX_LIBS = stogo/libstogo.la
endif
-SUBDIRS= util subplex direct cdirect $(CXX_DIRS) praxis luksan crs mlsl mma cobyla newuoa lbfgs neldermead api . octave test
+SUBDIRS= util direct cdirect $(CXX_DIRS) praxis luksan crs mlsl mma cobyla newuoa lbfgs neldermead api . octave test
EXTRA_DIST=COPYRIGHT autogen.sh nlopt.pc.in m4
if WITH_NOCEDAL
endif
libnlopt@NLOPT_SUFFIX@_la_SOURCES =
-libnlopt@NLOPT_SUFFIX@_la_LIBADD = subplex/libsubplex.la \
+libnlopt@NLOPT_SUFFIX@_la_LIBADD = \
direct/libdirect.la cdirect/libcdirect.la $(CXX_LIBS) \
praxis/libpraxis.la $(NOCEDAL_LBFGS) luksan/libluksan.la crs/libcrs.la \
mlsl/libmlsl.la mma/libmma.la cobyla/libcobyla.la newuoa/libnewuoa.la neldermead/libneldermead.la api/libapi.la util/libutil.la
-AM_CPPFLAGS = -I$(top_srcdir)/cdirect -I$(top_srcdir)/direct -I$(top_srcdir)/stogo -I$(top_srcdir)/subplex -I$(top_srcdir)/praxis -I$(top_srcdir)/lbfgs -I$(top_srcdir)/luksan -I$(top_srcdir)/crs -I$(top_srcdir)/mlsl -I$(top_srcdir)/mma -I$(top_srcdir)/cobyla -I$(top_srcdir)/newuoa -I$(top_srcdir)/neldermead -I$(top_srcdir)/util
+AM_CPPFLAGS = -I$(top_srcdir)/cdirect -I$(top_srcdir)/direct -I$(top_srcdir)/stogo -I$(top_srcdir)/praxis -I$(top_srcdir)/lbfgs -I$(top_srcdir)/luksan -I$(top_srcdir)/crs -I$(top_srcdir)/mlsl -I$(top_srcdir)/mma -I$(top_srcdir)/cobyla -I$(top_srcdir)/newuoa -I$(top_srcdir)/neldermead -I$(top_srcdir)/util
include_HEADERS = nlopt.h nlopt.f
noinst_LTLIBRARIES = libapi.la
"Unscaled Randomized DIRECT-L (global, no-derivative)",
"Original DIRECT version (global, no-derivative)",
"Original DIRECT-L version (global, no-derivative)",
- "Subplex (local, no-derivative)",
#ifdef WITH_CXX
"StoGO (global, derivative-based)",
"StoGO with randomized search (global, derivative-based)",
const double *lb, *ub;
} nlopt_data;
-#include "subplex.h"
#include "praxis.h"
-static double f_subplex(int n, const double *x, void *data_)
+static double f_bound(int n, const double *x, void *data_)
{
int i;
nlopt_data *data = (nlopt_data *) data_;
double f;
- /* subplex does not support bound constraints, but it supports
+ /* some methods do not support bound constraints, but support
discontinuous objectives so we can just return Inf for invalid x */
for (i = 0; i < n; ++i)
if (x[i] < data->lb[i] || x[i] > data->ub[i])
return HUGE_VAL; /* +Inf: reject infeasible x */
#endif
+#if 0
+ /* lacking a free/open-source license, we no longer use
+ Rowan's code, and instead use my "sbplx" re-implementation */
case NLOPT_LN_SUBPLEX: {
int iret;
double *scale = (double *) malloc(sizeof(double) * n);
if (!scale) return NLOPT_OUT_OF_MEMORY;
for (i = 0; i < n; ++i)
scale[i] = initial_step(1, lb+i, ub+i, x+i);
- iret = nlopt_subplex(f_subplex, minf, x, n, &d, &stop, scale);
+ iret = nlopt_subplex(f_bound, minf, x, n, &d, &stop, scale);
free(scale);
switch (iret) {
case -2: return NLOPT_INVALID_ARGS;
}
break;
}
+#endif
case NLOPT_LN_PRAXIS:
return praxis_(0.0, DBL_EPSILON,
- initial_step(n, lb, ub, x), n, x, f_subplex, &d,
+ initial_step(n, lb, ub, x), n, x, f_bound, &d,
&stop, minf);
#ifdef WITH_NOCEDAL
NLOPT_GN_ORIG_DIRECT,
NLOPT_GN_ORIG_DIRECT_L,
- NLOPT_LN_SUBPLEX,
-
NLOPT_GD_STOGO,
NLOPT_GD_STOGO_RAND,
and should be linked via -lnlopt_cxx (via a C++ compiler, in order
to link the C++ standard libraries).
.TP
-.B NLOPT_LN_SUBPLEX
+.B NLOPT_LN_NELDERMEAD
Perform a local (L) derivative-free (N) optimization, starting at
.IR x ,
-using the Subplex algorithm of Rowan et al., which is an improved
-variant of Nelder-Mead simplex algorithm. (Like Nelder-Mead, Subplex
-often works well in practice, even for discontinuous objectives, but
-there is no rigorous guarantee that it will converge.) Subplex is
-best for unconstrained optimization, but constrained optimization also
-works (both for simple bound constraints via
-.I lb
-and
-.I ub
-as well as nonlinear constraints via the crude technique of returning
-+Inf when the constraints are violated, as explained above).
+using the Nelder-Mead simplex algorithm, modified to support bound
+constraints. Nelder-Mead, while popular, is known to occasionally
+fail to converge for some objective functions, so it should be
+used with caution. Anecdotal evidence, on the other hand, suggests
+that it works fairly well for discontinuous objectives. See also
+.B NLOPT_LN_SBPLX
+below.
+.TP
+.B NLOPT_LN_SBPLX
+Perform a local (L) derivative-free (N) optimization, starting at
+.IR x ,
+using an algorithm based on the Subplex algorithm of Rowan et al.,
+which is an improved variant of Nelder-Mead (above). Our
+implementation does not use Rowan's original code, and has some minor
+modifications such as explicit support for bound constraints. (Like
+Nelder-Mead, Subplex often works well in practice, even for
+discontinuous objectives, but there is no rigorous guarantee that it
+will converge.) Nonlinear constraints can be crudely supported
+by returning +Inf when the constraints are violated, as explained above.
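+For example, a constraint such as x[0] + x[1] >= 1 can be imposed
+crudely by having the objective return +Inf (HUGE_VAL in C) at
+infeasible points; an illustrative sketch:
+.sp
+.nf
+    double f(int n, const double *x, double *grad, void *data)
+    {
+        if (x[0] + x[1] < 1) return HUGE_VAL;  /* infeasible: reject */
+        return x[0] * x[0] + x[1] * x[1];      /* ordinary objective */
+    }
+.fi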
.TP
.B NLOPT_LN_PRAXIS
Local (L) derivative-free (N) optimization using the principal-axis
and should be linked via -lnlopt_cxx (via a C++ compiler, in order
to link the C++ standard libraries).
.TP
-.B NLOPT_LN_SUBPLEX
+.B NLOPT_LN_NELDERMEAD
Perform a local (L) derivative-free (N) optimization, starting at
.IR x ,
-using the Subplex algorithm of Rowan et al., which is an improved
-variant of Nelder-Mead simplex algorithm. (Like Nelder-Mead, Subplex
-often works well in practice, even for discontinuous objectives, but
-there is no rigorous guarantee that it will converge.) Subplex is
-best for unconstrained optimization, but constrained optimization also
-works (both for simple bound constraints via
-.I lb
-and
-.I ub
-as well as nonlinear constraints via the crude technique of returning
-+Inf when the constraints are violated, as explained above).
+using the Nelder-Mead simplex algorithm, modified to support bound
+constraints. Nelder-Mead, while popular, is known to occasionally
+fail to converge for some objective functions, so it should be
+used with caution. Anecdotal evidence, on the other hand, suggests
+that it works fairly well for discontinuous objectives. See also
+.B NLOPT_LN_SBPLX
+below.
+.TP
+.B NLOPT_LN_SBPLX
+Perform a local (L) derivative-free (N) optimization, starting at
+.IR x ,
+using an algorithm based on the Subplex algorithm of Rowan et al.,
+which is an improved variant of Nelder-Mead (above). Our
+implementation does not use Rowan's original code, and has some minor
+modifications such as explicit support for bound constraints. (Like
+Nelder-Mead, Subplex often works well in practice, even for
+discontinuous objectives, but there is no rigorous guarantee that it
+will converge.) Nonlinear constraints can be crudely supported
+by returning +Inf when the constraints are violated, as explained above.
.TP
.B NLOPT_LN_PRAXIS
Local (L) derivative-free (N) optimization using the principal-axis
-Nelder-Mead and variations thereof. Possibly the algorithms:
+This directory contains Nelder-Mead and variations thereof.
------------------
+Currently, we implement two algorithms.
-First, the original Nelder-Mead algorithm.
+-----------------------------------------------------------------------
------------------
+First, (almost) the original Nelder-Mead simplex algorithm
+(NLOPT_LN_NELDERMEAD), as described in:
-Second, the provably convergent variant of Nelder-Mead described in:
+ J. A. Nelder and R. Mead, "A simplex method for function
+ minimization," The Computer Journal 7, p. 308-313 (1965).
- C. J. Price, I. D. Coope, and D. Byatt, "A convergent variant
- of the Nelder-Mead algorithm," J. Optim. Theory Appl. 113 (1),
- p. 5-19 (2002).
+This method is simple and has demonstrated enduring popularity,
+despite the later discovery that it fails to converge at all for some
+functions. Anecdotal evidence suggests that it often performs well
+even for noisy and/or discontinuous objective functions. I would tend
+to recommend the Subplex method (below) instead, however.
-And/or possibly the (claimed superior) one in:
-
- A. Burmen, J. Puhan, and T. Tuma, "Grid restrained Nelder-Mead
- algorithm," Computational Optim. Appl. 34(3), 359-375 (2006).
+The main variation is that I implemented explicit support for bound
+constraints, using essentially the method described in:
------------------
+ J. A. Richardson and J. L. Kuester, "The complex method for
+ constrained optimization," Commun. ACM 16(8), 487-489 (1973).
-My own independent implemention of Tom Rowan's Subplex algorithm (a
-more-efficient variant of Nelder-Mead simplex), which was described at:
+Whenever a new point would lie outside the bound constraints,
+Richardson and Kuester advocate moving it "just inside" the
+constraints. I couldn't see any advantage to using a fixed distance
+inside the constraints, especially if the optimum is on the
+constraint, so instead I move the point exactly onto the constraint in
+that case.
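+
+As a minimal sketch (the helper name here is illustrative, not the
+actual NLopt routine), this projection simply clamps each coordinate of
+a trial point onto the box:
+
+    /* clamp each coordinate of xnew[0..n-1] onto the box [lb, ub];
+       a coordinate outside a bound is moved exactly onto that bound,
+       rather than "just inside" it */
+    static void project_onto_bounds(int n, double *xnew,
+                                    const double *lb, const double *ub)
+    {
+         int i;
+         for (i = 0; i < n; ++i) {
+              if (xnew[i] < lb[i]) xnew[i] = lb[i];
+              else if (xnew[i] > ub[i]) xnew[i] = ub[i];
+         }
+    }
+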
- http://www.netlib.org/opt/subplex.tgz
+The danger with implementing bound constraints in this way (or by
+Richardson and Kuester's method) is that you may collapse the simplex
+into a lower-dimensional subspace. I'm not aware of a better way,
+however. In any case, this collapse of the simplex is ameliorated by
+restarting, such as when Nelder-Mead is used within the Subplex
+algorithm below.
+
+-----------------------------------------------------------------------
+
+Second, I re-implemented Tom Rowan's "Subplex" algorithm. As Rowan
+expressed a preference that other implementations of his algorithm use
+a different name, I called my implementation "Sbplx" (NLOPT_LN_SBPLX).
+Subplex (a variant of Nelder-Mead that uses Nelder-Mead on a sequence
+of subspaces) is claimed to be much more efficient and robust than the
+original Nelder-Mead, while retaining the latter's facility with
+discontinuous objectives, and in my experience these claims seem to be
+true. (However, I'm not aware of any proof that Subplex is globally
+convergent, and, like Nelder-Mead, it may fail for some objectives; YMMV.)
+
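+To illustrate the subspace idea, here is a minimal sketch (with
+hypothetical names, not the actual code in this directory) of how an
+inner simplex search can be restricted to a subset of the coordinates:
+the full objective is wrapped so that only the chosen coordinates vary
+while the remaining ones stay fixed.
+
+    typedef struct {
+         int n;           /* full dimension */
+         const int *idx;  /* indices of the subspace coordinates */
+         double *xfull;   /* scratch copy of the full n-dimensional point */
+         double (*f)(int n, const double *x, void *f_data);
+         void *f_data;
+    } subspace_data;
+
+    static double f_subspace(int ns, const double *xs, void *data_)
+    {
+         subspace_data *d = (subspace_data *) data_;
+         int i;
+         for (i = 0; i < ns; ++i)
+              d->xfull[d->idx[i]] = xs[i];  /* vary only subspace coords */
+         return d->f(d->n, d->xfull, d->f_data);
+    }
+
+Subplex alternates between choosing such subspaces (roughly, grouping
+coordinates by the size of their most recent steps) and running
+Nelder-Mead within each one.
+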
+I used the description of Rowan's algorithm in his PhD thesis:
T. Rowan, "Functional Stability Analysis of Numerical Algorithms",
Ph.D. thesis, Department of Computer Sciences, University of Texas
at Austin, 1990.
-I would have liked to use Rowan's original implementation, but its
-legal status is unfortunately unclear. Rowan didn't include any
-license statement at all with the original code, which makes it
-technically illegal to redistribute. I contacted Rowan about getting
-a clear open-source/free-software license for it, and he was very
-agreeable, but he said he had to think about the specific license
-choice and would get back to me. Unfortunately, a year later I still
-haven't heard from him, and his old email address no longer seems to
-work, so I don't know how to contact him for permission.
-
-Although I now have other derivative-free optimization routines in
-NLopt, the subplex algorithm is nice to have because it is somewhat
-tolerant of discontinuous and/or noisy objectives, which may make it a
-good choice for some problems.
-
-Tom Rowan expressed a preference that modified versions of his code
-use a different name from "subplex". Since this is a complete
-from-scratch re-implementation, I figured that he would want a
-different name too, so I am calling it "sbplx".
+I would have preferred to use Rowan's original implementation, posted
+by him on Netlib:
+
+ http://www.netlib.org/opt/subplex.tgz
+
+Unfortunately, the legality of redistributing or modifying this code
+is unclear. Rowan didn't include any license statement at all with
+the original code, which makes it technically illegal to redistribute.
+I contacted Rowan about getting a clear open-source/free-software
+license for it, and he was very agreeable, but he said he had to think
+about the specific license choice and would get back to me.
+Unfortunately, a year later I still haven't heard from him, and his
+old email address no longer seems to work, so I don't know how to
+contact him for permission.
+
+Since the algorithm is not too complicated, however, I just rewrote
+it. There seem to be slight differences between the behavior of my
+implementation and his (probably due to different choices of initial
+subspace and other slight variations, where his paper was ambiguous),
+but the number of iterations to converge on my test problems seems to
+be quite close (within 10% for most problems).
+
+The only major difference between my implementation and Rowan's, as
+far as I can tell, is that I implemented explicit support for bound
+constraints (via the method in the Richardson and Kuester paper cited
+above). This seems to be a big improvement in the case where the
+optimum lies against one of the constraints.
+
+-----------------------------------------------------------------------
+
+Future possibilities:
+
+ C. J. Price, I. D. Coope, and D. Byatt, "A convergent variant
+ of the Nelder-Mead algorithm," J. Optim. Theory Appl. 113 (1),
+ p. 5-19 (2002).
+
+ A. Burmen, J. Puhan, and T. Tuma, "Grid restrained Nelder-Mead
+ algorithm," Computational Optim. Appl. 34(3), 359-375 (2006).
+
+Both of these are provably convergent variations of Nelder-Mead; the
+latter authors claim that theirs is superior.
AM_CPPFLAGS = -I$(top_srcdir)/api
-MFILES = NLOPT_GN_DIRECT.m NLOPT_GN_DIRECT_L.m NLOPT_GN_DIRECT_L_RAND.m NLOPT_GN_DIRECT_NOSCAL.m NLOPT_GN_DIRECT_L_NOSCAL.m NLOPT_GN_DIRECT_L_RAND_NOSCAL.m NLOPT_GN_ORIG_DIRECT.m NLOPT_GN_ORIG_DIRECT_L.m NLOPT_LN_SUBPLEX.m NLOPT_GD_STOGO.m NLOPT_GD_STOGO_RAND.m NLOPT_LD_LBFGS_NOCEDAL.m NLOPT_LD_LBFGS.m NLOPT_LN_PRAXIS.m NLOPT_LD_VAR1.m NLOPT_LD_VAR2.m NLOPT_LD_TNEWTON.m NLOPT_LD_TNEWTON_RESTART.m NLOPT_LD_TNEWTON_PRECOND.m NLOPT_LD_TNEWTON_PRECOND_RESTART.m NLOPT_GN_CRS2_LM.m NLOPT_GN_MLSL.m NLOPT_GD_MLSL.m NLOPT_GN_MLSL_LDS.m NLOPT_GD_MLSL_LDS.m NLOPT_LD_MMA.m NLOPT_LN_COBYLA.m NLOPT_LN_NEWUOA.m NLOPT_LN_NEWUOA_BOUND.m
+MFILES = NLOPT_GN_DIRECT.m NLOPT_GN_DIRECT_L.m NLOPT_GN_DIRECT_L_RAND.m NLOPT_GN_DIRECT_NOSCAL.m NLOPT_GN_DIRECT_L_NOSCAL.m NLOPT_GN_DIRECT_L_RAND_NOSCAL.m NLOPT_GN_ORIG_DIRECT.m NLOPT_GN_ORIG_DIRECT_L.m NLOPT_GD_STOGO.m NLOPT_GD_STOGO_RAND.m NLOPT_LD_LBFGS_NOCEDAL.m NLOPT_LD_LBFGS.m NLOPT_LN_PRAXIS.m NLOPT_LD_VAR1.m NLOPT_LD_VAR2.m NLOPT_LD_TNEWTON.m NLOPT_LD_TNEWTON_RESTART.m NLOPT_LD_TNEWTON_PRECOND.m NLOPT_LD_TNEWTON_PRECOND_RESTART.m NLOPT_GN_CRS2_LM.m NLOPT_GN_MLSL.m NLOPT_GD_MLSL.m NLOPT_GN_MLSL_LDS.m NLOPT_GD_MLSL_LDS.m NLOPT_LD_MMA.m NLOPT_LN_COBYLA.m NLOPT_LN_NEWUOA.m NLOPT_LN_NEWUOA_BOUND.m NLOPT_LN_NELDERMEAD.m NLOPT_LN_SBPLX.m
#######################################################################
octdir = $(OCT_INSTALL_DIR)
%
% See nlopt_minimize for more information.
function val = NLOPT_GD_MLSL
- val = 22;
+ val = 21;
%
% See nlopt_minimize for more information.
function val = NLOPT_GD_MLSL_LDS
- val = 24;
+ val = 23;
%
% See nlopt_minimize for more information.
function val = NLOPT_GD_STOGO
- val = 9;
+ val = 8;
%
% See nlopt_minimize for more information.
function val = NLOPT_GD_STOGO_RAND
- val = 10;
+ val = 9;
%
% See nlopt_minimize for more information.
function val = NLOPT_GN_CRS2_LM
- val = 20;
+ val = 19;
%
% See nlopt_minimize for more information.
function val = NLOPT_GN_MLSL
- val = 21;
+ val = 20;
%
% See nlopt_minimize for more information.
function val = NLOPT_GN_MLSL_LDS
- val = 23;
+ val = 22;
%
% See nlopt_minimize for more information.
function val = NLOPT_LD_LBFGS
- val = 12;
+ val = 11;
%
% See nlopt_minimize for more information.
function val = NLOPT_LD_LBFGS_NOCEDAL
- val = 11;
+ val = 10;
%
% See nlopt_minimize for more information.
function val = NLOPT_LD_MMA
- val = 25;
+ val = 24;
%
% See nlopt_minimize for more information.
function val = NLOPT_LD_TNEWTON
- val = 16;
+ val = 15;
%
% See nlopt_minimize for more information.
function val = NLOPT_LD_TNEWTON_PRECOND
- val = 18;
+ val = 17;
%
% See nlopt_minimize for more information.
function val = NLOPT_LD_TNEWTON_PRECOND_RESTART
- val = 19;
+ val = 18;
%
% See nlopt_minimize for more information.
function val = NLOPT_LD_TNEWTON_RESTART
- val = 17;
+ val = 16;
%
% See nlopt_minimize for more information.
function val = NLOPT_LD_VAR1
- val = 14;
+ val = 13;
%
% See nlopt_minimize for more information.
function val = NLOPT_LD_VAR2
- val = 15;
+ val = 14;
%
% See nlopt_minimize for more information.
function val = NLOPT_LN_COBYLA
- val = 26;
+ val = 25;
--- /dev/null
+% NLOPT_LN_NELDERMEAD: Nelder-Mead simplex algorithm (local, no-derivative)
+%
+% See nlopt_minimize for more information.
+function val = NLOPT_LN_NELDERMEAD
+ val = 28;
%
% See nlopt_minimize for more information.
function val = NLOPT_LN_NEWUOA
- val = 27;
+ val = 26;
%
% See nlopt_minimize for more information.
function val = NLOPT_LN_NEWUOA_BOUND
- val = 28;
+ val = 27;
%
% See nlopt_minimize for more information.
function val = NLOPT_LN_PRAXIS
- val = 13;
+ val = 12;
--- /dev/null
+% NLOPT_LN_SBPLX: Sbplx variant of Nelder-Mead (re-implementation of Rowan's Subplex) (local, no-derivative)
+%
+% See nlopt_minimize for more information.
+function val = NLOPT_LN_SBPLX
+ val = 29;
+++ /dev/null
-% NLOPT_LN_SUBPLEX: Subplex (local, no-derivative)
-%
-% See nlopt_minimize for more information.
-function val = NLOPT_LN_SUBPLEX
- val = 8;