[liboptimization-java] 01/02: Imported Upstream version 0.1
Andreas Tille
tille at debian.org
Thu May 15 13:08:57 UTC 2014
This is an automated email from the git hooks/post-receive script.
tille pushed a commit to branch master
in repository liboptimization-java.
commit 5b39adbe5f76213c8efdba0127002e9a628bf3c1
Author: Andreas Tille <tille at debian.org>
Date: Thu May 15 15:02:21 2014 +0200
Imported Upstream version 0.1
---
Blas_f77.java | 1660 +++++++++++++++
Fmin.java | 351 +++
Fmin_methods.java | 7 +
Fzero.java | 498 +++++
Fzero_methods.java | 7 +
Lmder_fcn.java | 8 +
Lmdif_fcn.java | 8 +
Minpack_f77.java | 3733 ++++++++++++++++++++++++++++++++
README | 214 ++
Uncmin_f77.java | 5886 +++++++++++++++++++++++++++++++++++++++++++++++++++
Uncmin_methods.java | 11 +
copyright | 34 +
disclaimer | 15 +
13 files changed, 12432 insertions(+)
diff --git a/Blas_f77.java b/Blas_f77.java
new file mode 100644
index 0000000..4bded7c
--- /dev/null
+++ b/Blas_f77.java
@@ -0,0 +1,1660 @@
+/*
+ Blas_f77.java copyright claim:
+
+ This software is based on public domain LINPACK routines.
+ It was translated from FORTRAN to Java by a US government employee
+ on official time. Thus this software is also in the public domain.
+
+
+ The translator's mail address is:
+
+ Steve Verrill
+ USDA Forest Products Laboratory
+ 1 Gifford Pinchot Drive
+ Madison, Wisconsin
+ 53705
+
+
+ The translator's e-mail address is:
+
+ steve at ws13.fpl.fs.fed.us
+
+
+***********************************************************************
+
+DISCLAIMER OF WARRANTIES:
+
+THIS SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND.
+THE TRANSLATOR DOES NOT WARRANT, GUARANTEE OR MAKE ANY REPRESENTATIONS
+REGARDING THE SOFTWARE OR DOCUMENTATION IN TERMS OF THEIR CORRECTNESS,
+RELIABILITY, CURRENTNESS, OR OTHERWISE. THE ENTIRE RISK AS TO
+THE RESULTS AND PERFORMANCE OF THE SOFTWARE IS ASSUMED BY YOU.
+IN NO CASE WILL ANY PARTY INVOLVED WITH THE CREATION OR DISTRIBUTION
+OF THE SOFTWARE BE LIABLE FOR ANY DAMAGE THAT MAY RESULT FROM THE USE
+OF THIS SOFTWARE.
+
+Sorry about that.
+
+***********************************************************************
+
+
+History:
+
+Date Translator Changes
+
+2/25/97 Steve Verrill Translated
+3/10/98 Steve Verrill isamax and colisamax added
+
+*/
+
+
+package linear_algebra;
+
+import linear_algebra.*;
+
+
+/**
+*
+*<p>
+*This class contains Java versions of a number of the LINPACK
+*basic linear algebra subroutines (blas):
+*<ol>
+*<li> isamax_f77
+*<li> daxpy_f77
+*<li> ddot_f77
+*<li> dscal_f77
+*<li> dswap_f77
+*<li> dnrm2_f77
+*<li> dcopy_f77
+*<li> drotg_f77
+*</ol>
+*It also contains utility routines that the translator found useful
+*while translating the FORTRAN code to Java code. "col" indicates that
+*the routine operates on two columns of a matrix. "colv" indicates that
+*the routine operates on a column of a matrix and a vector. The "p"
+*at the end of dscalp, dnrm2p, and dcopyp indicates that these
+*routines operate on a portion of a vector:
+*
+*<ol>
+*<li> colisamax_f77
+*<li> colaxpy_f77
+*<li> colvaxpy_f77
+*<li> colvraxpy_f77
+*<li> coldot_f77
+*<li> colvdot_f77
+*<li> colscal_f77
+*<li> dscalp_f77
+*<li> colswap_f77
+*<li> colnrm2_f77
+*<li> dnrm2p_f77
+*<li> dcopyp_f77
+*<li> colrot_f77
+*<li> sign_f77
+*</ol>
+*
+*<p>
+*<b>IMPORTANT:</b> The "_f77" suffixes indicate that these routines use
+*FORTRAN style indexing. For example, you will see
+*
+* for (i = 1; i <= n; i++)
+*
+*rather than
+*
+* for (i = 0; i < n; i++)
+*
+*To use the "_f77" routines you will have to declare your vectors
+*and matrices to be one element larger (e.g., v[101] rather than
+*v[100], and a[101][101] rather than a[100][100]), and you will have
+*to fill elements 1 through n rather than elements 0 through n - 1.
+*Versions of these programs that use C/Java style indexing will
+*soon be available. They will end with the suffix "_j".
+*
+*<p>
+*This class was translated by a statistician from FORTRAN versions of
+*the LINPACK blas. It is NOT an official translation. It wastes
+*memory by failing to use the first elements of vectors. When
+*public domain Java numerical analysis routines become available
+*from the people who produce LAPACK, then <b>THE CODE PRODUCED
+*BY THE NUMERICAL ANALYSTS SHOULD BE USED</b>.
+*
+*<p>
+*Meanwhile, if you have suggestions for improving this
+*code, please contact Steve Verrill at steve at ws13.fpl.fs.fed.us.
+*
+*@author Steve Verrill
+*@version .5 --- February 25, 1997
+*
+*/
+
+public class Blas_f77 extends Object {
+
+/**
+*
+*<p>
+*This method finds the index of the element of a vector
+*that has the maximum absolute value. It is a translation
+*from FORTRAN to Java of the LINPACK function ISAMAX.
+*In the LINPACK listing ISAMAX is attributed to Jack Dongarra
+*with a date of March 11, 1978.
+*
+*Translated by Steve Verrill, March 10, 1998.
+*
+*@param n The number of elements to be checked
+*@param x[ ] vector
+*@param incx The subscript increment for x[ ]
+*
+*/
+
+
+ public static int isamax_f77 (int n, double x[], int incx) {
+
+ double xmax;
+ int isamax,i,ix;
+
+ if (n < 1) {
+
+ isamax = 0;
+
+ } else if (n == 1) {
+
+ isamax = 1;
+
+ } else if (incx == 1) {
+
+ isamax = 1;
+ xmax = Math.abs(x[1]);
+
+ for (i = 2; i <= n; i++) {
+
+ if (Math.abs(x[i]) > xmax) {
+
+ isamax = i;
+ xmax = Math.abs(x[i]);
+
+ }
+
+ }
+
+ } else {
+
+ isamax = 1;
+ ix = 1;
+ xmax = Math.abs(x[ix]);
+ ix += incx;
+
+ for (i = 2; i <= n; i++) {
+
+ if (Math.abs(x[ix]) > xmax) {
+
+ isamax = i;
+ xmax = Math.abs(x[ix]);
+
+ }
+
+ ix += incx;
+
+ }
+
+ }
+
+ return isamax;
+
+ }
+
+
+
+/**
+*
+*<p>
+*This method finds the index of the element of a portion of a
+*column of a matrix that has the maximum absolute value.
+*It is a modification of the LINPACK function ISAMAX.
+*In the LINPACK listing ISAMAX is attributed to Jack Dongarra
+*with a date of March 11, 1978.
+*
+*Translated by Steve Verrill, March 10, 1998.
+*
+*@param n The number of elements to be checked
+*@param x[ ][ ] The matrix
+*@param incx The subscript increment for x[ ][ ]
+*@param begin The starting row
+*@param j The id of the column
+*
+*/
+
+
+ public static int colisamax_f77 (int n, double x[][], int incx,
+ int begin, int j) {
+
+ double xmax;
+ int isamax,i,ix;
+
+ if (n < 1) {
+
+ isamax = 0;
+
+ } else if (n == 1) {
+
+ isamax = 1;
+
+ } else if (incx == 1) {
+
+ isamax = 1;
+ ix = begin;
+ xmax = Math.abs(x[ix][j]);
+ ix++;
+
+ for (i = 2; i <= n; i++) {
+
+ if (Math.abs(x[ix][j]) > xmax) {
+
+ isamax = i;
+ xmax = Math.abs(x[ix][j]);
+
+ }
+
+ ix++;
+
+ }
+
+ } else {
+
+ isamax = 1;
+ ix = begin;
+ xmax = Math.abs(x[ix][j]);
+ ix += incx;
+
+ for (i = 2; i <= n; i++) {
+
+ if (Math.abs(x[ix][j]) > xmax) {
+
+ isamax = i;
+ xmax = Math.abs(x[ix][j]);
+
+ }
+
+ ix += incx;
+
+ }
+
+ }
+
+ return isamax;
+
+ }
+
+
+
+/**
+*
+*<p>
+*This method multiplies a constant times a vector and adds the product
+*to another vector --- dy[ ] = da*dx[ ] + dy[ ].
+*It uses unrolled loops for increments equal to
+*one. It is a translation from FORTRAN to Java of the LINPACK subroutine
+*DAXPY. In the LINPACK listing DAXPY is attributed to Jack Dongarra
+*with a date of 3/11/78.
+*
+*Translated by Steve Verrill, February 25, 1997.
+*
+*@param n The order of the vectors dy[ ] and dx[ ]
+*@param da The constant
+*@param dx[ ] This vector will be multiplied by the constant da
+*@param incx The subscript increment for dx[ ]
+*@param dy[ ] This vector will be added to da*dx[ ]
+*@param incy The subscript increment for dy[ ]
+*
+*/
+
+ public static void daxpy_f77 (int n, double da, double dx[], int incx, double
+ dy[], int incy) {
+
+ int i,ix,iy,m;
+
+ if (n <= 0) return;
+ if (da == 0.0) return;
+
+ if ((incx == 1) && (incy == 1)) {
+
+// both increments equal to 1
+
+ m = n%4;
+
+ for (i = 1; i <= m; i++) {
+
+ dy[i] += da*dx[i];
+
+ }
+
+ for (i = m+1; i <= n; i += 4) {
+
+ dy[i] += da*dx[i];
+ dy[i+1] += da*dx[i+1];
+ dy[i+2] += da*dx[i+2];
+ dy[i+3] += da*dx[i+3];
+
+ }
+
+ return;
+
+ } else {
+
+// at least one increment not equal to 1
+
+ ix = 1;
+ iy = 1;
+
+ if (incx < 0) ix = (-n+1)*incx + 1;
+ if (incy < 0) iy = (-n+1)*incy + 1;
+
+ for (i = 1; i <= n; i++) {
+
+ dy[iy] += da*dx[ix];
+
+ ix += incx;
+ iy += incy;
+
+ }
+
+ return;
+
+ }
+
+ }
+
+
+
+/**
+*
+*<p>
+*This method calculates the dot product of two vectors.
+*It uses unrolled loops for increments equal to
+*one. It is a translation from FORTRAN to Java of the LINPACK function
+*DDOT. In the LINPACK listing DDOT is attributed to Jack Dongarra
+*with a date of 3/11/78.
+*
+*Translated by Steve Verrill, February 25, 1997.
+*
+*@param n The order of the vectors dx[ ] and dy[ ]
+*@param dx[ ] vector
+*@param incx The subscript increment for dx[ ]
+*@param dy[ ] vector
+*@param incy The subscript increment for dy[ ]
+*
+*/
+
+ public static double ddot_f77 (int n, double dx[], int incx, double
+ dy[], int incy) {
+
+ double ddot;
+ int i,ix,iy,m;
+
+ ddot = 0.0;
+
+ if (n <= 0) return ddot;
+
+ if ((incx == 1) && (incy == 1)) {
+
+// both increments equal to 1
+
+ m = n%5;
+
+ for (i = 1; i <= m; i++) {
+
+ ddot += dx[i]*dy[i];
+
+ }
+
+ for (i = m+1; i <= n; i += 5) {
+
+ ddot += dx[i]*dy[i] + dx[i+1]*dy[i+1] + dx[i+2]*dy[i+2] +
+ dx[i+3]*dy[i+3] + dx[i+4]*dy[i+4];
+
+ }
+
+ return ddot;
+
+ } else {
+
+// at least one increment not equal to 1
+
+ ix = 1;
+ iy = 1;
+
+ if (incx < 0) ix = (-n+1)*incx + 1;
+ if (incy < 0) iy = (-n+1)*incy + 1;
+
+ for (i = 1; i <= n; i++) {
+
+ ddot += dx[ix]*dy[iy];
+
+ ix += incx;
+ iy += incy;
+
+ }
+
+ return ddot;
+
+ }
+
+ }
+
+
+/**
+*
+*<p>
+*This method scales a vector by a constant.
+*It uses unrolled loops for an increment equal to
+*one. It is a translation from FORTRAN to Java of the LINPACK subroutine
+*DSCAL. In the LINPACK listing DSCAL is attributed to Jack Dongarra
+*with a date of 3/11/78.
+*
+*Translated by Steve Verrill, February 25, 1997.
+*
+*@param n The order of the vector dx[ ]
+*@param da The constant
+*@param dx[ ] This vector will be multiplied by the constant da
+*@param incx The subscript increment for dx[ ]
+*
+*/
+
+ public static void dscal_f77 (int n, double da, double dx[], int incx) {
+
+ int i,m,nincx;
+
+ if (n <= 0 || incx <= 0) return;
+
+ if (incx == 1) {
+
+// increment equal to 1
+
+ m = n%5;
+
+ for (i = 1; i <= m; i++) {
+
+ dx[i] *= da;
+
+ }
+
+ for (i = m+1; i <= n; i += 5) {
+
+ dx[i] *= da;
+ dx[i+1] *= da;
+ dx[i+2] *= da;
+ dx[i+3] *= da;
+ dx[i+4] *= da;
+
+ }
+
+ return;
+
+ } else {
+
+// increment not equal to 1
+
+ nincx = n*incx;
+
+ for (i = 1; i <= nincx; i += incx) {
+
+ dx[i] *= da;
+
+ }
+
+ return;
+
+ }
+
+ }
+
+
+/**
+*
+*<p>
+*This method interchanges two vectors.
+*It uses unrolled loops for increments equal to
+*one. It is a translation from FORTRAN to Java of the LINPACK function
+*DSWAP. In the LINPACK listing DSWAP is attributed to Jack Dongarra
+*with a date of 3/11/78.
+*
+*Translated by Steve Verrill, February 25, 1997.
+*
+*@param n The order of the vectors dx[ ] and dy[ ]
+*@param dx[ ] vector
+*@param incx The subscript increment for dx[ ]
+*@param dy[ ] vector
+*@param incy The subscript increment for dy[ ]
+*
+*/
+
+ public static void dswap_f77 (int n, double dx[], int incx, double
+ dy[], int incy) {
+
+ double dtemp;
+ int i,ix,iy,m;
+
+ if (n <= 0) return;
+
+ if ((incx == 1) && (incy == 1)) {
+
+// both increments equal to 1
+
+ m = n%3;
+
+ for (i = 1; i <= m; i++) {
+
+ dtemp = dx[i];
+ dx[i] = dy[i];
+ dy[i] = dtemp;
+
+ }
+
+ for (i = m+1; i <= n; i += 3) {
+
+ dtemp = dx[i];
+ dx[i] = dy[i];
+ dy[i] = dtemp;
+
+ dtemp = dx[i+1];
+ dx[i+1] = dy[i+1];
+ dy[i+1] = dtemp;
+
+ dtemp = dx[i+2];
+ dx[i+2] = dy[i+2];
+ dy[i+2] = dtemp;
+
+ }
+
+ return;
+
+ } else {
+
+// at least one increment not equal to 1
+
+ ix = 1;
+ iy = 1;
+
+ if (incx < 0) ix = (-n+1)*incx + 1;
+ if (incy < 0) iy = (-n+1)*incy + 1;
+
+ for (i = 1; i <= n; i++) {
+
+ dtemp = dx[ix];
+ dx[ix] = dy[iy];
+ dy[iy] = dtemp;
+
+ ix += incx;
+ iy += incy;
+
+ }
+
+ return;
+
+ }
+
+ }
+
+
+/**
+*
+*<p>
+*This method calculates the Euclidean norm of the vector
+*stored in dx[ ] with storage increment incx.
+*It is a translation from FORTRAN to Java of the LINPACK function
+*DNRM2.
+*In the LINPACK listing DNRM2 is attributed to C.L. Lawson
+*with a date of January 8, 1978. The routine below is based
+*on a more recent DNRM2 version that is attributed in LAPACK
+*documentation to Sven Hammarling.
+*
+*Translated by Steve Verrill, February 25, 1997.
+*
+*@param n The order of the vector x[ ]
+*@param x[ ] vector
+*@param incx The subscript increment for x[ ]
+*
+*/
+
+
+ public static double dnrm2_f77 (int n, double x[], int incx) {
+
+ double absxi,norm,scale,ssq,fac;
+ int ix,limit;
+
+ if (n < 1 || incx < 1) {
+
+ norm = 0.0;
+
+ } else if (n == 1) {
+
+ norm = Math.abs(x[1]);
+
+ } else {
+
+ scale = 0.0;
+ ssq = 1.0;
+
+ limit = 1 + (n - 1)*incx;
+
+ for (ix = 1; ix <= limit; ix += incx) {
+
+ if (x[ix] != 0.0) {
+
+ absxi = Math.abs(x[ix]);
+
+ if (scale < absxi) {
+
+ fac = scale/absxi;
+ ssq = 1.0 + ssq*fac*fac;
+ scale = absxi;
+
+ } else {
+
+ fac = absxi/scale;
+ ssq += fac*fac;
+
+ }
+
+ }
+
+ }
+
+ norm = scale*Math.sqrt(ssq);
+
+ }
+
+ return norm;
+
+ }
+
+
+/**
+*
+*<p>
+*This method copies the vector dx[ ] to the vector dy[ ].
+*It uses unrolled loops for increments equal to
+*one. It is a translation from FORTRAN to Java of the LINPACK subroutine
+*DCOPY. In the LINPACK listing DCOPY is attributed to Jack Dongarra
+*with a date of 3/11/78.
+*
+*Translated by Steve Verrill, March 1, 1997.
+*
+*@param n The order of dx[ ] and dy[ ]
+*@param dx[ ] vector
+*@param incx The subscript increment for dx[ ]
+*@param dy[ ] vector
+*@param incy The subscript increment for dy[ ]
+*
+*/
+
+ public static void dcopy_f77 (int n, double dx[], int incx, double
+ dy[], int incy) {
+
+ double dtemp;
+ int i,ix,iy,m;
+
+ if (n <= 0) return;
+
+ if ((incx == 1) && (incy == 1)) {
+
+// both increments equal to 1
+
+ m = n%7;
+
+ for (i = 1; i <= m; i++) {
+
+ dy[i] = dx[i];
+
+ }
+
+ for (i = m+1; i <= n; i += 7) {
+
+ dy[i] = dx[i];
+ dy[i+1] = dx[i+1];
+ dy[i+2] = dx[i+2];
+ dy[i+3] = dx[i+3];
+ dy[i+4] = dx[i+4];
+ dy[i+5] = dx[i+5];
+ dy[i+6] = dx[i+6];
+
+ }
+
+ return;
+
+ } else {
+
+// at least one increment not equal to 1
+
+ ix = 1;
+ iy = 1;
+
+ if (incx < 0) ix = (-n+1)*incx + 1;
+ if (incy < 0) iy = (-n+1)*incy + 1;
+
+ for (i = 1; i <= n; i++) {
+
+ dy[iy] = dx[ix];
+
+ ix += incx;
+ iy += incy;
+
+ }
+
+ return;
+
+ }
+
+ }
+
+
+
+/**
+*
+*<p>
+*This method constructs a Givens plane rotation.
+*It is a translation from FORTRAN to Java of the LINPACK subroutine
+*DROTG. In the LINPACK listing DROTG is attributed to Jack Dongarra
+*with a date of 3/11/78.
+*
+*Translated by Steve Verrill, March 3, 1997.
+*
+*@param rotvec[] Contains the a,b,c,s values. In Java they
+* cannot be passed as primitive types (e.g., double
+* or int or ...) if we want their return values to
+* be altered.
+*
+*/
+
+ public static void drotg_f77 (double rotvec[]) {
+
+// construct a Givens plane rotation
+
+ double a,b,c,s,roe,scale,r,z,ra,rb;
+
+ a = rotvec[1];
+ b = rotvec[2];
+
+ roe = b;
+
+ if (Math.abs(a) > Math.abs(b)) roe = a;
+
+ scale = Math.abs(a) + Math.abs(b);
+
+ if (scale != 0.0) {
+
+ ra = a/scale;
+ rb = b/scale;
+ r = scale*Math.sqrt(ra*ra + rb*rb);
+ r = sign_f77(1.0,roe)*r;
+ c = a/r;
+ s = b/r;
+ z = 1.0;
+ if (Math.abs(a) > Math.abs(b)) z = s;
+ if ((Math.abs(b) >= Math.abs(a)) && (c != 0.0)) z = 1.0/c;
+
+ } else {
+
+ c = 1.0;
+ s = 0.0;
+ r = 0.0;
+ z = 0.0;
+
+ }
+
+ a = r;
+ b = z;
+
+ rotvec[1] = a;
+ rotvec[2] = b;
+ rotvec[3] = c;
+ rotvec[4] = s;
+
+ return;
+
+ }
+
+
+
+/**
+*
+*<p>
+*This method multiplies a constant times a portion of a column
+*of a matrix and adds the product to the corresponding portion
+*of another column of the matrix --- a portion of col2 is
+*replaced by the corresponding portion of a*col1 + col2.
+*It uses unrolled loops.
+*It is a modification of the LINPACK subroutine
+*DAXPY. In the LINPACK listing DAXPY is attributed to Jack Dongarra
+*with a date of 3/11/78.
+*
+*Translated and modified by Steve Verrill, February 26, 1997.
+*
+*@param nrow The number of rows involved
+*@param a The constant
+*@param x[ ][ ] The matrix
+*@param begin The starting row
+*@param j1 The id of col1
+*@param j2 The id of col2
+*
+*/
+
+ public static void colaxpy_f77 (int nrow, double a, double x[][], int begin,
+ int j1, int j2) {
+
+ int i,m,mpbegin,end;
+
+ if (nrow <= 0) return;
+ if (a == 0.0) return;
+
+ m = nrow%4;
+ mpbegin = m + begin;
+ end = begin + nrow - 1;
+
+ for (i = begin; i < mpbegin; i++) {
+
+ x[i][j2] += a*x[i][j1];
+
+ }
+
+ for (i = mpbegin; i <= end; i += 4) {
+
+ x[i][j2] += a*x[i][j1];
+ x[i+1][j2] += a*x[i+1][j1];
+ x[i+2][j2] += a*x[i+2][j1];
+ x[i+3][j2] += a*x[i+3][j1];
+
+ }
+
+ return;
+
+ }
+
+
+/**
+*
+*<p>
+*This method multiplies a constant times a portion of a column
+*of a matrix x[ ][ ] and adds the product to the corresponding portion
+*of a vector y[ ] --- a portion of y[ ] is replaced by the corresponding
+*portion of ax[ ][j] + y[ ].
+*It uses unrolled loops.
+*It is a modification of the LINPACK subroutine
+*DAXPY. In the LINPACK listing DAXPY is attributed to Jack Dongarra
+*with a date of 3/11/78.
+*
+*Translated and modified by Steve Verrill, March 1, 1997.
+*
+*@param nrow The number of rows involved
+*@param a The constant
+*@param x[ ][ ] The matrix
+*@param y[ ] The vector
+*@param begin The starting row
+*@param j The id of the column of the x matrix
+*
+*/
+
+ public static void colvaxpy_f77 (int nrow, double a, double x[][], double y[],
+ int begin, int j) {
+
+ int i,m,mpbegin,end;
+
+ if (nrow <= 0) return;
+ if (a == 0.0) return;
+
+ m = nrow%4;
+ mpbegin = m + begin;
+ end = begin + nrow - 1;
+
+ for (i = begin; i < mpbegin; i++) {
+
+ y[i] += a*x[i][j];
+
+ }
+
+ for (i = mpbegin; i <= end; i += 4) {
+
+ y[i] += a*x[i][j];
+ y[i+1] += a*x[i+1][j];
+ y[i+2] += a*x[i+2][j];
+ y[i+3] += a*x[i+3][j];
+
+ }
+
+ return;
+
+ }
+
+
+/**
+*
+*<p>
+*This method multiplies a constant times a portion of a vector y[ ]
+*and adds the product to the corresponding portion
+*of a column of a matrix x[ ][ ] --- a portion of column j of x[ ][ ]
+*is replaced by the corresponding
+*portion of ay[ ] + x[ ][j].
+*It uses unrolled loops.
+*It is a modification of the LINPACK subroutine
+*DAXPY. In the LINPACK listing DAXPY is attributed to Jack Dongarra
+*with a date of 3/11/78.
+*
+*Translated and modified by Steve Verrill, March 3, 1997.
+*
+*@param nrow The number of rows involved
+*@param a The constant
+*@param y[ ] The vector
+*@param x[ ][ ] The matrix
+*@param begin The starting row
+*@param j The id of the column of the x matrix
+*
+*/
+
+ public static void colvraxpy_f77 (int nrow, double a, double y[], double x[][],
+ int begin, int j) {
+
+ int i,m,mpbegin,end;
+
+ if (nrow <= 0) return;
+ if (a == 0.0) return;
+
+ m = nrow%4;
+ mpbegin = m + begin;
+ end = begin + nrow - 1;
+
+ for (i = begin; i < mpbegin; i++) {
+
+ x[i][j] += a*y[i];
+
+ }
+
+ for (i = mpbegin; i <= end; i += 4) {
+
+ x[i][j] += a*y[i];
+ x[i+1][j] += a*y[i+1];
+ x[i+2][j] += a*y[i+2];
+ x[i+3][j] += a*y[i+3];
+
+ }
+
+ return;
+
+ }
+
+
+/**
+*
+*<p>
+*This method calculates the dot product of portions of two
+*columns of a matrix. It uses unrolled loops.
+*It is a modification of the LINPACK function
+*DDOT. In the LINPACK listing DDOT is attributed to Jack Dongarra
+*with a date of 3/11/78.
+*
+*Translated and modified by Steve Verrill, February 27, 1997.
+*
+*@param nrow The number of rows involved
+*@param x[ ][ ] The matrix
+*@param begin The starting row
+*@param j1 The id of the first column
+*@param j2 The id of the second column
+*
+*/
+
+ public static double coldot_f77 (int nrow, double x[][], int begin,
+ int j1, int j2) {
+
+ double coldot;
+ int i,m,mpbegin,end;
+
+ coldot = 0.0;
+
+ if (nrow <= 0) return coldot;
+
+ m = nrow%5;
+ mpbegin = m + begin;
+ end = begin + nrow - 1;
+
+ for (i = begin; i < mpbegin; i++) {
+
+ coldot += x[i][j1]*x[i][j2];
+
+ }
+
+ for (i = mpbegin; i <= end; i += 5) {
+
+ coldot += x[i][j1]*x[i][j2] +
+ x[i+1][j1]*x[i+1][j2] +
+ x[i+2][j1]*x[i+2][j2] +
+ x[i+3][j1]*x[i+3][j2] +
+ x[i+4][j1]*x[i+4][j2];
+
+ }
+
+ return coldot;
+
+ }
+
+
+/**
+*
+*<p>
+*This method calculates the dot product of a portion of a
+*column of a matrix and the corresponding portion of a
+*vector. It uses unrolled loops.
+*It is a modification of the LINPACK function
+*DDOT. In the LINPACK listing DDOT is attributed to Jack Dongarra
+*with a date of 3/11/78.
+*
+*Translated and modified by Steve Verrill, March 1, 1997.
+*
+*@param nrow The number of rows involved
+*@param x[ ][ ] The matrix
+*@param y[ ] The vector
+*@param begin The starting row
+*@param j The id of the column of the matrix
+*
+*/
+
+ public static double colvdot_f77 (int nrow, double x[][], double y[],
+ int begin, int j) {
+
+ double colvdot;
+ int i,m,mpbegin,end;
+
+ colvdot = 0.0;
+
+ if (nrow <= 0) return colvdot;
+
+ m = nrow%5;
+ mpbegin = m + begin;
+ end = begin + nrow - 1;
+
+ for (i = begin; i < mpbegin; i++) {
+
+ colvdot += x[i][j]*y[i];
+
+ }
+
+ for (i = mpbegin; i <= end; i += 5) {
+
+ colvdot += x[i][j]*y[i] +
+ x[i+1][j]*y[i+1] +
+ x[i+2][j]*y[i+2] +
+ x[i+3][j]*y[i+3] +
+ x[i+4][j]*y[i+4];
+
+ }
+
+ return colvdot;
+
+ }
+
+
+/**
+*
+*<p>
+*This method scales a portion of a column of a matrix by a constant.
+*It uses unrolled loops.
+*It is a modification of the LINPACK subroutine
+*DSCAL. In the LINPACK listing DSCAL is attributed to Jack Dongarra
+*with a date of 3/11/78.
+*
+*Translated and modified by Steve Verrill, February 27, 1997.
+*
+*@param nrow The number of rows involved
+*@param a The constant
+*@param x[ ][ ] The matrix
+*@param begin The starting row
+*@param j The id of the column
+*
+*/
+
+ public static void colscal_f77 (int nrow, double a, double x[][], int begin, int j) {
+
+ int i,m,mpbegin,end;
+
+ if (nrow <= 0) return;
+
+ m = nrow%5;
+ mpbegin = m + begin;
+ end = begin + nrow - 1;
+
+ for (i = begin; i < mpbegin; i++) {
+
+ x[i][j] *= a;
+
+ }
+
+ for (i = mpbegin; i <= end; i += 5) {
+
+ x[i][j] *= a;
+ x[i+1][j] *= a;
+ x[i+2][j] *= a;
+ x[i+3][j] *= a;
+ x[i+4][j] *= a;
+
+ }
+
+ return;
+
+ }
+
+
+/**
+*
+*<p>
+*This method scales a portion of a vector by a constant.
+*It uses unrolled loops.
+*It is a modification of the LINPACK subroutine
+*DSCAL. In the LINPACK listing DSCAL is attributed to Jack Dongarra
+*with a date of 3/11/78.
+*
+*Translated and modified by Steve Verrill, March 3, 1997.
+*
+*@param nrow The number of rows involved
+*@param a The constant
+*@param x[ ] The vector
+*@param begin The starting row
+*
+*/
+
+ public static void dscalp_f77 (int nrow, double a, double x[], int begin) {
+
+ int i,m,mpbegin,end;
+
+ if (nrow <= 0) return;
+
+ m = nrow%5;
+ mpbegin = m + begin;
+ end = begin + nrow - 1;
+
+ for (i = begin; i < mpbegin; i++) {
+
+ x[i] *= a;
+
+ }
+
+ for (i = mpbegin; i <= end; i += 5) {
+
+ x[i] *= a;
+ x[i+1] *= a;
+ x[i+2] *= a;
+ x[i+3] *= a;
+ x[i+4] *= a;
+
+ }
+
+ return;
+
+ }
+
+
+
+/**
+*
+*<p>
+*This method interchanges two columns of a matrix.
+*It uses unrolled loops.
+*It is a modification of the LINPACK function
+*DSWAP. In the LINPACK listing DSWAP is attributed to Jack Dongarra
+*with a date of 3/11/78.
+*
+*Translated and modified by Steve Verrill, February 26, 1997.
+*
+*@param n The number of rows of the matrix
+*@param x[ ][ ] The matrix
+*@param j1 The id of the first column
+*@param j2 The id of the second column
+*
+*/
+
+ public static void colswap_f77 (int n, double x[][], int j1, int j2) {
+
+ double temp;
+ int i,m;
+
+ if (n <= 0) return;
+
+ m = n%3;
+
+ for (i = 1; i <= m; i++) {
+
+ temp = x[i][j1];
+ x[i][j1] = x[i][j2];
+ x[i][j2] = temp;
+
+ }
+
+ for (i = m+1; i <= n; i += 3) {
+
+ temp = x[i][j1];
+ x[i][j1] = x[i][j2];
+ x[i][j2] = temp;
+
+ temp = x[i+1][j1];
+ x[i+1][j1] = x[i+1][j2];
+ x[i+1][j2] = temp;
+
+ temp = x[i+2][j1];
+ x[i+2][j1] = x[i+2][j2];
+ x[i+2][j2] = temp;
+
+ }
+
+ return;
+
+ }
+
+
+
+/**
+*
+*<p>
+*This method calculates the Euclidean norm of a portion of a
+*column of a matrix.
+*It is a modification of the LINPACK function
+*dnrm2.
+*In the LINPACK listing dnrm2 is attributed to C.L. Lawson
+*with a date of January 8, 1978. The routine below is based
+*on a more recent dnrm2 version that is attributed in LAPACK
+*documentation to Sven Hammarling.
+*
+*Translated and modified by Steve Verrill, February 26, 1997.
+*
+*@param nrow The number of rows involved
+*@param x[ ][ ] The matrix
+*@param begin The starting row
+*@param j The id of the column
+*
+*/
+
+
+ public static double colnrm2_f77 (int nrow, double x[][], int begin, int j) {
+
+ double absxij,norm,scale,ssq,fac;
+ int i,end;
+
+ if (nrow < 1) {
+
+ norm = 0.0;
+
+ } else if (nrow == 1) {
+
+ norm = Math.abs(x[begin][j]);
+
+ } else {
+
+ scale = 0.0;
+ ssq = 1.0;
+
+ end = begin + nrow - 1;
+
+ for (i = begin; i <= end; i++) {
+
+ if (x[i][j] != 0.0) {
+
+ absxij = Math.abs(x[i][j]);
+
+ if (scale < absxij) {
+
+ fac = scale/absxij;
+ ssq = 1.0 + ssq*fac*fac;
+ scale = absxij;
+
+ } else {
+
+ fac = absxij/scale;
+ ssq += fac*fac;
+
+ }
+
+ }
+
+ }
+
+ norm = scale*Math.sqrt(ssq);
+
+ }
+
+ return norm;
+
+ }
+
+
+/**
+*
+*<p>
+*This method calculates the Euclidean norm of a portion
+*of a vector x[ ].
+*It is a modification of the LINPACK function
+*dnrm2.
+*In the LINPACK listing dnrm2 is attributed to C.L. Lawson
+*with a date of January 8, 1978. The routine below is based
+*on a more recent dnrm2 version that is attributed in LAPACK
+*documentation to Sven Hammarling.
+*
+*Translated by Steve Verrill, March 3, 1997.
+*
+*@param nrow The number of rows involved
+*@param x[ ] vector
+*@param begin The starting row
+*
+*/
+
+
+ public static double dnrm2p_f77 (int nrow, double x[], int begin) {
+
+ double absxi,norm,scale,ssq,fac;
+ int i,end;
+
+ if (nrow < 1) {
+
+ norm = 0.0;
+
+ } else if (nrow == 1) {
+
+ norm = Math.abs(x[begin]);
+
+ } else {
+
+ scale = 0.0;
+ ssq = 1.0;
+
+ end = begin + nrow - 1;
+
+ for (i = begin; i <= end; i++) {
+
+ if (x[i] != 0.0) {
+
+ absxi = Math.abs(x[i]);
+
+ if (scale < absxi) {
+
+ fac = scale/absxi;
+ ssq = 1.0 + ssq*fac*fac;
+ scale = absxi;
+
+ } else {
+
+ fac = absxi/scale;
+ ssq += fac*fac;
+
+ }
+
+ }
+
+ }
+
+ norm = scale*Math.sqrt(ssq);
+
+ }
+
+ return norm;
+
+ }
+
+
+
+/**
+*
+*<p>
+*This method copies a portion of vector x[ ] to the corresponding
+*portion of vector y[ ].
+*It uses unrolled loops.
+*It is a modification of the LINPACK subroutine
+*dcopy. In the LINPACK listing dcopy is attributed to Jack Dongarra
+*with a date of 3/11/78.
+*
+*Translated by Steve Verrill, March 1, 1997.
+*
+*@param nrow The number of rows involved
+*@param x[ ] vector
+*@param y[ ] vector
+*@param begin The starting row
+*
+*/
+
+ public static void dcopyp_f77 (int nrow, double x[], double y[], int begin) {
+
+ double temp;
+ int i,m,mpbegin,end;
+
+ m = nrow%7;
+ mpbegin = m + begin;
+ end = begin + nrow - 1;
+
+ for (i = begin; i < mpbegin; i++) {
+
+ y[i] = x[i];
+
+ }
+
+ for (i = mpbegin; i <= end; i += 7) {
+
+ y[i] = x[i];
+ y[i+1] = x[i+1];
+ y[i+2] = x[i+2];
+ y[i+3] = x[i+3];
+ y[i+4] = x[i+4];
+ y[i+5] = x[i+5];
+ y[i+6] = x[i+6];
+
+ }
+
+ return;
+
+ }
+
+
+/**
+*
+*<p>
+*This method "applies a plane rotation."
+*It is a modification of the LINPACK function
+*DROT. In the LINPACK listing DROT is attributed to Jack Dongarra
+*with a date of 3/11/78.
+*
+*Translated and modified by Steve Verrill, March 4, 1997.
+*
+*@param n The order of x[ ][ ]
+*@param x[ ][ ] The matrix
+*@param j1 The id of the first column
+*@param j2 The id of the second column
+*@param c "cos"
+*@param s "sin"
+*
+*/
+
+ public static void colrot_f77 (int n, double x[][],
+ int j1, int j2, double c, double s) {
+
+ double temp;
+ int i;
+
+
+ if (n <= 0) return;
+
+ for (i = 1; i <= n; i++) {
+
+ temp = c*x[i][j1] + s*x[i][j2];
+ x[i][j2] = c*x[i][j2] - s*x[i][j1];
+ x[i][j1] = temp;
+
+ }
+
+ return;
+
+ }
+
+
+
+/**
+*
+*<p>
+*This method implements the FORTRAN sign (not sin) function.
+*See the code for details.
+*
+*Created by Steve Verrill, March 1997.
+*
+*@param a a
+*@param b b
+*
+*/
+
+ public static double sign_f77 (double a, double b) {
+
+ if (b < 0.0) {
+
+ return -Math.abs(a);
+
+ } else {
+
+ return Math.abs(a);
+
+ }
+
+ }
+
+
+
+/**
+*
+*<p>
+*This method multiplies an n x p matrix by a p x r matrix.
+*
+*Created by Steve Verrill, March 1997.
+*
+*@param a[ ][ ] The left matrix
+*@param b[ ][ ] The right matrix
+*@param c[ ][ ] The product
+*@param n n
+*@param p p
+*@param r r
+*
+*/
+
+ public static void matmat_f77 (double a[][], double b[][], double c[][],
+ int n, int p, int r) {
+
+ double vdot;
+ int i,j,k,m;
+
+ for (i = 1; i <= n; i++) {
+
+ for (j = 1; j <= r; j++) {
+
+ vdot = 0.0;
+
+ m = p%5;
+
+ for (k = 1; k <= m; k++) {
+
+ vdot += a[i][k]*b[k][j];
+
+ }
+
+ for (k = m+1; k <= p; k += 5) {
+
+ vdot += a[i][k]*b[k][j] +
+ a[i][k+1]*b[k+1][j] +
+ a[i][k+2]*b[k+2][j] +
+ a[i][k+3]*b[k+3][j] +
+ a[i][k+4]*b[k+4][j];
+
+ }
+
+ c[i][j] = vdot;
+
+ }
+
+ }
+
+ }
+
+
+
+/**
+*
+*<p>
+*This method obtains the transpose of an n x p matrix.
+*
+*Created by Steve Verrill, March 1997.
+*
+*@param a[ ][ ] matrix
+*@param at[ ][ ] transpose of the matrix
+*@param n n
+*@param p p
+*
+*/
+
+ public static void mattran_f77 (double a[][], double at[][],
+ int n, int p) {
+
+ int i,j;
+
+ for (i = 1; i <= n; i++) {
+
+ for (j = 1; j <= p; j++) {
+
+ at[j][i] = a[i][j];
+
+ }
+
+ }
+
+ }
+
+
+
+/**
+*
+*<p>
+*This method multiplies an n x p matrix by a p x 1 vector.
+*
+*Created by Steve Verrill, March 1997.
+*
+*@param a[ ][ ] The matrix
+*@param b[ ] The vector
+*@param c[ ] The product
+*@param n n
+*@param p p
+*
+*/
+
+ public static void matvec_f77 (double a[][], double b[], double c[],
+ int n, int p) {
+
+ double vdot;
+ int i,j,m;
+
+ for (i = 1; i <= n; i++) {
+
+ vdot = 0.0;
+
+ m = p%5;
+
+ for (j = 1; j <= m; j++) {
+
+ vdot += a[i][j]*b[j];
+
+ }
+
+ for (j = m+1; j <= p; j += 5) {
+
+ vdot += a[i][j]*b[j] +
+ a[i][j+1]*b[j+1] +
+ a[i][j+2]*b[j+2] +
+ a[i][j+3]*b[j+3] +
+ a[i][j+4]*b[j+4];
+
+ }
+
+ c[i] = vdot;
+
+ }
+
+ }
+
+
+
+
+}
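
As a quick illustration of the 1-based calling convention described in the
Blas_f77 class comment (arrays allocated one element larger, element 0 left
unused), here is a minimal usage sketch. It is not part of the upstream
sources; the class name and data are only illustrative.

    import linear_algebra.Blas_f77;

    public class BlasUsageSketch {

        public static void main(String args[]) {

            int n = 3;

            // element 0 is unused; elements 1..n carry the data
            double x[] = {0.0, 1.0, 2.0, 3.0};
            double y[] = {0.0, 4.0, 5.0, 6.0};

            // y = 2*x + y
            Blas_f77.daxpy_f77(n, 2.0, x, 1, y, 1);

            // dot product and Euclidean norm over elements 1..n
            double dot  = Blas_f77.ddot_f77(n, x, 1, y, 1);
            double norm = Blas_f77.dnrm2_f77(n, x, 1);

            System.out.println("dot = " + dot + ", norm = " + norm);
        }
    }
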
diff --git a/Fmin.java b/Fmin.java
new file mode 100644
index 0000000..03d14df
--- /dev/null
+++ b/Fmin.java
@@ -0,0 +1,351 @@
+/*
+ Fmin.java copyright claim:
+
+ This software is based on the public domain fmin routine.
+ The FORTRAN version can be found at
+
+ www.netlib.org
+
+ This software was translated from the FORTRAN version
+ to Java by a US government employee on official time.
+ Thus this software is also in the public domain.
+
+
+ The translator's mail address is:
+
+ Steve Verrill
+ USDA Forest Products Laboratory
+ 1 Gifford Pinchot Drive
+ Madison, Wisconsin
+ 53705
+
+
+ The translator's e-mail address is:
+
+ steve at ws13.fpl.fs.fed.us
+
+
+***********************************************************************
+
+DISCLAIMER OF WARRANTIES:
+
+THIS SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND.
+THE TRANSLATOR DOES NOT WARRANT, GUARANTEE OR MAKE ANY REPRESENTATIONS
+REGARDING THE SOFTWARE OR DOCUMENTATION IN TERMS OF THEIR CORRECTNESS,
+RELIABILITY, CURRENTNESS, OR OTHERWISE. THE ENTIRE RISK AS TO
+THE RESULTS AND PERFORMANCE OF THE SOFTWARE IS ASSUMED BY YOU.
+IN NO CASE WILL ANY PARTY INVOLVED WITH THE CREATION OR DISTRIBUTION
+OF THE SOFTWARE BE LIABLE FOR ANY DAMAGE THAT MAY RESULT FROM THE USE
+OF THIS SOFTWARE.
+
+Sorry about that.
+
+***********************************************************************
+
+
+History:
+
+Date Translator Changes
+
+3/24/98 Steve Verrill Translated
+
+*/
+
+
+package optimization;
+
+import optimization.*;
+
+
+/**
+*
+*<p>
+*This class was translated by a statistician from the FORTRAN
+*version of fmin. It is NOT an official translation. When
+*public domain Java optimization routines become available
+*from professional numerical analysts, then <b>THE CODE PRODUCED
+*BY THE NUMERICAL ANALYSTS SHOULD BE USED</b>.
+*
+*<p>
+*Meanwhile, if you have suggestions for improving this
+*code, please contact Steve Verrill at steve at ws13.fpl.fs.fed.us.
+*
+*@author Steve Verrill
+*@version .5 --- March 24, 1998
+*
+*/
+
+public class Fmin extends Object {
+
+/**
+*
+*<p>
+*This method performs a 1-dimensional minimization.
+*It implements Brent's method which combines a golden-section
+*search and parabolic interpolation. The introductory comments from
+*the FORTRAN version are provided below.
+*
+*This method is a translation from FORTRAN to Java of the Netlib
+*function fmin. In the Netlib listing no author is given.
+*
+*Translated by Steve Verrill, March 24, 1998.
+*
+*@param a Left endpoint of initial interval
+*@param b Right endpoint of initial interval
+*@param minclass A class that defines a method, f_to_minimize,
+* to minimize. The class must implement
+* the Fmin_methods interface (see the definition
+* in Fmin_methods.java). See FminTest.java
+* for an example of such a class.
+* f_to_minimize must have one
+* double valued argument.
+*@param tol Desired length of the interval in which
+* the minimum will be determined to lie
+* (This should be greater than, roughly, 3.0e-8.)
+*
+*/
+
+
+ public static double fmin (double a, double b, Fmin_methods minclass,
+ double tol) {
+
+/*
+
+Here is a copy of the Netlib documentation:
+
+c
+c An approximation x to the point where f attains a minimum on
+c the interval (ax,bx) is determined.
+c
+c input..
+c
+c ax left endpoint of initial interval
+c bx right endpoint of initial interval
+c f function subprogram which evaluates f(x) for any x
+c in the interval (ax,bx)
+c tol desired length of the interval of uncertainty of the final
+c result (.ge.0.)
+c
+c output..
+c
+c fmin abcissa approximating the point where f attains a
+c minimum
+c
+c The method used is a combination of golden section search and
+c successive parabolic interpolation. Convergence is never much slower
+c than that for a Fibonacci search. If f has a continuous second
+c derivative which is positive at the minimum (which is not at ax or
+c bx), then convergence is superlinear, and usually of the order of
+c about 1.324.
+c The function f is never evaluated at two points closer together
+c than eps*abs(fmin)+(tol/3), where eps is approximately the square
+c root of the relative machine precision. If f is a unimodal
+c function and the computed values of f are always unimodal when
+c separated by at least eps*abs(x)+(tol/3), then fmin approximates
+c the abcissa of the global minimum of f on the interval (ax,bx) with
+c an error less than 3*eps*abs(fmin)+tol. If f is not unimodal,
+c then fmin may approximate a local, but perhaps non-global, minimum to
+c the same accuracy.
+c This function subprogram is a slightly modified version of the
+c Algol 60 procedure localmin given in Richard Brent, Algorithms For
+c Minimization Without Derivatives, Prentice-Hall, Inc. (1973).
+c
+
+*/
+
+ double c,d,e,eps,xm,p,q,r,tol1,t2,
+ u,v,w,fu,fv,fw,fx,x,tol3;
+
+ c = .5*(3.0 - Math.sqrt(5.0));
+ d = 0.0;
+
+// 1.1102e-16 is machine precision; 1.2e-16 is used as a slightly larger value
+
+ eps = 1.2e-16;
+ tol1 = eps + 1.0;
+ eps = Math.sqrt(eps);
+
+ v = a + c*(b-a);
+ w = v;
+ x = v;
+ e = 0.0;
+ fx = minclass.f_to_minimize(x);
+ fv = fx;
+ fw = fx;
+ tol3 = tol/3.0;
+
+ xm = .5*(a + b);
+ tol1 = eps*Math.abs(x) + tol3;
+ t2 = 2.0*tol1;
+
+// main loop
+
+ while (Math.abs(x-xm) > (t2 - .5*(b-a))) {
+
+ p = q = r = 0.0;
+
+ if (Math.abs(e) > tol1) {
+
+// fit the parabola
+
+ r = (x-w)*(fx-fv);
+ q = (x-v)*(fx-fw);
+ p = (x-v)*q - (x-w)*r;
+ q = 2.0*(q-r);
+
+ if (q > 0.0) {
+
+ p = -p;
+
+ } else {
+
+ q = -q;
+
+ }
+
+ r = e;
+ e = d;
+
+// brace below corresponds to statement 50
+ }
+
+ if ((Math.abs(p) < Math.abs(.5*q*r)) &&
+ (p > q*(a-x)) &&
+ (p < q*(b-x))) {
+
+// a parabolic interpolation step
+
+ d = p/q;
+ u = x+d;
+
+// f must not be evaluated too close to a or b
+
+ if (((u-a) < t2) || ((b-u) < t2)) {
+
+ d = tol1;
+ if (x >= xm) d = -d;
+
+ }
+
+// brace below corresponds to statement 60
+ } else {
+
+// a golden-section step
+
+ if (x < xm) {
+
+ e = b-x;
+
+ } else {
+
+ e = a-x;
+
+ }
+
+ d = c*e;
+
+ }
+
+// f must not be evaluated too close to x
+
+ if (Math.abs(d) >= tol1) {
+
+ u = x+d;
+
+ } else {
+
+ if (d > 0.0) {
+
+ u = x + tol1;
+
+ } else {
+
+ u = x - tol1;
+
+ }
+
+ }
+
+ fu = minclass.f_to_minimize(u);
+
+// Update a, b, v, w, and x
+
+ if (fx <= fu) {
+
+ if (u < x) {
+
+ a = u;
+
+ } else {
+
+ b = u;
+
+ }
+
+// brace below corresponds to statement 140
+ }
+
+ if (fu <= fx) {
+
+ if (u < x) {
+
+ b = x;
+
+ } else {
+
+ a = x;
+
+ }
+
+ v = w;
+ fv = fw;
+ w = x;
+ fw = fx;
+ x = u;
+ fx = fu;
+
+ xm = .5*(a + b);
+ tol1 = eps*Math.abs(x) + tol3;
+ t2 = 2.0*tol1;
+
+// brace below corresponds to statement 170
+ } else {
+
+ if ((fu <= fw) || (w == x)) {
+
+ v = w;
+ fv = fw;
+ w = u;
+ fw = fu;
+
+ xm = .5*(a + b);
+ tol1 = eps*Math.abs(x) + tol3;
+ t2 = 2.0*tol1;
+
+ } else if ((fu > fv) && (v != x) && (v != w)) {
+
+ xm = .5*(a + b);
+ tol1 = eps*Math.abs(x) + tol3;
+ t2 = 2.0*tol1;
+
+ } else {
+
+ v = u;
+ fv = fu;
+
+ xm = .5*(a + b);
+ tol1 = eps*Math.abs(x) + tol3;
+ t2 = 2.0*tol1;
+
+ }
+
+ }
+
+// brace below corresponds to statement 190
+ }
+
+ return x;
+
+ }
+
+}
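
The Fmin class above expects the function to be wrapped in a class that
implements the Fmin_methods interface (defined in the next file of this
diff). A minimal sketch, not part of the upstream sources, minimizing
f(x) = (x - 2)^2 on [0, 5]:

    import optimization.*;

    public class FminUsageSketch implements Fmin_methods {

        // function to be minimized
        public double f_to_minimize(double x) {
            return (x - 2.0)*(x - 2.0);
        }

        public static void main(String args[]) {
            FminUsageSketch problem = new FminUsageSketch();
            // tol should be greater than roughly 3.0e-8 (see the javadoc above)
            double xmin = Fmin.fmin(0.0, 5.0, problem, 1.0e-6);
            System.out.println("minimum located near x = " + xmin);  // ~2.0
        }
    }
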
diff --git a/Fmin_methods.java b/Fmin_methods.java
new file mode 100644
index 0000000..3f579b5
--- /dev/null
+++ b/Fmin_methods.java
@@ -0,0 +1,7 @@
+package optimization;
+
+public interface Fmin_methods {
+
+ double f_to_minimize(double x);
+
+}
diff --git a/Fzero.java b/Fzero.java
new file mode 100644
index 0000000..b49a300
--- /dev/null
+++ b/Fzero.java
@@ -0,0 +1,498 @@
+/*
+ Fzero.java copyright claim:
+
+ This software is based on the public domain fzero routine.
+ The FORTRAN version can be found at
+
+ www.netlib.org
+
+ This software was translated from the FORTRAN version
+ to Java by a US government employee on official time.
+ Thus this software is also in the public domain.
+
+
+ The translator's mail address is:
+
+ Steve Verrill
+ USDA Forest Products Laboratory
+ 1 Gifford Pinchot Drive
+ Madison, Wisconsin
+ 53705
+
+
+ The translator's e-mail address is:
+
+ steve at ws13.fpl.fs.fed.us
+
+
+***********************************************************************
+
+DISCLAIMER OF WARRANTIES:
+
+THIS SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND.
+THE TRANSLATOR DOES NOT WARRANT, GUARANTEE OR MAKE ANY REPRESENTATIONS
+REGARDING THE SOFTWARE OR DOCUMENTATION IN TERMS OF THEIR CORRECTNESS,
+RELIABILITY, CURRENTNESS, OR OTHERWISE. THE ENTIRE RISK AS TO
+THE RESULTS AND PERFORMANCE OF THE SOFTWARE IS ASSUMED BY YOU.
+IN NO CASE WILL ANY PARTY INVOLVED WITH THE CREATION OR DISTRIBUTION
+OF THE SOFTWARE BE LIABLE FOR ANY DAMAGE THAT MAY RESULT FROM THE USE
+OF THIS SOFTWARE.
+
+Sorry about that.
+
+***********************************************************************
+
+
+History:
+
+Date Translator Changes
+
+4/17/01 Steve Verrill Translated
+
+*/
+
+
+package optimization;
+
+import optimization.*;
+import linear_algebra.Blas_f77;
+
+/**
+*
+*<p>
+*This class was translated by a statistician from the FORTRAN
+*version of dfzero. It is NOT an official translation. When
+*public domain Java optimization routines become available
+*from professional numerical analysts, then <b>THE CODE PRODUCED
+*BY THE NUMERICAL ANALYSTS SHOULD BE USED</b>.
+*
+*<p>
+*Meanwhile, if you have suggestions for improving this
+*code, please contact Steve Verrill at steve at ws13.fpl.fs.fed.us.
+*
+*@author Steve Verrill
+*@version .5 --- April 17, 2001
+*
+*/
+
+public class Fzero extends Object {
+
+/**
+*
+*<p>
+*This method searches for a zero of a function f(x) between
+*the given values b and c until the width of the interval
+*(b,c) has collapsed to within a tolerance specified by
+*the stopping criterion, Math.abs(b-c) <= 2.0*(rw*Math.abs(b)+ae).
+*The method used is an efficient combination of bisection
+*and the secant rule.
+*The introductory comments from
+*the FORTRAN version are provided below.
+*
+*This method is a translation from FORTRAN to Java of the Netlib
+*(actually it is in the SLATEC library which is available at Netlib)
+*function dfzero. In the FORTRAN code, L.F. Shampine and H.A. Watts
+*are listed as the authors.
+*
+*Translated by Steve Verrill, April 17, 2001.
+*
+*@param zeroclass A class that defines a method, f_to_zero,
+* that returns a value that is to be zeroed.
+* The class must implement
+* the Fzero_methods interface (see the definition
+* in Fzero_methods.java). See FzeroTest.java
+* for an example of such a class.
+* f_to_zero must have one
+* double valued argument.
+*@param b[1] One endpoint of the initial interval. The value returned
+* for b[1] is usually the better approximation to a
+* zero of f_to_zero.
+*@param c[1] The other endpoint of the initial interval.
+*@param r Initial guess of a zero. A (better) guess of a zero
+* of f_to_zero which could help in
+* speeding up convergence. If f_to_zero(b) and f_to_zero(r) have
+* opposite signs, a root will be found in the interval
+* (b,r); if not, but f_to_zero(r) and f_to_zero(c) have opposite
+* signs, a root will be found in the interval (r,c);
+* otherwise, the interval (b,c) will be searched for a
+* possible root. When no better guess is known, it is
+* recommended that r be set to b or c; because if r is
+* not interior to the interval (b,c), it will be ignored.
+*
+*@param re Relative error used for rw in the stopping criterion.
+* If the requested re is less than machine precision,
+* then rw is set to approximately machine precision.
+*
+*@param ae Absolute error used in the stopping criterion. If the
+* given interval (b,c) contains the origin, then a
+* nonzero value should be chosen for AE.
+*
+*@param iflag[1] A status code. User must check iflag[1] after each call.
+* Control returns to the user from dfzero in all cases.
+*
+* 1: b is within the requested tolerance of a zero.
+* The interval (b,c) collapsed to the requested
+* tolerance, the function changes sign in (b,c), and
+* f_to_zero(x) decreased in magnitude as (b,c) collapsed.
+*
+* 2: f_to_zero(b) = 0. However, the interval (b,c) may not have
+* collapsed to the requested tolerance.
+*
+* 3: b may be near a singular point of f_to_zero(x).
+* The interval (b,c) collapsed to the requested tol-
+* erance and the function changes sign in (b,c), but
+*                   f_to_zero(x) increased in magnitude as (b,c) collapsed, i.e.
+* Math.abs(f_to_zero(b out)) >
+* Math.max(Math.abs(f_to_zero(b in)),
+* Math.abs(f_to_zero(c in)))
+*
+* 4: No change in sign of f_to_zero(x) was found although the
+* interval (b,c) collapsed to the requested tolerance.
+* The user must examine this case and decide whether
+* b is near a local minimum of f_to_zero(x), or b is near a
+* zero of even multiplicity, or neither of these.
+*
+* 5: Too many (> 500) function evaluations used.
+*
+*/
+
+
+ public static void fzero (Fzero_methods zeroclass, double b[], double c[],
+ double r, double re, double ae, int iflag[]) {
+
+/*
+
+Here is a copy of the Netlib documentation:
+
+ SUBROUTINE DFZERO(F,B,C,R,RE,AE,IFLAG)
+C***BEGIN PROLOGUE DFZERO
+C***DATE WRITTEN 700901 (YYMMDD)
+C***REVISION DATE 861211 (YYMMDD)
+C***CATEGORY NO. F1B
+C***KEYWORDS LIBRARY=SLATEC,TYPE=DOUBLE PRECISION(FZERO-S DFZERO-D),
+C BISECTION,NONLINEAR,NONLINEAR EQUATIONS,ROOTS,ZEROES,
+C ZEROS
+C***AUTHOR SHAMPINE,L.F.,SNLA
+C WATTS,H.A.,SNLA
+C***PURPOSE Search for a zero of a function F(X) in a given
+C interval (B,C). It is designed primarily for problems
+C where F(B) and F(C) have opposite signs.
+C***DESCRIPTION
+C
+C **** Double Precision version of FZERO ****
+C
+C Based on a method by T J Dekker
+C written by L F Shampine and H A Watts
+C
+C DFZERO searches for a zero of a function F(X) between
+C the given values B and C until the width of the interval
+C (B,C) has collapsed to within a tolerance specified by
+C the stopping criterion, DABS(B-C) .LE. 2.*(RW*DABS(B)+AE).
+C The method used is an efficient combination of bisection
+C and the secant rule.
+C
+C Description Of Arguments
+C
+C F,B,C,R,RE and AE are DOUBLE PRECISION input parameters.
+C B and C are DOUBLE PRECISION output parameters and IFLAG (flagged
+C by an * below).
+C
+C F - Name of the DOUBLE PRECISION valued external function.
+C This name must be in an EXTERNAL statement in the
+C calling program. F must be a function of one double
+C precision argument.
+C
+C *B - One end of the interval (B,C). The value returned for
+C B usually is the better approximation to a zero of F.
+C
+C *C - The other end of the interval (B,C)
+C
+C R - A (better) guess of a zero of F which could help in
+C speeding up convergence. If F(B) and F(R) have
+C opposite signs, a root will be found in the interval
+C (B,R); if not, but F(R) and F(C) have opposite
+C signs, a root will be found in the interval (R,C);
+C otherwise, the interval (B,C) will be searched for a
+C possible root. When no better guess is known, it is
+C recommended that r be set to B or C; because if R is
+C not interior to the interval (B,C), it will be ignored.
+C
+C RE - Relative error used for RW in the stopping criterion.
+C If the requested RE is less than machine precision,
+C then RW is set to approximately machine precision.
+C
+C AE - Absolute error used in the stopping criterion. If the
+C given interval (B,C) contains the origin, then a
+C nonzero value should be chosen for AE.
+C
+C *IFLAG - A status code. User must check IFLAG after each call.
+C Control returns to the user from FZERO in all cases.
+C XERROR does not process diagnostics in these cases.
+C
+C 1 B is within the requested tolerance of a zero.
+C The interval (B,C) collapsed to the requested
+C tolerance, the function changes sign in (B,C), and
+C F(X) decreased in magnitude as (B,C) collapsed.
+C
+C 2 F(B) = 0. However, the interval (B,C) may not have
+C collapsed to the requested tolerance.
+C
+C 3 B may be near a singular point of F(X).
+C The interval (B,C) collapsed to the requested tol-
+C erance and the function changes sign in (B,C), but
+C F(X) increased in magnitude as (B,C) collapsed,i.e.
+C abs(F(B out)) .GT. max(abs(F(B in)),abs(F(C in)))
+C
+C 4 No change in sign of F(X) was found although the
+C interval (B,C) collapsed to the requested tolerance.
+C The user must examine this case and decide whether
+C B is near a local minimum of F(X), or B is near a
+C zero of even multiplicity, or neither of these.
+C
+C 5 Too many (.GT. 500) function evaluations used.
+C***REFERENCES L. F. SHAMPINE AND H. A. WATTS, *FZERO, A ROOT-SOLVING
+C CODE*, SC-TM-70-631, SEPTEMBER 1970.
+C T. J. DEKKER, *FINDING A ZERO BY MEANS OF SUCCESSIVE
+C LINEAR INTERPOLATION*, 'CONSTRUCTIVE ASPECTS OF THE
+C FUNDAMENTAL THEOREM OF ALGEBRA', EDITED BY B. DEJON
+C P. HENRICI, 1969.
+C***ROUTINES CALLED D1MACH
+C***END PROLOGUE DFZERO
+c
+*/
+
+ int i,ic,icnt,ierr,ipass,ipss,j,klus,kount,kprint,lun,ndeg;
+
+ int itest[] = new int[37];
+ int itmp[] = new int[16];
+
+
+ double a,acbs,acmb,aw,cmb,er,fa,fb,fc,fx,fz,p,q,rel,rw,
+ t,tol,wi,work,wr,z;
+
+// 1.1102e-16 is machine precision; 1.2e-16 is used as a slightly larger value
+
+ er = 2.0*1.2e-16;
+
+// Initialize
+
+ z = r;
+
+ if (r <= Math.min(b[1],c[1]) || r >= Math.max(b[1],c[1])) {
+
+ z = c[1];
+
+ }
+
+ rw = Math.max(re,er);
+ aw = Math.max(ae,0.0);
+
+ ic = 0;
+ t = z;
+
+ fz = zeroclass.f_to_zero(t);
+ fc = fz;
+
+ t = b[1];
+
+ fb = zeroclass.f_to_zero(t);
+
+ kount = 2;
+
+ if (Blas_f77.sign_f77(1.0,fz) != Blas_f77.sign_f77(1.0,fb)) {
+
+ c[1] = z;
+
+ } else {
+
+ if (z != c[1]) {
+
+ t = c[1];
+
+ fc = zeroclass.f_to_zero(t);
+
+ kount = 3;
+
+ if (Blas_f77.sign_f77(1.0,fz) != Blas_f77.sign_f77(1.0,fc)) {
+
+ b[1] = z;
+
+ fb = fz;
+
+ }
+
+ }
+
+ }
+
+ a = c[1];
+ fa = fc;
+ acbs = Math.abs(b[1] - c[1]);
+ fx = Math.max(Math.abs(fb),Math.abs(fc));
+
+// Main loop
+
+ while (true) {
+
+ if (Math.abs(fc) < Math.abs(fb)) {
+
+// Perform interchange
+
+ a = b[1];
+ fa = fb;
+ b[1] = c[1];
+ fb = fc;
+ c[1] = a;
+ fc = fa;
+
+ }
+
+ cmb = 0.5*(c[1] - b[1]);
+ acmb = Math.abs(cmb);
+ tol = rw*Math.abs(b[1]) + aw;
+
+// Test stopping criterion and function count.
+
+ if (acmb <= tol) {
+
+// Finished. Process the results for the proper setting
+// of the flag.
+
+ if (Blas_f77.sign_f77(1.0,fb) == Blas_f77.sign_f77(1.0,fc)) {
+
+ iflag[1] = 4;
+
+ return;
+
+ }
+
+
+ if (Math.abs(fb) > fx) {
+
+ iflag[1] = 3;
+
+ return;
+
+ }
+
+
+ iflag[1] = 1;
+
+ return;
+
+ }
+
+ if (fb == 0.0) {
+
+ iflag[1] = 2;
+
+ return;
+
+ }
+
+ if (kount >= 500) {
+
+ iflag[1] = 5;
+
+ return;
+
+ }
+
+
+/*
+
+C CALCULATE NEW ITERATE IMPLICITLY AS B+P/Q
+C WHERE WE ARRANGE P .GE. 0.
+C THE IMPLICIT FORM IS USED TO PREVENT OVERFLOW.
+
+*/
+
+ p = (b[1] - a)*fb;
+ q = fa - fb;
+
+ if (p < 0.0) {
+
+ p = -p;
+ q = -q;
+
+ }
+
+/*
+
+C UPDATE A AND CHECK FOR SATISFACTORY REDUCTION
+C IN THE SIZE OF THE BRACKETING INTERVAL.
+C IF NOT, PERFORM BISECTION.
+
+*/
+
+ a = b[1];
+ fa = fb;
+
+ ic++;
+
+ if (ic >= 4 && 8.0*acmb >= acbs) {
+
+// Use bisection
+
+ b[1] = .5*(c[1] + b[1]);
+
+ } else {
+
+ if (ic >= 4) {
+
+ ic = 0;
+ acbs = acmb;
+
+ }
+
+// Test for too small a change.
+
+ if (p <= Math.abs(q)*tol) {
+
+// Increment by the tolerance.
+
+ b[1] += Blas_f77.sign_f77(tol,cmb);
+
+ } else {
+
+// Root ought to be between b[1] and (c[1] + b[1])/2.
+
+ if (p >= cmb*q) {
+
+// Use bisection.
+
+ b[1] = 0.5*(c[1] + b[1]);
+
+ } else {
+
+// Use the secant rule.
+
+ b[1] += p/q;
+
+ }
+
+ }
+
+ }
+
+// Have completed the computation for a new iterate b[1].
+
+ t = b[1];
+ fb = zeroclass.f_to_zero(t);
+ kount++;
+
+// Decide whether the next step is an interpolation
+// or an extrapolation.
+
+ if (Blas_f77.sign_f77(1.0,fb) == Blas_f77.sign_f77(1.0,fc)) {
+
+ c[1] = a;
+ fc = fa;
+
+ }
+
+ }
+
+ }
+
+}
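
Because fzero returns its results through 1-based arrays (b[1], c[1],
iflag[1]), calling it looks slightly unusual. A minimal sketch, not part of
the upstream sources, bracketing the root of f(x) = x*x - 2 between 0 and 2:

    import optimization.*;

    public class FzeroUsageSketch implements Fzero_methods {

        // function whose zero is sought
        public double f_to_zero(double x) {
            return x*x - 2.0;
        }

        public static void main(String args[]) {

            // element 0 of each array is unused
            double b[] = new double[2];
            double c[] = new double[2];
            int iflag[] = new int[2];

            b[1] = 0.0;       // one end of the initial interval
            c[1] = 2.0;       // the other end
            double r = 1.0;   // initial guess of the zero

            Fzero.fzero(new FzeroUsageSketch(), b, c, r, 1.0e-10, 1.0e-10, iflag);

            // iflag[1] == 1 indicates normal convergence; b[1] holds the root
            System.out.println("iflag = " + iflag[1] + ", root near " + b[1]);
        }
    }
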
diff --git a/Fzero_methods.java b/Fzero_methods.java
new file mode 100644
index 0000000..3b8747a
--- /dev/null
+++ b/Fzero_methods.java
@@ -0,0 +1,7 @@
+package optimization;
+
+public interface Fzero_methods {
+
+ double f_to_zero(double x);
+
+}
diff --git a/Lmder_fcn.java b/Lmder_fcn.java
new file mode 100644
index 0000000..2de427f
--- /dev/null
+++ b/Lmder_fcn.java
@@ -0,0 +1,8 @@
+package optimization;
+
+public interface Lmder_fcn {
+
+ void fcn(int m, int n, double x[], double fvec[],
+ double fjac[][], int iflag[]);
+
+}
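
The Lmder_fcn interface above is how a least-squares problem is handed to the
lmder-style routines in Minpack_f77.java (further down in this diff): when
iflag[1] is 1 the method fills the residual vector fvec, and when iflag[1] is
2 it fills the Jacobian fjac. A hedged sketch, not part of the upstream
sources, for the model y = x1*exp(x2*t) with made-up data:

    import optimization.*;

    public class ExpFit implements Lmder_fcn {

        // illustrative data; element 0 of each array is unused
        double t[] = {0.0, 1.0, 2.0, 3.0, 4.0};
        double y[] = {0.0, 2.7, 7.4, 20.1, 54.6};

        public void fcn(int m, int n, double x[], double fvec[],
                        double fjac[][], int iflag[]) {

            for (int i = 1; i <= m; i++) {

                double e = Math.exp(x[2]*t[i]);

                if (iflag[1] == 1) {
                    fvec[i] = x[1]*e - y[i];        // residual
                } else if (iflag[1] == 2) {
                    fjac[i][1] = e;                 // d(residual)/d(x1)
                    fjac[i][2] = x[1]*t[i]*e;       // d(residual)/d(x2)
                }
            }
        }
    }
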
diff --git a/Lmdif_fcn.java b/Lmdif_fcn.java
new file mode 100644
index 0000000..ab50fc7
--- /dev/null
+++ b/Lmdif_fcn.java
@@ -0,0 +1,8 @@
+package optimization;
+
+public interface Lmdif_fcn {
+
+ void fcn(int m, int n, double x[], double fvec[],
+ int iflag[]);
+
+}
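
Lmdif_fcn is the derivative-free counterpart of Lmder_fcn: only the residuals
are supplied, and the lmdif-style drivers in Minpack_f77.java approximate the
Jacobian internally by finite differences. A sketch, not part of the upstream
sources, reusing the same illustrative model:

    import optimization.*;

    public class ExpFitNoDeriv implements Lmdif_fcn {

        // illustrative data; element 0 of each array is unused
        double t[] = {0.0, 1.0, 2.0, 3.0, 4.0};
        double y[] = {0.0, 2.7, 7.4, 20.1, 54.6};

        public void fcn(int m, int n, double x[], double fvec[], int iflag[]) {

            for (int i = 1; i <= m; i++) {
                fvec[i] = x[1]*Math.exp(x[2]*t[i]) - y[i];
            }
        }
    }
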
diff --git a/Minpack_f77.java b/Minpack_f77.java
new file mode 100644
index 0000000..cb0584f
--- /dev/null
+++ b/Minpack_f77.java
@@ -0,0 +1,3733 @@
+/*
+ Minpack_f77.java copyright claim:
+
+ This software is based on the public domain MINPACK routines.
+ It was translated from FORTRAN to Java by a US government employee
+ on official time. Thus this software is also in the public domain.
+
+
+ The translator's mail address is:
+
+ Steve Verrill
+ USDA Forest Products Laboratory
+ 1 Gifford Pinchot Drive
+ Madison, Wisconsin
+ 53705
+
+
+ The translator's e-mail address is:
+
+ steve at ws13.fpl.fs.fed.us
+
+
+***********************************************************************
+
+DISCLAIMER OF WARRANTIES:
+
+THIS SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND.
+THE TRANSLATOR DOES NOT WARRANT, GUARANTEE OR MAKE ANY REPRESENTATIONS
+REGARDING THE SOFTWARE OR DOCUMENTATION IN TERMS OF THEIR CORRECTNESS,
+RELIABILITY, CURRENCY, OR OTHERWISE. THE ENTIRE RISK AS TO
+THE RESULTS AND PERFORMANCE OF THE SOFTWARE IS ASSUMED BY YOU.
+IN NO CASE WILL ANY PARTY INVOLVED WITH THE CREATION OR DISTRIBUTION
+OF THE SOFTWARE BE LIABLE FOR ANY DAMAGE THAT MAY RESULT FROM THE USE
+OF THIS SOFTWARE.
+
+Sorry about that.
+
+***********************************************************************
+
+
+History:
+
+Date Translator Changes
+
+11/3/00 Steve Verrill Translated
+
+*/
+
+
+package optimization;
+
+import optimization.*;
+
+/**
+*
+*<p>
+*This class contains Java translations of the MINPACK nonlinear least
+*squares routines. As of November 2000, it does not yet contain
+*the MINPACK solvers of systems of nonlinear equations. They should
+*be added in the Spring of 2001.<p>
+*The original FORTRAN MINPACK package was produced by
+*Burton S. Garbow, Kenneth E. Hillstrom, and Jorge J. More
+*as part of the Argonne National Laboratory MINPACK project, March 1980.
+*
+*<p>
+*<b>IMPORTANT:</b> The "_f77" suffixes indicate that these routines use
+*FORTRAN style indexing. For example, you will see
+*<pre>
+* for (i = 1; i <= n; i++)
+*</pre>
+*rather than
+*<pre>
+* for (i = 0; i < n; i++)
+*</pre>
+*To use the "_f77" routines you will have to declare your vectors
+*and matrices to be one element larger (e.g., v[101] rather than
+*v[100], and a[101][101] rather than a[100][100]), and you will have
+*to fill elements 1 through n rather than elements 0 through n - 1.
+*Versions of these programs that use C/Java style indexing might
+*eventually be available. They would end with the suffix "_j".
+*
+*<p>
+*This class was translated by a statistician from FORTRAN versions of
+*lmder and lmdif. It is NOT an official translation. It wastes
+*memory by failing to use the first elements of vectors. When
+*public domain Java optimization routines become available
+*from the people who produced MINPACK, then <b>THE CODE PRODUCED
+*BY THE NUMERICAL ANALYSTS SHOULD BE USED</b>.
+*
+*<p>
+*Meanwhile, if you have suggestions for improving this
+*code, please contact Steve Verrill at steve at ws13.fpl.fs.fed.us.
+*
+*@author (translator) Steve Verrill
+*@version .5 --- November 3, 2000
+*
+*/
+
+
+public class Minpack_f77 extends Object {
+
+// epsmch is the machine precision
+
+ static final double epsmch = 2.22044604926e-16;
+
+// minmag is the smallest magnitude
+
+ static final double minmag = 2.22507385852e-308;
+
+ static final double zero = 0.0;
+ static final double one = 1.0;
+ static final double p0001 = .0001;
+ static final double p001 = .001;
+ static final double p05 = .05;
+ static final double p1 = .1;
+ static final double p25 = .25;
+ static final double p5 = .5;
+ static final double p75 = .75;
+
+/**
+*
+*<p>
+*The lmder1_f77 method minimizes the sum of the squares of
+*m nonlinear functions in n variables by a modification of the
+*Levenberg-Marquardt algorithm. This is done by using the more
+*general least-squares solver lmder_f77. The user must provide a
+*method which calculates the functions and the Jacobian.
+*<p>
+*Translated by Steve Verrill on November 17, 2000
+*from the FORTRAN MINPACK source produced by Garbow, Hillstrom, and More.<p>
+*
+*
+*@param nlls A class that implements the Lmder_fcn interface
+* (see the definition in Lmder_fcn.java). See
+* LmderTest_f77.java for an example of such a class.
+* The class must define a method, fcn, that must
+* have the form
+*
+*                 public void fcn(int m, int n, double x[],
+* double fvec[], double fjac[][], int iflag[])
+*
+* If iflag[1] equals 1, fcn calculates the values of
+* the m functions [residuals] at x and returns this
+* vector in fvec. If iflag[1] equals 2, fcn calculates
+* the Jacobian at x and returns this matrix in fjac
+* (and does not alter fvec).
+*
+* The value of iflag[1] should not be changed by fcn unless
+* the user wants to terminate execution of lmder_f77.
+* In this case set iflag[1] to a negative integer.
+*
+*@param m A positive integer set to the number of functions
+* [number of observations]
+*@param n A positive integer set to the number of variables
+* [number of parameters]. n must not exceed m.
+*@param x On input, it contains the initial estimate of
+* the solution vector [the least squares parameters].
+* On output it contains the final estimate of the
+* solution vector.
+*@param fvec An output vector that contains the m functions
+* [residuals] evaluated at x.
+*@param fjac An output m by n array. The upper n by n submatrix
+* of fjac contains an upper triangular matrix R with
+* diagonal elements of nonincreasing magnitude such that
+*<pre>
+*             P^T (jac^T jac) P = R^T R,
+*</pre>
+* where P is a permutation matrix and jac is the final
+* calculated Jacobian. Column j of P is column ipvt[j]
+* of the identity matrix. The lower trapezoidal
+* part of fjac contains information generated during
+* the computation of R.
+*@param tol tol is a nonnegative input variable. Termination occurs
+* when the algorithm estimates either that the relative
+* error in the sum of squares is at most tol or that
+* the relative error between x and the solution is at
+* most tol.
+*@param info An integer output variable. If the user has
+* terminated execution, info is set to the (negative)
+* value of iflag[1]. See description of fcn. Otherwise,
+* info is set as follows.
+*
+* info = 0 improper input parameters.
+*
+* info = 1 algorithm estimates that the relative error
+* in the sum of squares is at most tol.
+*
+* info = 2 algorithm estimates that the relative error
+* between x and the solution is at most tol.
+*
+* info = 3 conditions for info = 1 and info = 2 both hold.
+*
+* info = 4 fvec is orthogonal to the columns of the
+* Jacobian to machine precision.
+*
+* info = 5 number of calls to fcn with iflag[1] = 1 has
+* reached 100*(n+1).
+*
+* info = 6 tol is too small. No further reduction in
+* the sum of squares is possible.
+*
+* info = 7 tol is too small. No further improvement in
+* the approximate solution x is possible.
+*@param ipvt An integer output array of length n. ipvt
+* defines a permutation matrix P such that jac*P = QR,
+* where jac is the final calculated Jacobian, Q is
+* orthogonal (not stored), and R is upper triangular
+* with diagonal elements of nonincreasing magnitude.
+* Column j of P is column ipvt[j] of the identity matrix.
+*
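+*
+*<p>
+*A minimal calling sketch (illustrative only -- the LineFit class, its data,
+*and the tolerance are made up, and it assumes that Lmder_fcn declares fcn
+*as an instance method; see Lmder_fcn.java and LmderTest_f77.java for the
+*authoritative forms). Note the FORTRAN-style indexing: element 0 of every
+*array and array dimension is unused.
+*
+*<pre>
+*   class LineFit implements Lmder_fcn {
+*
+*      double t[] = {0.0, 1.0, 2.0, 3.0};   // t[0] unused
+*      double y[] = {0.0, 2.1, 3.9, 6.2};   // y[0] unused
+*
+*      public void fcn(int m, int n, double x[], double fvec[],
+*                      double fjac[][], int iflag[]) {
+*
+*         if (iflag[1] == 1) {
+*
+*            // residuals r_i = x[1] + x[2]*t[i] - y[i]
+*            for (int i = 1; i <= m; i++) fvec[i] = x[1] + x[2]*t[i] - y[i];
+*
+*         } else if (iflag[1] == 2) {
+*
+*            // Jacobian of the residuals
+*            for (int i = 1; i <= m; i++) {
+*               fjac[i][1] = 1.0;
+*               fjac[i][2] = t[i];
+*            }
+*
+*         }
+*
+*      }
+*
+*      public static void main(String args[]) {
+*
+*         int m = 3, n = 2;
+*         double x[] = new double[n+1];
+*         double fvec[] = new double[m+1];
+*         double fjac[][] = new double[m+1][n+1];
+*         int info[] = new int[2];
+*         int ipvt[] = new int[n+1];
+*
+*         x[1] = 1.0;
+*         x[2] = 1.0;                        // starting guess
+*
+*         Minpack_f77.lmder1_f77(new LineFit(), m, n, x, fvec, fjac,
+*                                1.0e-10, info, ipvt);
+*
+*         System.out.println("info = " + info[1] + "  intercept = " + x[1] +
+*                            "  slope = " + x[2]);
+*
+*      }
+*
+*   }
+*</pre>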
+*/
+
+
+ public static void lmder1_f77(Lmder_fcn nlls, int m, int n,
+ double x[], double fvec[],
+ double fjac[][], double tol,
+ int info[], int ipvt[]) {
+
+/*
+
+Here is a copy of the lmder1 FORTRAN documentation:
+
+
+ subroutine lmder1(fcn,m,n,x,fvec,fjac,ldfjac,tol,info,ipvt,wa,
+ * lwa)
+
+ integer m,n,ldfjac,info,lwa
+ integer ipvt(n)
+ double precision tol
+ double precision x(n),fvec(m),fjac(ldfjac,n),wa(lwa)
+ external fcn
+
+c **********
+c
+c subroutine lmder1
+c
+c the purpose of lmder1 is to minimize the sum of the squares of
+c m nonlinear functions in n variables by a modification of the
+c levenberg-marquardt algorithm. this is done by using the more
+c general least-squares solver lmder. the user must provide a
+c subroutine which calculates the functions and the jacobian.
+c
+c the subroutine statement is
+c
+c subroutine lmder1(fcn,m,n,x,fvec,fjac,ldfjac,tol,info,
+c ipvt,wa,lwa)
+c
+c where
+c
+c fcn is the name of the user-supplied subroutine which
+c calculates the functions and the jacobian. fcn must
+c be declared in an external statement in the user
+c calling program, and should be written as follows.
+c
+c subroutine fcn(m,n,x,fvec,fjac,ldfjac,iflag)
+c integer m,n,ldfjac,iflag
+c double precision x(n),fvec(m),fjac(ldfjac,n)
+c ----------
+c if iflag = 1 calculate the functions at x and
+c return this vector in fvec. do not alter fjac.
+c if iflag = 2 calculate the jacobian at x and
+c return this matrix in fjac. do not alter fvec.
+c ----------
+c return
+c end
+c
+c the value of iflag should not be changed by fcn unless
+c the user wants to terminate execution of lmder1.
+c in this case set iflag to a negative integer.
+c
+c m is a positive integer input variable set to the number
+c of functions.
+c
+c n is a positive integer input variable set to the number
+c of variables. n must not exceed m.
+c
+c x is an array of length n. on input x must contain
+c an initial estimate of the solution vector. on output x
+c contains the final estimate of the solution vector.
+c
+c fvec is an output array of length m which contains
+c the functions evaluated at the output x.
+c
+c fjac is an output m by n array. the upper n by n submatrix
+c of fjac contains an upper triangular matrix r with
+c diagonal elements of nonincreasing magnitude such that
+c
+c t t t
+c p *(jac *jac)*p = r *r,
+c
+c where p is a permutation matrix and jac is the final
+c calculated jacobian. column j of p is column ipvt(j)
+c (see below) of the identity matrix. the lower trapezoidal
+c part of fjac contains information generated during
+c the computation of r.
+c
+c ldfjac is a positive integer input variable not less than m
+c which specifies the leading dimension of the array fjac.
+c
+c tol is a nonnegative input variable. termination occurs
+c when the algorithm estimates either that the relative
+c error in the sum of squares is at most tol or that
+c the relative error between x and the solution is at
+c most tol.
+c
+c info is an integer output variable. if the user has
+c terminated execution, info is set to the (negative)
+c value of iflag. see description of fcn. otherwise,
+c info is set as follows.
+c
+c info = 0 improper input parameters.
+c
+c info = 1 algorithm estimates that the relative error
+c in the sum of squares is at most tol.
+c
+c info = 2 algorithm estimates that the relative error
+c between x and the solution is at most tol.
+c
+c info = 3 conditions for info = 1 and info = 2 both hold.
+c
+c info = 4 fvec is orthogonal to the columns of the
+c jacobian to machine precision.
+c
+c info = 5 number of calls to fcn with iflag = 1 has
+c reached 100*(n+1).
+c
+c info = 6 tol is too small. no further reduction in
+c the sum of squares is possible.
+c
+c info = 7 tol is too small. no further improvement in
+c the approximate solution x is possible.
+c
+c ipvt is an integer output array of length n. ipvt
+c defines a permutation matrix p such that jac*p = q*r,
+c where jac is the final calculated jacobian, q is
+c orthogonal (not stored), and r is upper triangular
+c with diagonal elements of nonincreasing magnitude.
+c column j of p is column ipvt(j) of the identity matrix.
+c
+c wa is a work array of length lwa.
+c
+c lwa is a positive integer input variable not less than 5*n+m.
+c
+c subprograms called
+c
+c user-supplied ...... fcn
+c
+c minpack-supplied ... lmder
+c
+c argonne national laboratory. minpack project. march 1980.
+c burton s. garbow, kenneth e. hillstrom, jorge j. more
+c
+c **********
+
+*/
+
+
+ int maxfev,mode,nprint;
+
+ int nfev[] = new int[2];
+ int njev[] = new int[2];
+
+ double diag[] = new double[n+1];
+ double qtf[] = new double[n+1];
+
+// double factor,ftol,gtol,xtol,zero;
+ double factor,ftol,gtol,xtol;
+
+ factor = 1.0e+2;
+// zero = 0.0;
+
+ info[1] = 0;
+
+// Check the input parameters for errors.
+
+ if (n <= 0 || m < n || tol < zero) {
+
+ return;
+
+ } else {
+
+// Call lmder_f77.
+
+ maxfev = 100*(n + 1);
+ ftol = tol;
+ xtol = tol;
+ gtol = zero;
+ mode = 1;
+ nprint = 0;
+
+ Minpack_f77.lmder_f77(nlls,m,n,x,fvec,fjac,ftol,xtol,gtol,maxfev,
+ diag,mode,factor,nprint,info,nfev,njev,ipvt,qtf);
+
+ if (info[1] == 8) info[1] = 4;
+
+ return;
+
+ }
+
+
+ }
+
+
+
+
+/**
+*
+*<p>
+*The lmder_f77 method minimizes the sum of the squares of
+*m nonlinear functions in n variables by a modification of
+*the Levenberg-Marquardt algorithm. The user must provide a
+*method which calculates the functions and the Jacobian.
+*<p>
+*Translated by Steve Verrill on November 3, 2000
+*from the FORTRAN MINPACK source produced by Garbow, Hillstrom, and More.<p>
+*
+*
+*@param nlls A class that implements the Lmder_fcn interface
+* (see the definition in Lmder_fcn.java). See
+* LmderTest_f77.java for an example of such a class.
+* The class must define a method, fcn, that must
+* have the form
+*
+* public void fcn(int m, int n, double x[],
+* double fvec[], double fjac[][], int iflag[])
+*
+* If iflag[1] equals 1, fcn calculates the values of
+* the m functions [residuals] at x and returns this
+* vector in fvec. If iflag[1] equals 2, fcn calculates
+* the Jacobian at x and returns this matrix in fjac
+* (and does not alter fvec).
+*
+* The value of iflag[1] should not be changed by fcn unless
+* the user wants to terminate execution of lmder_f77.
+* In this case set iflag[1] to a negative integer.
+*
+*@param m A positive integer set to the number of functions
+* [number of observations]
+*@param n A positive integer set to the number of variables
+* [number of parameters]. n must not exceed m.
+*@param x On input, it contains the initial estimate of
+* the solution vector [the least squares parameters].
+* On output it contains the final estimate of the
+* solution vector.
+*@param fvec An output vector that contains the m functions
+* [residuals] evaluated at x.
+*@param fjac An output m by n array. The upper n by n submatrix
+* of fjac contains an upper triangular matrix R with
+* diagonal elements of nonincreasing magnitude such that
+*<pre>
+* P^T (jac^T jac) P = R^T R,
+*</pre>
+* where P is a permutation matrix and jac is the final
+* calculated Jacobian. Column j of P is column ipvt[j]
+* of the identity matrix. The lower trapezoidal
+* part of fjac contains information generated during
+* the computation of R.
+*@param ftol A nonnegative input variable. Termination
+* occurs when both the actual and predicted relative
+* reductions in the sum of squares are at most ftol.
+* Therefore, ftol measures the relative error desired
+* in the sum of squares.
+*@param xtol A nonnegative input variable. Termination
+* occurs when the relative error between two consecutive
+* iterates is at most xtol. Therefore, xtol measures the
+* relative error desired in the approximate solution.
+*@param gtol A nonnegative input variable. Termination
+* occurs when the cosine of the angle between fvec and
+* any column of the Jacobian is at most gtol in absolute
+* value. Therefore, gtol measures the orthogonality
+* desired between the function vector and the columns
+* of the Jacobian.
+*@param maxfev A positive integer input variable. Termination
+* occurs when the number of calls to fcn with iflag[1] = 1
+* has reached maxfev.
+*@param diag A vector of length n. If mode = 1 (see
+* below), diag is internally set. If mode = 2, diag
+* must contain positive entries that serve as
+* multiplicative scale factors for the variables.
+*@param mode If mode = 1, the
+* variables will be scaled internally. If mode = 2,
+* the scaling is specified by the input diag. Other
+* values of mode are equivalent to mode = 1.
+*@param factor A positive input variable used in determining the
+* initial step bound. This bound is set to the product of
+* factor and the euclidean norm of diag*x if nonzero, or else
+* to factor itself. In most cases factor should lie in the
+* interval (.1,100). 100 is a generally recommended value.
+*@param nprint An integer input variable that enables controlled
+* printing of iterates if it is positive. In this case,
+* fcn is called with iflag[1] = 0 at the beginning of the first
+* iteration and every nprint iterations thereafter and
+* immediately prior to return, with x, fvec, and fjac
+* available for printing. fvec and fjac should not be
+* altered. If nprint is not positive, no special calls
+* of fcn with iflag[1] = 0 are made.
+*@param info An integer output variable. If the user has
+* terminated execution, info is set to the (negative)
+* value of iflag[1]. See description of fcn. Otherwise,
+* info is set as follows.
+*
+* info = 0 improper input parameters.
+*
+* info = 1 both actual and predicted relative reductions
+* in the sum of squares are at most ftol.
+*
+* info = 2 relative error between two consecutive iterates
+* is at most xtol.
+*
+* info = 3 conditions for info = 1 and info = 2 both hold.
+*
+* info = 4 the cosine of the angle between fvec and any
+* column of the Jacobian is at most gtol in
+* absolute value.
+*
+* info = 5 number of calls to fcn with iflag[1] = 1 has
+* reached maxfev.
+*
+* info = 6 ftol is too small. No further reduction in
+* the sum of squares is possible.
+*
+* info = 7 xtol is too small. No further improvement in
+* the approximate solution x is possible.
+*
+* info = 8 gtol is too small. fvec is orthogonal to the
+* columns of the Jacobian to machine precision.
+*
+*@param nfev An integer output variable set to the number of
+* calls to fcn with iflag[1] = 1.
+*@param njev An integer output variable set to the number of
+* calls to fcn with iflag[1] = 2.
+*@param ipvt An integer output array of length n. ipvt
+* defines a permutation matrix P such that jac*P = QR,
+* where jac is the final calculated Jacobian, Q is
+* orthogonal (not stored), and R is upper triangular
+* with diagonal elements of nonincreasing magnitude.
+* Column j of P is column ipvt[j] of the identity matrix.
+*
+*@param qtf An output array of length n which contains
+* the first n elements of the vector (Q transpose)fvec.
+*
+*
+*/
+
+/*
+Note that since Java passes primitive types by value rather than
+by reference, a primitive argument that must return a value to the
+caller is passed as an array instead (here we keep the actual value
+in location [1] to preserve the FORTRAN-style indexing). For example,
+info is passed as info[].
+*/
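+
+/*
+For example (an illustrative fragment, not part of the library; the
+arguments nlls, m, n, x, fvec, fjac, tol, and ipvt are assumed to have
+been set up as described above), a caller reads the "returned"
+termination code out of element 1 of the info array:
+
+   int info[] = new int[2];    // info[0] is unused
+   Minpack_f77.lmder1_f77(nlls,m,n,x,fvec,fjac,tol,info,ipvt);
+   System.out.println("termination code: " + info[1]);
+*/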
+
+
+ public static void lmder_f77(Lmder_fcn nlls, int m, int n,
+ double x[], double fvec[],
+ double fjac[][], double ftol,
+ double xtol, double gtol,
+ int maxfev, double diag[], int mode,
+ double factor, int nprint,int info[],
+ int nfev[], int njev[],
+ int ipvt[], double qtf[]) {
+
+/*
+
+Here is a copy of the lmder FORTRAN documentation:
+
+
+ subroutine lmder(fcn,m,n,x,fvec,fjac,ldfjac,ftol,xtol,gtol,
+ * maxfev,diag,mode,factor,nprint,info,nfev,njev,
+ * ipvt,qtf,wa1,wa2,wa3,wa4)
+ integer m,n,ldfjac,maxfev,mode,nprint,info,nfev,njev
+ integer ipvt(n)
+ double precision ftol,xtol,gtol,factor
+ double precision x(n),fvec(m),fjac(ldfjac,n),diag(n),qtf(n),
+ * wa1(n),wa2(n),wa3(n),wa4(m)
+c
+c subroutine lmder
+c
+c the purpose of lmder is to minimize the sum of the squares of
+c m nonlinear functions in n variables by a modification of
+c the Levenberg-Marquardt algorithm. the user must provide a
+c subroutine which calculates the functions and the Jacobian.
+c
+c the subroutine statement is
+c
+c subroutine lmder(fcn,m,n,x,fvec,fjac,ldfjac,ftol,xtol,gtol,
+c maxfev,diag,mode,factor,nprint,info,nfev,
+c njev,ipvt,qtf,wa1,wa2,wa3,wa4)
+c
+c where
+c
+c fcn is the name of the user-supplied subroutine which
+c calculates the functions and the Jacobian. fcn must
+c be declared in an external statement in the user
+c calling program, and should be written as follows.
+c
+c subroutine fcn(m,n,x,fvec,fjac,ldfjac,iflag)
+c integer m,n,ldfjac,iflag
+c double precision x(n),fvec(m),fjac(ldfjac,n)
+c ----------
+c if iflag = 1 calculate the functions at x and
+c return this vector in fvec. do not alter fjac.
+c if iflag = 2 calculate the Jacobian at x and
+c return this matrix in fjac. do not alter fvec.
+c ----------
+c return
+c end
+c
+c the value of iflag should not be changed by fcn unless
+c the user wants to terminate execution of lmder.
+c in this case set iflag to a negative integer.
+c
+c m is a positive integer input variable set to the number
+c of functions.
+c
+c n is a positive integer input variable set to the number
+c of variables. n must not exceed m.
+c
+c x is an array of length n. on input x must contain
+c an initial estimate of the solution vector. on output x
+c contains the final estimate of the solution vector.
+c
+c fvec is an output array of length m which contains
+c the functions evaluated at the output x.
+c
+c fjac is an output m by n array. the upper n by n submatrix
+c of fjac contains an upper triangular matrix r with
+c diagonal elements of nonincreasing magnitude such that
+c
+c t t t
+c p *(jac *jac)*p = r *r,
+c
+c where p is a permutation matrix and jac is the final
+c calculated Jacobian. column j of p is column ipvt(j)
+c (see below) of the identity matrix. the lower trapezoidal
+c part of fjac contains information generated during
+c the computation of r.
+c
+c ldfjac is a positive integer input variable not less than m
+c which specifies the leading dimension of the array fjac.
+c
+c ftol is a nonnegative input variable. termination
+c occurs when both the actual and predicted relative
+c reductions in the sum of squares are at most ftol.
+c therefore, ftol measures the relative error desired
+c in the sum of squares.
+c
+c xtol is a nonnegative input variable. termination
+c occurs when the relative error between two consecutive
+c iterates is at most xtol. therefore, xtol measures the
+c relative error desired in the approximate solution.
+c
+c gtol is a nonnegative input variable. termination
+c occurs when the cosine of the angle between fvec and
+c any column of the Jacobian is at most gtol in absolute
+c value. therefore, gtol measures the orthogonality
+c desired between the function vector and the columns
+c of the Jacobian.
+c
+c maxfev is a positive integer input variable. termination
+c occurs when the number of calls to fcn with iflag = 1
+c has reached maxfev.
+c
+c diag is an array of length n. if mode = 1 (see
+c below), diag is internally set. if mode = 2, diag
+c must contain positive entries that serve as
+c multiplicative scale factors for the variables.
+c
+c mode is an integer input variable. if mode = 1, the
+c variables will be scaled internally. if mode = 2,
+c the scaling is specified by the input diag. other
+c values of mode are equivalent to mode = 1.
+c
+c factor is a positive input variable used in determining the
+c initial step bound. this bound is set to the product of
+c factor and the euclidean norm of diag*x if nonzero, or else
+c to factor itself. in most cases factor should lie in the
+c interval (.1,100.).100. is a generally recommended value.
+c
+c nprint is an integer input variable that enables controlled
+c printing of iterates if it is positive. in this case,
+c fcn is called with iflag = 0 at the beginning of the first
+c iteration and every nprint iterations thereafter and
+c immediately prior to return, with x, fvec, and fjac
+c available for printing. fvec and fjac should not be
+c altered. if nprint is not positive, no special calls
+c of fcn with iflag = 0 are made.
+c
+c info is an integer output variable. if the user has
+c terminated execution, info is set to the (negative)
+c value of iflag. see description of fcn. otherwise,
+c info is set as follows.
+c
+c info = 0 improper input parameters.
+c
+c info = 1 both actual and predicted relative reductions
+c in the sum of squares are at most ftol.
+c
+c info = 2 relative error between two consecutive iterates
+c is at most xtol.
+c
+c info = 3 conditions for info = 1 and info = 2 both hold.
+c
+c info = 4 the cosine of the angle between fvec and any
+c column of the Jacobian is at most gtol in
+c absolute value.
+c
+c info = 5 number of calls to fcn with iflag = 1 has
+c reached maxfev.
+c
+c info = 6 ftol is too small. no further reduction in
+c the sum of squares is possible.
+c
+c info = 7 xtol is too small. no further improvement in
+c the approximate solution x is possible.
+c
+c info = 8 gtol is too small. fvec is orthogonal to the
+c columns of the Jacobian to machine precision.
+c
+c nfev is an integer output variable set to the number of
+c calls to fcn with iflag = 1.
+c
+c njev is an integer output variable set to the number of
+c calls to fcn with iflag = 2.
+c
+c ipvt is an integer output array of length n. ipvt
+c defines a permutation matrix p such that jac*p = q*r,
+c where jac is the final calculated Jacobian, q is
+c orthogonal (not stored), and r is upper triangular
+c with diagonal elements of nonincreasing magnitude.
+c column j of p is column ipvt(j) of the identity matrix.
+c
+c qtf is an output array of length n which contains
+c the first n elements of the vector (q transpose)*fvec.
+c
+c wa1, wa2, and wa3 are work arrays of length n.
+c
+c wa4 is a work array of length m.
+c
+c subprograms called
+c
+c user-supplied ...... fcn
+c
+c minpack-supplied ... dpmpar,enorm,lmpar,qrfac
+c
+c fortran-supplied ... dabs,dmax1,dmin1,dsqrt,mod
+c
+c argonne national laboratory. minpack project. march 1980.
+c burton s. garbow, kenneth e. hillstrom, jorge j. more
+c
+
+*/
+
+ int i,iter,j,l;
+// double actred,delta,dirder,fnorm,fnorm1,gnorm,
+// one,pnorm,prered,p1,p5,p25,p75,p0001,ratio,
+// sum,temp,temp1,temp2,xnorm,zero;
+
+ double actred,delta,dirder,fnorm,fnorm1,gnorm,
+ pnorm,prered,ratio,
+ sum,temp,temp1,temp2,xnorm;
+
+ double par[] = new double[2];
+
+ boolean doneout,donein;
+
+ int iflag[] = new int[2];
+ double wa1[] = new double[n+1];
+ double wa2[] = new double[n+1];
+ double wa3[] = new double[n+1];
+ double wa4[] = new double[m+1];
+
+// Java compiler complains if delta and xnorm are not initialized
+
+ delta = 0.0;
+ xnorm = 0.0;
+
+// one = 1.0;
+// p1 = .1;
+// p5 = .5;
+// p25 = .25;
+// p75 = .75;
+// p0001 = .0001;
+// zero = 0.0;
+
+ info[1] = 0;
+ iflag[1] = 0;
+ nfev[1] = 0;
+ njev[1] = 0;
+
+// Check the input parameters for errors.
+
+ if (n <= 0 || m < n
+ || ftol < zero || xtol < zero || gtol < zero
+ || maxfev <= 0 || factor <= zero) {
+
+// Termination
+
+ if (nprint > 0) {
+
+ nlls.fcn(m,n,x,fvec,fjac,iflag);
+
+ }
+
+ return;
+
+ }
+
+ if (mode == 2) {
+
+ for (j = 1; j <= n; j++) {
+
+ if (diag[j] <= zero) {
+
+// Termination
+
+ if (nprint > 0) {
+
+ nlls.fcn(m,n,x,fvec,fjac,iflag);
+
+ }
+
+ return;
+
+ }
+
+ }
+
+ }
+
+// Evaluate the function at the starting point
+// and calculate its norm.
+
+ iflag[1] = 1;
+
+ nlls.fcn(m,n,x,fvec,fjac,iflag);
+
+ nfev[1] = 1;
+
+ if (iflag[1] < 0) {
+
+// Termination
+
+ info[1] = iflag[1];
+ iflag[1] = 0;
+
+ if (nprint > 0) {
+
+ nlls.fcn(m,n,x,fvec,fjac,iflag);
+
+ }
+
+ return;
+
+ }
+
+ fnorm = Minpack_f77.enorm_f77(m,fvec);
+
+// Initialize Levenberg-Marquardt parameter and iteration counter.
+
+ par[1] = zero;
+ iter = 1;
+
+// Beginning of the outer loop.
+
+ doneout = false;
+
+ while (!doneout) {
+
+// 30 continue
+
+// Calculate the Jacobian matrix.
+
+ iflag[1] = 2;
+
+ nlls.fcn(m,n,x,fvec,fjac,iflag);
+
+ njev[1]++;
+
+ if (iflag[1] < 0) {
+
+// Termination
+
+ info[1] = iflag[1];
+ iflag[1] = 0;
+
+ if (nprint > 0) {
+
+ nlls.fcn(m,n,x,fvec,fjac,iflag);
+
+ }
+
+ return;
+
+ }
+
+// If requested, call fcn to enable printing of iterates.
+
+ if (nprint > 0) {
+
+ iflag[1] = 0;
+
+ if ((iter-1)%nprint == 0) {
+
+ nlls.fcn(m,n,x,fvec,fjac,iflag);
+
+ }
+
+ if (iflag[1] < 0) {
+
+// Termination
+
+ info[1] = iflag[1];
+ iflag[1] = 0;
+
+ nlls.fcn(m,n,x,fvec,fjac,iflag);
+
+ return;
+
+ }
+
+ }
+
+// Compute the qr factorization of the Jacobian.
+
+ Minpack_f77.qrfac_f77(m,n,fjac,true,ipvt,wa1,wa2,wa3);
+
+// On the first iteration and if mode is 1, scale according
+// to the norms of the columns of the initial Jacobian.
+
+ if (iter == 1) {
+
+ if (mode != 2) {
+
+ for (j = 1; j <= n; j++) {
+
+ diag[j] = wa2[j];
+
+ if (wa2[j] == zero) diag[j] = one;
+
+ }
+
+ }
+
+// On the first iteration, calculate the norm of the scaled x
+// and initialize the step bound delta.
+
+ for (j = 1; j <= n; j++) {
+
+ wa3[j] = diag[j]*x[j];
+
+ }
+
+ xnorm = Minpack_f77.enorm_f77(n,wa3);
+
+ delta = factor*xnorm;
+
+ if (delta == zero) delta = factor;
+
+ }
+
+// Form (q transpose)*fvec and store the first n components in
+// qtf.
+
+ for (i = 1; i <= m; i++) wa4[i] = fvec[i];
+
+ for (j = 1; j <= n; j++) {
+
+ if (fjac[j][j] != zero) {
+
+ sum = zero;
+
+ for (i = j; i <= m; i++) sum += fjac[i][j]*wa4[i];
+
+ temp = -sum/fjac[j][j];
+
+ for (i = j; i <= m; i++) wa4[i] += fjac[i][j]*temp;
+
+ }
+
+ fjac[j][j] = wa1[j];
+ qtf[j] = wa4[j];
+
+ }
+
+
+// Compute the norm of the scaled gradient.
+
+ gnorm = zero;
+
+ if (fnorm != zero) {
+
+ for (j = 1; j <= n; j++) {
+
+ l = ipvt[j];
+
+ if (wa2[l] != zero) {
+
+ sum = zero;
+
+ for (i = 1; i <= j; i++) sum +=
+ fjac[i][j]*(qtf[i]/fnorm);
+
+ gnorm = Math.max(gnorm,Math.abs(sum/wa2[l]));
+
+ }
+
+ }
+
+ }
+
+// Test for convergence of the gradient norm.
+
+ if (gnorm <= gtol) info[1] = 4;
+
+ if (info[1] != 0) {
+
+// Termination
+
+ if (iflag[1] < 0) info[1] = iflag[1];
+ iflag[1] = 0;
+
+ if (nprint > 0) {
+
+ nlls.fcn(m,n,x,fvec,fjac,iflag);
+
+ }
+
+ return;
+
+ }
+
+// Rescale if necessary.
+
+ if (mode != 2) {
+
+ for (j = 1; j <= n; j++) {
+
+ diag[j] = Math.max(diag[j],wa2[j]);
+
+ }
+
+ }
+
+
+// Beginning of the inner loop.
+
+ donein = false;
+
+ while (!donein) {
+
+// 200 continue
+
+// Determine the Levenberg-Marquardt parameter.
+
+ Minpack_f77.lmpar_f77(n,fjac,ipvt,diag,qtf,delta,par,wa1,wa2,
+ wa3,wa4);
+
+// Store the direction p and x + p. Calculate the norm of p.
+
+ for (j = 1; j <= n; j++) {
+
+ wa1[j] = -wa1[j];
+ wa2[j] = x[j] + wa1[j];
+ wa3[j] = diag[j]*wa1[j];
+
+ }
+
+ pnorm = Minpack_f77.enorm_f77(n,wa3);
+
+// On the first iteration, adjust the initial step bound.
+
+ if (iter == 1) delta = Math.min(delta,pnorm);
+
+// Evaluate the function at x + p and calculate its norm.
+
+ iflag[1] = 1;
+
+ nlls.fcn(m,n,wa2,wa4,fjac,iflag);
+
+ nfev[1]++;
+
+ if (iflag[1] < 0) {
+
+// Termination
+
+ info[1] = iflag[1];
+ iflag[1] = 0;
+
+ if (nprint > 0) {
+
+ nlls.fcn(m,n,x,fvec,fjac,iflag);
+
+ }
+
+ return;
+
+ }
+
+ fnorm1 = Minpack_f77.enorm_f77(m,wa4);
+
+// Compute the scaled actual reduction.
+
+ actred = -one;
+
+ if (p1*fnorm1 < fnorm) actred = one - (fnorm1/fnorm)*(fnorm1/fnorm);
+
+// Compute the scaled predicted reduction and
+// the scaled directional derivative.
+
+ for (j = 1; j <= n; j++) {
+
+ wa3[j] = zero;
+ l = ipvt[j];
+ temp = wa1[l];
+
+ for (i = 1; i <= j; i++) wa3[i] += fjac[i][j]*temp;
+
+ }
+
+ temp1 = Minpack_f77.enorm_f77(n,wa3)/fnorm;
+ temp2 = (Math.sqrt(par[1])*pnorm)/fnorm;
+
+ prered = temp1*temp1 + temp2*temp2/p5;
+ dirder = -(temp1*temp1 + temp2*temp2);
+
+// Compute the ratio of the actual to the predicted
+// reduction.
+
+ ratio = zero;
+ if (prered != zero) ratio = actred/prered;
+
+// Update the step bound.
+
+ if (ratio <= p25) {
+
+ if (actred >= zero) {
+
+ temp = p5;
+
+ } else {
+
+ temp = p5*dirder/(dirder + p5*actred);
+
+ }
+
+ if (p1*fnorm1 >= fnorm || temp < p1) temp = p1;
+
+ delta = temp*Math.min(delta,pnorm/p1);
+
+ par[1] /= temp;
+
+ } else {
+
+ if (par[1] == zero || ratio >= p75) {
+
+ delta = pnorm/p5;
+ par[1] *= p5;
+
+ }
+
+ }
+
+
+// Test for successful iteration.
+
+ if (ratio >= p0001) {
+
+// Successful iteration. Update x, fvec, and their norms.
+
+ for (j = 1; j <= n; j++) {
+
+ x[j] = wa2[j];
+ wa2[j] = diag[j]*x[j];
+
+ }
+
+ for (i = 1; i <= m; i++) fvec[i] = wa4[i];
+
+ xnorm = Minpack_f77.enorm_f77(n,wa2);
+
+ fnorm = fnorm1;
+
+ iter++;
+
+ }
+
+
+// Tests for convergence.
+
+ if (Math.abs(actred) <= ftol && prered <= ftol
+ && p5*ratio <= one) info[1] = 1;
+
+ if (delta <= xtol*xnorm) info[1] = 2;
+
+ if (Math.abs(actred) <= ftol && prered <= ftol
+ && p5*ratio <= one && info[1] == 2) info[1] = 3;
+
+ if (info[1] != 0) {
+
+// Termination
+
+ if (iflag[1] < 0) info[1] = iflag[1];
+ iflag[1] = 0;
+
+ if (nprint > 0) {
+
+ nlls.fcn(m,n,x,fvec,fjac,iflag);
+
+ }
+
+ return;
+
+ }
+
+
+// Tests for termination and stringent tolerances.
+
+ if (nfev[1] >= maxfev) info[1] = 5;
+
+ if (Math.abs(actred) <= epsmch && prered <= epsmch
+ && p5*ratio <= one) info[1] = 6;
+
+ if (delta <= epsmch*xnorm) info[1] = 7;
+
+ if (gnorm <= epsmch) info[1] = 8;
+
+ if (info[1] != 0) {
+
+// Termination
+
+ if (iflag[1] < 0) info[1] = iflag[1];
+ iflag[1] = 0;
+
+ if (nprint > 0) {
+
+ nlls.fcn(m,n,x,fvec,fjac,iflag);
+
+ }
+
+ return;
+
+ }
+
+
+// End of the inner loop. Repeat if iteration unsuccessful.
+
+ if (ratio >= p0001) donein = true;
+
+ }
+
+// End of the outer loop.
+
+ }
+
+ }
+
+
+/**
+*
+*<p>
+*The enorm_f77 method calculates the Euclidean norm of a vector.
+*<p>
+*Translated by Steve Verrill on November 14, 2000
+*from the FORTRAN MINPACK source produced by Garbow, Hillstrom, and More.<p>
+*
+*
+*@param n The length of the vector, x.
+*@param x The vector whose Euclidean norm is to be calculated.
+*
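+*
+*<p>
+*A small illustrative call (FORTRAN-style indexing, so element 0 of the
+*array is unused):
+*<pre>
+*   double v[] = {0.0, 3.0, 4.0};
+*   double norm = Minpack_f77.enorm_f77(2, v);   // norm is 5.0
+*</pre>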
+*/
+
+
+ public static double enorm_f77(int n, double x[]) {
+
+
+/*
+
+Here is a copy of the enorm FORTRAN documentation:
+
+
+ double precision function enorm(n,x)
+ integer n
+ double precision x(n)
+
+c **********
+c
+c function enorm
+c
+c given an n-vector x, this function calculates the
+c euclidean norm of x.
+c
+c the euclidean norm is computed by accumulating the sum of
+c squares in three different sums. the sums of squares for the
+c small and large components are scaled so that no overflows
+c occur. non-destructive underflows are permitted. underflows
+c and overflows do not occur in the computation of the unscaled
+c sum of squares for the intermediate components.
+c the definitions of small, intermediate and large components
+c depend on two constants, rdwarf and rgiant. the main
+c restrictions on these constants are that rdwarf**2 not
+c underflow and rgiant**2 not overflow. the constants
+c given here are suitable for every known computer.
+c
+c the function statement is
+c
+c double precision function enorm(n,x)
+c
+c where
+c
+c n is a positive integer input variable.
+c
+c x is an input array of length n.
+c
+c subprograms called
+c
+c fortran-supplied ... dabs,dsqrt
+c
+c argonne national laboratory. minpack project. march 1980.
+c burton s. garbow, kenneth e. hillstrom, jorge j. more
+c
+c **********
+
+*/
+
+
+ int i;
+// double agiant,floatn,one,rdwarf,rgiant,s1,s2,s3,xabs,
+// x1max,x3max,zero;
+ double agiant,floatn,rdwarf,rgiant,s1,s2,s3,xabs,
+ x1max,x3max;
+ double enorm;
+
+// one = 1.0;
+// zero = 0.0;
+ rdwarf = 3.834e-20;
+ rgiant = 1.304e+19;
+
+ s1 = zero;
+ s2 = zero;
+ s3 = zero;
+ x1max = zero;
+ x3max = zero;
+ floatn = n;
+ agiant = rgiant/floatn;
+
+ for (i = 1; i <= n; i++) {
+
+ xabs = Math.abs(x[i]);
+
+ if (xabs <= rdwarf || xabs >= agiant) {
+
+ if (xabs > rdwarf) {
+
+// Sum for large components.
+
+ if (xabs > x1max) {
+
+ s1 = one + s1*(x1max/xabs)*(x1max/xabs);
+ x1max = xabs;
+
+ } else {
+
+ s1 += (xabs/x1max)*(xabs/x1max);
+
+ }
+
+
+ } else {
+
+// Sum for small components.
+
+ if (xabs > x3max) {
+
+ s3 = one + s3*(x3max/xabs)*(x3max/xabs);
+ x3max = xabs;
+
+ } else {
+
+ if (xabs != zero) s3 += (xabs/x3max)*(xabs/x3max);
+
+ }
+
+ }
+
+ } else {
+
+// Sum for intermediate components.
+
+ s2 += xabs*xabs;
+
+ }
+
+ }
+
+// Calculation of norm.
+
+ if (s1 != zero) {
+
+ enorm = x1max*Math.sqrt(s1+(s2/x1max)/x1max);
+
+ } else {
+
+ if (s2 != zero) {
+
+ if (s2 >= x3max) {
+
+ enorm = Math.sqrt(s2*(one+(x3max/s2)*(x3max*s3)));
+
+ } else {
+
+ enorm = Math.sqrt(x3max*((s2/x3max)+(x3max*s3)));
+
+ }
+
+ } else {
+
+ enorm = x3max*Math.sqrt(s3);
+
+ }
+
+ }
+
+ return enorm;
+
+ }
+
+
+
+
+/**
+*
+*<p>
+*The qrfac_f77 method uses Householder transformations with column
+*pivoting (optional) to compute a QR factorization of the
+*m by n matrix A. That is, qrfac_f77 determines an orthogonal
+*matrix Q, a permutation matrix P, and an upper trapezoidal
+*matrix R with diagonal elements of nonincreasing magnitude,
+*such that AP = QR.
+*<p>
+*Translated by Steve Verrill on November 17, 2000
+*from the FORTRAN MINPACK source produced by Garbow, Hillstrom, and More.<p>
+*
+*
+*@param m The number of rows of A.
+*@param n The number of columns of A.
+*@param a A is an m by n array. On input A contains the matrix for
+* which the QR factorization is to be computed. On output
+* the strict upper trapezoidal part of A contains the strict
+* upper trapezoidal part of R, and the lower trapezoidal
+* part of A contains a factored form of Q.
+*@param pivot pivot is a boolean input variable. If pivot is set true,
+* then column pivoting is enforced. If pivot is set false,
+* then no column pivoting is done.
+*@param ipvt ipvt is an integer output array. ipvt
+* defines the permutation matrix P such that A*P = Q*R.
+* Column j of P is column ipvt[j] of the identity matrix.
+* If pivot is false, ipvt is not referenced.
+*@param rdiag rdiag is an output array of length n which contains the
+* diagonal elements of R.
+*@param acnorm acnorm is an output array of length n which contains the
+* norms of the corresponding columns of the input matrix A.
+*@param wa wa is a work array of length n.
+*
+*
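+*
+*<p>
+*A minimal calling sketch (illustrative only; the matrix entries are made
+*up, and FORTRAN-style indexing is used, so row 0 and column 0 of every
+*array are unused):
+*<pre>
+*   int m = 3, n = 2;
+*   double a[][] = {{0.0, 0.0, 0.0},
+*                   {0.0, 1.0, 2.0},
+*                   {0.0, 3.0, 4.0},
+*                   {0.0, 5.0, 6.0}};
+*   int ipvt[] = new int[n+1];
+*   double rdiag[] = new double[n+1];
+*   double acnorm[] = new double[n+1];
+*   double wa[] = new double[n+1];
+*
+*   Minpack_f77.qrfac_f77(m, n, a, true, ipvt, rdiag, acnorm, wa);
+*
+*   // On return, the upper triangle of a together with rdiag holds R,
+*   // the lower trapezoidal part of a holds the Householder vectors,
+*   // and ipvt records the column permutation.
+*</pre>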
+*/
+
+
+ public static void qrfac_f77(int m, int n, double a[][], boolean pivot,
+ int ipvt[], double rdiag[], double acnorm[],
+ double wa[]) {
+
+/*
+
+Here is a copy of the qrfac FORTRAN documentation:
+
+
+ subroutine qrfac(m,n,a,lda,pivot,ipvt,lipvt,rdiag,acnorm,wa)
+
+ integer m,n,lda,lipvt
+ integer ipvt(lipvt)
+ logical pivot
+ double precision a(lda,n),rdiag(n),acnorm(n),wa(n)
+
+c **********
+c
+c subroutine qrfac
+c
+c this subroutine uses householder transformations with column
+c pivoting (optional) to compute a qr factorization of the
+c m by n matrix a. that is, qrfac determines an orthogonal
+c matrix q, a permutation matrix p, and an upper trapezoidal
+c matrix r with diagonal elements of nonincreasing magnitude,
+c such that a*p = q*r. the householder transformation for
+c column k, k = 1,2,...,min(m,n), is of the form
+c
+c t
+c i - (1/u(k))*u*u
+c
+c where u has zeros in the first k-1 positions. the form of
+c this transformation and the method of pivoting first
+c appeared in the corresponding linpack subroutine.
+c
+c the subroutine statement is
+c
+c subroutine qrfac(m,n,a,lda,pivot,ipvt,lipvt,rdiag,acnorm,wa)
+c
+c where
+c
+c m is a positive integer input variable set to the number
+c of rows of a.
+c
+c n is a positive integer input variable set to the number
+c of columns of a.
+c
+c a is an m by n array. on input a contains the matrix for
+c which the qr factorization is to be computed. on output
+c the strict upper trapezoidal part of a contains the strict
+c upper trapezoidal part of r, and the lower trapezoidal
+c part of a contains a factored form of q (the non-trivial
+c elements of the u vectors described above).
+c
+c lda is a positive integer input variable not less than m
+c which specifies the leading dimension of the array a.
+c
+c pivot is a logical input variable. if pivot is set true,
+c then column pivoting is enforced. if pivot is set false,
+c then no column pivoting is done.
+c
+c ipvt is an integer output array of length lipvt. ipvt
+c defines the permutation matrix p such that a*p = q*r.
+c column j of p is column ipvt(j) of the identity matrix.
+c if pivot is false, ipvt is not referenced.
+c
+c lipvt is a positive integer input variable. if pivot is false,
+c then lipvt may be as small as 1. if pivot is true, then
+c lipvt must be at least n.
+c
+c rdiag is an output array of length n which contains the
+c diagonal elements of r.
+c
+c acnorm is an output array of length n which contains the
+c norms of the corresponding columns of the input matrix a.
+c if this information is not needed, then acnorm can coincide
+c with rdiag.
+c
+c wa is a work array of length n. if pivot is false, then wa
+c can coincide with rdiag.
+c
+c subprograms called
+c
+c minpack-supplied ... dpmpar,enorm
+c
+c fortran-supplied ... dmax1,dsqrt,min0
+c
+c argonne national laboratory. minpack project. march 1980.
+c burton s. garbow, kenneth e. hillstrom, jorge j. more
+c
+c **********
+
+*/
+
+
+ int i,j,jp1,k,kmax,minmn;
+// double ajnorm,one,p05,sum,temp,zero;
+ double ajnorm,sum,temp;
+ double fac;
+
+ double tempvec[] = new double[m+1];
+
+// one = 1.0;
+// p05 = .05;
+// zero = 0.0;
+
+
+// Compute the initial column norms and initialize several arrays.
+
+ for (j = 1; j <= n; j++) {
+
+ for (i = 1; i <= m; i++) {
+
+ tempvec[i] = a[i][j];
+
+ }
+
+// acnorm[j] = Minpack_f77.enorm_f77(m,a[1][j]);
+
+ acnorm[j] = Minpack_f77.enorm_f77(m,tempvec);
+
+ rdiag[j] = acnorm[j];
+ wa[j] = rdiag[j];
+ if (pivot) ipvt[j] = j;
+
+ }
+
+// Reduce A to R with Householder transformations.
+
+ minmn = Math.min(m,n);
+
+ for (j = 1; j <= minmn; j++) {
+
+ if (pivot) {
+
+// Bring the column of largest norm into the pivot position.
+
+ kmax = j;
+
+ for (k = j; k <= n; k++) {
+
+ if (rdiag[k] > rdiag[kmax]) kmax = k;
+
+ }
+
+ if (kmax != j) {
+
+ for (i = 1; i <= m; i++) {
+
+ temp = a[i][j];
+ a[i][j] = a[i][kmax];
+ a[i][kmax] = temp;
+
+ }
+
+ rdiag[kmax] = rdiag[j];
+ wa[kmax] = wa[j];
+ k = ipvt[j];
+ ipvt[j] = ipvt[kmax];
+ ipvt[kmax] = k;
+
+ }
+
+ }
+
+// Compute the Householder transformation to reduce the
+// j-th column of A to a multiple of the j-th unit vector.
+
+ for (i = j; i <= m; i++) {
+
+ tempvec[i - j + 1] = a[i][j];
+
+ }
+
+// ajnorm = Minpack_f77.enorm_f77(m-j+1,a[j][j]);
+ ajnorm = Minpack_f77.enorm_f77(m-j+1,tempvec);
+
+ if (ajnorm != zero) {
+
+ if (a[j][j] < zero) ajnorm = -ajnorm;
+
+ for (i = j; i <= m; i++) {
+
+ a[i][j] /= ajnorm;
+
+ }
+
+ a[j][j] += one;
+
+// Apply the transformation to the remaining columns
+// and update the norms.
+
+ jp1 = j + 1;
+
+ if (n >= jp1) {
+
+ for (k = jp1; k <= n; k++) {
+
+ sum = zero;
+
+ for (i = j; i <= m; i++) {
+
+ sum += a[i][j]*a[i][k];
+
+ }
+
+ temp = sum/a[j][j];
+
+ for (i = j; i <= m; i++) {
+
+ a[i][k] -= temp*a[i][j];
+
+ }
+
+ if (pivot && rdiag[k] != zero) {
+
+ temp = a[j][k]/rdiag[k];
+ rdiag[k] *= Math.sqrt(Math.max(zero,one-temp*temp));
+
+ fac = rdiag[k]/wa[k];
+ if (p05*fac*fac <= epsmch) {
+
+ for (i = jp1; i <= m; i++) {
+
+ tempvec[i - j] = a[i][k];
+
+ }
+
+// rdiag[k] = Minpack_f77.enorm_f77(m-j,a[jp1][k]);
+ rdiag[k] = Minpack_f77.enorm_f77(m-j,tempvec);
+ wa[k] = rdiag[k];
+
+ }
+
+ }
+
+ }
+
+ }
+
+ }
+
+ rdiag[j] = -ajnorm;
+
+ }
+
+ return;
+
+ }
+
+
+
+
+/**
+*
+*<p>
+*Given an m by n matrix A, an n by n diagonal matrix D,
+*and an m-vector b, the problem is to determine an x which
+*solves the system
+*<pre>
+* Ax = b , Dx = 0 ,
+*</pre>
+*in the least squares sense.
+*<p>
+*This method completes the solution of the problem
+*if it is provided with the necessary information from the
+*QR factorization, with column pivoting, of A. That is, if
+*AP = QR, where P is a permutation matrix, Q has orthogonal
+*columns, and R is an upper triangular matrix with diagonal
+*elements of nonincreasing magnitude, then qrsolv_f77 expects
+*the full upper triangle of R, the permutation matrix P,
+*and the first n components of (Q transpose)b. The system
+*<pre>
+* Ax = b, Dx = 0, is then equivalent to
+*
+* Rz = Q^T b, P^T D P z = 0 ,
+*</pre>
+*where x = Pz. If this system does not have full rank,
+*then a least squares solution is obtained. On output qrsolv_f77
+*also provides an upper triangular matrix S such that
+*<pre>
+* P^T (A^T A + D D) P = S^T S .
+*</pre>
+*S is computed within qrsolv_f77 and may be of separate interest.
+*<p>
+*Translated by Steve Verrill on November 17, 2000
+*from the FORTRAN MINPACK source produced by Garbow, Hillstrom, and More.<p>
+*
+*
+*@param n The order of r.
+*@param r r is an n by n array. On input the full upper triangle
+* must contain the full upper triangle of the matrix R.
+* On output the full upper triangle is unaltered, and the
+* strict lower triangle contains the strict upper triangle
+* (transposed) of the upper triangular matrix S.
+*@param ipvt ipvt is an integer input array of length n which defines the
+* permutation matrix P such that AP = QR. Column j of P
+* is column ipvt[j] of the identity matrix.
+*@param diag diag is an input array of length n which must contain the
+* diagonal elements of the matrix D.
+*@param qtb qtb is an input array of length n which must contain the first
+* n elements of the vector (Q transpose)b.
+*@param x x is an output array of length n which contains the least
+* squares solution of the system Ax = b, Dx = 0.
+*@param sdiag sdiag is an output array of length n which contains the
+* diagonal elements of the upper triangular matrix S.
+*@param wa wa is a work array of length n.
+*
+*
+*/
+
+
+ public static void qrsolv_f77(int n, double r[][], int ipvt[],
+ double diag[], double qtb[],
+ double x[], double sdiag[],
+ double wa[]) {
+
+/*
+
+Here is a copy of the qrsolv FORTRAN documentation:
+
+
+ subroutine qrsolv(n,r,ldr,ipvt,diag,qtb,x,sdiag,wa)
+ integer n,ldr
+ integer ipvt(n)
+ double precision r(ldr,n),diag(n),qtb(n),x(n),sdiag(n),wa(n)
+
+c **********
+c
+c subroutine qrsolv
+c
+c given an m by n matrix a, an n by n diagonal matrix d,
+c and an m-vector b, the problem is to determine an x which
+c solves the system
+c
+c a*x = b , d*x = 0 ,
+c
+c in the least squares sense.
+c
+c this subroutine completes the solution of the problem
+c if it is provided with the necessary information from the
+c qr factorization, with column pivoting, of a. that is, if
+c a*p = q*r, where p is a permutation matrix, q has orthogonal
+c columns, and r is an upper triangular matrix with diagonal
+c elements of nonincreasing magnitude, then qrsolv expects
+c the full upper triangle of r, the permutation matrix p,
+c and the first n components of (q transpose)*b. the system
+c a*x = b, d*x = 0, is then equivalent to
+c
+c t t
+c r*z = q *b , p *d*p*z = 0 ,
+c
+c where x = p*z. if this system does not have full rank,
+c then a least squares solution is obtained. on output qrsolv
+c also provides an upper triangular matrix s such that
+c
+c t t t
+c p *(a *a + d*d)*p = s *s .
+c
+c s is computed within qrsolv and may be of separate interest.
+c
+c the subroutine statement is
+c
+c subroutine qrsolv(n,r,ldr,ipvt,diag,qtb,x,sdiag,wa)
+c
+c where
+c
+c n is a positive integer input variable set to the order of r.
+c
+c r is an n by n array. on input the full upper triangle
+c must contain the full upper triangle of the matrix r.
+c on output the full upper triangle is unaltered, and the
+c strict lower triangle contains the strict upper triangle
+c (transposed) of the upper triangular matrix s.
+c
+c ldr is a positive integer input variable not less than n
+c which specifies the leading dimension of the array r.
+c
+c ipvt is an integer input array of length n which defines the
+c permutation matrix p such that a*p = q*r. column j of p
+c is column ipvt(j) of the identity matrix.
+c
+c diag is an input array of length n which must contain the
+c diagonal elements of the matrix d.
+c
+c qtb is an input array of length n which must contain the first
+c n elements of the vector (q transpose)*b.
+c
+c x is an output array of length n which contains the least
+c squares solution of the system a*x = b, d*x = 0.
+c
+c sdiag is an output array of length n which contains the
+c diagonal elements of the upper triangular matrix s.
+c
+c wa is a work array of length n.
+c
+c subprograms called
+c
+c fortran-supplied ... dabs,dsqrt
+c
+c argonne national laboratory. minpack project. march 1980.
+c burton s. garbow, kenneth e. hillstrom, jorge j. more
+c
+c **********
+
+*/
+
+
+ int i,j,jp1,k,kp1,l,nsing;
+// double cos,cotan,p5,p25,qtbpj,sin,sum,tan,temp,zero;
+ double cos,cotan,qtbpj,sin,sum,tan,temp;
+
+// p5 = .5;
+// p25 = .25;
+// zero = 0.0;
+
+// Copy R and (Q transpose)b to preserve input and initialize S.
+// In particular, save the diagonal elements of R in x.
+
+ for (j = 1; j <= n; j++) {
+
+ for (i = j; i <= n; i++) {
+
+ r[i][j] = r[j][i];
+
+ }
+
+ x[j] = r[j][j];
+ wa[j] = qtb[j];
+
+ }
+
+// Eliminate the diagonal matrix D using a Givens rotation.
+
+ for (j = 1; j <= n; j++) {
+
+// Prepare the row of D to be eliminated, locating the
+// diagonal element using P from the QR factorization.
+
+ l = ipvt[j];
+
+ if (diag[l] != zero) {
+
+ for (k = j; k <= n; k++) {
+
+ sdiag[k] = zero;
+
+ }
+
+ sdiag[j] = diag[l];
+
+// The transformations to eliminate the row of D
+// modify only a single element of (Q transpose)b
+// beyond the first n, which is initially zero. ??????
+
+ qtbpj = zero;
+
+ for (k = j; k <= n; k++) {
+
+// Determine a Givens rotation which eliminates the
+// appropriate element in the current row of D.
+
+ if (sdiag[k] != zero) {
+
+ if (Math.abs(r[k][k]) < Math.abs(sdiag[k])) {
+
+ cotan = r[k][k]/sdiag[k];
+ sin = p5/Math.sqrt(p25+p25*cotan*cotan);
+ cos = sin*cotan;
+
+ } else {
+
+ tan = sdiag[k]/r[k][k];
+ cos = p5/Math.sqrt(p25+p25*tan*tan);
+ sin = cos*tan;
+
+ }
+
+// Compute the modified diagonal element of R and
+// the modified element of ((Q transpose)b,0).
+
+ r[k][k] = cos*r[k][k] + sin*sdiag[k];
+ temp = cos*wa[k] + sin*qtbpj;
+ qtbpj = -sin*wa[k] + cos*qtbpj;
+ wa[k] = temp;
+
+// Accumulate the transformation in the row of S.
+
+ kp1 = k + 1;
+
+ for (i = kp1; i <= n; i++) {
+
+ temp = cos*r[i][k] + sin*sdiag[i];
+ sdiag[i] = -sin*r[i][k] + cos*sdiag[i];
+ r[i][k] = temp;
+
+ }
+
+ }
+
+ }
+
+ }
+
+// Store the diagonal element of S and restore
+// the corresponding diagonal element of R.
+
+ sdiag[j] = r[j][j];
+ r[j][j] = x[j];
+
+ }
+
+// Solve the triangular system for z. If the system is
+// singular, then obtain a least squares solution.
+
+ nsing = n;
+
+ for (j = 1; j <= n; j++) {
+
+ if (sdiag[j] == zero && nsing == n) nsing = j - 1;
+ if (nsing < n) wa[j] = zero;
+
+ }
+
+// if (nsing >= 1) {
+
+ for (k = 1; k <= nsing; k++) {
+
+ j = nsing - k + 1;
+ sum = zero;
+ jp1 = j + 1;
+
+// if (nsing >= jp1) {
+
+ for (i = jp1; i <= nsing; i++) {
+
+ sum += r[i][j]*wa[i];
+
+ }
+
+// }
+
+ wa[j] = (wa[j] - sum)/sdiag[j];
+
+ }
+
+// }
+
+// Permute the components of z back to components of x.
+
+ for (j = 1; j <= n; j++) {
+
+ l = ipvt[j];
+ x[l] = wa[j];
+
+ }
+
+ return;
+
+ }
+
+
+
+
+
+/**
+*
+*<p>
+*Given an m by n matrix A, an n by n nonsingular diagonal
+*matrix D, an m-vector b, and a positive number delta,
+*the problem is to determine a value for the parameter
+*par such that if x solves the system
+*<pre>
+* A*x = b , sqrt(par)*D*x = 0
+*</pre>
+*in the least squares sense, and dxnorm is the Euclidean
+*norm of D*x, then either par is zero and
+*<pre>
+* (dxnorm-delta) <= 0.1*delta ,
+*</pre>
+* or par is positive and
+*<pre>
+* abs(dxnorm-delta) <= 0.1*delta .
+*</pre>
+*This method (lmpar_f77) completes the solution of the problem
+*if it is provided with the necessary information from the
+*QR factorization, with column pivoting, of A. That is, if
+*AP = QR, where P is a permutation matrix, Q has orthogonal
+*columns, and R is an upper triangular matrix with diagonal
+*elements of nonincreasing magnitude, then lmpar_f77 expects
+*the full upper triangle of R, the permutation matrix P,
+*and the first n components of (Q transpose)b. On output
+*lmpar_f77 also provides an upper triangular matrix S such that
+*<pre>
+* P^T (A^T A + par*D D) P = S^T S .
+*</pre>
+*S is employed within lmpar_f77 and may be of separate interest.
+*<p>
+*Only a few iterations are generally needed for convergence
+*of the algorithm. If, however, the limit of 10 iterations
+*is reached, then the output par will contain the best
+*value obtained so far.
+*<p>
+*Translated by Steve Verrill on November 17, 2000
+*from the FORTRAN MINPACK source produced by Garbow, Hillstrom, and More.<p>
+*
+*
+*@param n The order of r.
+*@param r r is an n by n array. On input the full upper triangle
+* must contain the full upper triangle of the matrix R.
+* On output the full upper triangle is unaltered, and the
+* strict lower triangle contains the strict upper triangle
+* (transposed) of the upper triangular matrix S.
+*@param ipvt ipvt is an integer input array of length n which defines the
+* permutation matrix P such that AP = QR. Column j of P
+* is column ipvt[j] of the identity matrix.
+*@param diag diag is an input array of length n which must contain the
+* diagonal elements of the matrix D.
+*@param qtb qtb is an input array of length n which must contain the first
+* n elements of the vector (Q transpose)b.
+*@param delta delta is a positive input variable which specifies an upper
+* bound on the Euclidean norm of Dx.
+*@param par par is a nonnegative variable. On input par contains an
+* initial estimate of the Levenberg-Marquardt parameter.
+* On output par contains the final estimate.
+*@param x x is an output array of length n which contains the least
+* squares solution of the system Ax = b, sqrt(par)*Dx = 0,
+* for the output par.
+*@param sdiag sdiag is an output array of length n which contains the
+* diagonal elements of the upper triangular matrix S.
+*@param wa1 wa1 is a work array of length n.
+*@param wa2 wa2 is a work array of length n.
+*
+*
+*/
+
+
+ public static void lmpar_f77(int n, double r[][], int ipvt[],
+ double diag[], double qtb[],
+ double delta, double par[],
+ double x[], double sdiag[],
+ double wa1[], double wa2[]) {
+
+
+/*
+
+Here is a copy of the lmpar FORTRAN documentation:
+
+
+ subroutine lmpar(n,r,ldr,ipvt,diag,qtb,delta,par,x,sdiag,wa1,
+ * wa2)
+ integer n,ldr
+ integer ipvt(n)
+ double precision delta,par
+ double precision r(ldr,n),diag(n),qtb(n),x(n),sdiag(n),wa1(n),
+ * wa2(n)
+
+c **********
+c
+c subroutine lmpar
+c
+c given an m by n matrix a, an n by n nonsingular diagonal
+c matrix d, an m-vector b, and a positive number delta,
+c the problem is to determine a value for the parameter
+c par such that if x solves the system
+c
+c a*x = b , sqrt(par)*d*x = 0 ,
+c
+c in the least squares sense, and dxnorm is the euclidean
+c norm of d*x, then either par is zero and
+c
+c (dxnorm-delta) .le. 0.1*delta ,
+c
+c or par is positive and
+c
+c abs(dxnorm-delta) .le. 0.1*delta .
+c
+c this subroutine completes the solution of the problem
+c if it is provided with the necessary information from the
+c qr factorization, with column pivoting, of a. that is, if
+c a*p = q*r, where p is a permutation matrix, q has orthogonal
+c columns, and r is an upper triangular matrix with diagonal
+c elements of nonincreasing magnitude, then lmpar expects
+c the full upper triangle of r, the permutation matrix p,
+c and the first n components of (q transpose)*b. on output
+c lmpar also provides an upper triangular matrix s such that
+c
+c t t t
+c p *(a *a + par*d*d)*p = s *s .
+c
+c s is employed within lmpar and may be of separate interest.
+c
+c only a few iterations are generally needed for convergence
+c of the algorithm. if, however, the limit of 10 iterations
+c is reached, then the output par will contain the best
+c value obtained so far.
+c
+c the subroutine statement is
+c
+c subroutine lmpar(n,r,ldr,ipvt,diag,qtb,delta,par,x,sdiag,
+c wa1,wa2)
+c
+c where
+c
+c n is a positive integer input variable set to the order of r.
+c
+c r is an n by n array. on input the full upper triangle
+c must contain the full upper triangle of the matrix r.
+c on output the full upper triangle is unaltered, and the
+c strict lower triangle contains the strict upper triangle
+c (transposed) of the upper triangular matrix s.
+c
+c ldr is a positive integer input variable not less than n
+c which specifies the leading dimension of the array r.
+c
+c ipvt is an integer input array of length n which defines the
+c permutation matrix p such that a*p = q*r. column j of p
+c is column ipvt(j) of the identity matrix.
+c
+c diag is an input array of length n which must contain the
+c diagonal elements of the matrix d.
+c
+c qtb is an input array of length n which must contain the first
+c n elements of the vector (q transpose)*b.
+c
+c delta is a positive input variable which specifies an upper
+c bound on the euclidean norm of d*x.
+c
+c par is a nonnegative variable. on input par contains an
+c initial estimate of the levenberg-marquardt parameter.
+c on output par contains the final estimate.
+c
+c x is an output array of length n which contains the least
+c squares solution of the system a*x = b, sqrt(par)*d*x = 0,
+c for the output par.
+c
+c sdiag is an output array of length n which contains the
+c diagonal elements of the upper triangular matrix s.
+c
+c wa1 and wa2 are work arrays of length n.
+c
+c subprograms called
+c
+c minpack-supplied ... dpmpar,enorm,qrsolv
+c
+c fortran-supplied ... dabs,dmax1,dmin1,dsqrt
+c
+c argonne national laboratory. minpack project. march 1980.
+c burton s. garbow, kenneth e. hillstrom, jorge j. more
+c
+c **********
+
+*/
+
+
+ int i,iter,j,jm1,jp1,k,l,nsing;
+
+// double dxnorm,dwarf,fp,gnorm,parc,parl,paru,p1,p001,
+// sum,temp,zero;
+ double dxnorm,dwarf,fp,gnorm,parc,parl,paru,
+ sum,temp;
+
+ boolean loop;
+
+// p1 = .1;
+// p001 = .001;
+// zero = 0.0;
+
+// dwarf is the smallest positive magnitude.
+
+ dwarf = minmag;
+
+// Compute and store in x the Gauss-Newton direction. If the
+// Jacobian is rank-deficient, obtain a least squares solution.
+
+ nsing = n;
+
+ for (j = 1; j <= n; j++) {
+
+ wa1[j] = qtb[j];
+ if (r[j][j] == zero && nsing == n) nsing = j - 1;
+ if (nsing < n) wa1[j] = zero;
+
+ }
+
+// if (nsing >= 1) {
+
+ for (k = 1; k <= nsing; k++) {
+
+ j = nsing - k + 1;
+ wa1[j] /= r[j][j];
+ temp = wa1[j];
+ jm1 = j - 1;
+
+// if (jm1 >= 1) {
+
+ for (i = 1; i <= jm1; i++) {
+
+ wa1[i] -= r[i][j]*temp;
+
+ }
+
+// }
+
+ }
+
+// }
+
+ for (j = 1; j <= n; j++) {
+
+ l = ipvt[j];
+ x[l] = wa1[j];
+
+ }
+
+// Initialize the iteration counter.
+// Evaluate the function at the origin, and test
+// for acceptance of the Gauss-Newton direction.
+
+ iter = 0;
+
+ for (j = 1; j <= n; j++) {
+
+ wa2[j] = diag[j]*x[j];
+
+ }
+
+ dxnorm = Minpack_f77.enorm_f77(n,wa2);
+
+ fp = dxnorm - delta;
+
+ if (fp <= p1*delta) {
+
+ par[1] = zero;
+ return;
+
+ }
+
+// If the Jacobian is not rank deficient, the Newton
+// step provides a lower bound, parl, for the zero of
+// the function. Otherwise set this bound to zero.
+
+ parl = zero;
+
+ if (nsing >= n) {
+
+ for (j = 1; j <= n; j++) {
+
+ l = ipvt[j];
+ wa1[j] = diag[l]*(wa2[l]/dxnorm);
+
+ }
+
+ for (j = 1; j <= n; j++) {
+
+ sum = zero;
+ jm1 = j - 1;
+
+// if (jm1 >= 1) {
+
+ for (i = 1; i <= jm1; i++) {
+
+ sum += r[i][j]*wa1[i];
+
+ }
+
+// }
+
+ wa1[j] = (wa1[j] - sum)/r[j][j];
+
+ }
+
+ temp = Minpack_f77.enorm_f77(n,wa1);
+ parl = ((fp/delta)/temp)/temp;
+
+ }
+
+// Calculate an upper bound, paru, for the zero of the function.
+
+ for (j = 1; j <= n; j++) {
+
+ sum = zero;
+
+ for (i = 1; i <= j; i++) {
+
+ sum += r[i][j]*qtb[i];
+
+ }
+
+ l = ipvt[j];
+ wa1[j] = sum/diag[l];
+
+ }
+
+ gnorm = Minpack_f77.enorm_f77(n,wa1);
+ paru = gnorm/delta;
+
+ if (paru == zero) paru = dwarf/Math.min(delta,p1);
+
+// If the input par lies outside of the interval (parl,paru),
+// set par to the closer endpoint.
+
+ par[1] = Math.max(par[1],parl);
+ par[1] = Math.min(par[1],paru);
+
+ if (par[1] == zero) par[1] = gnorm/dxnorm;
+
+// Beginning of an iteration.
+
+ loop = true;
+
+ while (loop) {
+
+ iter++;
+
+// Evaluate the function at the current value of par.
+
+ if (par[1] == zero) par[1] = Math.max(dwarf,p001*paru);
+ temp = Math.sqrt(par[1]);
+
+ for (j = 1; j <= n; j++) {
+
+ wa1[j] = temp*diag[j];
+
+ }
+
+ Minpack_f77.qrsolv_f77(n,r,ipvt,wa1,qtb,x,sdiag,wa2);
+
+ for (j = 1; j <= n; j++) {
+
+ wa2[j] = diag[j]*x[j];
+
+ }
+
+ dxnorm = Minpack_f77.enorm_f77(n,wa2);
+ temp = fp;
+ fp = dxnorm - delta;
+
+// If the function is small enough, accept the current value
+// of par. Also test for the exceptional cases where parl
+// is zero or the number of iterations has reached 10.
+
+ if (Math.abs(fp) <= p1*delta
+ || parl == zero && fp <= temp
+ && temp < zero || iter == 10) {
+
+// Termination
+
+ if (iter == 0) par[1] = zero;
+ return;
+
+ }
+
+// Compute the Newton correction.
+
+ for (j = 1; j <= n; j++) {
+
+ l = ipvt[j];
+ wa1[j] = diag[l]*(wa2[l]/dxnorm);
+
+ }
+
+ for (j = 1; j <= n; j++) {
+
+ wa1[j] /= sdiag[j];
+ temp = wa1[j];
+ jp1 = j + 1;
+
+ for (i = jp1; i <= n; i++) {
+
+ wa1[i] -= r[i][j]*temp;
+
+ }
+
+ }
+
+ temp = Minpack_f77.enorm_f77(n,wa1);
+ parc = ((fp/delta)/temp)/temp;
+
+// Depending on the sign of the function, update parl or paru.
+
+ if (fp > zero) parl = Math.max(parl,par[1]);
+ if (fp < zero) paru = Math.min(paru,par[1]);
+
+// Compute an improved estimate for par[1].
+
+ par[1] = Math.max(parl,par[1]+parc);
+
+// End of an iteration.
+
+ }
+
+
+ }
+
+
+
+/**
+*
+*<p>
+*The lmdif1_f77 method minimizes the sum of the squares of
+*m nonlinear functions in n variables by a modification of the
+*Levenberg-Marquardt algorithm. This is done by using the more
+*general least-squares solver lmdif. The user must provide a
+*method that calculates the functions. The Jacobian is
+*then calculated by a forward-difference approximation.
+*<p>
+*Translated by Steve Verrill on November 24, 2000
+*from the FORTRAN MINPACK source produced by Garbow, Hillstrom, and More.<p>
+*
+*
+*@param nlls A class that implements the Lmdif_fcn interface
+* (see the definition in Lmdif_fcn.java). See
+* LmdifTest_f77.java for an example of such a class.
+* The class must define a method, fcn, that must
+* have the form
+*
+* public void fcn(int m, int n, double x[],
+* double fvec[], int iflag[])
+*
+* fcn calculates the values of the m functions
+* [residuals] at x and returns this vector in fvec.
+*
+* The value of iflag[1] should not be changed by fcn unless
+* the user wants to terminate execution of lmdif_f77.
+* In this case set iflag[1] to a negative integer.
+*
+*@param m A positive integer set to the number of functions
+* [number of observations]
+*@param n A positive integer set to the number of variables
+* [number of parameters]. n must not exceed m.
+*@param x On input, it contains the initial estimate of
+* the solution vector [the least squares parameters].
+* On output it contains the final estimate of the
+* solution vector.
+*@param fvec An output vector that contains the m functions
+* [residuals] evaluated at x.
+*@param tol tol is a nonnegative input variable. Termination occurs
+* when the algorithm estimates either that the relative
+* error in the sum of squares is at most tol or that
+* the relative error between x and the solution is at
+* most tol.
+*@param info An integer output variable. If the user has
+* terminated execution, info is set to the (negative)
+* value of iflag[1]. See description of fcn. Otherwise,
+* info is set as follows.
+*
+* info = 0 improper input parameters.
+*
+* info = 1 algorithm estimates that the relative error
+* in the sum of squares is at most tol.
+*
+* info = 2 algorithm estimates that the relative error
+* between x and the solution is at most tol.
+*
+* info = 3 conditions for info = 1 and info = 2 both hold.
+*
+* info = 4 fvec is orthogonal to the columns of the
+* Jacobian to machine precision.
+*
+* info = 5 number of calls to fcn has
+* reached or exceeded 200*(n+1).
+*
+* info = 6 tol is too small. No further reduction in
+* the sum of squares is possible.
+*
+* info = 7 tol is too small. No further improvement in
+* the approximate solution x is possible.
+*
+*/
+
+
+ public static void lmdif1_f77(Lmdif_fcn nlls, int m, int n,
+ double x[], double fvec[],
+ double tol, int info[]) {
+
+
+/*
+
+Here is a copy of the lmdif1 FORTRAN documentation:
+
+
+ subroutine lmdif1(fcn,m,n,x,fvec,tol,info,iwa,wa,lwa)
+ integer m,n,info,lwa
+ integer iwa(n)
+ double precision tol
+ double precision x(n),fvec(m),wa(lwa)
+ external fcn
+c **********
+c
+c subroutine lmdif1
+c
+c the purpose of lmdif1 is to minimize the sum of the squares of
+c m nonlinear functions in n variables by a modification of the
+c levenberg-marquardt algorithm. this is done by using the more
+c general least-squares solver lmdif. the user must provide a
+c subroutine which calculates the functions. the jacobian is
+c then calculated by a forward-difference approximation.
+c
+c the subroutine statement is
+c
+c subroutine lmdif1(fcn,m,n,x,fvec,tol,info,iwa,wa,lwa)
+c
+c where
+c
+c fcn is the name of the user-supplied subroutine which
+c calculates the functions. fcn must be declared
+c in an external statement in the user calling
+c program, and should be written as follows.
+c
+c subroutine fcn(m,n,x,fvec,iflag)
+c integer m,n,iflag
+c double precision x(n),fvec(m)
+c ----------
+c calculate the functions at x and
+c return this vector in fvec.
+c ----------
+c return
+c end
+c
+c the value of iflag should not be changed by fcn unless
+c the user wants to terminate execution of lmdif1.
+c in this case set iflag to a negative integer.
+c
+c m is a positive integer input variable set to the number
+c of functions.
+c
+c n is a positive integer input variable set to the number
+c of variables. n must not exceed m.
+c
+c x is an array of length n. on input x must contain
+c an initial estimate of the solution vector. on output x
+c contains the final estimate of the solution vector.
+c
+c fvec is an output array of length m which contains
+c the functions evaluated at the output x.
+c
+c tol is a nonnegative input variable. termination occurs
+c when the algorithm estimates either that the relative
+c error in the sum of squares is at most tol or that
+c the relative error between x and the solution is at
+c most tol.
+c
+c info is an integer output variable. if the user has
+c terminated execution, info is set to the (negative)
+c value of iflag. see description of fcn. otherwise,
+c info is set as follows.
+c
+c info = 0 improper input parameters.
+c
+c info = 1 algorithm estimates that the relative error
+c in the sum of squares is at most tol.
+c
+c info = 2 algorithm estimates that the relative error
+c between x and the solution is at most tol.
+c
+c info = 3 conditions for info = 1 and info = 2 both hold.
+c
+c info = 4 fvec is orthogonal to the columns of the
+c jacobian to machine precision.
+c
+c info = 5 number of calls to fcn has reached or
+c exceeded 200*(n+1).
+c
+c info = 6 tol is too small. no further reduction in
+c the sum of squares is possible.
+c
+c info = 7 tol is too small. no further improvement in
+c the approximate solution x is possible.
+c
+c iwa is an integer work array of length n.
+c
+c wa is a work array of length lwa.
+c
+c lwa is a positive integer input variable not less than
+c m*n+5*n+m.
+c
+c subprograms called
+c
+c user-supplied ...... fcn
+c
+c minpack-supplied ... lmdif
+c
+c argonne national laboratory. minpack project. march 1980.
+c burton s. garbow, kenneth e. hillstrom, jorge j. more
+c
+c **********
+
+*/
+
+
+ int maxfev,mode,nprint;
+
+// double epsfcn,factor,ftol,gtol,xtol,zero;
+ double epsfcn,factor,ftol,gtol,xtol;
+
+ double diag[] = new double[n+1];
+ int nfev[] = new int[2];
+ double fjac[][] = new double[m+1][n+1];
+ int ipvt[] = new int[n+1];
+ double qtf[] = new double[n+1];
+
+
+ factor = 100.0;
+// zero = 0.0;
+
+ info[1] = 0;
+
+// Check the input parameters for errors.
+
+ if (n <= 0 || m < n || tol < zero) {
+
+ return;
+
+ }
+
+// Call lmdif.
+
+ maxfev = 200*(n + 1);
+ ftol = tol;
+ xtol = tol;
+ gtol = zero;
+ epsfcn = zero;
+ mode = 1;
+ nprint = 0;
+
+ Minpack_f77.lmdif_f77(nlls,m,n,x,fvec,ftol,xtol,gtol,maxfev,epsfcn,diag,
+ mode,factor,nprint,info,nfev,fjac,
+ ipvt,qtf);
+
+ if (info[1] == 8) info[1] = 4;
+
+ return;
+
+ }
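+
+// A usage sketch (illustrative only):  the class name CurveFit, the data
+// arrays tdat and ydat, and the model are hypothetical, and the exact
+// interface signature is the one defined in Lmdif_fcn.java.
+//
+//   public class CurveFit implements Lmdif_fcn {
+//
+//      double tdat[],ydat[];    // observed data, elements 1 through m
+//
+//      public void fcn(int m, int n, double x[],
+//                      double fvec[], int iflag[]) {
+//
+//         // residuals of the model y = x[1]*exp(x[2]*t)
+//
+//         for (int i = 1; i <= m; i++) {
+//
+//            fvec[i] = ydat[i] - x[1]*Math.exp(x[2]*tdat[i]);
+//
+//         }
+//
+//      }
+//
+//   }
+//
+// and a corresponding call:
+//
+//   double x[] = new double[n+1];      // initial guess in elements 1..n
+//   double fvec[] = new double[m+1];
+//   int info[] = new int[2];
+//
+//   Minpack_f77.lmdif1_f77(new CurveFit(),m,n,x,fvec,1.0e-10,info);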
+
+
+/**
+*
+*<p>
+*The lmdif_f77 method minimizes the sum of the squares of
+*m nonlinear functions in n variables by a modification of
+*the Levenberg-Marquardt algorithm. The user must provide a
+*method that calculates the functions. The Jacobian is
+*then calculated by a forward-difference approximation.
+*<p>
+*Translated by Steve Verrill on November 20, 2000
+*from the FORTRAN MINPACK source produced by Garbow, Hillstrom, and More.<p>
+*
+*
+*@param nlls A class that implements the Lmdif_fcn interface
+* (see the definition in Lmdif_fcn.java). See
+* LmdifTest_f77.java for an example of such a class.
+* The class must define a method, fcn, that must
+* have the form
+*
+* public static void fcn(int m, int n, double x[],
+* double fvec[], int iflag[])
+*
+* The value of iflag[1] should not be changed by fcn unless
+* the user wants to terminate execution of lmdif_f77.
+* In this case set iflag[1] to a negative integer.
+*
+*@param m A positive integer set to the number of functions
+* [number of observations]
+*@param n A positive integer set to the number of variables
+* [number of parameters]. n must not exceed m.
+*@param x On input, it contains the initial estimate of
+* the solution vector [the least squares parameters].
+* On output it contains the final estimate of the
+* solution vector.
+*@param fvec An output vector that contains the m functions
+* [residuals] evaluated at x.
+*@param ftol A nonnegative input variable. Termination
+* occurs when both the actual and predicted relative
+* reductions in the sum of squares are at most ftol.
+* Therefore, ftol measures the relative error desired
+* in the sum of squares.
+*@param xtol A nonnegative input variable. Termination
+* occurs when the relative error between two consecutive
+* iterates is at most xtol. Therefore, xtol measures the
+* relative error desired in the approximate solution.
+*@param gtol A nonnegative input variable. Termination
+* occurs when the cosine of the angle between fvec and
+* any column of the Jacobian is at most gtol in absolute
+* value. Therefore, gtol measures the orthogonality
+* desired between the function vector and the columns
+* of the Jacobian.
+*@param maxfev A positive integer input variable. Termination
+* occurs when the number of calls to fcn is at least
+* maxfev by the end of an iteration.
+*@param epsfcn An input variable used in determining a suitable
+* step length for the forward-difference approximation. This
+* approximation assumes that the relative errors in the
+* functions are of the order of epsfcn. If epsfcn is less
+* than the machine precision, it is assumed that the relative
+* errors in the functions are of the order of the machine
+* precision.
+*@param diag       A vector of length n.  If mode = 1 (see
+* below), diag is internally set. If mode = 2, diag
+* must contain positive entries that serve as
+* multiplicative scale factors for the variables.
+*@param mode If mode = 1, the
+* variables will be scaled internally. If mode = 2,
+* the scaling is specified by the input diag. Other
+* values of mode are equivalent to mode = 1.
+*@param factor A positive input variable used in determining the
+* initial step bound. This bound is set to the product of
+* factor and the euclidean norm of diag*x if nonzero, or else
+* to factor itself. In most cases factor should lie in the
+* interval (.1,100). 100 is a generally recommended value.
+*@param nprint An integer input variable that enables controlled
+* printing of iterates if it is positive. In this case,
+* fcn is called with iflag[1] = 0 at the beginning of the first
+* iteration and every nprint iterations thereafter and
+* immediately prior to return, with x and fvec
+* available for printing. If nprint is not positive,
+* no special calls of fcn with iflag[1] = 0 are made.
+*@param info An integer output variable. If the user has
+* terminated execution, info is set to the (negative)
+* value of iflag[1]. See description of fcn. Otherwise,
+* info is set as follows.
+*
+* info = 0 improper input parameters.
+*
+* info = 1 both actual and predicted relative reductions
+* in the sum of squares are at most ftol.
+*
+* info = 2 relative error between two consecutive iterates
+* is at most xtol.
+*
+* info = 3 conditions for info = 1 and info = 2 both hold.
+*
+* info = 4 the cosine of the angle between fvec and any
+* column of the Jacobian is at most gtol in
+* absolute value.
+*
+* info = 5 number of calls to fcn with iflag[1] = 1 has
+* reached maxfev.
+*
+* info = 6 ftol is too small. no further reduction in
+* the sum of squares is possible.
+*
+* info = 7 xtol is too small. no further improvement in
+* the approximate solution x is possible.
+*
+* info = 8 gtol is too small. fvec is orthogonal to the
+* columns of the Jacobian to machine precision.
+*
+*@param nfev An integer output variable set to the number of
+* calls to fcn.
+*@param fjac An output m by n array. The upper n by n submatrix
+* of fjac contains an upper triangular matrix R with
+* diagonal elements of nonincreasing magnitude such that
+*
+* t t t
+* P (jac *jac)P = R R,
+*
+* where P is a permutation matrix and jac is the final
+* calculated Jacobian. Column j of P is column ipvt[j]
+* (see below) of the identity matrix. The lower trapezoidal
+* part of fjac contains information generated during
+* the computation of R.
+*@param ipvt An integer output array of length n. ipvt
+* defines a permutation matrix P such that jac*P = QR,
+* where jac is the final calculated Jacobian, Q is
+* orthogonal (not stored), and R is upper triangular
+* with diagonal elements of nonincreasing magnitude.
+* column j of P is column ipvt[j] of the identity matrix.
+*
+*@param qtf An output array of length n which contains
+* the first n elements of the vector (Q transpose)fvec.
+*
+*
+*/
+
+
+ public static void lmdif_f77(Lmdif_fcn nlls, int m, int n,
+ double x[], double fvec[],
+ double ftol,
+ double xtol, double gtol,
+ int maxfev, double epsfcn,
+ double diag[], int mode,
+ double factor, int nprint, int info[],
+ int nfev[], double fjac[][],
+ int ipvt[], double qtf[]) {
+
+
+/*
+
+Here is a copy of the lmdif FORTRAN documentation:
+
+
+ subroutine lmdif(fcn,m,n,x,fvec,ftol,xtol,gtol,maxfev,epsfcn,
+ * diag,mode,factor,nprint,info,nfev,fjac,ldfjac,
+ * ipvt,qtf,wa1,wa2,wa3,wa4)
+ integer m,n,maxfev,mode,nprint,info,nfev,ldfjac
+ integer ipvt(n)
+ double precision ftol,xtol,gtol,epsfcn,factor
+ double precision x(n),fvec(m),diag(n),fjac(ldfjac,n),qtf(n),
+ * wa1(n),wa2(n),wa3(n),wa4(m)
+ external fcn
+c **********
+c
+c subroutine lmdif
+c
+c the purpose of lmdif is to minimize the sum of the squares of
+c m nonlinear functions in n variables by a modification of
+c the levenberg-marquardt algorithm. the user must provide a
+c subroutine which calculates the functions. the jacobian is
+c then calculated by a forward-difference approximation.
+c
+c the subroutine statement is
+c
+c subroutine lmdif(fcn,m,n,x,fvec,ftol,xtol,gtol,maxfev,epsfcn,
+c diag,mode,factor,nprint,info,nfev,fjac,
+c ldfjac,ipvt,qtf,wa1,wa2,wa3,wa4)
+c
+c where
+c
+c fcn is the name of the user-supplied subroutine which
+c calculates the functions. fcn must be declared
+c in an external statement in the user calling
+c program, and should be written as follows.
+c
+c subroutine fcn(m,n,x,fvec,iflag)
+c integer m,n,iflag
+c double precision x(n),fvec(m)
+c ----------
+c calculate the functions at x and
+c return this vector in fvec.
+c ----------
+c return
+c end
+c
+c the value of iflag should not be changed by fcn unless
+c the user wants to terminate execution of lmdif.
+c in this case set iflag to a negative integer.
+c
+c m is a positive integer input variable set to the number
+c of functions.
+c
+c n is a positive integer input variable set to the number
+c of variables. n must not exceed m.
+c
+c x is an array of length n. on input x must contain
+c an initial estimate of the solution vector. on output x
+c contains the final estimate of the solution vector.
+c
+c fvec is an output array of length m which contains
+c the functions evaluated at the output x.
+c
+c ftol is a nonnegative input variable. termination
+c occurs when both the actual and predicted relative
+c reductions in the sum of squares are at most ftol.
+c therefore, ftol measures the relative error desired
+c in the sum of squares.
+c
+c xtol is a nonnegative input variable. termination
+c occurs when the relative error between two consecutive
+c iterates is at most xtol. therefore, xtol measures the
+c relative error desired in the approximate solution.
+c
+c gtol is a nonnegative input variable. termination
+c occurs when the cosine of the angle between fvec and
+c any column of the jacobian is at most gtol in absolute
+c value. therefore, gtol measures the orthogonality
+c desired between the function vector and the columns
+c of the jacobian.
+c
+c maxfev is a positive integer input variable. termination
+c occurs when the number of calls to fcn is at least
+c maxfev by the end of an iteration.
+c
+c epsfcn is an input variable used in determining a suitable
+c step length for the forward-difference approximation. this
+c approximation assumes that the relative errors in the
+c functions are of the order of epsfcn. if epsfcn is less
+c than the machine precision, it is assumed that the relative
+c errors in the functions are of the order of the machine
+c precision.
+c
+c diag is an array of length n. if mode = 1 (see
+c below), diag is internally set. if mode = 2, diag
+c must contain positive entries that serve as
+c multiplicative scale factors for the variables.
+c
+c mode is an integer input variable. if mode = 1, the
+c variables will be scaled internally. if mode = 2,
+c the scaling is specified by the input diag. other
+c values of mode are equivalent to mode = 1.
+c
+c factor is a positive input variable used in determining the
+c initial step bound. this bound is set to the product of
+c factor and the euclidean norm of diag*x if nonzero, or else
+c to factor itself. in most cases factor should lie in the
+c interval (.1,100.). 100. is a generally recommended value.
+c
+c nprint is an integer input variable that enables controlled
+c printing of iterates if it is positive. in this case,
+c fcn is called with iflag = 0 at the beginning of the first
+c iteration and every nprint iterations thereafter and
+c immediately prior to return, with x and fvec available
+c for printing. if nprint is not positive, no special calls
+c of fcn with iflag = 0 are made.
+c
+c info is an integer output variable. if the user has
+c terminated execution, info is set to the (negative)
+c value of iflag. see description of fcn. otherwise,
+c info is set as follows.
+c
+c info = 0 improper input parameters.
+c
+c info = 1 both actual and predicted relative reductions
+c in the sum of squares are at most ftol.
+c
+c info = 2 relative error between two consecutive iterates
+c is at most xtol.
+c
+c info = 3 conditions for info = 1 and info = 2 both hold.
+c
+c info = 4 the cosine of the angle between fvec and any
+c column of the jacobian is at most gtol in
+c absolute value.
+c
+c info = 5 number of calls to fcn has reached or
+c exceeded maxfev.
+c
+c info = 6 ftol is too small. no further reduction in
+c the sum of squares is possible.
+c
+c info = 7 xtol is too small. no further improvement in
+c the approximate solution x is possible.
+c
+c info = 8 gtol is too small. fvec is orthogonal to the
+c columns of the jacobian to machine precision.
+c
+c nfev is an integer output variable set to the number of
+c calls to fcn.
+c
+c fjac is an output m by n array. the upper n by n submatrix
+c of fjac contains an upper triangular matrix r with
+c diagonal elements of nonincreasing magnitude such that
+c
+c t t t
+c p *(jac *jac)*p = r *r,
+c
+c where p is a permutation matrix and jac is the final
+c calculated jacobian. column j of p is column ipvt(j)
+c (see below) of the identity matrix. the lower trapezoidal
+c part of fjac contains information generated during
+c the computation of r.
+c
+c ldfjac is a positive integer input variable not less than m
+c which specifies the leading dimension of the array fjac.
+c
+c ipvt is an integer output array of length n. ipvt
+c defines a permutation matrix p such that jac*p = q*r,
+c where jac is the final calculated jacobian, q is
+c orthogonal (not stored), and r is upper triangular
+c with diagonal elements of nonincreasing magnitude.
+c column j of p is column ipvt(j) of the identity matrix.
+c
+c qtf is an output array of length n which contains
+c the first n elements of the vector (q transpose)*fvec.
+c
+c wa1, wa2, and wa3 are work arrays of length n.
+c
+c wa4 is a work array of length m.
+c
+c subprograms called
+c
+c user-supplied ...... fcn
+c
+c minpack-supplied ... dpmpar,enorm,fdjac2,lmpar,qrfac
+c
+c fortran-supplied ... dabs,dmax1,dmin1,dsqrt,mod
+c
+c argonne national laboratory. minpack project. march 1980.
+c burton s. garbow, kenneth e. hillstrom, jorge j. more
+c
+c **********
+
+*/
+
+ int i,iter,j,l;
+
+// double actred,delta,dirder,fnorm,fnorm1,gnorm,
+// one,pnorm,prered,p1,p5,p25,p75,p0001,ratio,
+// sum,temp,temp1,temp2,xnorm,zero;
+ double actred,delta,dirder,fnorm,fnorm1,gnorm,
+ pnorm,prered,ratio,
+ sum,temp,temp1,temp2,xnorm;
+
+ double par[] = new double[2];
+
+ boolean doneout,donein;
+
+ int iflag[] = new int[2];
+ double wa1[] = new double[n+1];
+ double wa2[] = new double[n+1];
+ double wa3[] = new double[n+1];
+ double wa4[] = new double[m+1];
+
+// The Java compiler complains if delta and xnorm have not been
+// initialized.
+
+ delta = 0.0;
+ xnorm = 0.0;
+
+// one = 1.0;
+// p1 = .1;
+// p25 = .25;
+// p5 = .5;
+// p75 = .75;
+// p0001 = .0001;
+// zero = 0.0;
+
+ info[1] = 0;
+ iflag[1] = 0;
+ nfev[1] = 0;
+
+// Check the input parameters for errors.
+
+ if (n <= 0 || m < n
+ || ftol < zero || xtol < zero || gtol < zero
+ || maxfev <= 0 || factor <= zero) {
+
+// Termination
+
+ if (nprint > 0) {
+
+ nlls.fcn(m,n,x,fvec,iflag);
+
+ }
+
+ return;
+
+ }
+
+ if (mode == 2) {
+
+ for (j = 1; j <= n; j++) {
+
+ if (diag[j] <= zero) {
+
+// Termination
+
+ if (nprint > 0) {
+
+ nlls.fcn(m,n,x,fvec,iflag);
+
+ }
+
+ return;
+
+ }
+
+ }
+
+ }
+
+// Evaluate the function at the starting point
+// and calculate its norm.
+
+ iflag[1] = 1;
+
+ nlls.fcn(m,n,x,fvec,iflag);
+
+ nfev[1] = 1;
+
+ if (iflag[1] < 0) {
+
+// Termination
+
+ info[1] = iflag[1];
+ iflag[1] = 0;
+
+ if (nprint > 0) {
+
+ nlls.fcn(m,n,x,fvec,iflag);
+
+ }
+
+ return;
+
+ }
+
+ fnorm = Minpack_f77.enorm_f77(m,fvec);
+
+// Initialize Levenberg-Marquardt parameter and iteration counter.
+
+ par[1] = zero;
+ iter = 1;
+
+// Beginning of the outer loop.
+
+ doneout = false;
+
+ while (!doneout) {
+
+// Calculate the Jacobian matrix.
+
+ iflag[1] = 2;
+
+ Minpack_f77.fdjac2_f77(nlls,m,n,x,fvec,fjac,iflag,epsfcn,wa4);
+
+ nfev[1] += n;
+
+ if (iflag[1] < 0) {
+
+// Termination
+
+ info[1] = iflag[1];
+ iflag[1] = 0;
+
+ if (nprint > 0) {
+
+ nlls.fcn(m,n,x,fvec,iflag);
+
+ }
+
+ return;
+
+ }
+
+// If requested, call fcn to enable printing of iterates.
+
+ if (nprint > 0) {
+
+ iflag[1] = 0;
+
+ if ((iter-1)%nprint == 0) {
+
+ nlls.fcn(m,n,x,fvec,iflag);
+
+ }
+
+ if (iflag[1] < 0) {
+
+// Termination
+
+ info[1] = iflag[1];
+ iflag[1] = 0;
+
+ nlls.fcn(m,n,x,fvec,iflag);
+
+ return;
+
+ }
+
+ }
+
+// Compute the qr factorization of the Jacobian.
+
+ Minpack_f77.qrfac_f77(m,n,fjac,true,ipvt,wa1,wa2,wa3);
+
+// On the first iteration and if mode is 1, scale according
+// to the norms of the columns of the initial Jacobian.
+
+ if (iter == 1) {
+
+ if (mode != 2) {
+
+ for (j = 1; j <= n; j++) {
+
+ diag[j] = wa2[j];
+
+ if (wa2[j] == zero) diag[j] = one;
+
+ }
+
+ }
+
+// On the first iteration, calculate the norm of the scaled x
+// and initialize the step bound delta.
+
+ for (j = 1; j <= n; j++) {
+
+ wa3[j] = diag[j]*x[j];
+
+ }
+
+ xnorm = Minpack_f77.enorm_f77(n,wa3);
+
+ delta = factor*xnorm;
+
+ if (delta == zero) delta = factor;
+
+ }
+
+// Form (q transpose)*fvec and store the first n components in
+// qtf.
+
+ for (i = 1; i <= m; i++) wa4[i] = fvec[i];
+
+ for (j = 1; j <= n; j++) {
+
+ if (fjac[j][j] != zero) {
+
+ sum = zero;
+
+ for (i = j; i <= m; i++) sum += fjac[i][j]*wa4[i];
+
+ temp = -sum/fjac[j][j];
+
+ for (i = j; i <= m; i++) wa4[i] += fjac[i][j]*temp;
+
+ }
+
+ fjac[j][j] = wa1[j];
+ qtf[j] = wa4[j];
+
+ }
+
+// Compute the norm of the scaled gradient.
+
+ gnorm = zero;
+
+ if (fnorm != zero) {
+
+ for (j = 1; j <= n; j++) {
+
+ l = ipvt[j];
+
+ if (wa2[l] != zero) {
+
+ sum = zero;
+
+ for (i = 1; i <= j; i++) sum +=
+ fjac[i][j]*(qtf[i]/fnorm);
+
+ gnorm = Math.max(gnorm,Math.abs(sum/wa2[l]));
+
+ }
+
+ }
+
+ }
+
+// Test for convergence of the gradient norm.
+
+ if (gnorm <= gtol) info[1] = 4;
+
+ if (info[1] != 0) {
+
+// Termination
+
+ if (iflag[1] < 0) info[1] = iflag[1];
+ iflag[1] = 0;
+
+ if (nprint > 0) {
+
+ nlls.fcn(m,n,x,fvec,iflag);
+
+ }
+
+ return;
+
+ }
+
+// Rescale if necessary.
+
+ if (mode != 2) {
+
+ for (j = 1; j <= n; j++) {
+
+ diag[j] = Math.max(diag[j],wa2[j]);
+
+ }
+
+ }
+
+// Beginning of the inner loop.
+
+ donein = false;
+
+ while (!donein) {
+
+// Determine the Levenberg-Marquardt parameter.
+
+ Minpack_f77.lmpar_f77(n,fjac,ipvt,diag,qtf,delta,par,wa1,wa2,
+ wa3,wa4);
+
+// Store the direction p and x + p. Calculate the norm of p.
+
+ for (j = 1; j <= n; j++) {
+
+ wa1[j] = -wa1[j];
+ wa2[j] = x[j] + wa1[j];
+ wa3[j] = diag[j]*wa1[j];
+
+ }
+
+ pnorm = Minpack_f77.enorm_f77(n,wa3);
+
+// On the first iteration, adjust the initial step bound.
+
+ if (iter == 1) delta = Math.min(delta,pnorm);
+
+// Evaluate the function at x + p and calculate its norm.
+
+ iflag[1] = 1;
+
+ nlls.fcn(m,n,wa2,wa4,iflag);
+
+ nfev[1]++;
+
+ if (iflag[1] < 0) {
+
+// Termination
+
+ info[1] = iflag[1];
+ iflag[1] = 0;
+
+ if (nprint > 0) {
+
+ nlls.fcn(m,n,x,fvec,iflag);
+
+ }
+
+ return;
+
+ }
+
+ fnorm1 = Minpack_f77.enorm_f77(m,wa4);
+
+// Compute the scaled actual reduction.
+
+ actred = -one;
+
+ if (p1*fnorm1 < fnorm) actred = one - (fnorm1/fnorm)*(fnorm1/fnorm);
+
+// Compute the scaled predicted reduction and
+// the scaled directional derivative.
+
+ for (j = 1; j <= n; j++) {
+
+ wa3[j] = zero;
+ l = ipvt[j];
+ temp = wa1[l];
+
+ for (i = 1; i <= j; i++) wa3[i] += fjac[i][j]*temp;
+
+ }
+
+ temp1 = Minpack_f77.enorm_f77(n,wa3)/fnorm;
+ temp2 = (Math.sqrt(par[1])*pnorm)/fnorm;
+
+ prered = temp1*temp1 + temp2*temp2/p5;
+ dirder = -(temp1*temp1 + temp2*temp2);
+
+// Compute the ratio of the actual to the predicted
+// reduction.
+
+ ratio = zero;
+ if (prered != zero) ratio = actred/prered;
+
+// Update the step bound.
+
+ if (ratio <= p25) {
+
+ if (actred >= zero) {
+
+ temp = p5;
+
+ } else {
+
+ temp = p5*dirder/(dirder + p5*actred);
+
+ }
+
+ if (p1*fnorm1 >= fnorm || temp < p1) temp = p1;
+
+ delta = temp*Math.min(delta,pnorm/p1);
+
+ par[1] /= temp;
+
+ } else {
+
+ if (par[1] == zero || ratio >= p75) {
+
+ delta = pnorm/p5;
+ par[1] *= p5;
+
+ }
+
+ }
+
+
+// Test for successful iteration.
+
+ if (ratio >= p0001) {
+
+// Successful iteration. Update x, fvec, and their norms.
+
+ for (j = 1; j <= n; j++) {
+
+ x[j] = wa2[j];
+ wa2[j] = diag[j]*x[j];
+
+ }
+
+ for (i = 1; i <= m; i++) fvec[i] = wa4[i];
+
+ xnorm = Minpack_f77.enorm_f77(n,wa2);
+
+ fnorm = fnorm1;
+
+ iter++;
+
+ }
+
+// Tests for convergence.
+
+ if (Math.abs(actred) <= ftol && prered <= ftol
+ && p5*ratio <= one) info[1] = 1;
+
+ if (delta <= xtol*xnorm) info[1] = 2;
+
+ if (Math.abs(actred) <= ftol && prered <= ftol
+ && p5*ratio <= one && info[1] == 2) info[1] = 3;
+
+ if (info[1] != 0) {
+
+// Termination
+
+ if (iflag[1] < 0) info[1] = iflag[1];
+ iflag[1] = 0;
+
+ if (nprint > 0) {
+
+ nlls.fcn(m,n,x,fvec,iflag);
+
+ }
+
+ return;
+
+ }
+
+// Tests for termination and stringent tolerances.
+
+ if (nfev[1] >= maxfev) info[1] = 5;
+
+ if (Math.abs(actred) <= epsmch && prered <= epsmch
+ && p5*ratio <= one) info[1] = 6;
+
+ if (delta <= epsmch*xnorm) info[1] = 7;
+
+ if (gnorm <= epsmch) info[1] = 8;
+
+ if (info[1] != 0) {
+
+// Termination
+
+ if (iflag[1] < 0) info[1] = iflag[1];
+ iflag[1] = 0;
+
+ if (nprint > 0) {
+
+ nlls.fcn(m,n,x,fvec,iflag);
+
+ }
+
+ return;
+
+ }
+
+// End of the inner loop. Repeat if iteration unsuccessful.
+
+ if (ratio >= p0001) donein = true;
+
+ }
+
+// End of the outer loop.
+
+ }
+
+ }
+
+
+/**
+*
+*<p>
+*The fdjac2 method computes a forward-difference approximation
+*to the m by n Jacobian matrix associated with a specified
+*problem of m functions in n variables.
+*<p>
+*Translated by Steve Verrill on November 24, 2000
+*from the FORTRAN MINPACK source produced by Garbow, Hillstrom, and More.<p>
+*
+*
+*@param nlls A class that implements the Lmdif_fcn interface
+* (see the definition in Lmdif_fcn.java). See
+* LmdifTest_f77.java for an example of such a class.
+* The class must define a method, fcn, that must
+* have the form
+*
+* public static void fcn(int m, int n, double x[],
+* double fvec[], int iflag[])
+*
+* The value of iflag[1] should not be changed by fcn unless
+* the user wants to terminate execution of fdjac2_f77.
+* In this case iflag[1] should be set to a negative integer.
+*@param m A positive integer set to the number of functions
+* [number of observations]
+*@param n A positive integer set to the number of variables
+* [number of parameters]. n must not exceed m.
+*@param x An input array.
+*@param fvec An input array that contains the functions
+* evaluated at x.
+*@param fjac An output m by n array that contains the
+* approximation to the Jacobian matrix evaluated at x.
+*@param iflag An integer variable that can be used to terminate
+* the execution of fdjac2. See the description of nlls.
+*@param epsfcn An input variable used in determining a suitable
+* step length for the forward-difference approximation. This
+* approximation assumes that the relative errors in the
+* functions are of the order of epsfcn. If epsfcn is less
+* than the machine precision, it is assumed that the relative
+* errors in the functions are of the order of the machine
+* precision.
+*@param wa A work array.
+*
+*/
+
+
+ public static void fdjac2_f77(Lmdif_fcn nlls, int m, int n,
+ double x[], double fvec[],
+ double fjac[][], int iflag[],
+ double epsfcn, double wa[]) {
+
+
+/*
+
+Here is a copy of the fdjac2 FORTRAN documentation:
+
+
+ subroutine fdjac2(fcn,m,n,x,fvec,fjac,ldfjac,iflag,epsfcn,wa)
+ integer m,n,ldfjac,iflag
+ double precision epsfcn
+ double precision x(n),fvec(m),fjac(ldfjac,n),wa(m)
+c **********
+c
+c subroutine fdjac2
+c
+c this subroutine computes a forward-difference approximation
+c to the m by n jacobian matrix associated with a specified
+c problem of m functions in n variables.
+c
+c the subroutine statement is
+c
+c subroutine fdjac2(fcn,m,n,x,fvec,fjac,ldfjac,iflag,epsfcn,wa)
+c
+c where
+c
+c fcn is the name of the user-supplied subroutine which
+c calculates the functions. fcn must be declared
+c in an external statement in the user calling
+c program, and should be written as follows.
+c
+c subroutine fcn(m,n,x,fvec,iflag)
+c integer m,n,iflag
+c double precision x(n),fvec(m)
+c ----------
+c calculate the functions at x and
+c return this vector in fvec.
+c ----------
+c return
+c end
+c
+c the value of iflag should not be changed by fcn unless
+c the user wants to terminate execution of fdjac2.
+c in this case set iflag to a negative integer.
+c
+c m is a positive integer input variable set to the number
+c of functions.
+c
+c n is a positive integer input variable set to the number
+c of variables. n must not exceed m.
+c
+c x is an input array of length n.
+c
+c fvec is an input array of length m which must contain the
+c functions evaluated at x.
+c
+c fjac is an output m by n array which contains the
+c approximation to the jacobian matrix evaluated at x.
+c
+c ldfjac is a positive integer input variable not less than m
+c which specifies the leading dimension of the array fjac.
+c
+c iflag is an integer variable which can be used to terminate
+c the execution of fdjac2. see description of fcn.
+c
+c epsfcn is an input variable used in determining a suitable
+c step length for the forward-difference approximation. this
+c approximation assumes that the relative errors in the
+c functions are of the order of epsfcn. if epsfcn is less
+c than the machine precision, it is assumed that the relative
+c errors in the functions are of the order of the machine
+c precision.
+c
+c wa is a work array of length m.
+c
+c subprograms called
+c
+c user-supplied ...... fcn
+c
+c minpack-supplied ... dpmpar
+c
+c fortran-supplied ... dabs,dmax1,dsqrt
+c
+c argonne national laboratory. minpack project. march 1980.
+c burton s. garbow, kenneth e. hillstrom, jorge j. more
+c
+c **********
+
+*/
+
+
+ int i,j;
+// double eps,h,temp,zero;
+ double eps,h,temp;
+
+// zero = 0.0;
+
+ eps = Math.sqrt(Math.max(epsfcn,epsmch));
+
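+// Each column of the Jacobian is approximated by a forward difference,
+//
+//    fjac[i][j] ~ (f_i(x + h*e_j) - f_i(x))/h,
+//
+// where e_j is the j-th coordinate vector and h = eps*|x[j]|
+// (or h = eps when x[j] is zero).
+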
+ for (j = 1; j <= n; j++) {
+
+ temp = x[j];
+ h = eps*Math.abs(temp);
+
+ if (h == zero) h = eps;
+
+ x[j] = temp + h;
+
+ nlls.fcn(m,n,x,wa,iflag);
+
+ if (iflag[1] < 0) {
+
+ return;
+
+ }
+
+ x[j] = temp;
+
+ for (i = 1; i <= m; i++) {
+
+ fjac[i][j] = (wa[i] - fvec[i])/h;
+
+ }
+
+ }
+
+ return;
+
+ }
+
+
+
+}
diff --git a/README b/README
new file mode 100644
index 0000000..2eb4494
--- /dev/null
+++ b/README
@@ -0,0 +1,214 @@
+Nonlinear Optimization Java Package
+-----------------------------------
+
+(see http://ws13.fpl.fs.fed.us/optimization.html for
+an html version of this document)
+
+Currently (as of 2/6/02) this package contains Java translations
+of the 1-dimensional minimization routine, fmin, the
+multi-dimensional minimization routine UNCMIN, the MINPACK
+nonlinear least squares routines (lmder1, lmder, lmdif1, and lmdif),
+and the SLATEC 1-dimensional zero-finding routine, dfzero.
+Eventually (sometime in 2002?), the package will also contain
+Java translations of some of the MINPACK nonlinear equation
+solvers.
+
+The 1-dimensional minimization routine is an unofficial
+Java translation of the FORTRAN version of the public domain
+fmin routine that can be found at http://www.netlib.org/go/fmin.f.
+
+The multi-dimensional minimization routine is an unofficial
+Java translation of the FORTRAN version of the public domain
+UNCMIN package that can be found at http://gams.nist.gov.
+
+The nonlinear least squares routines are unofficial
+Java translations of the FORTRAN versions of the public domain
+MINPACK nonlinear least squares routines that can be found
+at http://www.netlib.org/minpack.
+
+The 1-dimensional zero-finding routine is an unofficial
+Java translation of the FORTRAN version of the public domain
+SLATEC dfzero routine that can be found at netlib. Either go to
+http://www.netlib.org and do a search for dfzero.f, or go to
+http://www.netlib.org/slatec and download the entire SLATEC library.
+
+
+Warning!!!
+----------
+
+1.) These routines were translated by a user of numerical analysis
+routines rather than a developer. When Java optimization routines
+written by professional numerical analysts become
+available, they should be used.
+
+2.) I have tried to be careful with the coding and have done some
+simple testing (see the FminTest, UncminTest_f77, LmderTest_f77,
+LmdifTest_f77, and FzeroTest applications).
+However, it is quite possible that I have introduced
+errors into the routines in the course of the translations.
+If you detect bugs in fmin, UNCMIN, MINPACK, or fzero,
+PLEASE contact me (Steve Verrill at steve at ws13.fpl.fs.fed.us
+or 608-231-9375). Note that for "pathological" problems,
+results can depend upon programming details or the version
+of the Java Developer's Kit. See
+http://ws13.fpl.fs.fed.us/minpack.prob.html for details.
+
+3.) Uncmin_f77.java, UncminTest_f77.java, Minpack_f77.java,
+LmderTest_f77.java, and LmdifTest_f77.java make use of FORTRAN style
+indexing. In these you will tend to see
+
+ for (i = 1; i <= n; i++)
+
+rather than
+
+ for (i = 0; i < n; i++)
+
+To use the "_f77" routines you will have to declare your vectors
+and matrices to be one element larger (e.g., v[101] rather than
+v[100], and a[101][101] rather than a[100][100]), and you will have
+to fill elements 1 through n rather than elements 0 through n - 1.
+See, for example, UncminTest_f77.java.
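+
+As a small illustration (not code from the package itself), a starting
+point for a problem in n = 3 variables would be set up as
+
+   double x[] = new double[4];   // element 0 is deliberately left unused
+
+   x[1] = -1.2;
+   x[2] =  1.0;
+   x[3] =  0.5;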
+
+Eventually, classes that make use of Java/C style indexing might be
+made available (actually, this is unlikely).
+
+
+DISCLAIMER OF WARRANTIES
+------------------------
+
+THIS SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND.
+THE AUTHOR/TRANSLATOR DOES NOT WARRANT, GUARANTEE OR MAKE ANY
+REPRESENTATIONS REGARDING THE SOFTWARE OR DOCUMENTATION IN TERMS
+OF THEIR CORRECTNESS, RELIABILITY, CURRENTNESS, OR OTHERWISE.
+THE ENTIRE RISK AS TO THE RESULTS AND PERFORMANCE OF THE SOFTWARE
+IS ASSUMED BY YOU. IN NO CASE WILL ANY PARTY INVOLVED WITH THE
+CREATION OR DISTRIBUTION OF THE SOFTWARE BE LIABLE FOR ANY DAMAGE
+THAT MAY RESULT FROM THE USE OF THIS SOFTWARE.
+
+Sorry about that.
+
+
+Documentation
+-------------
+
+The documentation generated by javadoc for these routines can be
+viewed at
+
+http://www1.fpl.fs.fed.us/Package-optimization.html
+
+and
+
+http://www1.fpl.fs.fed.us/Package-linear_algebra.html
+
+Also see
+1.) the source code of Uncmin_f77.java
+2.) the source code of Minpack_f77.java
+3.) R.B. Schnabel, J.E. Koontz, and B.E. Weiss,
+"A Modular System of Algorithms for Unconstrained Minimization,"
+Report CU-CS-240-82, Comp. Sci. Dept., University of Colorado at
+Boulder, 1982
+4.) J.E. Dennis and R.B. Schnabel, Numerical Methods
+for Unconstrained Optimization and Nonlinear Equations, 1983
+
+
+Installation
+------------
+
+After you have uncompressed and untarred the
+src.tar.Z file you should have 35 new files:
+
+1.) This README file.
+2.) Fmin_methods.java
+3.) Fmin.java
+4.) FminTest.java
+5.) Uncmin_methods.java
+6.) Uncmin_f77.java
+7.) UncminTest_f77.java
+8.) Lmder_fcn.java
+9.) LmderTest_f77.java
+10.) Lmdif_fcn.java
+11.) LmdifTest_f77.java
+12.) Minpack_f77.java
+13.) Fzero_methods.java
+14.) Fzero.java
+15.) FzeroTest.java
+16.) Blas_f77.java
+17.) Console.java
+18.) Fmin_methods.class
+19.) Fmin.class
+20.) FminTest.class
+21.) Uncmin_methods.class
+22.) Uncmin_f77.class
+23.) UncminTest_f77.class
+24.) Lmder_fcn.class
+25.) LmderTest_f77.class
+26.) Lmdif_fcn.class
+27.) LmdifTest_f77.class
+28.) Minpack_f77.class
+29.) Fzero_methods.class
+30.) Fzero.class
+31.) FzeroTest.class
+32.) Blas_f77.class
+33.) Console.class
+34.) disclaimer
+35.) copyright
+
+Please read the disclaimer and copyright files.
+
+Given the manner in which the routines are currently written,
+you will need to import them (see the beginning of
+UncminTest_f77.java). All of the classes will have to be placed
+in subdirectories of a directory in your CLASSPATH.
+For the optimization classes, this subdirectory will
+have to be called optimization. For the Blas_f77 class, this
+subdirectory will have to be called linear_algebra.
+Console.class will have to be placed in a
+subdirectory called corejava.
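+
+For example, if /home/me/java (a hypothetical directory) is on your
+CLASSPATH, the layout would look roughly like this:
+
+   /home/me/java/optimization/Minpack_f77.class
+   /home/me/java/optimization/Uncmin_f77.class
+   /home/me/java/optimization/Fmin.class
+   /home/me/java/linear_algebra/Blas_f77.class
+   /home/me/java/corejava/Console.class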
+
+Note that if you install the software in this manner,
+then to run, for example, FminTest you will have to issue the command:
+
+java optimization.FminTest
+
+Alternatively, you could remove the package and import statements
+(other than import java.lang.*) in the source code,
+and compile all of the necessary files in the same directory.
+
+To run Fmin you will need to write a driver
+class such as FminTest that implements the Fmin_methods
+interface. This class will have to include an f_to_minimize method.
+
+To run Uncmin_f77 you will need to write a driver
+class such as UncminTest_f77 that implements the Uncmin_methods
+interface. This class will have to include an f_to_minimize method
+and, at the least, dummy gradient and hessian methods.
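+
+A rough sketch of such a class (the name and the function are only
+illustrations; the exact method signatures are given in
+Uncmin_methods.java):
+
+   public class Rosen implements Uncmin_methods {
+
+      public double f_to_minimize(double x[]) {
+
+         // Rosenbrock's function, FORTRAN-style indexing
+
+         return 100.0*(x[2] - x[1]*x[1])*(x[2] - x[1]*x[1]) +
+                (1.0 - x[1])*(1.0 - x[1]);
+
+      }
+
+      public void gradient(double x[], double g[]) {
+      }
+
+      public void hessian(double x[], double h[][]) {
+      }
+
+   }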
+
+To run Minpack_f77 you will need to write a driver
+class such as LmderTest_f77 or LmdifTest_f77 that implements
+the Lmder_fcn or Lmdif_fcn
+interface. This class will have to include an fcn method.
+
+To run Fzero you will need to write a driver
+class such as FzeroTest that implements the Fzero_methods
+interface. This class will have to include an f_to_zero method.
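+
+(As a tiny illustration, and not code from the package, a class whose
+zero is the square root of 2 could define something like
+
+   public double f_to_zero(double x) {
+
+      return x*x - 2.0;
+
+   }
+
+with the exact signature taken from Fzero_methods.java.)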
+
+
+Console.java
+------------
+
+Console.java is software described in Cornell
+and Horstmann's Core Java (SunSoft Press/Prentice-Hall).
+(I like this book.) It may be used for non-commercial purposes.
+I only make use of it in the testing routines.
+
+
+Support
+-------
+
+If you have questions about this software,
+or suggestions for improvement, please contact me
+(Steve Verrill) at steve at ws13.fpl.fs.fed.us
+or 608-231-9375.
+
+
+Last modified on 2/6/02.
diff --git a/Uncmin_f77.java b/Uncmin_f77.java
new file mode 100644
index 0000000..137dfcb
--- /dev/null
+++ b/Uncmin_f77.java
@@ -0,0 +1,5886 @@
+/*
+ Uncmin_f77.java copyright claim:
+
+ This software is based on the public domain UNCMIN routines.
+ It was translated from FORTRAN to Java by a US government employee
+ on official time. Thus this software is also in the public domain.
+
+
+ The translator's mail address is:
+
+ Steve Verrill
+ USDA Forest Products Laboratory
+ 1 Gifford Pinchot Drive
+ Madison, Wisconsin
+ 53705
+
+
+ The translator's e-mail address is:
+
+ steve at ws13.fpl.fs.fed.us
+
+
+***********************************************************************
+
+DISCLAIMER OF WARRANTIES:
+
+THIS SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND.
+THE TRANSLATOR DOES NOT WARRANT, GUARANTEE OR MAKE ANY REPRESENTATIONS
+REGARDING THE SOFTWARE OR DOCUMENTATION IN TERMS OF THEIR CORRECTNESS,
+RELIABILITY, CURRENCY, OR OTHERWISE. THE ENTIRE RISK AS TO
+THE RESULTS AND PERFORMANCE OF THE SOFTWARE IS ASSUMED BY YOU.
+IN NO CASE WILL ANY PARTY INVOLVED WITH THE CREATION OR DISTRIBUTION
+OF THE SOFTWARE BE LIABLE FOR ANY DAMAGE THAT MAY RESULT FROM THE USE
+OF THIS SOFTWARE.
+
+Sorry about that.
+
+***********************************************************************
+
+
+History:
+
+Date Translator Changes
+
+4/14/98 Steve Verrill Translated
+
+*/
+
+
+package optimization;
+
+import optimization.*;
+import linear_algebra.Blas_f77;
+
+/**
+*
+*<p>
+*This class contains Java translations of the UNCMIN unconstrained
+*optimization routines. See R.B. Schnabel, J.E. Koontz, and B.E.
+*Weiss, <i>A Modular System
+*of Algorithms for Unconstrained Minimization</i>, Report CU-CS-240-82,
+*Comp. Sci. Dept., University of Colorado at Boulder, 1982.
+*
+*<p>
+*<b>IMPORTANT:</b> The "_f77" suffixes indicate that these routines use
+*FORTRAN style indexing. For example, you will see
+*<pre>
+* for (i = 1; i <= n; i++)
+*</pre>
+*rather than
+*<pre>
+* for (i = 0; i < n; i++)
+*</pre>
+*To use the "_f77" routines you will have to declare your vectors
+*and matrices to be one element larger (e.g., v[101] rather than
+*v[100], and a[101][101] rather than a[100][100]), and you will have
+*to fill elements 1 through n rather than elements 0 through n - 1.
+*Versions of these programs that use C/Java style indexing will
+*eventually be available. They will end with the suffix "_j".
+*
+*<p>
+*This class was translated by a statistician from a FORTRAN version of
+*UNCMIN. It is NOT an official translation. It wastes
+*memory by failing to use the first elements of vectors. When
+*public domain Java optimization routines become available
+*from the people who produced UNCMIN, then <b>THE CODE PRODUCED
+*BY THE NUMERICAL ANALYSTS SHOULD BE USED</b>.
+*
+*<p>
+*Meanwhile, if you have suggestions for improving this
+*code, please contact Steve Verrill at steve at ws13.fpl.fs.fed.us.
+*
+*@author (translator)Steve Verrill
+*@version .5 --- April 14, 1998
+*
+*/
+
+
+public class Uncmin_f77 extends Object {
+
+
+/**
+*
+*<p>
+*The optif0_f77 method minimizes a smooth nonlinear function of n variables.
+*A method that computes the function value at any point
+*must be supplied. (See Uncmin_methods.java and UncminTest.java.)
+*Derivative values are not required.
+*The optif0_f77 method provides the simplest user access to the UNCMIN
+*minimization routines. Without a recompile,
+*the user has no control over options.
+*For details, see the Schnabel et al reference and the comments in the code.
+*
+*Translated by Steve Verrill, August 4, 1998.
+*
+*@param n The number of arguments of the function to minimize
+*@param x The initial estimate of the minimum point
+*@param minclass A class that implements the Uncmin_methods
+* interface (see the definition in
+* Uncmin_methods.java). See UncminTest_f77.java for an
+* example of such a class. The class must define:
+* 1.) a method, f_to_minimize, to minimize.
+* f_to_minimize must have the form
+*
+* public static double f_to_minimize(double x[])
+*
+* where x is the vector of arguments to the function
+* and the return value is the value of the function
+* evaluated at x.
+* 2.) a method, gradient, that has the form
+*
+* public static void gradient(double x[],
+* double g[])
+*
+* where g is the gradient of f evaluated at x. This
+* method will have an empty body if the user
+* does not wish to provide an analytic estimate
+* of the gradient.
+* 3.) a method, hessian, that has the form
+*
+* public static void hessian(double x[],
+* double h[][])
+* where h is the Hessian of f evaluated at x. This
+* method will have an empty body if the user
+* does not wish to provide an analytic estimate
+* of the Hessian. If the user wants Uncmin
+* to check the Hessian, then the hessian method
+* should only fill the lower triangle (and diagonal)
+* of h.
+*@param xpls The final estimate of the minimum point
+*@param fpls The value of f_to_minimize at xpls
+*@param gpls The gradient at the local minimum xpls
+*@param itrmcd Termination code
+* ITRMCD = 0: Optimal solution found
+* ITRMCD = 1: Terminated with gradient small,
+* xpls is probably optimal
+* ITRMCD = 2: Terminated with stepsize small,
+* xpls is probably optimal
+* ITRMCD = 3: Lower point cannot be found,
+* xpls is probably optimal
+* ITRMCD = 4: Iteration limit (150) exceeded
+* ITRMCD = 5: Too many large steps,
+* function may be unbounded
+*@param a Workspace for the Hessian (or its estimate)
+* and its Cholesky decomposition
+*@param udiag Workspace for the diagonal of the Hessian
+*
+*
+*/
+
+ public static void optif0_f77(int n, double x[], Uncmin_methods minclass,
+ double xpls[], double fpls[], double gpls[],
+ int itrmcd[], double a[][], double udiag[]) {
+
+
+/*
+
+Here is a copy of the optif0 FORTRAN documentation:
+
+ SUBROUTINE OPTIF0(NR,N,X,FCN,XPLS,FPLS,GPLS,ITRMCD,A,WRK)
+c
+ implicit double precision (a-h,o-z)
+c
+C
+C PURPOSE
+C -------
+C PROVIDE SIMPLEST INTERFACE TO MINIMIZATION PACKAGE.
+C USER HAS NO CONTROL OVER OPTIONS.
+
+
+C PARAMETERS
+C ----------
+C NR --> ROW DIMENSION OF MATRIX
+C N --> DIMENSION OF PROBLEM
+C X(N) --> INITIAL ESTIMATE OF MINIMUM
+C FCN --> NAME OF ROUTINE TO EVALUATE MINIMIZATION FUNCTION.
+C MUST BE DECLARED EXTERNAL IN CALLING ROUTINE.
+C XPLS(N) <-- LOCAL MINIMUM
+C FPLS <-- FUNCTION VALUE AT LOCAL MINIMUM XPLS
+C GPLS(N) <-- GRADIENT AT LOCAL MINIMUM XPLS
+C ITRMCD <-- TERMINATION CODE
+C A(N,N) --> WORKSPACE
+C WRK(N,9) --> WORKSPACE
+C
+
+*/
+
+ int msg[] = new int[2];
+
+ double typsiz[] = new double[n+1];
+ double g[] = new double[n+1];
+ double p[] = new double[n+1];
+ double sx[] = new double[n+1];
+ double wrk0[] = new double[n+1];
+ double wrk1[] = new double[n+1];
+ double wrk2[] = new double[n+1];
+ double wrk3[] = new double[n+1];
+
+ double dlt[] = new double[2];
+ double fscale[] = new double[2];
+ double stepmx[] = new double[2];
+
+ int ndigit[] = new int[2];
+
+ int i,ig,it;
+ int lt;
+
+ int method[] = new int[2];
+ int iexp[] = new int[2];
+ int itnlim[] = new int[2];
+ int iagflg[] = new int[2];
+ int iahflg[] = new int[2];
+
+ double gradtl[] = new double[2];
+ double steptl[] = new double[2];
+
+ Uncmin_f77.dfault_f77(n,x,typsiz,fscale,method,iexp,msg,
+ ndigit,itnlim,iagflg,iahflg,dlt,
+ gradtl,stepmx,steptl);
+
+ Uncmin_f77.optdrv_f77(n,x,minclass,typsiz,fscale,method,
+ iexp,msg,ndigit,itnlim,iagflg,iahflg,
+ dlt,gradtl,stepmx,steptl,xpls,
+ fpls,gpls,itrmcd,a,udiag,g,p,sx,
+ wrk0,wrk1,wrk2,wrk3);
+
+ if (itrmcd[1] == 1) {
+
+ System.out.print("\nUncmin WARNING --- itrmcd = 1, " +
+ "probably converged, gradient small\n\n");
+
+ } else if (itrmcd[1] == 2) {
+
+ System.out.print("\nUncmin WARNING --- itrmcd = 2, " +
+ "probably converged, stepsize small\n\n");
+
+ } else if (itrmcd[1] == 3) {
+
+ System.out.print("\nUncmin WARNING --- itrmcd = 3, " +
+ "cannot find lower point\n\n");
+
+ } else if (itrmcd[1] == 4) {
+
+ System.out.print("\nUncmin WARNING --- itrmcd = 4, " +
+ "too many iterations\n\n");
+
+ } else if (itrmcd[1] == 5) {
+
+ System.out.print("\nUncmin WARNING --- itrmcd = 5, " +
+ "too many large steps, " +
+ "possibly unbounded\n\n");
+
+ }
+
+ return;
+
+ }
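+
+// A usage sketch (illustrative only):  MyFunc stands for any class that
+// implements the Uncmin_methods interface, and the array sizes follow the
+// FORTRAN-style convention used throughout this class (element 0 unused,
+// scalar results returned in element 1).
+//
+//   int n = 2;
+//
+//   double x[]     = new double[n+1];     // initial estimate in 1..n
+//   double xpls[]  = new double[n+1];
+//   double fpls[]  = new double[2];
+//   double gpls[]  = new double[n+1];
+//   int itrmcd[]   = new int[2];
+//   double a[][]   = new double[n+1][n+1];
+//   double udiag[] = new double[n+1];
+//
+//   x[1] = -1.2;
+//   x[2] =  1.0;
+//
+//   Uncmin_f77.optif0_f77(n,x,new MyFunc(),xpls,fpls,gpls,
+//                         itrmcd,a,udiag);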
+
+
+
+/**
+*
+*<p>
+*The optif9_f77 method minimizes a smooth nonlinear function of n variables.
+*A method that computes the function value at any point
+*must be supplied. (See Uncmin_methods.java and UncminTest.java.)
+*Derivative values are not required.
+*The optif9 method provides complete user access to the UNCMIN
+*minimization routines. The user has full control over options.
+*For details, see the Schnabel et al reference and the comments in the code.
+*
+*Translated by Steve Verrill, August 4, 1998.
+*
+*@param n The number of arguments of the function to minimize
+*@param x The initial estimate of the minimum point
+*@param minclass A class that implements the Uncmin_methods
+* interface (see the definition in
+* Uncmin_methods.java). See UncminTest_f77.java for an
+* example of such a class. The class must define:
+* 1.) a method, f_to_minimize, to minimize.
+* f_to_minimize must have the form
+*
+* public static double f_to_minimize(double x[])
+*
+* where x is the vector of arguments to the function
+* and the return value is the value of the function
+* evaluated at x.
+* 2.) a method, gradient, that has the form
+*
+* public static void gradient(double x[],
+* double g[])
+*
+* where g is the gradient of f evaluated at x. This
+* method will have an empty body if the user
+* does not wish to provide an analytic estimate
+* of the gradient.
+* 3.) a method, hessian, that has the form
+*
+* public static void hessian(double x[],
+* double h[][])
+* where h is the Hessian of f evaluated at x. This
+* method will have an empty body if the user
+* does not wish to provide an analytic estimate
+* of the Hessian. If the user wants Uncmin
+* to check the Hessian, then the hessian method
+* should only fill the lower triangle (and diagonal)
+* of h.
+*@param typsiz Typical size for each component of x
+*@param fscale Estimate of the scale of the objective function
+*@param method Algorithm to use to solve the minimization problem
+* = 1 line search
+* = 2 double dogleg
+* = 3 More-Hebdon
+*@param iexp = 1 if the optimization function f_to_minimize
+* is expensive to evaluate, = 0 otherwise. If iexp = 1,
+* then the Hessian will be evaluated by secant update
+* rather than analytically or by finite differences.
+*@param msg Message to inhibit certain automatic checks and output
+*@param ndigit Number of good digits in the minimization function
+*@param itnlim Maximum number of allowable iterations
+*@param iagflg = 0 if an analytic gradient is not supplied
+*@param iahflg = 0 if an analytic Hessian is not supplied
+*@param dlt Trust region radius
+*@param gradtl Tolerance at which the gradient is considered close enough
+* to zero to terminate the algorithm
+*@param stepmx Maximum allowable step size
+*@param steptl Relative step size at which successive iterates
+* are considered close enough to terminate the algorithm
+*@param xpls The final estimate of the minimum point
+*@param fpls The value of f_to_minimize at xpls
+*@param gpls The gradient at the local minimum xpls
+*@param itrmcd Termination code
+* ITRMCD = 0: Optimal solution found
+* ITRMCD = 1: Terminated with gradient small,
+* X is probably optimal
+* ITRMCD = 2: Terminated with stepsize small,
+* X is probably optimal
+* ITRMCD = 3: Lower point cannot be found,
+* X is probably optimal
+* ITRMCD = 4: Iteration limit (150) exceeded
+* ITRMCD = 5: Too many large steps,
+* function may be unbounded
+*@param a Workspace for the Hessian (or its estimate)
+* and its Cholesky decomposition
+*@param udiag Workspace for the diagonal of the Hessian
+*
+*/
+
+ public static void optif9_f77(int n, double x[], Uncmin_methods minclass,
+ double typsiz[], double fscale[], int method[],
+ int iexp[], int msg[], int ndigit[], int itnlim[],
+ int iagflg[], int iahflg[], double dlt[],
+ double gradtl[], double stepmx[], double steptl[],
+ double xpls[], double fpls[], double gpls[],
+ int itrmcd[], double a[][], double udiag[]) {
+
+/*
+
+Here is a copy of the optif9 FORTRAN documentation:
+
+ SUBROUTINE OPTIF9(NR,N,X,FCN,D1FCN,D2FCN,TYPSIZ,FSCALE,
+ + METHOD,IEXP,MSG,NDIGIT,ITNLIM,IAGFLG,IAHFLG,IPR,
+ + DLT,GRADTL,STEPMX,STEPTL,
+ + XPLS,FPLS,GPLS,ITRMCD,A,WRK)
+c
+ implicit double precision (a-h,o-z)
+c
+C
+C PURPOSE
+C -------
+C PROVIDE COMPLETE INTERFACE TO MINIMIZATION PACKAGE.
+C USER HAS FULL CONTROL OVER OPTIONS.
+C
+C PARAMETERS
+C ----------
+C NR --> ROW DIMENSION OF MATRIX
+C N --> DIMENSION OF PROBLEM
+C X(N) --> ON ENTRY: ESTIMATE TO A ROOT OF FCN
+C FCN --> NAME OF SUBROUTINE TO EVALUATE OPTIMIZATION FUNCTION
+C MUST BE DECLARED EXTERNAL IN CALLING ROUTINE
+C FCN: R(N) --> R(1)
+C D1FCN --> (OPTIONAL) NAME OF SUBROUTINE TO EVALUATE GRADIENT
+C OF FCN. MUST BE DECLARED EXTERNAL IN CALLING ROUTINE
+C D2FCN --> (OPTIONAL) NAME OF SUBROUTINE TO EVALUATE HESSIAN OF
+C OF FCN. MUST BE DECLARED EXTERNAL IN CALLING ROUTINE
+C TYPSIZ(N) --> TYPICAL SIZE FOR EACH COMPONENT OF X
+C FSCALE --> ESTIMATE OF SCALE OF OBJECTIVE FUNCTION
+C METHOD --> ALGORITHM TO USE TO SOLVE MINIMIZATION PROBLEM
+C =1 LINE SEARCH
+C =2 DOUBLE DOGLEG
+C =3 MORE-HEBDON
+C IEXP --> =1 IF OPTIMIZATION FUNCTION FCN IS EXPENSIVE TO
+C EVALUATE, =0 OTHERWISE. IF SET THEN HESSIAN WILL
+C BE EVALUATED BY SECANT UPDATE INSTEAD OF
+C ANALYTICALLY OR BY FINITE DIFFERENCES
+C MSG <--> ON INPUT: (.GT.0) MESSAGE TO INHIBIT CERTAIN
+C AUTOMATIC CHECKS
+C ON OUTPUT: (.LT.0) ERROR CODE; =0 NO ERROR
+C NDIGIT --> NUMBER OF GOOD DIGITS IN OPTIMIZATION FUNCTION FCN
+C ITNLIM --> MAXIMUM NUMBER OF ALLOWABLE ITERATIONS
+C IAGFLG --> =1 IF ANALYTIC GRADIENT SUPPLIED
+C IAHFLG --> =1 IF ANALYTIC HESSIAN SUPPLIED
+C IPR --> DEVICE TO WHICH TO SEND OUTPUT
+C DLT --> TRUST REGION RADIUS
+C GRADTL --> TOLERANCE AT WHICH GRADIENT CONSIDERED CLOSE
+C ENOUGH TO ZERO TO TERMINATE ALGORITHM
+C STEPMX --> MAXIMUM ALLOWABLE STEP SIZE
+C STEPTL --> RELATIVE STEP SIZE AT WHICH SUCCESSIVE ITERATES
+C CONSIDERED CLOSE ENOUGH TO TERMINATE ALGORITHM
+C XPLS(N) <--> ON EXIT: XPLS IS LOCAL MINIMUM
+C FPLS <--> ON EXIT: FUNCTION VALUE AT SOLUTION, XPLS
+C GPLS(N) <--> ON EXIT: GRADIENT AT SOLUTION XPLS
+C ITRMCD <-- TERMINATION CODE
+C A(N,N) --> WORKSPACE FOR HESSIAN (OR ESTIMATE)
+C AND ITS CHOLESKY DECOMPOSITION
+C WRK(N,8) --> WORKSPACE
+C
+
+*/
+
+ double g[] = new double[n+1];
+ double p[] = new double[n+1];
+ double sx[] = new double[n+1];
+ double wrk0[] = new double[n+1];
+ double wrk1[] = new double[n+1];
+ double wrk2[] = new double[n+1];
+ double wrk3[] = new double[n+1];
+
+// MINIMIZE FUNCTION
+
+ Uncmin_f77.optdrv_f77(n,x,minclass,typsiz,fscale,method,
+ iexp,msg,ndigit,itnlim,iagflg,iahflg,
+ dlt,gradtl,stepmx,steptl,xpls,
+ fpls,gpls,itrmcd,a,udiag,g,p,sx,
+ wrk0,wrk1,wrk2,wrk3);
+
+ if (itrmcd[1] == 1) {
+
+ System.out.print("\nUncmin WARNING --- itrmcd = 1, " +
+ "probably converged, gradient small\n\n");
+
+ } else if (itrmcd[1] == 2) {
+
+ System.out.print("\nUncmin WARNING --- itrmcd = 2, " +
+ "probably converged, stepsize small\n\n");
+
+ } else if (itrmcd[1] == 3) {
+
+ System.out.print("\nUncmin WARNING --- itrmcd = 3, " +
+ "cannot find lower point\n\n");
+
+ } else if (itrmcd[1] == 4) {
+
+ System.out.print("\nUncmin WARNING --- itrmcd = 4, " +
+ "too many iterations\n\n");
+
+ } else if (itrmcd[1] == 5) {
+
+ System.out.print("\nUncmin WARNING --- itrmcd = 5, " +
+ "too many large steps, " +
+ "possibly unbounded\n\n");
+
+ }
+
+ return;
+
+ }
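+
+// A usage sketch (illustrative only):  one way to obtain default settings
+// from dfault_f77, override an option, and then call optif9_f77.  The
+// option and work arrays (typsiz, fscale, method, ..., udiag) are assumed
+// to be allocated exactly as in the body of optif0_f77 above, and
+// "minclass" is any Uncmin_methods implementation.
+//
+//   Uncmin_f77.dfault_f77(n,x,typsiz,fscale,method,iexp,msg,
+//                         ndigit,itnlim,iagflg,iahflg,dlt,
+//                         gradtl,stepmx,steptl);
+//
+//   itnlim[1] = 500;             // allow more iterations than the default
+//
+//   Uncmin_f77.optif9_f77(n,x,minclass,typsiz,fscale,method,
+//                         iexp,msg,ndigit,itnlim,iagflg,iahflg,
+//                         dlt,gradtl,stepmx,steptl,xpls,
+//                         fpls,gpls,itrmcd,a,udiag);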
+
+
+
+/**
+*
+*<p>
+*The bakslv_f77 method solves Ax = b where A is an upper triangular
+*matrix. Note that A is input as a lower triangular matrix and
+*this method takes its transpose implicitly.
+*
+*Translated by Steve Verrill, April 14, 1998.
+*
+*@param n Dimension of the problem
+*@param a n by n lower triangular matrix (preserved)
+*@param x The solution vector
+*@param b The right-hand side vector
+*
+*/
+
+ public static void bakslv_f77(int n, double a[][], double x[], double b[]) {
+
+/*
+
+Here is a copy of the bakslv FORTRAN documentation:
+
+ SUBROUTINE BAKSLV(NR,N,A,X,B)
+
+C
+C PURPOSE
+C -------
+C SOLVE AX=B WHERE A IS UPPER TRIANGULAR MATRIX.
+C NOTE THAT A IS INPUT AS A LOWER TRIANGULAR MATRIX AND
+C THAT THIS ROUTINE TAKES ITS TRANSPOSE IMPLICITLY.
+C
+C PARAMETERS
+C ----------
+C NR --> ROW DIMENSION OF MATRIX
+C N --> DIMENSION OF PROBLEM
+C A(N,N) --> LOWER TRIANGULAR MATRIX (PRESERVED)
+C X(N) <-- SOLUTION VECTOR
+C B(N) --> RIGHT-HAND SIDE VECTOR
+C
+C NOTE
+C ----
+C IF B IS NO LONGER REQUIRED BY CALLING ROUTINE,
+C THEN VECTORS B AND X MAY SHARE THE SAME STORAGE.
+C
+
+*/
+
+ int i,ip1,j;
+ double sum;
+
+// SOLVE (L-TRANSPOSE)X=B. (BACK SOLVE)
+
+ i = n;
+ x[i] = b[i]/a[i][i];
+
+ while (i > 1) {
+
+ ip1 = i;
+ i--;
+
+ sum = 0.0;
+
+ for (j = ip1; j <= n; j++) {
+
+ sum += a[j][i]*x[j];
+
+ }
+
+ x[i] = (b[i] - sum)/a[i][i];
+
+ }
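+
+// In equation form, with x[n] = b[n]/a[n][n] set above, the loop computes,
+// for i = n-1, n-2, ..., 1,
+//
+//    x[i] = ( b[i] - sum(j = i+1 to n) a[j][i]*x[j] ) / a[i][i]
+//
+// which is back substitution on the transpose of the stored lower triangle,
+// i.e. on the upper triangular matrix A.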
+
+ return;
+
+ }
+
+
+/**
+*
+*<p>
+*The chlhsn_f77 method finds
+*"THE L(L-TRANSPOSE) [WRITTEN LL+] DECOMPOSITION OF THE PERTURBED
+*MODEL HESSIAN MATRIX A+MU*I (WHERE MU >= 0 AND I IS THE IDENTITY MATRIX)
+*WHICH IS SAFELY POSITIVE DEFINITE. IF A IS SAFELY POSITIVE DEFINITE
+*UPON ENTRY, THEN MU=0."
+*
+*Translated by Steve Verrill, April 14, 1998.
+*
+*@param n Dimension of the problem
+*@param a On entry: A is the model Hessian (only the lower
+* triangle and diagonal stored)
+* On exit: A contains L of the LL+ decomposition of
+* the perturbed model Hessian in the lower triangle
+* and diagonal, and contains the Hessian in the upper
+* triangle and udiag
+*@param epsm Machine epsilon
+*@param sx Scaling vector for x
+*@param udiag On exit: Contains the diagonal of the Hessian
+*
+*/
+
+ public static void chlhsn_f77(int n, double a[][], double epsm,
+ double sx[], double udiag[]) {
+
+/*
+
+Here is a copy of the chlhsn FORTRAN documentation:
+
+ SUBROUTINE CHLHSN(NR,N,A,EPSM,SX,UDIAG)
+
+C
+C PURPOSE
+C -------
+C FIND THE L(L-TRANSPOSE) [WRITTEN LL+] DECOMPOSITION OF THE PERTURBED
+C MODEL HESSIAN MATRIX A+MU*I(WHERE MU\0 AND I IS THE IDENTITY MATRIX)
+C WHICH IS SAFELY POSITIVE DEFINITE. IF A IS SAFELY POSITIVE DEFINITE
+C UPON ENTRY, THEN MU=0.
+C
+C PARAMETERS
+C ----------
+C NR --> ROW DIMENSION OF MATRIX
+C N --> DIMENSION OF PROBLEM
+C A(N,N) <--> ON ENTRY; "A" IS MODEL HESSIAN (ONLY LOWER
+C TRIANGULAR PART AND DIAGONAL STORED)
+C ON EXIT: A CONTAINS L OF LL+ DECOMPOSITION OF
+C PERTURBED MODEL HESSIAN IN LOWER TRIANGULAR
+C PART AND DIAGONAL AND CONTAINS HESSIAN IN UPPER
+C TRIANGULAR PART AND UDIAG
+C EPSM --> MACHINE EPSILON
+C SX(N) --> DIAGONAL SCALING MATRIX FOR X
+C UDIAG(N) <-- ON EXIT: CONTAINS DIAGONAL OF HESSIAN
+C
+C INTERNAL VARIABLES
+C ------------------
+C TOL TOLERANCE
+C DIAGMN MINIMUM ELEMENT ON DIAGONAL OF A
+C DIAGMX MAXIMUM ELEMENT ON DIAGONAL OF A
+C OFFMAX MAXIMUM OFF-DIAGONAL ELEMENT OF A
+C OFFROW SUM OF OFF-DIAGONAL ELEMENTS IN A ROW OF A
+C EVMIN MINIMUM EIGENVALUE OF A
+C EVMAX MAXIMUM EIGENVALUE OF A
+C
+C DESCRIPTION
+C -----------
+C 1. IF "A" HAS ANY NEGATIVE DIAGONAL ELEMENTS, THEN CHOOSE MU>0
+C SUCH THAT THE DIAGONAL OF A:=A+MU*I IS ALL POSITIVE
+C WITH THE RATIO OF ITS SMALLEST TO LARGEST ELEMENT ON THE
+C ORDER OF SQRT(EPSM).
+C
+C 2. "A" UNDERGOES A PERTURBED CHOLESKY DECOMPOSITION WHICH
+C RESULTS IN AN LL+ DECOMPOSITION OF A+D, WHERE D IS A
+C NON-NEGATIVE DIAGONAL MATRIX WHICH IS IMPLICITLY ADDED TO
+C "A" DURING THE DECOMPOSITION IF "A" IS NOT POSITIVE DEFINITE.
+C "A" IS RETAINED AND NOT CHANGED DURING THIS PROCESS BY
+C COPYING L INTO THE UPPER TRIANGULAR PART OF "A" AND THE
+C DIAGONAL INTO UDIAG. THEN THE CHOLESKY DECOMPOSITION ROUTINE
+C IS CALLED. ON RETURN, ADDMAX CONTAINS MAXIMUM ELEMENT OF D.
+C
+C 3. IF ADDMAX=0, "A" WAS POSITIVE DEFINITE GOING INTO STEP 2
+C AND RETURN IS MADE TO CALLING PROGRAM. OTHERWISE,
+C THE MINIMUM NUMBER SDD WHICH MUST BE ADDED TO THE
+C DIAGONAL OF A TO MAKE IT SAFELY STRICTLY DIAGONALLY DOMINANT
+C IS CALCULATED. SINCE A+ADDMAX*I AND A+SDD*I ARE SAFELY
+C POSITIVE DEFINITE, CHOOSE MU=MIN(ADDMAX,SDD) AND DECOMPOSE
+C A+MU*I TO OBTAIN L.
+C
+
+*/
+
+ int i,j,im1,jm1;
+ double tol,diagmx,diagmn,posmax,amu,offmax,
+ evmin,evmax,offrow,sdd;
+
+ double addmax[] = new double[2];
+
+// SCALE HESSIAN
+// PRE- AND POST- MULTIPLY "A" BY INV(SX)
+
+ for (j = 1; j <= n; j++) {
+
+ for (i = j; i <= n; i++) {
+
+ a[i][j] /= (sx[i]*sx[j]);
+
+ }
+
+ }
+
+// STEP1
+// -----
+// NOTE: IF A DIFFERENT TOLERANCE IS DESIRED THROUGHOUT THIS
+// ALGORITHM, CHANGE TOLERANCE HERE:
+
+ tol = Math.sqrt(epsm);
+
+ diagmx = a[1][1];
+ diagmn = a[1][1];
+
+ for (i = 2; i <= n; i++) {
+
+ if (a[i][i] < diagmn) diagmn = a[i][i];
+ if (a[i][i] > diagmx) diagmx = a[i][i];
+
+ }
+
+ posmax = Math.max(diagmx,0.0);
+
+// DIAGMN .LE. 0
+
+ if (diagmn <= posmax*tol) {
+
+ amu = tol*(posmax - diagmn) - diagmn;
+
+ if (amu == 0.0) {
+
+// FIND LARGEST OFF-DIAGONAL ELEMENT OF A
+
+ offmax = 0.0;
+
+ for (i = 2; i <= n; i++) {
+
+ im1 = i - 1;
+
+ for (j = 1; j <= im1; j++) {
+
+ if (Math.abs(a[i][j]) > offmax) offmax =
+ Math.abs(a[i][j]);
+
+ }
+
+ }
+
+ amu = offmax;
+
+ if (amu == 0.0) {
+
+ amu = 1.0;
+
+ } else {
+
+ amu *= 1.0 + tol;
+
+ }
+
+ }
+
+// A = A + MU*I
+
+ for (i = 1; i <= n; i++) {
+
+ a[i][i] += amu;
+
+ }
+
+ diagmx += amu;
+
+ }
+
+// STEP2
+// -----
+// COPY LOWER TRIANGULAR PART OF "A" TO UPPER TRIANGULAR PART
+// AND DIAGONAL OF "A" TO UDIAG
+
+
+ for (j = 1; j <= n; j++) {
+
+ udiag[j] = a[j][j];
+
+ for (i = j + 1; i <= n; i++) {
+
+ a[j][i] = a[i][j];
+
+ }
+
+ }
+
+ Uncmin_f77.choldc_f77(n,a,diagmx,tol,addmax);
+
+// STEP3
+// -----
+// IF ADDMAX=0, "A" WAS POSITIVE DEFINITE GOING INTO STEP 2,
+// THE LL+ DECOMPOSITION HAS BEEN DONE, AND WE RETURN.
+// OTHERWISE, ADDMAX>0. PERTURB "A" SO THAT IT IS SAFELY
+// DIAGONALLY DOMINANT AND FIND LL+ DECOMPOSITION
+
+ if (addmax[1] > 0.0) {
+
+// RESTORE ORIGINAL "A" (LOWER TRIANGULAR PART AND DIAGONAL)
+
+ for (j = 1; j <= n; j++) {
+
+ a[j][j] = udiag[j];
+
+ for (i = j + 1; i <= n; i++) {
+
+ a[i][j] = a[j][i];
+
+ }
+
+ }
+
+// FIND SDD SUCH THAT A+SDD*I IS SAFELY POSITIVE DEFINITE
+// NOTE: EVMIN<0 SINCE A IS NOT POSITIVE DEFINITE;
+
+ evmin = 0.0;
+ evmax = a[1][1];
+
+ for (i = 1; i <= n; i++) {
+
+ offrow = 0.0;
+ im1 = i - 1;
+
+ for (j = 1; j <= im1; j++) {
+
+ offrow += Math.abs(a[i][j]);
+
+ }
+
+ for (j = i + 1; j <= n; j++) {
+
+ offrow += Math.abs(a[j][i]);
+
+ }
+
+ evmin = Math.min(evmin,a[i][i] - offrow);
+ evmax = Math.max(evmax,a[i][i] + offrow);
+
+ }
+
+ sdd = tol*(evmax - evmin) - evmin;
+
+// PERTURB "A" AND DECOMPOSE AGAIN
+
+ amu = Math.min(sdd,addmax[1]);
+
+ for (i = 1; i <= n; i++) {
+
+ a[i][i] += amu;
+ udiag[i] = a[i][i];
+
+ }
+
+// "A" NOW GUARANTEED SAFELY POSITIVE DEFINITE
+
+ Uncmin_f77.choldc_f77(n,a,0.0,tol,addmax);
+
+ }
+
+// UNSCALE HESSIAN AND CHOLESKY DECOMPOSITION MATRIX
+
+ for (j = 1; j <= n; j++) {
+
+ for (i = j; i <= n; i++) {
+
+ a[i][j] *= sx[i];
+
+ }
+
+ jm1 = j - 1;
+
+ for (i = 1; i <= jm1; i++) {
+
+ a[i][j] *= sx[i]*sx[j];
+
+ }
+
+ udiag[j] *= sx[j]*sx[j];
+
+ }
+
+ return;
+
+ }
+
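+/*
+ Worked example of step 1 (a sketch with hypothetical values, not from the
+ original source). Suppose the scaled Hessian has diagonal elements -1 and 4,
+ so diagmn = -1, diagmx = posmax = 4, and tol = sqrt(epsm). Since
+ diagmn <= posmax*tol, the shift
+
+     amu = tol*(posmax - diagmn) - diagmn = 5*tol + 1
+
+ is added to the diagonal, giving new diagonal elements 5*tol and 5 + 5*tol.
+ Their ratio, 5*tol/(5 + 5*tol), is approximately tol = sqrt(epsm), which is
+ exactly the "ratio ... on the order of sqrt(epsm)" promised in step 1 of the
+ description above.
+*/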
+
+/**
+*
+*<p>
+*The choldc_f77 method finds
+*"THE PERTURBED L(L-TRANSPOSE) [WRITTEN LL+] DECOMPOSITION
+*OF A+D, WHERE D IS A NON-NEGATIVE DIAGONAL MATRIX ADDED TO A IF
+*NECESSARY TO ALLOW THE CHOLESKY DECOMPOSITION TO CONTINUE."
+*
+*Translated by Steve Verrill, April 15, 1998.
+*
+*@param n Dimension of the problem
+*@param a On entry: matrix for which to find the perturbed
+* Cholesky decomposition
+* On exit: contains L of the LL+ decomposition
+* in lower triangle
+*@param diagmx Maximum diagonal element of "A"
+*@param tol Tolerance
+*@param addmax Maximum amount implicitly added to diagonal
+* of "A" in forming the Cholesky decomposition
+* of A+D
+*
+*/
+
+ public static void choldc_f77(int n, double a[][], double diagmx,
+ double tol, double addmax[]) {
+
+/*
+
+Here is a copy of the choldc FORTRAN documentation:
+
+ SUBROUTINE CHOLDC(NR,N,A,DIAGMX,TOL,ADDMAX)
+
+C
+C PURPOSE
+C -------
+C FIND THE PERTURBED L(L-TRANSPOSE) [WRITTEN LL+] DECOMPOSITION
+C OF A+D, WHERE D IS A NON-NEGATIVE DIAGONAL MATRIX ADDED TO A IF
+C NECESSARY TO ALLOW THE CHOLESKY DECOMPOSITION TO CONTINUE.
+C
+C PARAMETERS
+C ----------
+C NR --> ROW DIMENSION OF MATRIX
+C N --> DIMENSION OF PROBLEM
+C A(N,N) <--> ON ENTRY: MATRIX FOR WHICH TO FIND PERTURBED
+C CHOLESKY DECOMPOSITION
+C ON EXIT: CONTAINS L OF LL+ DECOMPOSITION
+C IN LOWER TRIANGULAR PART AND DIAGONAL OF "A"
+C DIAGMX --> MAXIMUM DIAGONAL ELEMENT OF "A"
+C TOL --> TOLERANCE
+C ADDMAX <-- MAXIMUM AMOUNT IMPLICITLY ADDED TO DIAGONAL OF "A"
+C IN FORMING THE CHOLESKY DECOMPOSITION OF A+D
+C INTERNAL VARIABLES
+C ------------------
+C AMINL SMALLEST ELEMENT ALLOWED ON DIAGONAL OF L
+C AMNLSQ =AMINL**2
+C OFFMAX MAXIMUM OFF-DIAGONAL ELEMENT IN COLUMN OF A
+C
+C
+C DESCRIPTION
+C -----------
+C THE NORMAL CHOLESKY DECOMPOSITION IS PERFORMED. HOWEVER, IF AT ANY
+C POINT THE ALGORITHM WOULD ATTEMPT TO SET L(I,I)=SQRT(TEMP)
+C WITH TEMP < TOL*DIAGMX, THEN L(I,I) IS SET TO SQRT(TOL*DIAGMX)
+C INSTEAD. THIS IS EQUIVALENT TO ADDING TOL*DIAGMX-TEMP TO A(I,I)
+C
+C
+
+*/
+
+ int i,j,jm1,jp1,k;
+ double aminl,amnlsq,offmax,sum,temp;
+
+ addmax[1] = 0.0;
+ aminl = Math.sqrt(diagmx*tol);
+ amnlsq = aminl*aminl;
+
+// FORM COLUMN J OF L
+
+ for (j = 1; j <= n; j++) {
+
+// FIND DIAGONAL ELEMENTS OF L
+
+ sum = 0.0;
+ jm1 = j - 1;
+ jp1 = j + 1;
+
+ for (k = 1; k <= jm1; k++) {
+
+ sum += a[j][k]*a[j][k];
+
+ }
+
+ temp = a[j][j] - sum;
+
+ if (temp >= amnlsq) {
+
+ a[j][j] = Math.sqrt(temp);
+
+ } else {
+
+// FIND MAXIMUM OFF-DIAGONAL ELEMENT IN COLUMN
+
+ offmax = 0.0;
+
+ for (i = jp1; i <= n; i++) {
+
+ if (Math.abs(a[i][j]) > offmax) offmax = Math.abs(a[i][j]);
+
+ }
+
+ if (offmax <= amnlsq) offmax = amnlsq;
+
+// ADD TO DIAGONAL ELEMENT TO ALLOW CHOLESKY DECOMPOSITION TO CONTINUE
+
+ a[j][j] = Math.sqrt(offmax);
+ addmax[1] = Math.max(addmax[1],offmax-temp);
+
+ }
+
+// FIND I,J ELEMENT OF LOWER TRIANGULAR MATRIX
+
+ for (i = jp1; i <= n; i++) {
+
+ sum = 0.0;
+
+ for (k = 1; k <= jm1; k++) {
+
+ sum += a[i][k]*a[j][k];
+
+ }
+
+ a[i][j] = (a[i][j] - sum)/a[j][j];
+
+ }
+
+ }
+
+ return;
+
+ }
+
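+/*
+ Usage sketch (hypothetical values; 1-based arrays, element 0 unused). The
+ matrix A = [[4, 2], [2, 1]] is positive semidefinite but singular, so a
+ plain Cholesky factorization breaks down at the (2,2) pivot. With
+ diagmx = 4 and tol = 0.01 (so aminl = 0.2 and amnlsq = 0.04):
+
+     double a[][] = new double[3][3];
+     a[1][1] = 4.0;  a[2][1] = 2.0;  a[2][2] = 1.0;   // lower triangle of A
+     double addmax[] = new double[2];
+     Uncmin_f77.choldc_f77(2, a, 4.0, 0.01, addmax);
+
+ The (2,2) pivot would be 1 - (2/2)**2 = 0 < amnlsq, so L(2,2) is set to
+ aminl = 0.2. The result is L = [[2, .], [1, 0.2]] with
+ L(L+) = [[4, 2], [2, 1.04]] = A + diag(0, 0.04) and addmax[1] = 0.04,
+ i.e. the decomposition of A+D described above with D = diag(0, tol*diagmx).
+*/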
+
+
+/**
+*
+*<p>
+*The dfault_f77 method sets default values for each input
+*variable to the minimization algorithm.
+*
+*Translated by Steve Verrill, August 4, 1998.
+*
+*@param n Dimension of the problem
+*@param x Initial estimate of the solution (to compute max step size)
+*@param typsiz Typical size for each component of x
+*@param fscale Estimate of the scale of the minimization function
+*@param method Algorithm to use to solve the minimization problem
+*@param iexp = 0 if the minimization function is not expensive to evaluate
+*@param msg Message to inhibit certain automatic checks and output
+*@param ndigit Number of good digits in the minimization function
+*@param itnlim Maximum number of allowable iterations
+*@param iagflg = 0 if an analytic gradient is not supplied
+*@param iahflg = 0 if an analytic Hessian is not supplied
+*@param dlt Trust region radius
+*@param gradtl Tolerance at which the gradient is considered close enough
+* to zero to terminate the algorithm
+*@param stepmx "Value of zero to trip default maximum in optchk"
+*@param steptl Tolerance at which successive iterates are considered
+* close enough to terminate the algorithm
+*
+*/
+
+ public static void dfault_f77(int n, double x[], double typsiz[],
+ double fscale[], int method[], int iexp[],
+ int msg[], int ndigit[], int itnlim[],
+ int iagflg[], int iahflg[], double dlt[],
+ double gradtl[], double stepmx[],
+ double steptl[]) {
+
+/*
+
+Here is a copy of the dfault FORTRAN documentation:
+
+
+ SUBROUTINE DFAULT(N,X,TYPSIZ,FSCALE,METHOD,IEXP,MSG,NDIGIT,
+ + ITNLIM,IAGFLG,IAHFLG,IPR,DLT,GRADTL,STEPMX,STEPTL)
+
+C
+C PURPOSE
+C -------
+C SET DEFAULT VALUES FOR EACH INPUT VARIABLE TO
+C MINIMIZATION ALGORITHM.
+C
+C PARAMETERS
+C ----------
+C N --> DIMENSION OF PROBLEM
+C X(N) --> INITIAL GUESS TO SOLUTION (TO COMPUTE MAX STEP SIZE)
+C TYPSIZ(N) <-- TYPICAL SIZE FOR EACH COMPONENT OF X
+C FSCALE <-- ESTIMATE OF SCALE OF MINIMIZATION FUNCTION
+C METHOD <-- ALGORITHM TO USE TO SOLVE MINIMIZATION PROBLEM
+C IEXP <-- =0 IF MINIMIZATION FUNCTION NOT EXPENSIVE TO EVALUATE
+C MSG <-- MESSAGE TO INHIBIT CERTAIN AUTOMATIC CHECKS + OUTPUT
+C NDIGIT <-- NUMBER OF GOOD DIGITS IN MINIMIZATION FUNCTION
+C ITNLIM <-- MAXIMUM NUMBER OF ALLOWABLE ITERATIONS
+C IAGFLG <-- =0 IF ANALYTIC GRADIENT NOT SUPPLIED
+C IAHFLG <-- =0 IF ANALYTIC HESSIAN NOT SUPPLIED
+C IPR <-- DEVICE TO WHICH TO SEND OUTPUT
+C DLT <-- TRUST REGION RADIUS
+C GRADTL <-- TOLERANCE AT WHICH GRADIENT CONSIDERED CLOSE ENOUGH
+C TO ZERO TO TERMINATE ALGORITHM
+C STEPMX <-- VALUE OF ZERO TO TRIP DEFAULT MAXIMUM IN OPTCHK
+C STEPTL <-- TOLERANCE AT WHICH SUCCESSIVE ITERATES CONSIDERED
+C CLOSE ENOUGH TO TERMINATE ALGORITHM
+C
+
+*/
+
+ int i;
+ double epsm;
+
+// SET TYPICAL SIZE OF X AND MINIMIZATION FUNCTION
+
+ for (i = 1; i <= n; i++) {
+
+ typsiz[i] = 1.0;
+
+ }
+
+ fscale[1] = 1.0;
+
+// SET TOLERANCES
+
+ dlt[1] = -1.0;
+
+ epsm = 1.12e-16;
+ gradtl[1] = Math.pow(epsm,1.0/3.0);
+ steptl[1] = Math.sqrt(epsm);
+
+ stepmx[1] = 0.0;
+
+// SET FLAGS
+
+ method[1] = 1;
+ iexp[1] = 1;
+ msg[1] = 0;
+ ndigit[1] = -1;
+ itnlim[1] = 150;
+ iagflg[1] = 0;
+ iahflg[1] = 0;
+
+ }
+
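+/*
+ Usage sketch (not part of the original source). All of the "scalar" output
+ arguments are passed as length-2 arrays so that element 1 can be modified
+ in place (FORTRAN-style pass by reference). With epsm = 1.12e-16 the
+ defaults work out to gradtl[1] = epsm**(1/3), roughly 4.8e-6, and
+ steptl[1] = sqrt(epsm), roughly 1.1e-8.
+
+     int n = 2;
+     double x[] = {0.0, 1.0, 1.0};          // initial estimate, x[0] unused
+     double typsiz[] = new double[n+1];
+     double fscale[] = new double[2];
+     int method[] = new int[2];
+     int iexp[] = new int[2];
+     int msg[] = new int[2];
+     int ndigit[] = new int[2];
+     int itnlim[] = new int[2];
+     int iagflg[] = new int[2];
+     int iahflg[] = new int[2];
+     double dlt[] = new double[2];
+     double gradtl[] = new double[2];
+     double stepmx[] = new double[2];
+     double steptl[] = new double[2];
+
+     Uncmin_f77.dfault_f77(n,x,typsiz,fscale,method,iexp,msg,ndigit,
+                           itnlim,iagflg,iahflg,dlt,gradtl,stepmx,steptl);
+
+     // afterwards: method[1] = 1, itnlim[1] = 150, dlt[1] = -1.0,
+     //             stepmx[1] = 0.0, iagflg[1] = iahflg[1] = 0, ...
+*/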
+
+
+/**
+*
+*<p>
+*The dogdrv_f77 method finds the next Newton iterate (xpls) by the double dogleg
+*method. It drives dogstp_f77.
+*
+*Translated by Steve Verrill, April 15, 1998.
+*
+*@param n Dimension of the problem
+*@param x The old iterate
+*@param f Function value at the old iterate
+*@param g Gradient or approximation at the old iterate
+*@param a Cholesky decomposition of Hessian
+* in lower triangular part and diagonal
+*@param p Newton step
+*@param xpls The new iterate
+*@param fpls Function value at the new iterate
+*@param minclass A class that implements the Uncmin_methods
+* interface (see the definition in
+* Uncmin_methods.java). See UncminTest_f77.java for an
+* example of such a class. The class must define:
+* 1.) a method, f_to_minimize, to minimize.
+* f_to_minimize must have the form
+*
+* public static double f_to_minimize(double x[])
+*
+* where x is the vector of arguments to the function
+* and the return value is the value of the function
+* evaluated at x.
+* 2.) a method, gradient, that has the form
+*
+* public static void gradient(double x[],
+* double g[])
+*
+* where g is the gradient of f evaluated at x. This
+* method will have an empty body if the user
+* does not wish to provide an analytic estimate
+* of the gradient.
+* 3.) a method, hessian, that has the form
+*
+* public static void hessian(double x[],
+* double h[][])
+* where h is the Hessian of f evaluated at x. This
+* method will have an empty body if the user
+* does not wish to provide an analytic estimate
+* of the Hessian. If the user wants Uncmin
+* to check the Hessian, then the hessian method
+* should only fill the lower triangle (and diagonal)
+* of h.
+*@param sx Scaling vector for x
+*@param stepmx Maximum allowable step size
+*@param steptl Relative step size at which successive iterates
+* are considered close enough to terminate the
+* algorithm
+*@param dlt Trust region radius (value needs to be retained
+* between successive calls)
+*@param iretcd Return code:
+* 0 --- satisfactory xpls found
+* 1 --- failed to find satisfactory xpls
+*                  sufficiently distinct from x
+*@param mxtake Boolean flag indicating that a step of maximum
+*                  length was used
+*@param sc Workspace (current step)
+*@param wrk1 Workspace (and place holding argument to tregup)
+*@param wrk2 Workspace
+*@param wrk3 Workspace
+*
+*
+*/
+
+ public static void dogdrv_f77(int n, double x[], double f[], double g[],
+ double a[][], double p[], double xpls[],
+ double fpls[], Uncmin_methods minclass,
+ double sx[], double stepmx[], double steptl[],
+ double dlt[], int iretcd[], boolean mxtake[],
+ double sc[], double wrk1[], double wrk2[],
+ double wrk3[]) {
+
+/*
+
+Here is a copy of the dogdrv FORTRAN documentation:
+
+ SUBROUTINE DOGDRV(NR,N,X,F,G,A,P,XPLS,FPLS,FCN,SX,STEPMX,
+ + STEPTL,DLT,IRETCD,MXTAKE,SC,WRK1,WRK2,WRK3,IPR)
+
+C
+C PURPOSE
+C -------
+C FIND A NEXT NEWTON ITERATE (XPLS) BY THE DOUBLE DOGLEG METHOD
+C
+C PARAMETERS
+C ----------
+C NR --> ROW DIMENSION OF MATRIX
+C N --> DIMENSION OF PROBLEM
+C X(N) --> OLD ITERATE X[K-1]
+C F --> FUNCTION VALUE AT OLD ITERATE, F(X)
+C G(N) --> GRADIENT AT OLD ITERATE, G(X), OR APPROXIMATE
+C A(N,N) --> CHOLESKY DECOMPOSITION OF HESSIAN
+C IN LOWER TRIANGULAR PART AND DIAGONAL
+C P(N) --> NEWTON STEP
+C XPLS(N) <-- NEW ITERATE X[K]
+C FPLS <-- FUNCTION VALUE AT NEW ITERATE, F(XPLS)
+C FCN --> NAME OF SUBROUTINE TO EVALUATE FUNCTION
+C SX(N) --> DIAGONAL SCALING MATRIX FOR X
+C STEPMX --> MAXIMUM ALLOWABLE STEP SIZE
+C STEPTL --> RELATIVE STEP SIZE AT WHICH SUCCESSIVE ITERATES
+C CONSIDERED CLOSE ENOUGH TO TERMINATE ALGORITHM
+C DLT <--> TRUST REGION RADIUS
+C [RETAIN VALUE BETWEEN SUCCESSIVE CALLS]
+C IRETCD <-- RETURN CODE
+C =0 SATISFACTORY XPLS FOUND
+C =1 FAILED TO FIND SATISFACTORY XPLS SUFFICIENTLY
+C DISTINCT FROM X
+C MXTAKE <-- BOOLEAN FLAG INDICATING STEP OF MAXIMUM LENGTH USED
+C SC(N) --> WORKSPACE [CURRENT STEP]
+C WRK1(N) --> WORKSPACE (AND PLACE HOLDING ARGUMENT TO TREGUP)
+C WRK2(N) --> WORKSPACE
+C WRK3(N) --> WORKSPACE
+C IPR --> DEVICE TO WHICH TO SEND OUTPUT
+C
+
+*/
+
+ int i;
+ double tmp,rnwtln;
+ double fplsp[] = new double[2];
+ double cln[] = new double[2];
+ double eta[] = new double[2];
+ boolean fstdog[] = new boolean[2];
+ boolean nwtake[] = new boolean[2];
+
+
+
+
+ iretcd[1] = 4;
+ fstdog[1] = true;
+ tmp = 0.0;
+
+ for (i = 1; i <= n; i++) {
+
+ tmp += sx[i]*sx[i]*p[i]*p[i];
+
+ }
+
+ rnwtln = Math.sqrt(tmp);
+
+ while (iretcd[1] > 1) {
+
+// FIND NEW STEP BY DOUBLE DOGLEG ALGORITHM
+
+ Uncmin_f77.dogstp_f77(n,g,a,p,sx,rnwtln,dlt,nwtake,fstdog,
+ wrk1,wrk2,cln,eta,sc,stepmx);
+
+// CHECK NEW POINT AND UPDATE TRUST REGION
+
+ Uncmin_f77.tregup_f77(n,x,f,g,a,minclass,sc,sx,nwtake,stepmx,
+ steptl,dlt,iretcd,wrk3,fplsp,xpls,fpls,
+ mxtake,2,wrk1);
+
+ }
+
+ return;
+
+ }
+
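+/*
+ The minclass argument above is any object that implements the
+ Uncmin_methods interface.  A minimal sketch of such a class, assuming the
+ interface declares f_to_minimize, gradient and hessian as instance methods
+ (they are invoked on the minclass object throughout this file), and
+ remembering that all vectors are 1-based (element 0 is ignored):
+
+     class Quadratic implements Uncmin_methods {
+
+        // f(x) = (x1 - 1)^2 + (x2 - 2)^2
+        public double f_to_minimize(double x[]) {
+           return (x[1] - 1.0)*(x[1] - 1.0) + (x[2] - 2.0)*(x[2] - 2.0);
+        }
+
+        // analytic gradient; leave the body empty if none is supplied
+        public void gradient(double x[], double g[]) {
+           g[1] = 2.0*(x[1] - 1.0);
+           g[2] = 2.0*(x[2] - 2.0);
+        }
+
+        // analytic Hessian (lower triangle only); leave empty if none is supplied
+        public void hessian(double x[], double h[][]) {
+           h[1][1] = 2.0;
+           h[2][1] = 0.0;
+           h[2][2] = 2.0;
+        }
+     }
+*/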
+
+/**
+*
+*<p>
+*The dogstp_f77 method finds the new step by the double dogleg
+*approach.
+*
+*Translated by Steve Verrill, April 21, 1998.
+*
+*@param n DIMENSION OF PROBLEM
+*@param g GRADIENT AT CURRENT ITERATE, G(X)
+*@param a CHOLESKY DECOMPOSITION OF HESSIAN IN
+* LOWER PART AND DIAGONAL
+*@param p NEWTON STEP
+*@param sx Scaling vector for x
+*@param rnwtln NEWTON STEP LENGTH
+*@param dlt TRUST REGION RADIUS
+*@param nwtake BOOLEAN, = true IF NEWTON STEP TAKEN
+*@param fstdog BOOLEAN, = true IF ON FIRST LEG OF DOGLEG
+*@param ssd WORKSPACE [CAUCHY STEP TO THE MINIMUM OF THE
+* QUADRATIC MODEL IN THE SCALED STEEPEST DESCENT
+* DIRECTION] [RETAIN VALUE BETWEEN SUCCESSIVE CALLS]
+*@param v WORKSPACE [RETAIN VALUE BETWEEN SUCCESSIVE CALLS]
+*@param cln CAUCHY LENGTH
+* [RETAIN VALUE BETWEEN SUCCESSIVE CALLS]
+*@param eta [RETAIN VALUE BETWEEN SUCCESSIVE CALLS]
+*@param sc CURRENT STEP
+*@param stepmx MAXIMUM ALLOWABLE STEP SIZE
+*
+*
+*/
+
+
+ public static void dogstp_f77(int n, double g[], double a[][], double p[],
+ double sx[], double rnwtln, double dlt[],
+ boolean nwtake[], boolean fstdog[], double ssd[],
+ double v[], double cln[], double eta[],
+ double sc[], double stepmx[]) {
+
+/*
+
+Here is a copy of the dogstp FORTRAN documentation:
+
+C
+C PURPOSE
+C -------
+C FIND NEW STEP BY DOUBLE DOGLEG ALGORITHM
+C
+C PARAMETERS
+C ----------
+C NR --> ROW DIMENSION OF MATRIX
+C N --> DIMENSION OF PROBLEM
+C G(N) --> GRADIENT AT CURRENT ITERATE, G(X)
+C A(N,N) --> CHOLESKY DECOMPOSITION OF HESSIAN IN
+C LOWER PART AND DIAGONAL
+C P(N) --> NEWTON STEP
+C SX(N) --> DIAGONAL SCALING MATRIX FOR X
+C RNWTLN --> NEWTON STEP LENGTH
+C DLT <--> TRUST REGION RADIUS
+C NWTAKE <--> BOOLEAN, =.TRUE. IF NEWTON STEP TAKEN
+C FSTDOG <--> BOOLEAN, =.TRUE. IF ON FIRST LEG OF DOGLEG
+C SSD(N) <--> WORKSPACE [CAUCHY STEP TO THE MINIMUM OF THE
+C QUADRATIC MODEL IN THE SCALED STEEPEST DESCENT
+C DIRECTION] [RETAIN VALUE BETWEEN SUCCESSIVE CALLS]
+C V(N) <--> WORKSPACE [RETAIN VALUE BETWEEN SUCCESSIVE CALLS]
+C CLN <--> CAUCHY LENGTH
+C [RETAIN VALUE BETWEEN SUCCESSIVE CALLS]
+C ETA [RETAIN VALUE BETWEEN SUCCESSIVE CALLS]
+C SC(N) <-- CURRENT STEP
+C IPR --> DEVICE TO WHICH TO SEND OUTPUT
+C STEPMX --> MAXIMUM ALLOWABLE STEP SIZE
+C
+C INTERNAL VARIABLES
+C ------------------
+C CLN LENGTH OF CAUCHY STEP
+C
+
+*/
+
+ double alpha,beta,tmp,dot1,dot2,alam;
+ int i,j;
+
+// CAN WE TAKE NEWTON STEP
+
+ if (rnwtln <= dlt[1]) {
+
+ nwtake[1] = true;
+
+ for (i = 1; i <= n; i++) {
+
+ sc[i] = p[i];
+
+ }
+
+ dlt[1] = rnwtln;
+
+ } else {
+
+// NEWTON STEP TOO LONG
+// CAUCHY STEP IS ON DOUBLE DOGLEG CURVE
+
+ nwtake[1] = false;
+
+ if (fstdog[1]) {
+
+// CALCULATE DOUBLE DOGLEG CURVE (SSD)
+
+ fstdog[1] = false;
+ alpha = 0.0;
+
+ for (i = 1; i <= n; i++) {
+
+ alpha += (g[i]*g[i])/(sx[i]*sx[i]);
+
+ }
+
+ beta = 0.0;
+
+ for (i = 1; i <= n; i++) {
+
+ tmp = 0.0;
+
+ for (j = i; j <= n; j++) {
+
+ tmp += (a[j][i]*g[j])/(sx[j]*sx[j]);
+
+ }
+
+ beta += tmp*tmp;
+
+ }
+
+ for (i = 1; i <= n; i++) {
+
+ ssd[i] = -(alpha/beta)*g[i]/sx[i];
+
+ }
+
+ cln[1] = alpha*Math.sqrt(alpha)/beta;
+
+ eta[1] = .2 +
+ (.8*alpha*alpha)/(-beta*Blas_f77.ddot_f77(n,g,1,p,1));
+
+ for (i = 1; i <= n; i++) {
+
+ v[i] = eta[1]*sx[i]*p[i] - ssd[i];
+
+ }
+
+ if (dlt[1] == -1.0) dlt[1] = Math.min(cln[1],stepmx[1]);
+
+ }
+
+ if (eta[1]*rnwtln <= dlt[1]) {
+
+// TAKE PARTIAL STEP IN NEWTON DIRECTION
+
+ for (i = 1; i <= n; i++) {
+
+ sc[i] = (dlt[1]/rnwtln)*p[i];
+
+ }
+
+ } else {
+
+ if (cln[1] >= dlt[1]) {
+
+// TAKE STEP IN STEEPEST DESCENT DIRECTION
+
+ for (i = 1; i <= n; i++) {
+
+ sc[i] = (dlt[1]/cln[1])*ssd[i]/sx[i];
+
+ }
+
+ } else {
+
+// CALCULATE CONVEX COMBINATION OF SSD AND ETA*P
+// WHICH HAS SCALED LENGTH DLT
+
+ dot1 = Blas_f77.ddot_f77(n,v,1,ssd,1);
+ dot2 = Blas_f77.ddot_f77(n,v,1,v,1);
+
+ alam = (-dot1 + Math.sqrt((dot1*dot1) -
+ dot2*(cln[1]*cln[1] - dlt[1]*dlt[1])))/dot2;
+
+ for (i = 1; i <= n; i++) {
+
+ sc[i] = (ssd[i] + alam*v[i])/sx[i];
+
+ }
+
+ }
+
+ }
+
+ }
+
+ return;
+
+ }
+
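+/*
+ The convex combination branch above chooses alam so that the step
+ ssd + alam*v has scaled length dlt.  Expanding
+
+     ||ssd + alam*v||**2 = cln**2 + 2*alam*dot1 + alam**2*dot2 = dlt**2,
+
+ where dot1 = v.ssd, dot2 = v.v and cln = ||ssd||, and taking the positive
+ root of this quadratic in alam gives
+
+     alam = (-dot1 + sqrt(dot1**2 - dot2*(cln**2 - dlt**2)))/dot2,
+
+ which is exactly the expression computed in the code.
+*/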
+
+/**
+*
+*<p>
+*The forslv_f77 method solves Ax = b where A is a lower triangular matrix.
+*
+*Translated by Steve Verrill, April 21, 1998.
+*
+*@param n The dimension of the problem
+*@param a The lower triangular matrix (preserved)
+*@param x The solution vector
+*@param b The right-hand side vector
+*
+*
+*/
+
+ public static void forslv_f77(int n, double a[][], double x[],
+ double b[]) {
+
+/*
+
+Here is a copy of the forslv FORTRAN documentation:
+
+ SUBROUTINE FORSLV(NR,N,A,X,B)
+
+C
+C PURPOSE
+C -------
+C SOLVE AX=B WHERE A IS LOWER TRIANGULAR MATRIX
+C
+C PARAMETERS
+C ----------
+C NR --> ROW DIMENSION OF MATRIX
+C N --> DIMENSION OF PROBLEM
+C A(N,N) --> LOWER TRIANGULAR MATRIX (PRESERVED)
+C X(N) <-- SOLUTION VECTOR
+C B(N) --> RIGHT-HAND SIDE VECTOR
+C
+C NOTE
+C ----
+C IF B IS NO LONGER REQUIRED BY CALLING ROUTINE,
+C THEN VECTORS B AND X MAY SHARE THE SAME STORAGE.
+C
+
+*/
+
+ int i,im1,j;
+ double sum;
+
+// SOLVE LX=B. (FORWARD SOLVE)
+
+ x[1] = b[1]/a[1][1];
+
+ for (i = 2; i <= n; i++) {
+
+ sum = 0.0;
+ im1 = i - 1;
+
+ for (j = 1; j <= im1; j++) {
+
+ sum += a[i][j]*x[j];
+
+ }
+
+ x[i] = (b[i] - sum)/a[i][i];
+
+ }
+
+ return;
+
+ }
+
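+/*
+ Usage sketch (hypothetical values).  Because forslv_f77 reads b[i] only
+ once, when x[i] is computed, the NOTE above applies: x and b may be the
+ same array, which is how hookst_f77 calls it below
+ (forslv_f77(n,a,wrk0,wrk0)).  For L = [[2, 0], [1, 3]] and b = (4, 8):
+
+     double a[][] = new double[3][3];
+     a[1][1] = 2.0;  a[2][1] = 1.0;  a[2][2] = 3.0;
+     double b[] = {0.0, 4.0, 8.0};
+     Uncmin_f77.forslv_f77(2, a, b, b);   // in place: b[1] = 2.0, b[2] = 2.0
+*/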
+
+/**
+*
+*<p>
+*The fstocd_f77 method finds a central difference approximation to the
+*gradient of the function to be minimized.
+*
+*Translated by Steve Verrill, April 21, 1998.
+*
+*@param n The dimension of the problem
+*@param x The point at which the gradient is to be approximated
+*@param minclass A class that implements the Uncmin_methods
+* interface (see the definition in
+* Uncmin_methods.java). See UncminTest_f77.java for an
+* example of such a class. The class must define:
+* 1.) a method, f_to_minimize, to minimize.
+* f_to_minimize must have the form
+*
+* public static double f_to_minimize(double x[])
+*
+* where x is the vector of arguments to the function
+* and the return value is the value of the function
+* evaluated at x.
+* 2.) a method, gradient, that has the form
+*
+* public static void gradient(double x[],
+* double g[])
+*
+* where g is the gradient of f evaluated at x. This
+* method will have an empty body if the user
+* does not wish to provide an analytic estimate
+* of the gradient.
+* 3.) a method, hessian, that has the form
+*
+* public static void hessian(double x[],
+* double h[][])
+* where h is the Hessian of f evaluated at x. This
+* method will have an empty body if the user
+* does not wish to provide an analytic estimate
+* of the Hessian. If the user wants Uncmin
+* to check the Hessian, then the hessian method
+* should only fill the lower triangle (and diagonal)
+* of h.
+*@param sx Scaling vector for x
+*@param rnoise Relative noise in the function to be minimized
+*@param g A central difference approximation to the gradient
+*
+*
+*/
+
+
+ public static void fstocd_f77(int n, double x[], Uncmin_methods minclass,
+ double sx[], double rnoise, double g[]) {
+
+/*
+
+Here is a copy of the fstocd FORTRAN documentation:
+
+ SUBROUTINE FSTOCD (N, X, FCN, SX, RNOISE, G)
+
+C PURPOSE
+C -------
+C FIND CENTRAL DIFFERENCE APPROXIMATION G TO THE FIRST DERIVATIVE
+C (GRADIENT) OF THE FUNCTION DEFINED BY FCN AT THE POINT X.
+C
+C PARAMETERS
+C ----------
+C N --> DIMENSION OF PROBLEM
+C X --> POINT AT WHICH GRADIENT IS TO BE APPROXIMATED.
+C FCN --> NAME OF SUBROUTINE TO EVALUATE FUNCTION.
+C SX --> DIAGONAL SCALING MATRIX FOR X.
+C RNOISE --> RELATIVE NOISE IN FCN [F(X)].
+C G <-- CENTRAL DIFFERENCE APPROXIMATION TO GRADIENT.
+C
+C
+
+*/
+
+ double stepi,xtempi,fplus,fminus,xmult;
+ int i;
+
+// FIND I-TH STEPSIZE, EVALUATE TWO NEIGHBORS IN DIRECTION OF I-TH
+// UNIT VECTOR, AND EVALUATE I-TH COMPONENT OF GRADIENT.
+
+ xmult = Math.pow(rnoise,1.0/3.0);
+
+ for (i = 1; i <= n; i++) {
+
+ stepi = xmult*Math.max(Math.abs(x[i]),1.0/sx[i]);
+ xtempi = x[i];
+
+ x[i] = xtempi + stepi;
+ fplus = minclass.f_to_minimize(x);
+
+ x[i] = xtempi - stepi;
+ fminus = minclass.f_to_minimize(x);
+
+ x[i] = xtempi;
+
+ g[i] = (fplus - fminus)/(2.0*stepi);
+
+ }
+
+ return;
+
+ }
+
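+/*
+ A small illustration (hypothetical values).  For f(x) = x1*x1 at x1 = 1.0
+ with sx[1] = 1.0 and rnoise = 1.0e-12, the step is
+ stepi = rnoise**(1/3) * max(|x1|, 1/sx[1]) = 1.0e-4, and the central
+ difference
+
+     g[1] = (f(1.0001) - f(0.9999))/(2*0.0001) = 2.0
+
+ recovers the exact derivative 2*x1, since the O(stepi**2) truncation error
+ of a central difference vanishes for a quadratic.
+*/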
+
+/**
+*
+*<p>
+*This version of the fstofd_f77 method finds a finite difference approximation
+*to the Hessian.
+*
+*
+*Translated by Steve Verrill, April 22, 1998.
+*
+*@param n The dimension of the problem
+*@param xpls New iterate
+*@param minclass A class that implements the Uncmin_methods
+* interface (see the definition in
+* Uncmin_methods.java). See UncminTest_f77.java for an
+* example of such a class. The class must define:
+* 1.) a method, f_to_minimize, to minimize.
+* f_to_minimize must have the form
+*
+* public static double f_to_minimize(double x[])
+*
+* where x is the vector of arguments to the function
+* and the return value is the value of the function
+* evaluated at x.
+* 2.) a method, gradient, that has the form
+*
+* public static void gradient(double x[],
+* double g[])
+*
+* where g is the gradient of f evaluated at x. This
+* method will have an empty body if the user
+* does not wish to provide an analytic estimate
+* of the gradient.
+* 3.) a method, hessian, that has the form
+*
+* public static void hessian(double x[],
+* double h[][])
+* where h is the Hessian of f evaluated at x. This
+* method will have an empty body if the user
+* does not wish to provide an analytic estimate
+* of the Hessian. If the user wants Uncmin
+* to check the Hessian, then the hessian method
+* should only fill the lower triangle (and diagonal)
+* of h.
+*@param fpls fpls[1] -- fpls[n] contains the gradient
+* of the function to minimize
+*@param a "FINITE DIFFERENCE APPROXIMATION. ONLY
+* LOWER TRIANGULAR MATRIX AND DIAGONAL ARE RETURNED"
+*@param sx Scaling vector for x
+*@param rnoise Relative noise in the function to be minimized
+*@param fhat Workspace
+*
+*/
+
+ public static void fstofd_f77(int n, double xpls[], Uncmin_methods minclass,
+ double fpls[], double a[][], double sx[],
+ double rnoise, double fhat[]) {
+
+/*
+
+Here is a copy of the fstofd FORTRAN documentation. This
+is not entirely relevant as this Java version of fstofd
+only estimates the Hessian.
+
+ SUBROUTINE FSTOFD(NR,M,N,XPLS,FCN,FPLS,A,SX,RNOISE,FHAT,ICASE)
+
+C PURPOSE
+C -------
+C FIND FIRST ORDER FORWARD FINITE DIFFERENCE APPROXIMATION "A" TO THE
+C FIRST DERIVATIVE OF THE FUNCTION DEFINED BY THE SUBPROGRAM "FNAME"
+C EVALUATED AT THE NEW ITERATE "XPLS".
+C
+C
+C FOR OPTIMIZATION USE THIS ROUTINE TO ESTIMATE:
+C 1) THE FIRST DERIVATIVE (GRADIENT) OF THE OPTIMIZATION FUNCTION "FCN
+C ANALYTIC USER ROUTINE HAS BEEN SUPPLIED;
+C 2) THE SECOND DERIVATIVE (HESSIAN) OF THE OPTIMIZATION FUNCTION
+C IF NO ANALYTIC USER ROUTINE HAS BEEN SUPPLIED FOR THE HESSIAN BUT
+C ONE HAS BEEN SUPPLIED FOR THE GRADIENT ("FCN") AND IF THE
+C OPTIMIZATION FUNCTION IS INEXPENSIVE TO EVALUATE
+C
+C NOTE
+C ----
+C _M=1 (OPTIMIZATION) ALGORITHM ESTIMATES THE GRADIENT OF THE FUNCTION
+C (FCN). FCN(X) # F: R(N)-->R(1)
+C _M=N (SYSTEMS) ALGORITHM ESTIMATES THE JACOBIAN OF THE FUNCTION
+C FCN(X) # F: R(N)-->R(N).
+C _M=N (OPTIMIZATION) ALGORITHM ESTIMATES THE HESSIAN OF THE OPTIMIZATIO
+C FUNCTION, WHERE THE HESSIAN IS THE FIRST DERIVATIVE OF "FCN"
+C
+C PARAMETERS
+C ----------
+C NR --> ROW DIMENSION OF MATRIX
+C M --> NUMBER OF ROWS IN A
+C N --> NUMBER OF COLUMNS IN A; DIMENSION OF PROBLEM
+C XPLS(N) --> NEW ITERATE: X[K]
+C FCN --> NAME OF SUBROUTINE TO EVALUATE FUNCTION
+C FPLS(M) --> _M=1 (OPTIMIZATION) FUNCTION VALUE AT NEW ITERATE:
+C FCN(XPLS)
+C _M=N (OPTIMIZATION) VALUE OF FIRST DERIVATIVE
+C (GRADIENT) GIVEN BY USER FUNCTION FCN
+C _M=N (SYSTEMS) FUNCTION VALUE OF ASSOCIATED
+C MINIMIZATION FUNCTION
+C A(NR,N) <-- FINITE DIFFERENCE APPROXIMATION (SEE NOTE). ONLY
+C LOWER TRIANGULAR MATRIX AND DIAGONAL ARE RETURNED
+C SX(N) --> DIAGONAL SCALING MATRIX FOR X
+C RNOISE --> RELATIVE NOISE IN FCN [F(X)]
+C FHAT(M) --> WORKSPACE
+C ICASE --> =1 OPTIMIZATION (GRADIENT)
+C =2 SYSTEMS
+C =3 OPTIMIZATION (HESSIAN)
+C
+C INTERNAL VARIABLES
+C ------------------
+C STEPSZ - STEPSIZE IN THE J-TH VARIABLE DIRECTION
+C
+
+*/
+
+ double xmult,stepsz,xtmpj;
+ int i,j,nm1;
+
+ xmult = Math.sqrt(rnoise);
+
+
+ for (j = 1; j <= n; j++) {
+
+ stepsz = xmult*Math.max(Math.abs(xpls[j]),1.0/sx[j]);
+ xtmpj = xpls[j];
+ xpls[j] = xtmpj + stepsz;
+
+ minclass.gradient(xpls,fhat);
+
+ xpls[j] = xtmpj;
+
+ for (i = 1; i <= n; i++) {
+
+ a[i][j] = (fhat[i] - fpls[i])/stepsz;
+
+ }
+
+ }
+
+ nm1 = n - 1;
+
+ for (j = 1; j <= nm1; j++) {
+
+ for (i = j+1; i <= n; i++) {
+
+ a[i][j] = (a[i][j] + a[j][i])/2.0;
+
+ }
+
+ }
+
+ return;
+
+ }
+
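+/*
+ In this version each column j of the approximate Hessian is a forward
+ difference of the user-supplied gradient,
+
+     a[i][j] = (g_i(xpls + stepsz*e_j) - g_i(xpls))/stepsz,
+
+ with stepsz = sqrt(rnoise)*max(|xpls[j]|, 1/sx[j]), and the final loop
+ symmetrizes the strict lower triangle by averaging a[i][j] and a[j][i].
+ The routine perturbs xpls[j] only temporarily and restores it, so xpls is
+ unchanged on exit.
+*/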
+
+/**
+*
+*<p>
+*This version of the fstofd_f77 method finds first order finite difference
+*approximations for gradients.
+*
+*Translated by Steve Verrill, April 22, 1998.
+*
+*@param n The dimension of the problem
+*@param xpls New iterate
+*@param minclass A class that implements the Uncmin_methods
+* interface (see the definition in
+* Uncmin_methods.java). See UncminTest_f77.java for an
+* example of such a class. The class must define:
+* 1.) a method, f_to_minimize, to minimize.
+* f_to_minimize must have the form
+*
+* public static double f_to_minimize(double x[])
+*
+* where x is the vector of arguments to the function
+* and the return value is the value of the function
+* evaluated at x.
+* 2.) a method, gradient, that has the form
+*
+* public static void gradient(double x[],
+* double g[])
+*
+* where g is the gradient of f evaluated at x. This
+* method will have an empty body if the user
+* does not wish to provide an analytic estimate
+* of the gradient.
+* 3.) a method, hessian, that has the form
+*
+* public static void hessian(double x[],
+* double h[][])
+* where h is the Hessian of f evaluated at x. This
+* method will have an empty body if the user
+* does not wish to provide an analytic estimate
+* of the Hessian. If the user wants Uncmin
+* to check the Hessian, then the hessian method
+* should only fill the lower triangle (and diagonal)
+* of h.
+*@param fpls fpls contains the value of the
+* function to minimize at the new iterate
+*@param g finite difference approximation to the gradient
+*@param sx Scaling vector for x
+*@param rnoise Relative noise in the function to be minimized
+*
+*/
+
+
+ public static void fstofd_f77(int n, double xpls[], Uncmin_methods minclass,
+ double fpls[], double g[], double sx[],
+ double rnoise) {
+
+/*
+
+Here is a copy of the fstofd FORTRAN documentation. It
+is not entirely relevant here as this particular
+Java method is only used for gradient calculations.
+
+ SUBROUTINE FSTOFD(NR,M,N,XPLS,FCN,FPLS,A,SX,RNOISE,FHAT,ICASE)
+
+C PURPOSE
+C -------
+C FIND FIRST ORDER FORWARD FINITE DIFFERENCE APPROXIMATION "A" TO THE
+C FIRST DERIVATIVE OF THE FUNCTION DEFINED BY THE SUBPROGRAM "FNAME"
+C EVALUATED AT THE NEW ITERATE "XPLS".
+C
+C
+C FOR OPTIMIZATION USE THIS ROUTINE TO ESTIMATE:
+C 1) THE FIRST DERIVATIVE (GRADIENT) OF THE OPTIMIZATION FUNCTION "FCN
+C ANALYTIC USER ROUTINE HAS BEEN SUPPLIED;
+C 2) THE SECOND DERIVATIVE (HESSIAN) OF THE OPTIMIZATION FUNCTION
+C IF NO ANALYTIC USER ROUTINE HAS BEEN SUPPLIED FOR THE HESSIAN BUT
+C ONE HAS BEEN SUPPLIED FOR THE GRADIENT ("FCN") AND IF THE
+C OPTIMIZATION FUNCTION IS INEXPENSIVE TO EVALUATE
+C
+C NOTE
+C ----
+C _M=1 (OPTIMIZATION) ALGORITHM ESTIMATES THE GRADIENT OF THE FUNCTION
+C (FCN). FCN(X) # F: R(N)-->R(1)
+C _M=N (SYSTEMS) ALGORITHM ESTIMATES THE JACOBIAN OF THE FUNCTION
+C FCN(X) # F: R(N)-->R(N).
+C _M=N (OPTIMIZATION) ALGORITHM ESTIMATES THE HESSIAN OF THE OPTIMIZATIO
+C FUNCTION, WHERE THE HESSIAN IS THE FIRST DERIVATIVE OF "FCN"
+C
+C PARAMETERS
+C ----------
+C NR --> ROW DIMENSION OF MATRIX
+C M --> NUMBER OF ROWS IN A
+C N --> NUMBER OF COLUMNS IN A; DIMENSION OF PROBLEM
+C XPLS(N) --> NEW ITERATE: X[K]
+C FCN --> NAME OF SUBROUTINE TO EVALUATE FUNCTION
+C FPLS(M) --> _M=1 (OPTIMIZATION) FUNCTION VALUE AT NEW ITERATE:
+C FCN(XPLS)
+C _M=N (OPTIMIZATION) VALUE OF FIRST DERIVATIVE
+C (GRADIENT) GIVEN BY USER FUNCTION FCN
+C _M=N (SYSTEMS) FUNCTION VALUE OF ASSOCIATED
+C MINIMIZATION FUNCTION
+C A(NR,N) <-- FINITE DIFFERENCE APPROXIMATION (SEE NOTE). ONLY
+C LOWER TRIANGULAR MATRIX AND DIAGONAL ARE RETURNED
+C SX(N) --> DIAGONAL SCALING MATRIX FOR X
+C RNOISE --> RELATIVE NOISE IN FCN [F(X)]
+C FHAT(M) --> WORKSPACE
+C ICASE --> =1 OPTIMIZATION (GRADIENT)
+C =2 SYSTEMS
+C =3 OPTIMIZATION (HESSIAN)
+C
+C INTERNAL VARIABLES
+C ------------------
+C STEPSZ - STEPSIZE IN THE J-TH VARIABLE DIRECTION
+C
+
+*/
+
+ double xmult,stepsz,xtmpj,fhat;
+ int j;
+
+ xmult = Math.sqrt(rnoise);
+
+// gradient
+
+ for (j = 1; j <= n; j++) {
+
+ stepsz = xmult*Math.max(Math.abs(xpls[j]),1.0/sx[j]);
+ xtmpj = xpls[j];
+ xpls[j] = xtmpj + stepsz;
+
+ fhat = minclass.f_to_minimize(xpls);
+
+ xpls[j] = xtmpj;
+
+ g[j] = (fhat - fpls[1])/stepsz;
+
+ }
+
+ return;
+
+ }
+
+
+/**
+*
+*<p>
+*The grdchk_f77 method checks the analytic gradient supplied
+*by the user.
+*
+*Translated by Steve Verrill, April 22, 1998.
+*
+*@param n The dimension of the problem
+*@param x The location at which the gradient is to be checked
+*@param minclass A class that implements the Uncmin_methods
+* interface (see the definition in
+* Uncmin_methods.java). See UncminTest_f77.java for an
+* example of such a class. The class must define:
+* 1.) a method, f_to_minimize, to minimize.
+* f_to_minimize must have the form
+*
+* public static double f_to_minimize(double x[])
+*
+* where x is the vector of arguments to the function
+* and the return value is the value of the function
+* evaluated at x.
+* 2.) a method, gradient, that has the form
+*
+* public static void gradient(double x[],
+* double g[])
+*
+* where g is the gradient of f evaluated at x. This
+* method will have an empty body if the user
+* does not wish to provide an analytic estimate
+* of the gradient.
+* 3.) a method, hessian, that has the form
+*
+* public static void hessian(double x[],
+* double h[][])
+* where h is the Hessian of f evaluated at x. This
+* method will have an empty body if the user
+* does not wish to provide an analytic estimate
+* of the Hessian. If the user wants Uncmin
+* to check the Hessian, then the hessian method
+* should only fill the lower triangle (and diagonal)
+* of h.
+*@param f Function value
+*@param g Analytic gradient
+*@param typsiz Typical size for each component of x
+*@param sx Scaling vector for x: sx[i] = 1.0/typsiz[i]
+*@param fscale Estimate of scale of f_to_minimize
+*@param rnf Relative noise in f_to_minimize
+*@param analtl Tolerance for comparison of estimated and
+* analytical gradients
+*@param gest Finite difference gradient
+*
+*/
+
+ public static void grdchk_f77(int n, double x[], Uncmin_methods minclass,
+ double f[], double g[], double typsiz[],
+ double sx[], double fscale[], double rnf,
+ double analtl, double gest[]) {
+
+/*
+
+Here is a copy of the grdchk FORTRAN documentation:
+
+ SUBROUTINE GRDCHK(N,X,FCN,F,G,TYPSIZ,SX,FSCALE,RNF,
+ + ANALTL,WRK1,MSG,IPR)
+
+C
+C PURPOSE
+C -------
+C CHECK ANALYTIC GRADIENT AGAINST ESTIMATED GRADIENT
+C
+C PARAMETERS
+C ----------
+C N --> DIMENSION OF PROBLEM
+C X(N) --> ESTIMATE TO A ROOT OF FCN
+C FCN --> NAME OF SUBROUTINE TO EVALUATE OPTIMIZATION FUNCTION
+C MUST BE DECLARED EXTERNAL IN CALLING ROUTINE
+C FCN: R(N) --> R(1)
+C F --> FUNCTION VALUE: FCN(X)
+C G(N) --> GRADIENT: G(X)
+C TYPSIZ(N) --> TYPICAL SIZE FOR EACH COMPONENT OF X
+C SX(N) --> DIAGONAL SCALING MATRIX: SX(I)=1./TYPSIZ(I)
+C FSCALE --> ESTIMATE OF SCALE OF OBJECTIVE FUNCTION FCN
+C RNF --> RELATIVE NOISE IN OPTIMIZATION FUNCTION FCN
+C ANALTL --> TOLERANCE FOR COMPARISON OF ESTIMATED AND
+C ANALYTICAL GRADIENTS
+C WRK1(N) --> WORKSPACE
+C MSG <-- MESSAGE OR ERROR CODE
+C ON OUTPUT: =-21, PROBABLE CODING ERROR OF GRADIENT
+C IPR --> DEVICE TO WHICH TO SEND OUTPUT
+C
+
+*/
+
+ double gs;
+ int ker,i;
+
+// COMPUTE FIRST ORDER FINITE DIFFERENCE GRADIENT AND COMPARE TO
+// ANALYTIC GRADIENT.
+
+ Uncmin_f77.fstofd_f77(n,x,minclass,f,gest,sx,rnf);
+
+ ker = 0;
+
+ for (i = 1; i <= n; i++) {
+
+ gs = Math.max(Math.abs(f[1]),fscale[1])/Math.max(Math.abs(x[i]),typsiz[i]);
+
+ if (Math.abs(g[i] - gest[i]) >
+ Math.max(Math.abs(g[i]),gs)*analtl) ker = 1;
+
+ }
+
+ if (ker == 0) return;
+
+ System.out.print("\nThere appears to be an error in the coding");
+ System.out.print(" of the gradient method.\n\n\n");
+ System.out.print("Component Analytic Finite Difference\n\n");
+
+ for (i = 1; i <= n; i++) {
+
+ System.out.println(i + " " + g[i] + " " + gest[i]);
+
+ }
+
+ System.exit(0);
+
+ }
+
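+/*
+ The acceptance test above flags component i whenever
+
+     |g[i] - gest[i]| > max(|g[i]|, gs) * analtl,
+
+ where gs = max(|f|, fscale)/max(|x[i]|, typsiz[i]) keeps the comparison
+ meaningful when the analytic gradient component is near zero.  For example
+ (hypothetical numbers), with f = 1, fscale = 1, x[i] = 1, typsiz[i] = 1,
+ g[i] = 0 and analtl = 0.01, a finite difference estimate gest[i] = 0.005
+ passes, while gest[i] = 0.02 triggers the coding-error report.
+*/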
+
+/**
+*
+*<p>
+*The heschk_f77 method checks the analytic Hessian supplied
+*by the user.
+*
+*Translated by Steve Verrill, April 23, 1998.
+*
+*@param n The dimension of the problem
+*@param x The location at which the Hessian is to be checked
+*@param minclass A class that implements the Uncmin_methods
+* interface (see the definition in
+* Uncmin_methods.java). See UncminTest_f77.java for an
+* example of such a class. The class must define:
+* 1.) a method, f_to_minimize, to minimize.
+* f_to_minimize must have the form
+*
+* public static double f_to_minimize(double x[])
+*
+* where x is the vector of arguments to the function
+* and the return value is the value of the function
+* evaluated at x.
+* 2.) a method, gradient, that has the form
+*
+* public static void gradient(double x[],
+* double g[])
+*
+* where g is the gradient of f evaluated at x. This
+* method will have an empty body if the user
+* does not wish to provide an analytic estimate
+* of the gradient.
+* 3.) a method, hessian, that has the form
+*
+* public static void hessian(double x[],
+* double h[][])
+* where h is the Hessian of f evaluated at x. This
+* method will have an empty body if the user
+* does not wish to provide an analytic estimate
+* of the Hessian. If the user wants Uncmin
+* to check the Hessian, then the hessian method
+* should only fill the lower triangle (and diagonal)
+* of h.
+*@param f Function value
+*@param g Gradient
+*@param a On exit: Hessian in lower triangle
+*@param typsiz Typical size for each component of x
+*@param sx Scaling vector for x: sx[i] = 1.0/typsiz[i]
+*@param rnf Relative noise in f_to_minimize
+*@param analtl Tolerance for comparison of estimated and
+* analytic gradients
+*@param iagflg = 1 if an analytic gradient is supplied
+*@param udiag Workspace
+*@param wrk1 Workspace
+*@param wrk2 Workspace
+*/
+
+ public static void heschk_f77(int n, double x[], Uncmin_methods minclass,
+ double f[], double g[], double a[][],
+ double typsiz[],
+ double sx[], double rnf,
+ double analtl, int iagflg[],
+ double udiag[], double wrk1[],
+ double wrk2[]) {
+
+/*
+
+Here is a copy of the heschk FORTRAN documentation:
+
+ SUBROUTINE HESCHK(NR,N,X,FCN,D1FCN,D2FCN,F,G,A,TYPSIZ,SX,RNF,
+ + ANALTL,IAGFLG,UDIAG,WRK1,WRK2,MSG,IPR)
+
+C
+C PURPOSE
+C -------
+C CHECK ANALYTIC HESSIAN AGAINST ESTIMATED HESSIAN
+C (THIS MAY BE DONE ONLY IF THE USER SUPPLIED ANALYTIC HESSIAN
+C D2FCN FILLS ONLY THE LOWER TRIANGULAR PART AND DIAGONAL OF A)
+C
+C PARAMETERS
+C ----------
+C NR --> ROW DIMENSION OF MATRIX
+C N --> DIMENSION OF PROBLEM
+C X(N) --> ESTIMATE TO A ROOT OF FCN
+C FCN --> NAME OF SUBROUTINE TO EVALUATE OPTIMIZATION FUNCTION
+C MUST BE DECLARED EXTERNAL IN CALLING ROUTINE
+C FCN: R(N) --> R(1)
+C D1FCN --> NAME OF SUBROUTINE TO EVALUATE GRADIENT OF FCN.
+C MUST BE DECLARED EXTERNAL IN CALLING ROUTINE
+C D2FCN --> NAME OF SUBROUTINE TO EVALUATE HESSIAN OF FCN.
+C MUST BE DECLARED EXTERNAL IN CALLING ROUTINE
+C F --> FUNCTION VALUE: FCN(X)
+C G(N) <-- GRADIENT: G(X)
+C A(N,N) <-- ON EXIT: HESSIAN IN LOWER TRIANGULAR PART AND DIAG
+C TYPSIZ(N) --> TYPICAL SIZE FOR EACH COMPONENT OF X
+C SX(N) --> DIAGONAL SCALING MATRIX: SX(I)=1./TYPSIZ(I)
+C RNF --> RELATIVE NOISE IN OPTIMIZATION FUNCTION FCN
+C ANALTL --> TOLERANCE FOR COMPARISON OF ESTIMATED AND
+C ANALYTICAL GRADIENTS
+C IAGFLG --> =1 IF ANALYTIC GRADIENT SUPPLIED
+C UDIAG(N) --> WORKSPACE
+C WRK1(N) --> WORKSPACE
+C WRK2(N) --> WORKSPACE
+C MSG <--> MESSAGE OR ERROR CODE
+C ON INPUT : IF =1XX DO NOT COMPARE ANAL + EST HESS
+C ON OUTPUT: =-22, PROBABLE CODING ERROR OF HESSIAN
+C IPR --> DEVICE TO WHICH TO SEND OUTPUT
+C
+
+*/
+
+ int i,j,ker;
+ double hs;
+
+
+// COMPUTE FINITE DIFFERENCE APPROXIMATION H TO THE HESSIAN.
+
+ if (iagflg[1] == 1) Uncmin_f77.fstofd_f77(n,x,minclass,g,a,sx,rnf,wrk1);
+
+ if (iagflg[1] != 1) Uncmin_f77.sndofd_f77(n,x,minclass,f,a,sx,rnf,wrk1,wrk2);
+
+ ker = 0;
+
+// COPY LOWER TRIANGULAR PART OF H TO UPPER TRIANGULAR PART
+// AND DIAGONAL OF H TO UDIAG
+
+ for (j = 1; j <= n; j++) {
+
+ udiag[j] = a[j][j];
+
+ for (i = j + 1; i <= n; i++) {
+
+ a[j][i] = a[i][j];
+
+ }
+
+ }
+
+// COMPUTE ANALYTIC HESSIAN AND COMPARE TO FINITE DIFFERENCE
+// APPROXIMATION.
+
+ minclass.hessian(x,a);
+
+ for (j = 1;j <= n; j++) {
+
+ hs = Math.max(Math.abs(g[j]),1.0)/Math.max(Math.abs(x[j]),typsiz[j]);
+
+ if (Math.abs(a[j][j] - udiag[j]) >
+ Math.max(Math.abs(udiag[j]),hs)*analtl) ker = 1;
+
+ for (i = j + 1; i <= n; i++) {
+
+ if (Math.abs(a[i][j] - a[j][i]) >
+ Math.max(Math.abs(a[i][j]),hs)*analtl) ker = 1;
+
+ }
+
+ }
+
+ if (ker == 0) return;
+
+ System.out.print("\nThere appears to be an error in the coding");
+ System.out.print(" of the Hessian method.\n\n\n");
+ System.out.print("Row Column Analytic Finite Difference\n\n");
+
+ for (i = 1; i <= n; i++) {
+
+ for (j = 1; j < i; j++) {
+
+ System.out.println(i + " " + j + " " + a[i][j] + " " + a[j][i]);
+
+ }
+
+ System.out.println(i + " " + i + " " + a[i][i] + " " + udiag[i]);
+
+ }
+
+ System.exit(0);
+
+ }
+
+
+/**
+*
+*<p>
+*The hookdr_f77 method finds a next Newton iterate (xpls) by the More-Hebdon
+*technique. It drives hookst_f77.
+*
+*Translated by Steve Verrill, April 23, 1998.
+*
+*@param n The dimension of the problem
+*@param x The old iterate
+*@param f The function value at the old iterate
+*@param g Gradient or approximation at old iterate
+*@param a Cholesky decomposition of Hessian in lower triangle
+* and diagonal. Hessian in upper triangle and udiag.
+*@param udiag Diagonal of Hessian in a
+*@param p Newton step
+*@param xpls New iterate
+*@param fpls Function value at the new iterate
+*@param minclass A class that implements the Uncmin_methods
+* interface (see the definition in
+* Uncmin_methods.java). See UncminTest_f77.java for an
+* example of such a class. The class must define:
+* 1.) a method, f_to_minimize, to minimize.
+* f_to_minimize must have the form
+*
+* public static double f_to_minimize(double x[])
+*
+* where x is the vector of arguments to the function
+* and the return value is the value of the function
+* evaluated at x.
+* 2.) a method, gradient, that has the form
+*
+* public static void gradient(double x[],
+* double g[])
+*
+* where g is the gradient of f evaluated at x. This
+* method will have an empty body if the user
+* does not wish to provide an analytic estimate
+* of the gradient.
+* 3.) a method, hessian, that has the form
+*
+* public static void hessian(double x[],
+* double h[][])
+* where h is the Hessian of f evaluated at x. This
+* method will have an empty body if the user
+* does not wish to provide an analytic estimate
+* of the Hessian. If the user wants Uncmin
+* to check the Hessian, then the hessian method
+* should only fill the lower triangle (and diagonal)
+* of h.
+*@param sx Scaling vector for x
+*@param stepmx Maximum allowable step size
+*@param steptl Relative step size at which consecutive iterates
+* are considered close enough to terminate the algorithm
+*@param dlt Trust region radius
+*@param iretcd Return code
+* = 0 satisfactory xpls found
+* = 1 failed to find satisfactory xpls
+* sufficiently distinct from x
+*@param mxtake Boolean flag indicating step of maximum length used
+*@param amu [Retain value between successive calls]
+*@param dltp [Retain value between successive calls]
+*@param phi [Retain value between successive calls]
+*@param phip0 [Retain value between successive calls]
+*@param sc Workspace
+*@param xplsp Workspace
+*@param wrk0 Workspace
+*@param epsm Machine epsilon
+*@param itncnt Iteration count
+*
+*/
+
+ public static void hookdr_f77(int n, double x[], double f[], double g[],
+ double a[][], double udiag[],
+ double p[], double xpls[], double fpls[],
+ Uncmin_methods minclass,
+ double sx[], double stepmx[],
+ double steptl[], double dlt[],
+ int iretcd[], boolean mxtake[],
+ double amu[], double dltp[],
+ double phi[], double phip0[],
+ double sc[], double xplsp[],
+ double wrk0[], double epsm,
+ int itncnt[]) {
+
+/*
+
+Here is a copy of the hookdr FORTRAN documentation:
+
+ SUBROUTINE HOOKDR(NR,N,X,F,G,A,UDIAG,P,XPLS,FPLS,FCN,SX,STEPMX,
+ + STEPTL,DLT,IRETCD,MXTAKE,AMU,DLTP,PHI,PHIP0,
+ + SC,XPLSP,WRK0,EPSM,ITNCNT,IPR)
+
+C
+C PURPOSE
+C -------
+C FIND A NEXT NEWTON ITERATE (XPLS) BY THE MORE-HEBDON METHOD
+C
+C PARAMETERS
+C ----------
+C NR --> ROW DIMENSION OF MATRIX
+C N --> DIMENSION OF PROBLEM
+C X(N) --> OLD ITERATE X[K-1]
+C F --> FUNCTION VALUE AT OLD ITERATE, F(X)
+C G(N) --> GRADIENT AT OLD ITERATE, G(X), OR APPROXIMATE
+C A(N,N) --> CHOLESKY DECOMPOSITION OF HESSIAN IN LOWER
+C TRIANGULAR PART AND DIAGONAL.
+C HESSIAN IN UPPER TRIANGULAR PART AND UDIAG.
+C UDIAG(N) --> DIAGONAL OF HESSIAN IN A(.,.)
+C P(N) --> NEWTON STEP
+C XPLS(N) <-- NEW ITERATE X[K]
+C FPLS <-- FUNCTION VALUE AT NEW ITERATE, F(XPLS)
+C FCN --> NAME OF SUBROUTINE TO EVALUATE FUNCTION
+C SX(N) --> DIAGONAL SCALING MATRIX FOR X
+C STEPMX --> MAXIMUM ALLOWABLE STEP SIZE
+C STEPTL --> RELATIVE STEP SIZE AT WHICH SUCCESSIVE ITERATES
+C CONSIDERED CLOSE ENOUGH TO TERMINATE ALGORITHM
+C DLT <--> TRUST REGION RADIUS
+C IRETCD <-- RETURN CODE
+C =0 SATISFACTORY XPLS FOUND
+C =1 FAILED TO FIND SATISFACTORY XPLS SUFFICIENTLY
+C DISTINCT FROM X
+C MXTAKE <-- BOOLEAN FLAG INDICATING STEP OF MAXIMUM LENGTH USED
+C AMU <--> [RETAIN VALUE BETWEEN SUCCESSIVE CALLS]
+C DLTP <--> [RETAIN VALUE BETWEEN SUCCESSIVE CALLS]
+C PHI <--> [RETAIN VALUE BETWEEN SUCCESSIVE CALLS]
+C PHIP0 <--> [RETAIN VALUE BETWEEN SUCCESSIVE CALLS]
+C SC(N) --> WORKSPACE
+C XPLSP(N) --> WORKSPACE
+C WRK0(N) --> WORKSPACE
+C EPSM --> MACHINE EPSILON
+C ITNCNT --> ITERATION COUNT
+C IPR --> DEVICE TO WHICH TO SEND OUTPUT
+C
+
+*/
+
+
+ int i,j;
+ boolean fstime[] = new boolean[2];
+ boolean nwtake[] = new boolean[2];
+ double tmp,rnwtln,alpha,beta;
+
+ double fplsp[] = new double[2];
+
+ iretcd[1] = 4;
+ fstime[1] = true;
+
+ tmp = 0.0;
+
+ for (i = 1; i <= n; i++) {
+
+ tmp += sx[i]*sx[i]*p[i]*p[i];
+
+ }
+
+ rnwtln = Math.sqrt(tmp);
+
+ if (itncnt[1] == 1) {
+
+ amu[1] = 0.0;
+
+// IF FIRST ITERATION AND TRUST REGION NOT PROVIDED BY USER,
+// COMPUTE INITIAL TRUST REGION.
+
+ if (dlt[1] == -1.0) {
+
+ alpha = 0.0;
+
+ for (i = 1; i <= n; i++) {
+
+ alpha += (g[i]*g[i])/(sx[i]*sx[i]);
+
+ }
+
+ beta = 0.0;
+
+ for (i = 1; i <= n; i++) {
+
+ tmp = 0.0;
+
+ for (j = i; j <= n; j++) {
+
+ tmp += (a[j][i]*g[j])/(sx[j]*sx[j]);
+
+ }
+
+ beta += tmp*tmp;
+
+ }
+
+ dlt[1] = alpha*Math.sqrt(alpha)/beta;
+ dlt[1] = Math.min(dlt[1],stepmx[1]);
+
+ }
+
+ }
+
+ while (iretcd[1] > 1) {
+
+// FIND NEW STEP BY MORE-HEBDON ALGORITHM
+
+ Uncmin_f77.hookst_f77(n,g,a,udiag,p,sx,rnwtln,dlt,
+ amu,dltp,phi,phip0,
+ fstime,sc,nwtake,wrk0,epsm);
+
+ dltp[1] = dlt[1];
+
+// CHECK NEW POINT AND UPDATE TRUST REGION
+
+ Uncmin_f77.tregup_f77(n,x,f,g,a,minclass,sc,sx,nwtake,stepmx,
+ steptl,dlt,iretcd,xplsp,fplsp,xpls,fpls,
+ mxtake,3,udiag);
+
+ }
+
+ return;
+
+ }
+
+
+/**
+*
+*<p>
+*The hookst_f77 method finds a new step by the More-Hebdon algorithm.
+*It is driven by hookdr_f77.
+*
+*Translated by Steve Verrill, April 24, 1998.
+*
+*@param n The dimension of the problem
+*@param g The gradient at the current iterate
+*@param a Cholesky decomposition of the Hessian in
+* the lower triangle and diagonal. Hessian
+* or approximation in upper triangle (and udiag).
+*@param udiag Diagonal of Hessian in a
+*@param p Newton step
+*@param sx Scaling vector for x
+*@param rnwtln Newton step length
+*@param dlt Trust region radius
+*@param amu Retain value between successive calls
+*@param dltp Trust region radius at last exit from
+* this routine
+*@param phi Retain value between successive calls
+*@param phip0 Retain value between successive calls
+*@param fstime "= true if first entry to this routine
+* during the k-th iteration"
+*@param sc Current step
+*@param nwtake = true if Newton step taken
+*@param wrk0 Workspace
+*@param epsm Machine epsilon
+*
+*/
+
+ public static void hookst_f77(int n, double g[], double a[][], double udiag[],
+ double p[], double sx[], double rnwtln,
+ double dlt[], double amu[], double dltp[],
+ double phi[], double phip0[],
+ boolean fstime[], double sc[], boolean nwtake[],
+ double wrk0[], double epsm) {
+
+/*
+
+Here is a copy of the hookst FORTRAN documentation:
+
+ SUBROUTINE HOOKST(NR,N,G,A,UDIAG,P,SX,RNWTLN,DLT,AMU,
+ + DLTP,PHI,PHIP0,FSTIME,SC,NWTAKE,WRK0,EPSM,IPR)
+
+C
+C PURPOSE
+C -------
+C FIND NEW STEP BY MORE-HEBDON ALGORITHM
+C
+C PARAMETERS
+C ----------
+C NR --> ROW DIMENSION OF MATRIX
+C N --> DIMENSION OF PROBLEM
+C G(N) --> GRADIENT AT CURRENT ITERATE, G(X)
+C A(N,N) --> CHOLESKY DECOMPOSITION OF HESSIAN IN
+C LOWER TRIANGULAR PART AND DIAGONAL.
+C HESSIAN OR APPROX IN UPPER TRIANGULAR PART
+C UDIAG(N) --> DIAGONAL OF HESSIAN IN A(.,.)
+C P(N) --> NEWTON STEP
+C SX(N) --> DIAGONAL SCALING MATRIX FOR N
+C RNWTLN --> NEWTON STEP LENGTH
+C DLT <--> TRUST REGION RADIUS
+C AMU <--> [RETAIN VALUE BETWEEN SUCCESSIVE CALLS]
+C DLTP --> TRUST REGION RADIUS AT LAST EXIT FROM THIS ROUTINE
+C PHI <--> [RETAIN VALUE BETWEEN SUCCESSIVE CALLS]
+C PHIP0 <--> [RETAIN VALUE BETWEEN SUCCESSIVE CALLS]
+C FSTIME <--> BOOLEAN. =.TRUE. IF FIRST ENTRY TO THIS ROUTINE
+C DURING K-TH ITERATION
+C SC(N) <-- CURRENT STEP
+C NWTAKE <-- BOOLEAN, =.TRUE. IF NEWTON STEP TAKEN
+C WRK0 --> WORKSPACE
+C EPSM --> MACHINE EPSILON
+C IPR --> DEVICE TO WHICH TO SEND OUTPUT
+C
+
+*/
+
+ double hi,alo;
+ double phip,amulo,amuup,stepln;
+ int i,j;
+ boolean done;
+
+ double addmax[] = new double[2];
+
+// HI AND ALO ARE CONSTANTS USED IN THIS ROUTINE.
+// CHANGE HERE IF OTHER VALUES ARE TO BE SUBSTITUTED.
+
+ phip = 0.0;
+ hi = 1.5;
+ alo = .75;
+
+ if (rnwtln <= hi*dlt[1]) {
+
+// TAKE NEWTON STEP
+
+ nwtake[1] = true;
+
+ for (i = 1; i <= n; i++) {
+
+ sc[i] = p[i];
+
+ }
+
+ dlt[1] = Math.min(dlt[1],rnwtln);
+ amu[1] = 0.0;
+
+ return;
+
+ } else {
+
+// NEWTON STEP NOT TAKEN
+
+ nwtake[1] = false;
+
+ if (amu[1] > 0.0) {
+
+ amu[1] -= (phi[1] + dltp[1])*((dltp[1] - dlt[1]) + phi[1])/(dlt[1]*phip);
+
+ }
+
+ phi[1] = rnwtln - dlt[1];
+
+ if (fstime[1]) {
+
+ for (i = 1; i <= n; i++) {
+
+ wrk0[i] = sx[i]*sx[i]*p[i];
+
+ }
+
+// SOLVE L*Y = (SX**2)*P
+
+ Uncmin_f77.forslv_f77(n,a,wrk0,wrk0);
+ phip0[1] = -Math.pow(Blas_f77.dnrm2_f77(n,wrk0,1),2)/rnwtln;
+ fstime[1] = false;
+
+ }
+
+ phip = phip0[1];
+ amulo = -phi[1]/phip;
+
+ amuup = 0.0;
+
+ for (i = 1; i <= n; i++) {
+
+ amuup += (g[i]*g[i])/(sx[i]*sx[i]);
+
+ }
+
+ amuup = Math.sqrt(amuup)/dlt[1];
+
+ done = false;
+
+// TEST VALUE OF AMU; GENERATE NEXT AMU IF NECESSARY
+
+ while (!done) {
+
+ if (amu[1] < amulo || amu[1] > amuup) {
+
+ amu[1] = Math.max(Math.sqrt(amulo*amuup),amuup*.001);
+
+ }
+
+// COPY (H,UDIAG) TO L
+// WHERE H <-- H+AMU*(SX**2) [DO NOT ACTUALLY CHANGE (H,UDIAG)]
+
+ for (j = 1; j <= n; j++) {
+
+ a[j][j] = udiag[j] + amu[1]*sx[j]*sx[j];
+
+ for (i = j + 1; i <= n; i++) {
+
+ a[i][j] = a[j][i];
+
+ }
+
+ }
+
+// FACTOR H=L(L+)
+
+ Uncmin_f77.choldc_f77(n,a,0.0,Math.sqrt(epsm),addmax);
+
+// SOLVE H*P = L(L+)*SC = -G
+
+ for (i = 1; i <= n; i++) {
+
+ wrk0[i] = -g[i];
+
+ }
+
+ Uncmin_f77.lltslv_f77(n,a,sc,wrk0);
+
+// RESET H. NOTE SINCE UDIAG HAS NOT BEEN DESTROYED WE NEED DO
+// NOTHING HERE. H IS IN THE UPPER PART AND IN UDIAG, STILL INTACT
+
+ stepln = 0.0;
+
+ for (i = 1; i <= n; i++) {
+
+ stepln += sx[i]*sx[i]*sc[i]*sc[i];
+
+ }
+
+ stepln = Math.sqrt(stepln);
+ phi[1] = stepln - dlt[1];
+
+ for (i = 1; i <= n; i++) {
+
+ wrk0[i] = sx[i]*sx[i]*sc[i];
+
+ }
+
+ Uncmin_f77.forslv_f77(n,a,wrk0,wrk0);
+
+ phip = -Math.pow(Blas_f77.dnrm2_f77(n,wrk0,1),2)/stepln;
+
+ if ((alo*dlt[1] <= stepln && stepln <= hi*dlt[1]) ||
+ (amuup-amulo <= 0.0)) {
+
+// SC IS ACCEPTABLE HOOKSTEP
+
+ done = true;
+
+ } else {
+
+// SC NOT ACCEPTABLE HOOKSTEP. SELECT NEW AMU
+
+ amulo = Math.max(amulo,amu[1]-(phi[1]/phip));
+ if (phi[1] < 0.0) amuup = Math.min(amuup,amu[1]);
+ amu[1] -= (stepln*phi[1])/(dlt[1]*phip);
+
+ }
+
+ }
+
+ return;
+
+ }
+
+ }
+
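+/*
+ The loop above is the More-Hebdon iteration: for a trial amu it copies
+ H + amu*SX**2 into the lower triangle, factors it with choldc_f77, solves
+ (H + amu*SX**2)*sc = -g with lltslv_f77, and forms phi = ||sc||_sx - dlt.
+ The step is accepted once alo*dlt <= ||sc||_sx <= hi*dlt (0.75*dlt to
+ 1.5*dlt with the constants set above), or once the bracket [amulo, amuup]
+ collapses; otherwise amu is updated by the Newton-like correction
+
+     amu := amu - (||sc||_sx * phi)/(dlt * phip),
+
+ where phip = -||y||**2/||sc||_sx and y solves L*y = SX**2*sc.
+*/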
+
+/**
+*
+*<p>
+*The hsnint_f77 method provides the initial Hessian when secant
+*updates are being used.
+*
+*Translated by Steve Verrill, April 27, 1998.
+*
+*@param n The dimension of the problem
+*@param a Initial Hessian (lower triangular matrix)
+*@param sx Scaling vector for x
+*@param method Algorithm to use to solve the minimization problem
+* 1,2 --- factored secant method
+* 3 --- unfactored secant method
+*/
+
+ public static void hsnint_f77(int n, double a[][], double sx[],
+ int method[]) {
+
+/*
+
+Here is a copy of the hsnint FORTRAN documentation:
+
+ SUBROUTINE HSNINT(NR,N,A,SX,METHOD)
+
+C
+C PURPOSE
+C -------
+C PROVIDE INITIAL HESSIAN WHEN USING SECANT UPDATES
+C
+C PARAMETERS
+C ----------
+C NR --> ROW DIMENSION OF MATRIX
+C N --> DIMENSION OF PROBLEM
+C A(N,N) <-- INITIAL HESSIAN (LOWER TRIANGULAR MATRIX)
+C SX(N) --> DIAGONAL SCALING MATRIX FOR X
+C METHOD --> ALGORITHM TO USE TO SOLVE MINIMIZATION PROBLEM
+C =1,2 FACTORED SECANT METHOD USED
+C =3 UNFACTORED SECANT METHOD USED
+C
+
+*/
+
+ int i,j;
+
+ for (j = 1; j <= n; j++) {
+
+ if (method[1] == 3) {
+
+ a[j][j] = sx[j]*sx[j];
+
+ } else {
+
+ a[j][j] = sx[j];
+
+ }
+
+ for (i = j + 1; i <= n; i++) {
+
+ a[i][j] = 0.0;
+
+ }
+
+ }
+
+ return;
+
+ }
+
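+/*
+ In other words, the initial Hessian approximation is diag(sx[j]**2), i.e.
+ the identity in the scaled variables.  For the unfactored secant method
+ (method = 3) that matrix is stored directly, while for the factored methods
+ (method = 1, 2) the routine stores its Cholesky factor L = diag(sx[j]),
+ since L(L+) = diag(sx[j]**2).
+*/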
+/**
+*
+*<p>
+*The lltslv_f77 method solves Ax = b where A has the form L(L transpose)
+*but only the lower triangular part, L, is stored.
+*
+*Translated by Steve Verrill, April 27, 1998.
+*
+*@param n The dimension of the problem
+*@param a Matrix of form L(L transpose).
+* On return a is unchanged.
+*@param x The solution vector
+*@param b The right-hand side vector
+*
+*
+*/
+
+
+ public static void lltslv_f77(int n, double a[][], double x[],
+ double b[]) {
+
+/*
+
+Here is a copy of the lltslv FORTRAN documentation:
+
+ SUBROUTINE LLTSLV(NR,N,A,X,B)
+
+C
+C PURPOSE
+C -------
+C SOLVE AX=B WHERE A HAS THE FORM L(L-TRANSPOSE)
+C BUT ONLY THE LOWER TRIANGULAR PART, L, IS STORED.
+C
+C PARAMETERS
+C ----------
+C NR --> ROW DIMENSION OF MATRIX
+C N --> DIMENSION OF PROBLEM
+C A(N,N) --> MATRIX OF FORM L(L-TRANSPOSE).
+C ON RETURN A IS UNCHANGED.
+C X(N) <-- SOLUTION VECTOR
+C B(N) --> RIGHT-HAND SIDE VECTOR
+C
+C NOTE
+C ----
+C IF B IS NOT REQUIRED BY CALLING PROGRAM, THEN
+C B AND X MAY SHARE THE SAME STORAGE.
+C
+
+*/
+
+// FORWARD SOLVE, RESULT IN X
+
+ Uncmin_f77.forslv_f77(n,a,x,b);
+
+// BACK SOLVE, RESULT IN X
+
+ Uncmin_f77.bakslv_f77(n,a,x,x);
+
+ return;
+
+ }
+
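+/*
+ Usage sketch (hypothetical values).  Given the Cholesky factor
+ L = [[2, .], [1, 0.2]] of A = [[4, 2], [2, 1.04]] stored in the lower
+ triangle of a, the forward and back solves compute x = A**(-1) b:
+
+     double a[][] = new double[3][3];
+     a[1][1] = 2.0;  a[2][1] = 1.0;  a[2][2] = 0.2;   // L with L(L+) = A
+     double b[] = {0.0, 6.0, 3.04};
+     double x[] = new double[3];
+     Uncmin_f77.lltslv_f77(2, a, x, b);               // x[1] = 1.0, x[2] = 1.0
+*/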
+
+/**
+*
+*<p>
+*The lnsrch_f77 method finds a next Newton iterate by line search.
+*
+*Translated by Steve Verrill, May 15, 1998.
+*
+*@param n The dimension of the problem
+*@param x Old iterate
+*@param f Function value at old iterate
+*@param g Gradient or approximation at old iterate
+*@param p Non-zero Newton step
+*@param xpls New iterate
+*@param fpls Function value at new iterate
+*@param minclass A class that implements the Uncmin_methods
+* interface (see the definition in
+* Uncmin_methods.java). See UncminTest_f77.java for an
+* example of such a class. The class must define:
+* 1.) a method, f_to_minimize, to minimize.
+* f_to_minimize must have the form
+*
+* public static double f_to_minimize(double x[])
+*
+* where x is the vector of arguments to the function
+* and the return value is the value of the function
+* evaluated at x.
+* 2.) a method, gradient, that has the form
+*
+* public static void gradient(double x[],
+* double g[])
+*
+* where g is the gradient of f evaluated at x. This
+* method will have an empty body if the user
+* does not wish to provide an analytic estimate
+* of the gradient.
+* 3.) a method, hessian, that has the form
+*
+* public static void hessian(double x[],
+* double h[][])
+* where h is the Hessian of f evaluated at x. This
+* method will have an empty body if the user
+* does not wish to provide an analytic estimate
+* of the Hessian. If the user wants Uncmin
+* to check the Hessian, then the hessian method
+* should only fill the lower triangle (and diagonal)
+* of h.
+*@param mxtake Boolean flag indicating whether the step of
+* maximum length was used
+*@param iretcd Return code
+*@param stepmx Maximum allowable step size
+*@param steptl Relative step size at which successive iterates
+* are considered close enough to terminate the
+* algorithm
+*@param sx Scaling vector for x
+*
+*
+*/
+
+
+ public static void lnsrch_f77(int n, double x[], double f[],
+ double g[], double p[], double xpls[],
+ double fpls[], Uncmin_methods minclass,
+ boolean mxtake[], int iretcd[], double stepmx[],
+ double steptl[], double sx[]) {
+
+/*
+
+Here is a copy of the lnsrch FORTRAN documentation:
+
+ SUBROUTINE LNSRCH(N,X,F,G,P,XPLS,FPLS,FCN,MXTAKE,
+ + IRETCD,STEPMX,STEPTL,SX,IPR)
+
+C PURPOSE
+C -------
+C FIND A NEXT NEWTON ITERATE BY LINE SEARCH.
+C
+C PARAMETERS
+C ----------
+C N --> DIMENSION OF PROBLEM
+C X(N) --> OLD ITERATE: X[K-1]
+C F --> FUNCTION VALUE AT OLD ITERATE, F(X)
+C G(N) --> GRADIENT AT OLD ITERATE, G(X), OR APPROXIMATE
+C P(N) --> NON-ZERO NEWTON STEP
+C XPLS(N) <-- NEW ITERATE X[K]
+C FPLS <-- FUNCTION VALUE AT NEW ITERATE, F(XPLS)
+C FCN --> NAME OF SUBROUTINE TO EVALUATE FUNCTION
+C IRETCD <-- RETURN CODE
+C MXTAKE <-- BOOLEAN FLAG INDICATING STEP OF MAXIMUM LENGTH USED
+C STEPMX --> MAXIMUM ALLOWABLE STEP SIZE
+C STEPTL --> RELATIVE STEP SIZE AT WHICH SUCCESSIVE ITERATES
+C CONSIDERED CLOSE ENOUGH TO TERMINATE ALGORITHM
+C SX(N) --> DIAGONAL SCALING MATRIX FOR X
+C IPR --> DEVICE TO WHICH TO SEND OUTPUT
+C
+C INTERNAL VARIABLES
+C ------------------
+C SLN NEWTON LENGTH
+C RLN RELATIVE LENGTH OF NEWTON STEP
+C
+
+*/
+
+ int i;
+ double tmp,sln,scl,slp,rln,rmnlmb,almbda,tlmbda;
+ double t1,t2,t3,a,b,disc,pfpls,plmbda;
+
+ pfpls = 0.0;
+ plmbda = 0.0;
+
+ mxtake[1] = false;
+ iretcd[1] = 2;
+
+ tmp = 0.0;
+
+ for (i = 1; i <= n; i++) {
+
+ tmp += sx[i]*sx[i]*p[i]*p[i];
+
+ }
+
+ sln = Math.sqrt(tmp);
+
+ if (sln > stepmx[1]) {
+
+// NEWTON STEP LONGER THAN MAXIMUM ALLOWED
+
+ scl = stepmx[1]/sln;
+ Uncmin_f77.sclmul_f77(n,scl,p,p);
+ sln = stepmx[1];
+
+ }
+
+ slp = Blas_f77.ddot_f77(n,g,1,p,1);
+ rln = 0.0;
+
+ for (i = 1; i <= n; i++) {
+
+ rln =
+ Math.max(rln,Math.abs(p[i])/Math.max(Math.abs(x[i]),1.0/sx[i]));
+
+ }
+
+ rmnlmb = steptl[1]/rln;
+ almbda = 1.0;
+
+// LOOP
+// CHECK IF NEW ITERATE SATISFACTORY. GENERATE NEW LAMBDA IF NECESSARY.
+
+ while (iretcd[1] >= 2) {
+
+ for (i = 1; i <= n; i++) {
+
+ xpls[i] = x[i] + almbda*p[i];
+
+ }
+
+ fpls[1] = minclass.f_to_minimize(xpls);
+
+ if (fpls[1] <= (f[1] + slp*.0001*almbda)) {
+
+// SOLUTION FOUND
+
+ iretcd[1] = 0;
+ if (almbda == 1.0 && sln > .99*stepmx[1]) mxtake[1] = true;
+
+ } else {
+
+// SOLUTION NOT (YET) FOUND
+
+ if (almbda < rmnlmb) {
+
+// NO SATISFACTORY XPLS FOUND SUFFICIENTLY DISTINCT FROM X
+
+ iretcd[1] = 1;
+
+ } else {
+
+// CALCULATE NEW LAMBDA
+
+ if (almbda == 1.0) {
+
+// FIRST BACKTRACK: QUADRATIC FIT
+
+ tlmbda = -slp/(2.0*(fpls[1] - f[1] - slp));
+
+ } else {
+
+// ALL SUBSEQUENT BACKTRACKS: CUBIC FIT
+
+ t1 = fpls[1] - f[1] - almbda*slp;
+ t2 = pfpls - f[1] - plmbda*slp;
+ t3 = 1.0/(almbda - plmbda);
+ a = t3*(t1/(almbda*almbda) - t2/(plmbda*plmbda));
+ b = t3*(t2*almbda/(plmbda*plmbda)
+ - t1*plmbda/(almbda*almbda));
+ disc = b*b - 3.0*a*slp;
+
+ if (disc > b*b) {
+
+// ONLY ONE POSITIVE CRITICAL POINT, MUST BE MINIMUM
+
+ tlmbda = (-b +
+ Blas_f77.sign_f77(1.0,a)*Math.sqrt(disc))/(3.0*a);
+
+ } else {
+
+// BOTH CRITICAL POINTS POSITIVE, FIRST IS MINIMUM
+
+ tlmbda = (-b -
+ Blas_f77.sign_f77(1.0,a)*Math.sqrt(disc))/(3.0*a);
+
+ }
+
+ if (tlmbda > .5*almbda) tlmbda = .5*almbda;
+
+ }
+
+ plmbda = almbda;
+ pfpls = fpls[1];
+
+ if (tlmbda < almbda/10.0) {
+
+ almbda *= .1;
+
+ } else {
+
+ almbda = tlmbda;
+
+ }
+
+ }
+
+ }
+
+ }
+
+ return;
+
+ }
+
+
+
+/**
+*
+*<p>
+*The mvmltl_f77 method computes y = Lx where L is a lower
+*triangular matrix stored in A.
+*
+*Translated by Steve Verrill, April 27, 1998.
+*
+*@param n The dimension of the problem
+*@param a Lower triangular matrix
+*@param x Operand vector
+*@param y Result vector
+*
+*
+*/
+
+ public static void mvmltl_f77(int n, double a[][], double x[],
+ double y[]) {
+/*
+
+Here is a copy of the mvmltl FORTRAN documentation:
+
+ SUBROUTINE MVMLTL(NR,N,A,X,Y)
+C
+C PURPOSE
+C -------
+C COMPUTE Y=LX
+C WHERE L IS A LOWER TRIANGULAR MATRIX STORED IN A
+C
+C PARAMETERS
+C ----------
+C NR --> ROW DIMENSION OF MATRIX
+C N --> DIMENSION OF PROBLEM
+C A(N,N) --> LOWER TRIANGULAR (N*N) MATRIX
+C X(N) --> OPERAND VECTOR
+C Y(N) <-- RESULT VECTOR
+C
+C NOTE
+C ----
+C X AND Y CANNOT SHARE STORAGE
+C
+
+*/
+
+ double sum;
+ int i,j;
+
+ for (i = 1; i <= n; i++) {
+
+ sum = 0.0;
+
+ for (j = 1; j <= i; j++) {
+
+ sum += a[i][j]*x[j];
+
+ }
+
+ y[i] = sum;
+
+ }
+
+ return;
+
+ }
+
+
+
+/**
+*
+*<p>
+*The mvmlts_f77 method computes y = Ax where A is a symmetric matrix
+*stored in its lower triangular part.
+*
+*Translated by Steve Verrill, April 27, 1998.
+*
+*@param n The dimension of the problem
+*@param a The symmetric matrix
+*@param x Operand vector
+*@param y Result vector
+*
+*
+*/
+
+ public static void mvmlts_f77(int n, double a[][], double x[],
+ double y[]) {
+/*
+
+Here is a copy of the mvmlts FORTRAN documentation:
+
+ SUBROUTINE MVMLTS(NR,N,A,X,Y)
+
+C
+C PURPOSE
+C -------
+C COMPUTE Y=AX
+C WHERE "A" IS A SYMMETRIC (N*N) MATRIX STORED IN ITS LOWER
+C TRIANGULAR PART AND X,Y ARE N-VECTORS
+C
+C PARAMETERS
+C ----------
+C NR --> ROW DIMENSION OF MATRIX
+C N --> DIMENSION OF PROBLEM
+C A(N,N) --> SYMMETRIC (N*N) MATRIX STORED IN
+C LOWER TRIANGULAR PART AND DIAGONAL
+C X(N) --> OPERAND VECTOR
+C Y(N) <-- RESULT VECTOR
+C
+C NOTE
+C ----
+C X AND Y CANNOT SHARE STORAGE.
+C
+
+*/
+
+ double sum;
+ int i,j;
+
+ for (i = 1; i <= n; i++) {
+
+ sum = 0.0;
+
+ for (j = 1; j <= i; j++) {
+
+ sum += a[i][j]*x[j];
+
+ }
+
+ for (j = i + 1; j <= n; j++) {
+
+ sum += a[j][i]*x[j];
+
+ }
+
+ y[i] = sum;
+
+ }
+
+ return;
+
+ }
+
+
+
+/**
+*
+*<p>
+*The mvmltu_f77 method computes y = (L transpose)x where L is a
+*lower triangular matrix stored in A (the transpose of L
+*is taken implicitly).
+*
+*Translated by Steve Verrill, April 27, 1998.
+*
+*@param n The dimension of the problem
+*@param a The lower triangular matrix
+*@param x Operand vector
+*@param y Result vector
+*
+*
+*/
+
+ public static void mvmltu_f77(int n, double a[][], double x[],
+ double y[]) {
+/*
+
+Here is a copy of the mvmltu FORTRAN documentation:
+
+ SUBROUTINE MVMLTU(NR,N,A,X,Y)
+
+C
+C PURPOSE
+C -------
+C COMPUTE Y=(L+)X
+C WHERE L IS A LOWER TRIANGULAR MATRIX STORED IN A
+C (L-TRANSPOSE (L+) IS TAKEN IMPLICITLY)
+C
+C PARAMETERS
+C ----------
+C NR --> ROW DIMENSION OF MATRIX
+C N --> DIMENSION OF PROBLEM
+C A(NR,1) --> LOWER TRIANGULAR (N*N) MATRIX
+C X(N) --> OPERAND VECTOR
+C Y(N) <-- RESULT VECTOR
+C
+C NOTE
+C ----
+C X AND Y CANNOT SHARE STORAGE
+C
+
+*/
+
+ double sum;
+ int i,j;
+
+ for (i = 1; i <= n; i++) {
+
+ sum = 0.0;
+
+ for (j = i; j <= n; j++) {
+
+ sum += a[j][i]*x[j];
+
+ }
+
+ y[i] = sum;
+
+ }
+
+ return;
+
+ }
+
+
+/**
+*
+*<p>
+*The optchk_f77 method checks the input for reasonableness.
+*
+*Translated by Steve Verrill, May 12, 1998.
+*
+*@param n The dimension of the problem
+*@param x On entry, estimate of the root of f_to_minimize
+*@param typsiz Typical size of each component of x
+*@param sx Scaling vector for x
+*@param fscale Estimate of scale of objective function
+*@param gradtl Tolerance at which the gradient is considered
+* close enough to zero to terminate the algorithm
+*@param itnlim Maximum number of allowable iterations
+*@param ndigit Number of good digits in the optimization function
+*@param epsm Machine epsilon
+*@param dlt Trust region radius
+*@param method Algorithm indicator
+*@param iexp Expense flag
+*@param iagflg = 1 if an analytic gradient is supplied
+*@param iahflg = 1 if an analytic Hessian is supplied
+*@param stepmx Maximum step size
+*@param msg Message and error code
+*
+*/
+
+ public static void optchk_f77(int n, double x[], double typsiz[],
+ double sx[], double fscale[], double gradtl[],
+ int itnlim[], int ndigit[], double epsm,
+ double dlt[], int method[], int iexp[],
+ int iagflg[], int iahflg[], double stepmx[],
+ int msg[]) {
+
+/*
+
+Here is a copy of the optchk FORTRAN documentation:
+
+
+ SUBROUTINE OPTCHK(N,X,TYPSIZ,SX,FSCALE,GRADTL,ITNLIM,NDIGIT,EPSM,
+ + DLT,METHOD,IEXP,IAGFLG,IAHFLG,STEPMX,MSG,IPR)
+C
+C PURPOSE
+C -------
+C CHECK INPUT FOR REASONABLENESS
+C
+C PARAMETERS
+C ----------
+C N --> DIMENSION OF PROBLEM
+C X(N) --> ON ENTRY, ESTIMATE TO ROOT OF FCN
+C TYPSIZ(N) <--> TYPICAL SIZE OF EACH COMPONENT OF X
+C SX(N) <-- DIAGONAL SCALING MATRIX FOR X
+C FSCALE <--> ESTIMATE OF SCALE OF OBJECTIVE FUNCTION FCN
+C GRADTL --> TOLERANCE AT WHICH GRADIENT CONSIDERED CLOSE
+C ENOUGH TO ZERO TO TERMINATE ALGORITHM
+C ITNLIM <--> MAXIMUM NUMBER OF ALLOWABLE ITERATIONS
+C NDIGIT <--> NUMBER OF GOOD DIGITS IN OPTIMIZATION FUNCTION FCN
+C EPSM --> MACHINE EPSILON
+C DLT <--> TRUST REGION RADIUS
+C METHOD <--> ALGORITHM INDICATOR
+C IEXP <--> EXPENSE FLAG
+C IAGFLG <--> =1 IF ANALYTIC GRADIENT SUPPLIED
+C IAHFLG <--> =1 IF ANALYTIC HESSIAN SUPPLIED
+C STEPMX <--> MAXIMUM STEP SIZE
+C MSG <--> MESSAGE AND ERROR CODE
+C IPR --> DEVICE TO WHICH TO SEND OUTPUT
+C
+
+*/
+
+ int i;
+ double stpsiz;
+
+
+// CHECK THAT PARAMETERS ONLY TAKE ON ACCEPTABLE VALUES.
+// IF NOT, SET THEM TO DEFAULT VALUES.
+
+ if (method[1] < 1 || method[1] > 3) method[1] = 1;
+ if (iagflg[1] != 1) iagflg[1] = 0;
+ if (iahflg[1] != 1) iahflg[1] = 0;
+ if (iexp[1] != 0) iexp[1] = 1;
+
+ if ((msg[1]/2)%2 == 1 && iagflg[1] == 0) {
+
+ System.out.print("\n\nOPTCHK User requests that analytic gradient");
+ System.out.print(" be accepted as properly coded,\n");
+         System.out.print("OPTCHK msg = " + msg[1] + ",\n");
+ System.out.print("OPTCHK but an analytic gradient is not");
+ System.out.print(" supplied,\n");
+ System.out.print("OPTCHK iagflg = " + iagflg[1] + ".\n\n");
+
+ System.exit(0);
+
+ }
+
+ if ((msg[1]/4)%2 == 1 && iahflg[1] == 0) {
+
+ System.out.print("\n\nOPTCHK User requests that analytic Hessian");
+ System.out.print(" be accepted as properly coded,\n");
+         System.out.print("OPTCHK msg = " + msg[1] + ",\n");
+ System.out.print("OPTCHK but an analytic Hessian is not");
+ System.out.print(" supplied,\n");
+ System.out.print("OPTCHK iahflg = " + iahflg[1] + ".\n\n");
+
+ System.exit(0);
+
+ }
+
+// CHECK DIMENSION OF PROBLEM
+
+ if (n <= 0) {
+
+ System.out.print("\n\nOPTCHK Illegal dimension, n = " + n + "\n\n");
+
+ System.exit(0);
+
+ }
+
+ if (n == 1 && msg[1]%2 == 0) {
+
+ System.out.print("\n\nOPTCHK !!!WARNING!!! This class is ");
+ System.out.print("inefficient for problems of size 1.\n");
+ System.out.print("OPTCHK You might want to try Fmin instead.\n\n");
+
+ msg[1] = -2;
+
+ }
+
+// COMPUTE SCALE MATRIX
+
+ for (i = 1; i <= n; i++) {
+
+ if (typsiz[i] == 0) typsiz[i] = 1.0;
+ if (typsiz[i] < 0.0) typsiz[i] = -typsiz[i];
+ sx[i] = 1.0/typsiz[i];
+
+ }
+
+// CHECK MAXIMUM STEP SIZE
+
+ if (stepmx[1] <= 0.0) {
+
+ stpsiz = 0.0;
+
+ for (i = 1; i <= n; i++) {
+
+ stpsiz += x[i]*x[i]*sx[i]*sx[i];
+
+ }
+
+ stpsiz = Math.sqrt(stpsiz);
+
+ stepmx[1] = Math.max(1000.0*stpsiz,1000.0);
+
+ }
+
+// CHECK FUNCTION SCALE
+
+ if (fscale[1] == 0) fscale[1] = 1.0;
+ if (fscale[1] < 0.0) fscale[1] = -fscale[1];
+
+// CHECK GRADIENT TOLERANCE
+
+ if (gradtl[1] < 0.0) {
+
+ System.out.print("\n\nOPTCHK Illegal tolerance, gradtl = " + gradtl[1] + "\n\n");
+
+ System.exit(0);
+
+ }
+
+// CHECK ITERATION LIMIT
+
+ if (itnlim[1] < 0) {
+
+ System.out.print("\n\nOPTCHK Illegal iteration limit,");
+ System.out.print(" itnlim = " + itnlim[1] + "\n\n");
+
+ System.exit(0);
+
+ }
+
+// CHECK NUMBER OF DIGITS OF ACCURACY IN FUNCTION FCN
+
+ if (ndigit[1] == 0) {
+
+ System.out.print("\n\nOPTCHK Minimization function has no good");
+         System.out.print(" digits, ndigit = " + ndigit[1] + "\n\n");
+
+ System.exit(0);
+
+ }
+
+ if (ndigit[1] < 0) ndigit[1] = (int)(-Math.log(epsm)/Math.log(10.0));
+
+// CHECK TRUST REGION RADIUS
+
+ if (dlt[1] <= 0.0) dlt[1] = -1.0;
+ if (dlt[1] > stepmx[1]) dlt[1] = stepmx[1];
+
+ return;
+
+ }
+
+
+
+/**
+*
+*<p>
+*The optdrv_f77 method is the driver for the nonlinear optimization problem.
+*
+*Translated by Steve Verrill, May 18, 1998.
+*
+*@param n The dimension of the problem
+*@param x On entry, estimate of the location of a minimum
+* of f_to_minimize
+*@param minclass A class that implements the Uncmin_methods
+* interface (see the definition in
+* Uncmin_methods.java). See UncminTest_f77.java for an
+* example of such a class. The class must define:
+* 1.) a method, f_to_minimize, to minimize.
+* f_to_minimize must have the form
+*
+* public static double f_to_minimize(double x[])
+*
+* where x is the vector of arguments to the function
+* and the return value is the value of the function
+* evaluated at x.
+* 2.) a method, gradient, that has the form
+*
+* public static void gradient(double x[],
+* double g[])
+*
+* where g is the gradient of f evaluated at x. This
+* method will have an empty body if the user
+* does not wish to provide an analytic estimate
+* of the gradient.
+* 3.) a method, hessian, that has the form
+*
+* public static void hessian(double x[],
+* double h[][])
+* where h is the Hessian of f evaluated at x. This
+* method will have an empty body if the user
+* does not wish to provide an analytic estimate
+* of the Hessian. If the user wants Uncmin
+* to check the Hessian, then the hessian method
+* should only fill the lower triangle (and diagonal)
+* of h.
+*@param typsiz Typical size of each component of x
+*@param fscale Estimate of scale of objective function
+*@param method Algorithm indicator
+* 1 -- line search
+* 2 -- double dogleg
+* 3 -- More-Hebdon
+*@param iexp Expense flag.
+* 1 -- optimization function, f_to_minimize,
+* is expensive to evaluate
+* 0 -- otherwise
+* If iexp = 1, the Hessian will be evaluated
+* by secant update rather than analytically or
+* by finite differences.
+*@param msg On input: (> 0) message to inhibit certain
+* automatic checks
+* On output: (< 0) error code (= 0, no error)
+*@param ndigit Number of good digits in the optimization function
+*@param itnlim Maximum number of allowable iterations
+*@param iagflg = 1 if an analytic gradient is supplied
+*@param iahflg = 1 if an analytic Hessian is supplied
+*@param dlt Trust region radius
+*@param gradtl Tolerance at which the gradient is considered
+* close enough to zero to terminate the algorithm
+*@param stepmx Maximum step size
+*@param steptl Relative step size at which successive iterates
+* are considered close enough to terminate the
+* algorithm
+*@param xpls On exit: xpls is a local minimum
+*@param fpls On exit: function value at xpls
+*@param gpls On exit: gradient at xpls
+*@param itrmcd Termination code
+*@param a workspace for Hessian (or its approximation)
+* and its Cholesky decomposition
+*@param udiag workspace (for diagonal of Hessian)
+*@param g workspace (for gradient at current iterate)
+*@param p workspace for step
+*@param sx workspace (for scaling vector)
+*@param wrk0 workspace
+*@param wrk1 workspace
+*@param wrk2 workspace
+*@param wrk3 workspace
+*
+*/
+
+ public static void optdrv_f77(int n, double x[], Uncmin_methods minclass,
+ double typsiz[], double fscale[], int method[],
+ int iexp[], int msg[], int ndigit[], int itnlim[],
+ int iagflg[], int iahflg[], double dlt[],
+ double gradtl[], double stepmx[], double steptl[],
+ double xpls[], double fpls[], double gpls[],
+ int itrmcd[], double a[][], double udiag[],
+ double g[], double p[], double sx[],
+ double wrk0[], double wrk1[], double wrk2[],
+ double wrk3[]) {
+
+/*
+
+Here is a copy of the optdrv FORTRAN documentation:
+
+
+ SUBROUTINE OPTDRV(NR,N,X,FCN,D1FCN,D2FCN,TYPSIZ,FSCALE,
+ + METHOD,IEXP,MSG,NDIGIT,ITNLIM,IAGFLG,IAHFLG,IPR,
+ + DLT,GRADTL,STEPMX,STEPTL,
+ + XPLS,FPLS,GPLS,ITRMCD,
+ + A,UDIAG,G,P,SX,WRK0,WRK1,WRK2,WRK3)
+
+C
+C PURPOSE
+C -------
+C DRIVER FOR NON-LINEAR OPTIMIZATION PROBLEM
+C
+C PARAMETERS
+C ----------
+C NR --> ROW DIMENSION OF MATRIX
+C N --> DIMENSION OF PROBLEM
+C X(N) --> ON ENTRY: ESTIMATE TO A ROOT OF FCN
+C FCN --> NAME OF SUBROUTINE TO EVALUATE OPTIMIZATION FUNCTION
+C MUST BE DECLARED EXTERNAL IN CALLING ROUTINE
+C FCN: R(N) --> R(1)
+C D1FCN --> (OPTIONAL) NAME OF SUBROUTINE TO EVALUATE GRADIENT
+C OF FCN. MUST BE DECLARED EXTERNAL IN CALLING ROUTINE
+C D2FCN --> (OPTIONAL) NAME OF SUBROUTINE TO EVALUATE HESSIAN OF
+C OF FCN. MUST BE DECLARED EXTERNAL IN CALLING ROUTINE
+C TYPSIZ(N) --> TYPICAL SIZE FOR EACH COMPONENT OF X
+C FSCALE --> ESTIMATE OF SCALE OF OBJECTIVE FUNCTION
+C METHOD --> ALGORITHM TO USE TO SOLVE MINIMIZATION PROBLEM
+C =1 LINE SEARCH
+C =2 DOUBLE DOGLEG
+C =3 MORE-HEBDON
+C IEXP --> =1 IF OPTIMIZATION FUNCTION FCN IS EXPENSIVE TO
+C EVALUATE, =0 OTHERWISE. IF SET THEN HESSIAN WILL
+C BE EVALUATED BY SECANT UPDATE INSTEAD OF
+C ANALYTICALLY OR BY FINITE DIFFERENCES
+C MSG <--> ON INPUT: (.GT.0) MESSAGE TO INHIBIT CERTAIN
+C AUTOMATIC CHECKS
+C ON OUTPUT: (.LT.0) ERROR CODE; =0 NO ERROR
+C NDIGIT --> NUMBER OF GOOD DIGITS IN OPTIMIZATION FUNCTION FCN
+C ITNLIM --> MAXIMUM NUMBER OF ALLOWABLE ITERATIONS
+C IAGFLG --> =1 IF ANALYTIC GRADIENT SUPPLIED
+C IAHFLG --> =1 IF ANALYTIC HESSIAN SUPPLIED
+C IPR --> DEVICE TO WHICH TO SEND OUTPUT
+C DLT --> TRUST REGION RADIUS
+C GRADTL --> TOLERANCE AT WHICH GRADIENT CONSIDERED CLOSE
+C ENOUGH TO ZERO TO TERMINATE ALGORITHM
+C STEPMX --> MAXIMUM ALLOWABLE STEP SIZE
+C STEPTL --> RELATIVE STEP SIZE AT WHICH SUCCESSIVE ITERATES
+C CONSIDERED CLOSE ENOUGH TO TERMINATE ALGORITHM
+C XPLS(N) <--> ON EXIT: XPLS IS LOCAL MINIMUM
+C FPLS <--> ON EXIT: FUNCTION VALUE AT SOLUTION, XPLS
+C GPLS(N) <--> ON EXIT: GRADIENT AT SOLUTION XPLS
+C ITRMCD <-- TERMINATION CODE
+C A(N,N) --> WORKSPACE FOR HESSIAN (OR ESTIMATE)
+C AND ITS CHOLESKY DECOMPOSITION
+C UDIAG(N) --> WORKSPACE [FOR DIAGONAL OF HESSIAN]
+C G(N) --> WORKSPACE (FOR GRADIENT AT CURRENT ITERATE)
+C P(N) --> WORKSPACE FOR STEP
+C SX(N) --> WORKSPACE (FOR DIAGONAL SCALING MATRIX)
+C WRK0(N) --> WORKSPACE
+C WRK1(N) --> WORKSPACE
+C WRK2(N) --> WORKSPACE
+C WRK3(N) --> WORKSPACE
+C
+C
+C INTERNAL VARIABLES
+C ------------------
+C ANALTL TOLERANCE FOR COMPARISON OF ESTIMATED AND
+C ANALYTICAL GRADIENTS AND HESSIANS
+C EPSM MACHINE EPSILON
+C F FUNCTION VALUE: FCN(X)
+C ITNCNT CURRENT ITERATION, K
+C RNF RELATIVE NOISE IN OPTIMIZATION FUNCTION FCN.
+C NOISE=10.**(-NDIGIT)
+C
+
+*/
+
+ boolean noupdt[] = new boolean[2];
+ boolean mxtake[] = new boolean[2];
+
+ int i,j,num5,remain,ilow,ihigh;
+ int icscmx[] = new int[2];
+ int iretcd[] = new int[2];
+ int itncnt[] = new int[2];
+
+ double rnf,analtl,dltsav,
+ amusav,dlpsav,phisav,phpsav;
+
+ double f[] = new double[2];
+
+ double amu[] = new double[2];
+ double dltp[] = new double[2];
+ double phi[] = new double[2];
+ double phip0[] = new double[2];
+
+ double epsm;
+
+ dltsav = amusav = dlpsav = phisav = phpsav = 0.0;
+
+
+// INITIALIZATION
+
+ for (i = 1; i <= n; i++) {
+
+ p[i] = 0.0;
+
+ }
+
+ itncnt[1] = 0;
+ iretcd[1] = -1;
+
+ epsm = 1.12e-16;
+
+ Uncmin_f77.optchk_f77(n,x,typsiz,sx,fscale,gradtl,itnlim,ndigit,
+ epsm,dlt,method,iexp,iagflg,iahflg,stepmx,
+ msg);
+
+// Removed because we don't want to stop if msg = -2, and other negative
+// messages are now handled where they are generated:
+// if (msg[1] < 0) return;
+
+ rnf = Math.max(Math.pow(10.0,-ndigit[1]),epsm);
+ analtl = Math.max(.01,Math.sqrt(rnf));
+
+ if ((msg[1]/8)%2 != 1) {
+
+ num5 = n/5;
+ remain = n%5;
+
+ System.out.print("\n\nOPTDRV Typical x\n\n");
+
+ ilow = -4;
+ ihigh = 0;
+
+ for (i = 1; i <= num5; i++) {
+
+ ilow += 5;
+ ihigh += 5;
+
+ System.out.print(ilow + "--" + ihigh + " ");
+
+ for (j = 1; j <= 5; j++) {
+
+ System.out.print(typsiz[ilow+j-1] + " ");
+
+ }
+
+ System.out.print("\n");
+
+ }
+
+ ilow += 5;
+ ihigh = ilow + remain - 1;
+
+ System.out.print(ilow + "--" + ihigh + " ");
+
+ for (j = 1; j <= remain; j++) {
+
+ System.out.print(typsiz[ilow+j-1] + " ");
+
+ }
+
+ System.out.print("\n");
+
+
+ System.out.print("\n\nOPTDRV Scaling vector for x\n\n");
+
+ ilow = -4;
+ ihigh = 0;
+
+ for (i = 1; i <= num5; i++) {
+
+ ilow += 5;
+ ihigh += 5;
+
+ System.out.print(ilow + "--" + ihigh + " ");
+
+ for (j = 1; j <= 5; j++) {
+
+ System.out.print(sx[ilow+j-1] + " ");
+
+ }
+
+ System.out.print("\n");
+
+ }
+
+ ilow += 5;
+ ihigh = ilow + remain - 1;
+
+ System.out.print(ilow + "--" + ihigh + " ");
+
+ for (j = 1; j <= remain; j++) {
+
+ System.out.print(sx[ilow+j-1] + " ");
+
+ }
+
+ System.out.print("\n");
+
+
+ System.out.println("\n\nOPTDRV Typical f = " + fscale[1]);
+ System.out.print("OPTDRV Number of good digits in");
+ System.out.println(" f_to_minimize = " + ndigit[1]);
+ System.out.print("OPTDRV Gradient flag");
+ System.out.println(" = " + iagflg[1]);
+ System.out.print("OPTDRV Hessian flag");
+ System.out.println(" = " + iahflg[1]);
+ System.out.print("OPTDRV Expensive function calculation flag");
+ System.out.println(" = " + iexp[1]);
+ System.out.print("OPTDRV Method to use");
+ System.out.println(" = " + method[1]);
+ System.out.print("OPTDRV Iteration limit");
+ System.out.println(" = " + itnlim[1]);
+ System.out.print("OPTDRV Machine epsilon");
+ System.out.println(" = " + epsm);
+ System.out.print("OPTDRV Maximum step size");
+ System.out.println(" = " + stepmx[1]);
+ System.out.print("OPTDRV Step tolerance");
+ System.out.println(" = " + steptl[1]);
+ System.out.print("OPTDRV Gradient tolerance");
+ System.out.println(" = " + gradtl[1]);
+ System.out.print("OPTDRV Trust region radius");
+ System.out.println(" = " + dlt[1]);
+ System.out.print("OPTDRV Relative noise in");
+ System.out.println(" f_to_minimize = " + rnf);
+ System.out.print("OPTDRV Analytical fd tolerance");
+ System.out.println(" = " + analtl);
+
+ }
+
+// EVALUATE FCN(X)
+
+ f[1] = minclass.f_to_minimize(x);
+
+// EVALUATE ANALYTIC OR FINITE DIFFERENCE GRADIENT AND CHECK ANALYTIC
+// GRADIENT, IF REQUESTED.
+
+ if (iagflg[1] == 0) {
+
+ Uncmin_f77.fstofd_f77(n,x,minclass,f,g,sx,rnf);
+
+ } else {
+
+ minclass.gradient(x,g);
+
+ if ((msg[1]/2)%2 == 0) {
+
+ Uncmin_f77.grdchk_f77(n,x,minclass,f,g,typsiz,
+ sx,fscale,rnf,analtl,
+ wrk1);
+
+ }
+
+ }
+
+
+ Uncmin_f77.optstp_f77(n,x,f,g,wrk1,itncnt,icscmx,
+ itrmcd,gradtl,steptl,sx,fscale,
+ itnlim,iretcd,mxtake,msg);
+
+ if (itrmcd[1] != 0) {
+
+ fpls[1] = f[1];
+
+ for (i = 1; i <= n; i++) {
+
+ xpls[i] = x[i];
+ gpls[i] = g[i];
+
+ }
+
+ } else {
+
+ if (iexp[1] == 1) {
+
+// IF OPTIMIZATION FUNCTION EXPENSIVE TO EVALUATE (IEXP=1), THEN
+// HESSIAN WILL BE OBTAINED BY SECANT UPDATES. GET INITIAL HESSIAN.
+
+ Uncmin_f77.hsnint_f77(n,a,sx,method);
+
+ } else {
+
+// EVALUATE ANALYTIC OR FINITE DIFFERENCE HESSIAN AND CHECK ANALYTIC
+// HESSIAN IF REQUESTED (ONLY IF USER-SUPPLIED ANALYTIC HESSIAN
+// ROUTINE minclass.hessian FILLS ONLY LOWER TRIANGULAR PART AND DIAGONAL OF A).
+
+ if (iahflg[1] == 0) {
+
+ if (iagflg[1] == 1) {
+
+ Uncmin_f77.fstofd_f77(n,x,minclass,g,a,sx,rnf,wrk1);
+
+ } else {
+
+ Uncmin_f77.sndofd_f77(n,x,minclass,f,a,sx,rnf,wrk1,wrk2);
+
+ }
+
+ } else {
+
+ if ((msg[1]/4)%2 == 1) {
+
+ minclass.hessian(x,a);
+
+ } else {
+
+ Uncmin_f77.heschk_f77(n,x,minclass,f,g,a,typsiz,
+ sx,rnf,analtl,iagflg,udiag,
+ wrk1,wrk2);
+
+// HESCHK EVALUATES minclass.hessian AND CHECKS IT AGAINST THE FINITE
+// DIFFERENCE HESSIAN WHICH IT CALCULATES BY CALLING FSTOFD
+// (IF IAGFLG .EQ. 1) OR SNDOFD (OTHERWISE).
+
+ }
+
+ }
+
+ }
+
+ if ((msg[1]/8)%2 == 0) Uncmin_f77.result_f77(n,x,f,g,a,
+ p,itncnt,1);
+
+// ITERATION
+
+ while (itrmcd[1] == 0) {
+
+ itncnt[1]++;
+
+// FIND PERTURBED LOCAL MODEL HESSIAN AND ITS LL+ DECOMPOSITION
+// (SKIP THIS STEP IF LINE SEARCH OR DOGSTEP TECHNIQUES BEING USED WITH
+// SECANT UPDATES. CHOLESKY DECOMPOSITION L ALREADY OBTAINED FROM
+// SECFAC.)
+
+ if (iexp[1] != 1 || method[1] == 3) {
+
+ Uncmin_f77.chlhsn_f77(n,a,epsm,sx,udiag);
+
+ }
+
+// SOLVE FOR NEWTON STEP: AP=-G
+
+ for (i = 1; i <= n; i++) {
+
+ wrk1[i] = -g[i];
+
+ }
+
+ Uncmin_f77.lltslv_f77(n,a,p,wrk1);
+
+// DECIDE WHETHER TO ACCEPT NEWTON STEP XPLS=X + P
+// OR TO CHOOSE XPLS BY A GLOBAL STRATEGY.
+
+ if (iagflg[1] == 0 && method[1] != 1) {
+
+ dltsav = dlt[1];
+
+ if (method[1] != 2) {
+
+ amusav = amu[1];
+ dlpsav = dltp[1];
+ phisav = phi[1];
+ phpsav = phip0[1];
+
+ }
+
+ }
+
+ if (method[1] == 1) {
+
+ Uncmin_f77.lnsrch_f77(n,x,f,g,p,xpls,fpls,minclass,
+ mxtake,iretcd,stepmx,steptl,
+ sx);
+
+ } else if (method[1] == 2 ) {
+
+ Uncmin_f77.dogdrv_f77(n,x,f,g,a,p,xpls,fpls,minclass,
+ sx,stepmx,steptl,dlt,iretcd,
+ mxtake,wrk0,wrk1,wrk2,wrk3);
+
+ } else {
+
+ Uncmin_f77.hookdr_f77(n,x,f,g,a,udiag,p,xpls,fpls,
+ minclass,sx,stepmx,steptl,
+ dlt,iretcd,mxtake,amu,
+ dltp,phi,phip0,wrk0,wrk1,
+ wrk2,epsm,itncnt);
+
+ }
+
+// IF COULD NOT FIND SATISFACTORY STEP AND FORWARD DIFFERENCE
+// GRADIENT WAS USED, RETRY USING CENTRAL DIFFERENCE GRADIENT.
+
+ if (iretcd[1] == 1 && iagflg[1] == 0) {
+
+// SET IAGFLG FOR CENTRAL DIFFERENCES
+
+ iagflg[1] = -1;
+
+ System.out.print("\nOPTDRV Shift from forward to central");
+ System.out.print(" differences");
+ System.out.print("\nOPTDRV in iteration " + itncnt[1]);
+ System.out.print("\n");
+
+ Uncmin_f77.fstocd_f77(n,x,minclass,sx,rnf,g);
+
+ if (method[1] == 1) {
+
+// SOLVE FOR NEWTON STEP: AP=-G
+
+ for (i = 1; i <= n; i++) {
+
+ wrk1[i] = -g[i];
+
+ }
+
+ Uncmin_f77.lltslv_f77(n,a,p,wrk1);
+
+ Uncmin_f77.lnsrch_f77(n,x,f,g,p,xpls,fpls,minclass,
+ mxtake,iretcd,stepmx,steptl,
+ sx);
+
+ } else {
+
+ dlt[1] = dltsav;
+ if (method[1] == 2) {
+
+
+// SOLVE FOR NEWTON STEP: AP=-G
+
+ for (i = 1; i <= n; i++) {
+
+ wrk1[i] = -g[i];
+
+ }
+
+ Uncmin_f77.lltslv_f77(n,a,p,wrk1);
+
+ Uncmin_f77.dogdrv_f77(n,x,f,g,a,p,xpls,fpls,minclass,
+ sx,stepmx,steptl,dlt,iretcd,
+ mxtake,wrk0,wrk1,wrk2,wrk3);
+
+
+ } else {
+
+ amu[1] = amusav;
+ dltp[1] = dlpsav;
+ phi[1] = phisav;
+ phip0[1] = phpsav;
+
+ Uncmin_f77.chlhsn_f77(n,a,epsm,sx,udiag);
+
+// SOLVE FOR NEWTON STEP: AP=-G
+
+ for (i = 1; i <= n; i++) {
+
+ wrk1[i] = -g[i];
+
+ }
+
+ Uncmin_f77.lltslv_f77(n,a,p,wrk1);
+
+ Uncmin_f77.hookdr_f77(n,x,f,g,a,udiag,p,xpls,fpls,
+ minclass,sx,stepmx,steptl,
+ dlt,iretcd,mxtake,amu,
+ dltp,phi,phip0,wrk0,wrk1,
+ wrk2,epsm,itncnt);
+
+ }
+
+ }
+
+ }
+
+// CALCULATE STEP FOR OUTPUT
+
+ for (i = 1; i <= n; i++) {
+
+ p[i] = xpls[i] - x[i];
+
+ }
+
+// CALCULATE GRADIENT AT XPLS
+
+ if (iagflg[1] == -1) {
+
+// CENTRAL DIFFERENCE GRADIENT
+
+ Uncmin_f77.fstocd_f77(n,xpls,minclass,sx,rnf,gpls);
+
+ } else if (iagflg[1] == 0) {
+
+// FORWARD DIFFERENCE GRADIENT
+
+ Uncmin_f77.fstofd_f77(n,xpls,minclass,fpls,gpls,sx,rnf);
+
+ } else {
+
+// ANALYTIC GRADIENT
+
+ minclass.gradient(xpls,gpls);
+
+ }
+
+// CHECK WHETHER STOPPING CRITERIA SATISFIED
+
+ Uncmin_f77.optstp_f77(n,xpls,fpls,gpls,x,itncnt,icscmx,
+ itrmcd,gradtl,steptl,sx,fscale,
+ itnlim,iretcd,mxtake,msg);
+
+ if (itrmcd[1] == 0) {
+
+// EVALUATE HESSIAN AT XPLS
+
+ if (iexp[1] != 0) {
+
+ if (method[1] == 3) {
+
+ Uncmin_f77.secunf_f77(n,x,g,a,udiag,xpls,gpls,epsm,
+ itncnt,rnf,iagflg,noupdt,
+ wrk1,wrk2,wrk3);
+
+ } else {
+
+ Uncmin_f77.secfac_f77(n,x,g,a,xpls,gpls,epsm,itncnt,
+ rnf,iagflg,noupdt,wrk0,wrk1,
+ wrk2,wrk3);
+
+ }
+
+ } else {
+
+ if (iahflg[1] == 1) {
+
+ minclass.hessian(xpls,a);
+
+ } else {
+
+ if (iagflg[1] == 1) {
+
+ Uncmin_f77.fstofd_f77(n,xpls,minclass,gpls,a,
+ sx,rnf,wrk1);
+
+ } else {
+
+ Uncmin_f77.sndofd_f77(n,xpls,minclass,fpls,a,
+ sx,rnf,wrk1,wrk2);
+
+ }
+
+ }
+
+
+ }
+
+ if ((msg[1]/16)%2 == 1) {
+
+ Uncmin_f77.result_f77(n,xpls,fpls,gpls,a,p,itncnt,1);
+
+ }
+
+// X <-- XPLS AND G <-- GPLS AND F <-- FPLS
+
+ f[1] = fpls[1];
+
+ for (i = 1; i <= n; i++) {
+
+ x[i] = xpls[i];
+ g[i] = gpls[i];
+
+ }
+
+ }
+
+ }
+
+// TERMINATION
+// -----------
+// RESET XPLS,FPLS,GPLS, IF PREVIOUS ITERATE SOLUTION
+//
+
+ if (itrmcd[1] == 3) {
+
+ fpls[1] = f[1];
+
+ for (i = 1; i <= n; i++) {
+
+ xpls[i] = x[i];
+ gpls[i] = g[i];
+
+ }
+
+ }
+
+ }
+
+// PRINT RESULTS
+
+ if ((msg[1]/8)%2 == 0) {
+
+ Uncmin_f77.result_f77(n,xpls,fpls,gpls,a,p,itncnt,0);
+
+ }
+
+ msg[1] = 0;
+ return;
+
+ }
+
+
+
+/**
+*
+*<p>
+*The optstp_f77 method determines whether the algorithm should
+*terminate due to any of the following:
+*1) problem solved within user tolerance
+*2) convergence within user tolerance
+*3) iteration limit reached
+*4) divergence or too restrictive maximum step (stepmx)
+* suspected
+*
+*Translated by Steve Verrill, May 12, 1998.
+*
+*@param n The dimension of the problem
+*@param xpls New iterate
+*@param fpls Function value at new iterate
+*@param gpls Gradient or approximation at new iterate
+*@param x Old iterate
+*@param itncnt Current iteration
+*@param icscmx Number of consecutive steps >= stepmx
+* (retain between successive calls)
+*@param itrmcd Termination code
+*@param gradtl Tolerance at which the relative gradient is considered
+* close enough to zero to terminate the algorithm
+*@param steptl Relative step size at which successive iterates
+* are considered close enough to terminate the algorithm
+*@param sx Scaling vector for x
+*@param fscale Estimate of the scale of the objective function
+*@param itnlim Maximum number of allowable iterations
+*@param iretcd Return code
+*@param mxtake Boolean flag indicating step of
+* maximum length was used
+*@param msg If msg includes a term 8, suppress output
+*
+*/
+
+ public static void optstp_f77(int n, double xpls[], double fpls[],
+ double gpls[], double x[], int itncnt[],
+ int icscmx[], int itrmcd[], double gradtl[],
+ double steptl[], double sx[], double fscale[],
+ int itnlim[], int iretcd[], boolean mxtake[],
+ int msg[]) {
+/*
+
+Here is a copy of the optstp FORTRAN documentation:
+
+ SUBROUTINE OPTSTP(N,XPLS,FPLS,GPLS,X,ITNCNT,ICSCMX,
+ + ITRMCD,GRADTL,STEPTL,SX,FSCALE,ITNLIM,IRETCD,MXTAKE,IPR,MSG)
+
+C
+C UNCONSTRAINED MINIMIZATION STOPPING CRITERIA
+C --------------------------------------------
+C FIND WHETHER THE ALGORITHM SHOULD TERMINATE, DUE TO ANY
+C OF THE FOLLOWING:
+C 1) PROBLEM SOLVED WITHIN USER TOLERANCE
+C 2) CONVERGENCE WITHIN USER TOLERANCE
+C 3) ITERATION LIMIT REACHED
+C 4) DIVERGENCE OR TOO RESTRICTIVE MAXIMUM STEP (STEPMX) SUSPECTED
+C
+C
+C PARAMETERS
+C ----------
+C N --> DIMENSION OF PROBLEM
+C XPLS(N) --> NEW ITERATE X[K]
+C FPLS --> FUNCTION VALUE AT NEW ITERATE F(XPLS)
+C GPLS(N) --> GRADIENT AT NEW ITERATE, G(XPLS), OR APPROXIMATE
+C X(N) --> OLD ITERATE X[K-1]
+C ITNCNT --> CURRENT ITERATION K
+C ICSCMX <--> NUMBER CONSECUTIVE STEPS .GE. STEPMX
+C [RETAIN VALUE BETWEEN SUCCESSIVE CALLS]
+C ITRMCD <-- TERMINATION CODE
+C GRADTL --> TOLERANCE AT WHICH RELATIVE GRADIENT CONSIDERED CLOSE
+C ENOUGH TO ZERO TO TERMINATE ALGORITHM
+C STEPTL --> RELATIVE STEP SIZE AT WHICH SUCCESSIVE ITERATES
+C CONSIDERED CLOSE ENOUGH TO TERMINATE ALGORITHM
+C SX(N) --> DIAGONAL SCALING MATRIX FOR X
+C FSCALE --> ESTIMATE OF SCALE OF OBJECTIVE FUNCTION
+C ITNLIM --> MAXIMUM NUMBER OF ALLOWABLE ITERATIONS
+C IRETCD --> RETURN CODE
+C MXTAKE --> BOOLEAN FLAG INDICATING STEP OF MAXIMUM LENGTH USED
+C IPR --> DEVICE TO WHICH TO SEND OUTPUT
+C MSG --> IF MSG INCLUDES A TERM 8, SUPPRESS OUTPUT
+C
+C
+
+*/
+
+ int i;
+ double d,rgx,relgrd,rsx,relstp;
+
+ itrmcd[1] = 0;
+
+// LAST GLOBAL STEP FAILED TO LOCATE A POINT LOWER THAN X
+
+ if (iretcd[1] == 1) {
+
+ itrmcd[1] = 3;
+
+ if ((msg[1]/8)%2 == 0) {
+
+ System.out.print("\n\nOPTSTP The last global step failed");
+ System.out.print(" to locate a point lower than x.\n");
+ System.out.print("OPTSTP Either x is an approximate local");
+ System.out.print(" minimum of the function,\n");
+ System.out.print("OPTSTP the function is too nonlinear for");
+ System.out.print(" this algorithm, or\n");
+ System.out.print("OPTSTP steptl is too large.\n");
+
+ }
+
+ return;
+
+ } else {
+
+// FIND DIRECTION IN WHICH RELATIVE GRADIENT MAXIMUM.
+// CHECK WHETHER WITHIN TOLERANCE
+
+ d = Math.max(Math.abs(fpls[1]),fscale[1]);
+ rgx = 0.0;
+
+ for (i = 1; i <= n; i++) {
+
+ relgrd = Math.abs(gpls[i])*Math.max(Math.abs(xpls[i]),1.0/sx[i])/d;
+ rgx = Math.max(rgx,relgrd);
+
+ }
+
+ if (rgx <= gradtl[1]) {
+
+ itrmcd[1] = 1;
+
+ if ((msg[1]/8)%2 == 0) {
+
+ System.out.print("\n\nOPTSTP The relative gradient is close");
+ System.out.print(" to zero.\n");
+ System.out.print("OPTSTP The current iterate is probably");
+ System.out.print(" a solution.\n");
+
+ }
+
+ return;
+
+ }
+
+ if (itncnt[1] == 0) return;
+
+// FIND DIRECTION IN WHICH RELATIVE STEPSIZE MAXIMUM
+// CHECK WHETHER WITHIN TOLERANCE.
+
+ rsx = 0.0;
+
+ for (i = 1; i <= n; i++) {
+
+ relstp = Math.abs(xpls[i] - x[i])/
+ Math.max(Math.abs(xpls[i]),1.0/sx[i]);
+
+ rsx = Math.max(rsx,relstp);
+
+ }
+
+ if (rsx <= steptl[1]) {
+
+ itrmcd[1] = 2;
+
+ if ((msg[1]/8)%2 == 0) {
+
+ System.out.print("\n\nOPTSTP Successive iterates are within");
+ System.out.print(" steptl.\n");
+ System.out.print("OPTSTP The current iterate is probably");
+ System.out.print(" a solution.\n");
+
+ }
+
+ return;
+
+ }
+
+
+// CHECK ITERATION LIMIT
+
+ if (itncnt[1] >= itnlim[1]) {
+
+ itrmcd[1] = 4;
+
+ if ((msg[1]/8)%2 == 0) {
+
+ System.out.print("\n\nOPTSTP The iteration limit was reached.\n");
+ System.out.print("OPTSTP The algorithm failed.\n");
+
+ }
+
+ return;
+
+ }
+
+// CHECK NUMBER OF CONSECUTIVE STEPS >= STEPMX
+
+ if (!mxtake[1]) {
+
+ icscmx[1] = 0;
+
+ return;
+
+ }
+
+ if ((msg[1]/8)%2 == 0) {
+
+ System.out.print("\n\nOPTSTP Step of maximum length (stepmx)");
+ System.out.print(" taken.\n");
+
+ }
+
+ icscmx[1]++;
+
+ if (icscmx[1] < 5) return;
+
+ itrmcd[1] = 5;
+
+ if ((msg[1]/8)%2 == 0) {
+
+ System.out.print("\n\nOPTSTP Maximum step size exceeded");
+ System.out.print(" five consecutive times.\n");
+ System.out.print("OPTSTP Either the function is unbounded");
+ System.out.print(" below,\n");
+ System.out.print("OPTSTP becomes asymptotic to a finite value");
+ System.out.print(" from above in some direction, or\n");
+ System.out.print("OPTSTP stepmx is too small.\n");
+
+ }
+
+ return;
+
+ }
+
+ }
+
+
+
+/**
+*
+*<p>
+*The qraux1_f77 method interchanges rows i,i+1 of the upper
+*Hessenberg matrix r, columns i to n.
+*
+*Translated by Steve Verrill, April 29, 1998.
+*
+*@param n The dimension of the matrix
+*@param r Upper Hessenberg matrix
+*@param i Index of row to interchange (i < n)
+*
+*
+*/
+
+ public static void qraux1_f77(int n, double r[][], int i) {
+
+/*
+
+Here is a copy of the qraux1 FORTRAN documentation:
+
+ SUBROUTINE QRAUX1(NR,N,R,I)
+C
+C PURPOSE
+C -------
+C INTERCHANGE ROWS I,I+1 OF THE UPPER HESSENBERG MATRIX R,
+C COLUMNS I TO N
+C
+C PARAMETERS
+C ----------
+C NR --> ROW DIMENSION OF MATRIX
+C N --> DIMENSION OF MATRIX
+C R(N,N) <--> UPPER HESSENBERG MATRIX
+C I --> INDEX OF ROW TO INTERCHANGE (I.LT.N)
+C
+
+*/
+
+ double tmp;
+ int j,ip1;
+
+ ip1 = i + 1;
+
+ for (j = i; j <= n; j++) {
+
+ tmp = r[i][j];
+ r[i][j] = r[ip1][j];
+ r[ip1][j] = tmp;
+
+ }
+
+ return;
+
+ }
+
+
+/**
+*
+*<p>
+*The qraux2_f77 method pre-multiplies r by the Jacobi rotation j(i,i+1,a,b).
+*
+*Translated by Steve Verrill, April 29, 1998.
+*
+*@param n The dimension of the matrix
+*@param r Upper Hessenberg matrix
+*@param i Index of row
+*@param a scalar
+*@param b scalar
+*
+*
+*/
+
+ public static void qraux2_f77(int n, double r[][], int i,
+ double a, double b) {
+
+/*
+
+Here is a copy of the qraux2 FORTRAN documentation:
+
+ SUBROUTINE QRAUX2(NR,N,R,I,A,B)
+C
+C PURPOSE
+C -------
+C PRE-MULTIPLY R BY THE JACOBI ROTATION J(I,I+1,A,B)
+C
+C PARAMETERS
+C ----------
+C NR --> ROW DIMENSION OF MATRIX
+C N --> DIMENSION OF MATRIX
+C R(N,N) <--> UPPER HESSENBERG MATRIX
+C I --> INDEX OF ROW
+C A --> SCALAR
+C B --> SCALAR
+C
+
+*/
+
+ double den,c,s,y,z;
+ int j,ip1;
+
+ ip1 = i + 1;
+
+ den = Math.sqrt(a*a + b*b);
+ c = a/den;
+ s = b/den;
+
+ for (j = i; j <= n; j++) {
+
+ y = r[i][j];
+ z = r[ip1][j];
+ r[i][j] = c*y - s*z;
+ r[ip1][j] = s*y + c*z;
+
+ }
+
+ return;
+
+ }
+
+
+
+/**
+*
+*<p>
+*The qrupdt_f77 method finds an orthogonal n by n matrix, Q*,
+*and an upper triangular n by n matrix, R*, such that
+*(Q*)(R*) = R+U(V+).
+*
+*Translated by Steve Verrill, May 11, 1998.
+*
+*@param n The dimension of the problem
+*@param a On input: contains R
+* On output: contains R*
+*@param u Vector
+*@param v Vector
+*
+*
+*/
+
+ public static void qrupdt_f77(int n, double a[][],
+ double u[], double v[]) {
+
+/*
+
+Here is a copy of the qrupdt FORTRAN documentation:
+
+
+ SUBROUTINE QRUPDT(NR,N,A,U,V)
+C
+C PURPOSE
+C -------
+C FIND AN ORTHOGONAL (N*N) MATRIX (Q*) AND AN UPPER TRIANGULAR (N*N)
+C MATRIX (R*) SUCH THAT (Q*)(R*)=R+U(V+)
+C
+C PARAMETERS
+C ----------
+C NR --> ROW DIMENSION OF MATRIX
+C N --> DIMENSION OF PROBLEM
+C A(N,N) <--> ON INPUT: CONTAINS R
+C ON OUTPUT: CONTAINS (R*)
+C U(N) --> VECTOR
+C V(N) --> VECTOR
+C
+
+*/
+
+ int k,km1,ii,i,j;
+ double t1,t2;
+
+// DETERMINE LAST NON-ZERO IN U(.)
+
+ k = n;
+
+ while (u[k] == 0 && k > 1) {
+
+ k--;
+
+ }
+
+// (K-1) JACOBI ROTATIONS TRANSFORM
+// R + U(V+) --> (R*) + (U(1)*E1)(V+)
+// WHICH IS UPPER HESSENBERG
+
+ km1 = k - 1;
+
+ for (ii = 1; ii <= km1; ii++) {
+
+ i = km1 - ii + 1;
+
+ if (u[i] == 0.0) {
+
+ Uncmin_f77.qraux1_f77(n,a,i);
+ u[i] = u[i+1];
+
+ } else {
+
+ Uncmin_f77.qraux2_f77(n,a,i,u[i],-u[i+1]);
+ u[i] = Math.sqrt(u[i]*u[i] + u[i+1]*u[i+1]);
+
+ }
+
+ }
+
+// R <-- R + (U(1)*E1)(V+)
+
+ for (j = 1; j <= n; j++) {
+
+ a[1][j] += u[1]*v[j];
+
+ }
+
+// (K-1) JACOBI ROTATIONS TRANSFORM UPPER HESSENBERG R
+// TO UPPER TRIANGULAR (R*)
+
+ km1 = k - 1;
+
+ for (i = 1; i <= km1; i++) {
+
+ if (a[i][i] == 0.0) {
+
+ Uncmin_f77.qraux1_f77(n,a,i);
+
+ } else {
+
+ t1 = a[i][i];
+ t2 = -a[i+1][i];
+
+ Uncmin_f77.qraux2_f77(n,a,i,t1,t2);
+
+ }
+
+ }
+
+ return;
+
+ }
+
+
+/**
+*
+*<p>
+*The result_f77 method prints information.
+*
+*Translated by Steve Verrill, May 11, 1998.
+*
+*@param n The dimension of the problem
+*@param x Estimate of the location of a minimum at iteration k
+*@param f function value at x
+*@param g gradient at x
+*@param a Hessian at x
+*@param p Step taken
+*@param itncnt Iteration number (k)
+*@param iflg Flag controlling the information to print
+*
+*/
+
+ public static void result_f77(int n, double x[], double f[],
+ double g[], double a[][],
+ double p[], int itncnt[], int iflg) {
+
+/*
+
+Here is a copy of the result FORTRAN documentation:
+
+
+ SUBROUTINE RESULT(NR,N,X,F,G,A,P,ITNCNT,IFLG,IPR)
+C
+C PURPOSE
+C -------
+C PRINT INFORMATION
+C
+C PARAMETERS
+C ----------
+C NR --> ROW DIMENSION OF MATRIX
+C N --> DIMENSION OF PROBLEM
+C X(N) --> ITERATE X[K]
+C F --> FUNCTION VALUE AT X[K]
+C G(N) --> GRADIENT AT X[K]
+C A(N,N) --> HESSIAN AT X[K]
+C P(N) --> STEP TAKEN
+C ITNCNT --> ITERATION NUMBER K
+C IFLG --> FLAG CONTROLLING INFO TO PRINT
+C IPR --> DEVICE TO WHICH TO SEND OUTPUT
+C
+
+*/
+
+ int i,j,iii,num5,remain,iii5,iiir;
+ int ilow,ihigh;
+
+ num5 = n/5;
+ remain = n%5;
+
+
+// PRINT ITERATION NUMBER
+
+ System.out.print("\n\nRESULT Iterate k = " + itncnt[1] + "\n");
+
+ if (iflg != 0) {
+
+// PRINT STEP
+
+ System.out.print("\n\nRESULT Step\n\n");
+
+ ilow = -4;
+ ihigh = 0;
+
+ for (i = 1; i <= num5; i++) {
+
+ ilow += 5;
+ ihigh += 5;
+
+ System.out.print(ilow + "--" + ihigh + " ");
+
+ for (j = 1; j <= 5; j++) {
+
+ System.out.print(p[ilow+j-1] + " ");
+
+ }
+
+ System.out.print("\n");
+
+ }
+
+ ilow += 5;
+ ihigh = ilow + remain - 1;
+
+ System.out.print(ilow + "--" + ihigh + " ");
+
+ for (j = 1; j <= remain; j++) {
+
+ System.out.print(p[ilow+j-1] + " ");
+
+ }
+
+ System.out.print("\n");
+
+ }
+
+// PRINT CURRENT ITERATE
+
+ System.out.print("\n\nRESULT Current x\n\n");
+
+ ilow = -4;
+ ihigh = 0;
+
+ for (i = 1; i <= num5; i++) {
+
+ ilow += 5;
+ ihigh += 5;
+
+ System.out.print(ilow + "--" + ihigh + " ");
+
+ for (j = 1; j <= 5; j++) {
+
+ System.out.print(x[ilow+j-1] + " ");
+
+ }
+
+ System.out.print("\n");
+
+ }
+
+ ilow += 5;
+ ihigh = ilow + remain - 1;
+
+ System.out.print(ilow + "--" + ihigh + " ");
+
+ for (j = 1; j <= remain; j++) {
+
+ System.out.print(x[ilow+j-1] + " ");
+
+ }
+
+ System.out.print("\n");
+
+
+// PRINT FUNCTION VALUE
+
+ System.out.print("\n\nRESULT f_to_minimize at x = " + f[1] + "\n");
+
+
+// PRINT GRADIENT
+
+ System.out.print("\n\nRESULT Gradient at x\n\n");
+
+ ilow = -4;
+ ihigh = 0;
+
+ for (i = 1; i <= num5; i++) {
+
+ ilow += 5;
+ ihigh += 5;
+
+ System.out.print(ilow + "--" + ihigh + " ");
+
+ for (j = 1; j <= 5; j++) {
+
+ System.out.print(g[ilow+j-1] + " ");
+
+ }
+
+ System.out.print("\n");
+
+ }
+
+ ilow += 5;
+ ihigh = ilow + remain - 1;
+
+ System.out.print(ilow + "--" + ihigh + " ");
+
+ for (j = 1; j <= remain; j++) {
+
+ System.out.print(g[ilow+j-1] + " ");
+
+ }
+
+ System.out.print("\n");
+
+
+// PRINT HESSIAN FROM ITERATION K
+
+ if (iflg != 0) {
+
+ System.out.print("\n\nRESULT Hessian at x\n\n");
+
+ for (iii = 1; iii <= n; iii++) {
+
+ iii5 = iii/5;
+ iiir = iii%5;
+
+ ilow = -4;
+ ihigh = 0;
+
+ for (i = 1; i <= iii5; i++) {
+
+ ilow += 5;
+ ihigh += 5;
+
+ System.out.print("i = " + iii + ", j = ");
+ System.out.print(ilow + "--" + ihigh + " ");
+
+ for (j = 1; j <= 5; j++) {
+
+ System.out.print(a[iii][ilow+j-1] + " ");
+
+ }
+
+ System.out.print("\n");
+
+ }
+
+ ilow += 5;
+ ihigh = ilow + iiir - 1;
+
+ System.out.print("i = " + iii + ", j = ");
+ System.out.print(ilow + "--" + ihigh + " ");
+
+ for (j = 1; j <= iiir; j++) {
+
+ System.out.print(a[iii][ilow+j-1] + " ");
+
+ }
+
+ System.out.print("\n");
+
+ }
+
+ }
+
+ return;
+
+ }
+
+
+
+/**
+*
+*<p>
+*The sclmul_f77 method multiplies a vector by a scalar.
+*
+*Translated by Steve Verrill, May 8, 1998.
+*
+*@param n The dimension of the problem
+*@param s The scalar
+*@param v Operand vector
+*@param z Result vector
+*
+*
+*/
+
+ public static void sclmul_f77(int n, double s, double v[],
+ double z[]) {
+
+/*
+
+Here is a copy of the sclmul FORTRAN documentation:
+
+ SUBROUTINE SCLMUL(N,S,V,Z)
+C
+C PURPOSE
+C -------
+C MULTIPLY VECTOR BY SCALAR
+C RESULT VECTOR MAY BE OPERAND VECTOR
+C
+C PARAMETERS
+C ----------
+C N --> DIMENSION OF VECTORS
+C S --> SCALAR
+C V(N) --> OPERAND VECTOR
+C Z(N) <-- RESULT VECTOR
+
+*/
+
+ int i;
+
+ for (i = 1; i <= n; i++) {
+
+ z[i] = s*v[i];
+
+ }
+
+ return;
+
+ }
+
+
+
+/**
+*
+*<p>
+*The secfac_f77 method updates the Hessian by the BFGS factored technique.
+*
+*Translated by Steve Verrill, May 14, 1998.
+*
+*@param n The dimension of the problem
+*@param x Old iterate
+*@param g Gradient or approximation at the old iterate
+*@param a On entry: Cholesky decomposition of Hessian
+* in lower triangle and diagonal
+* On exit: Updated Cholesky decomposition of
+* Hessian in lower triangle and diagonal
+*@param xpls New iterate
+*@param gpls Gradient or approximation at the new iterate
+*@param epsm Machine epsilon
+*@param itncnt Iteration count
+*@param rnf Relative noise in optimization function f_to_minimize
+*@param iagflg 1 if an analytic gradient is supplied, 0 otherwise
+*@param noupdt Boolean: no update yet (retain value between
+* successive calls)
+*@param s Workspace
+*@param y Workspace
+*@param u Workspace
+*@param w Workspace
+*
+*
+*/
+
+ public static void secfac_f77(int n, double x[], double g[],
+ double a[][], double xpls[], double gpls[],
+ double epsm, int itncnt[], double rnf,
+ int iagflg[], boolean noupdt[], double s[],
+ double y[], double u[], double w[]) {
+
+/*
+
+Here is a copy of the secfac FORTRAN documentation:
+
+ SUBROUTINE SECFAC(NR,N,X,G,A,XPLS,GPLS,EPSM,ITNCNT,RNF,
+ + IAGFLG,NOUPDT,S,Y,U,W)
+C
+C PURPOSE
+C -------
+C UPDATE HESSIAN BY THE BFGS FACTORED METHOD
+C
+C PARAMETERS
+C ----------
+C NR --> ROW DIMENSION OF MATRIX
+C N --> DIMENSION OF PROBLEM
+C X(N) --> OLD ITERATE, X[K-1]
+C G(N) --> GRADIENT OR APPROXIMATE AT OLD ITERATE
+C A(N,N) <--> ON ENTRY: CHOLESKY DECOMPOSITION OF HESSIAN IN
+C LOWER PART AND DIAGONAL.
+C ON EXIT: UPDATED CHOLESKY DECOMPOSITION OF HESSIAN
+C IN LOWER TRIANGULAR PART AND DIAGONAL
+C XPLS(N) --> NEW ITERATE, X[K]
+C GPLS(N) --> GRADIENT OR APPROXIMATE AT NEW ITERATE
+C EPSM --> MACHINE EPSILON
+C ITNCNT --> ITERATION COUNT
+C RNF --> RELATIVE NOISE IN OPTIMIZATION FUNCTION FCN
+C IAGFLG --> =1 IF ANALYTIC GRADIENT SUPPLIED, =0 OTHERWISE
+C NOUPDT <--> BOOLEAN: NO UPDATE YET
+C [RETAIN VALUE BETWEEN SUCCESSIVE CALLS]
+C S(N) --> WORKSPACE
+C Y(N) --> WORKSPACE
+C U(N) --> WORKSPACE
+C W(N) --> WORKSPACE
+C
+
+*/
+
+ boolean skpupd;
+ int i,j,im1;
+ double den1,snorm2,ynrm2,den2,alp,reltol;
+
+ if (itncnt[1] == 1) noupdt[1] = true;
+
+ for (i = 1; i <= n; i++) {
+
+ s[i] = xpls[i] - x[i];
+ y[i] = gpls[i] - g[i];
+
+ }
+
+ den1 = Blas_f77.ddot_f77(n,s,1,y,1);
+ snorm2 = Blas_f77.dnrm2_f77(n,s,1);
+ ynrm2 = Blas_f77.dnrm2_f77(n,y,1);
+
+ if (den1 >= Math.sqrt(epsm)*snorm2*ynrm2) {
+
+ Uncmin_f77.mvmltu_f77(n,a,s,u);
+ den2 = Blas_f77.ddot_f77(n,u,1,u,1);
+
+// L <-- SQRT(DEN1/DEN2)*L
+
+ alp = Math.sqrt(den1/den2);
+
+ if (noupdt[1]) {
+
+ for (j = 1; j <= n; j++) {
+
+ u[j] *= alp;
+
+ for (i = j; i <= n; i++) {
+
+ a[i][j] *= alp;
+
+ }
+
+ }
+
+ noupdt[1] = false;
+ den2 = den1;
+ alp = 1.0;
+
+ }
+
+ skpupd = true;
+
+// W = L(L+)S = HS
+
+ Uncmin_f77.mvmltl_f77(n,a,u,w);
+
+ i = 1;
+
+ if (iagflg[1] == 0) {
+
+ reltol = Math.sqrt(rnf);
+
+ } else {
+
+ reltol = rnf;
+
+ }
+
+ while (i <= n && skpupd) {
+
+ if (Math.abs(y[i] - w[i]) >=
+ reltol*Math.max(Math.abs(g[i]),Math.abs(gpls[i]))) {
+
+ skpupd = false;
+
+ } else {
+
+ i++;
+
+ }
+
+ }
+
+ if (!skpupd) {
+
+// W=Y-ALP*L(L+)S
+
+ for (i = 1; i <= n; i++) {
+
+ w[i] = y[i] - alp*w[i];
+
+ }
+
+// ALP=1/SQRT(DEN1*DEN2)
+
+ alp /= den1;
+
+// U=(L+)S/SQRT(DEN1*DEN2) = (L+)S/SQRT((Y+)S * (S+)L(L+)S)
+
+ for (i = 1; i <= n; i++) {
+
+ u[i] *= alp;
+
+ }
+
+// COPY L INTO UPPER TRIANGULAR PART. ZERO L.
+
+ for (i = 2; i <= n; i++) {
+
+ im1 = i - 1;
+
+ for (j = 1; j <= im1; j++) {
+
+ a[j][i] = a[i][j];
+ a[i][j] = 0.0;
+
+ }
+
+ }
+
+// FIND Q, (L+) SUCH THAT Q(L+) = (L+) + U(W+)
+
+ Uncmin_f77.qrupdt_f77(n,a,u,w);
+
+// UPPER TRIANGULAR PART AND DIAGONAL OF A NOW CONTAIN UPDATED
+// CHOLESKY DECOMPOSITION OF HESSIAN. COPY BACK TO LOWER
+// TRIANGULAR PART.
+
+ for (i = 2; i <= n; i++) {
+
+ im1 = i - 1;
+
+ for (j = 1; j <= im1; j++) {
+
+ a[i][j] = a[j][i];
+
+ }
+
+ }
+
+ }
+
+ }
+
+ return;
+
+ }
+
+
+/**
+*
+*<p>
+*The secunf_f77 method updates the Hessian by the BFGS unfactored approach.
+*
+*Translated by Steve Verrill, May 8, 1998.
+*
+*@param n The dimension of the problem
+*@param x The old iterate
+*@param g The gradient or an approximation at the old iterate
+*@param a On entry: Approximate Hessian at the old iterate
+* in the upper triangular part (and udiag)
+* On exit: Updated approximate Hessian at the new
+* iterate in the lower triangular part and
+* diagonal
+*@param udiag On entry: Diagonal of Hessian
+*@param xpls New iterate
+*@param gpls Gradient or approximation at the new iterate
+*@param epsm Machine epsilon
+*@param itncnt Iteration count
+*@param rnf Relative noise in the optimization function,
+* f_to_minimize
+*@param iagflg = 1 if an analytic gradient is supplied,
+* = 0 otherwise
+*@param noupdt Boolean: no update yet (retain value between calls)
+*@param s workspace
+*@param y workspace
+*@param t workspace
+*
+*
+*/
+
+ public static void secunf_f77(int n, double x[], double g[], double
+ a[][], double udiag[], double xpls[],
+ double gpls[], double epsm, int itncnt[],
+ double rnf, int iagflg[], boolean noupdt[],
+ double s[], double y[], double t[]) {
+
+/*
+
+Here is a copy of the secunf FORTRAN documentation:
+
+ SUBROUTINE SECUNF(NR,N,X,G,A,UDIAG,XPLS,GPLS,EPSM,ITNCNT,
+ + RNF,IAGFLG,NOUPDT,S,Y,T)
+C
+C PURPOSE
+C -------
+C UPDATE HESSIAN BY THE BFGS UNFACTORED METHOD
+C
+C PARAMETERS
+C ----------
+C NR --> ROW DIMENSION OF MATRIX
+C N --> DIMENSION OF PROBLEM
+C X(N) --> OLD ITERATE, X[K-1]
+C G(N) --> GRADIENT OR APPROXIMATE AT OLD ITERATE
+C A(N,N) <--> ON ENTRY: APPROXIMATE HESSIAN AT OLD ITERATE
+C IN UPPER TRIANGULAR PART (AND UDIAG)
+C ON EXIT: UPDATED APPROX HESSIAN AT NEW ITERATE
+C IN LOWER TRIANGULAR PART AND DIAGONAL
+C [LOWER TRIANGULAR PART OF SYMMETRIC MATRIX]
+C UDIAG --> ON ENTRY: DIAGONAL OF HESSIAN
+C XPLS(N) --> NEW ITERATE, X[K]
+C GPLS(N) --> GRADIENT OR APPROXIMATE AT NEW ITERATE
+C EPSM --> MACHINE EPSILON
+C ITNCNT --> ITERATION COUNT
+C RNF --> RELATIVE NOISE IN OPTIMIZATION FUNCTION FCN
+C IAGFLG --> =1 IF ANALYTIC GRADIENT SUPPLIED, =0 OTHERWISE
+C NOUPDT <--> BOOLEAN: NO UPDATE YET
+C [RETAIN VALUE BETWEEN SUCCESSIVE CALLS]
+C S(N) --> WORKSPACE
+C Y(N) --> WORKSPACE
+C T(N) --> WORKSPACE
+C
+
+*/
+
+ double den1,snorm2,ynrm2,den2,gam,tol;
+ int i,j;
+ boolean skpupd;
+
+
+// COPY HESSIAN IN UPPER TRIANGULAR PART AND UDIAG TO
+// LOWER TRIANGULAR PART AND DIAGONAL
+
+ for (j = 1; j <= n; j++) {
+
+ a[j][j] = udiag[j];
+
+ for (i = j+1; i <= n; i++) {
+
+ a[i][j] = a[j][i];
+
+ }
+
+ }
+
+ if (itncnt[1] == 1) noupdt[1] = true;
+
+ for (i = 1; i <= n; i++) {
+
+ s[i] = xpls[i] - x[i];
+ y[i] = gpls[i] - g[i];
+
+ }
+
+ den1 = Blas_f77.ddot_f77(n,s,1,y,1);
+
+ snorm2 = Blas_f77.dnrm2_f77(n,s,1);
+
+ ynrm2 = Blas_f77.dnrm2_f77(n,y,1);
+
+ if (den1 >= Math.sqrt(epsm)*snorm2*ynrm2) {
+
+ Uncmin_f77.mvmlts_f77(n,a,s,t);
+
+ den2 = Blas_f77.ddot_f77(n,s,1,t,1);
+
+ if (noupdt[1]) {
+
+// H <-- [(S+)Y/(S+)HS]H
+
+ gam = den1/den2;
+
+ den2 = gam*den2;
+
+ for (j = 1; j <= n; j++) {
+
+ t[j] *= gam;
+
+ for (i = j; i <= n; i++) {
+
+ a[i][j] *= gam;
+
+ }
+
+ }
+
+ noupdt[1] = false;
+
+ }
+
+ skpupd = true;
+
+// CHECK UPDATE CONDITION ON ROW I
+
+ for (i = 1; i <= n; i++) {
+
+ tol = rnf*Math.max(Math.abs(g[i]),Math.abs(gpls[i]));
+ if (iagflg[1] == 0) tol /= Math.sqrt(rnf);
+
+ if (Math.abs(y[i] - t[i]) >= tol) {
+
+ skpupd = false;
+ break;
+
+ }
+
+ }
+
+ if (!skpupd) {
+
+// BFGS UPDATE
+
+ for (j = 1; j <= n; j++) {
+
+ for (i = j; i <= n; i++) {
+
+ a[i][j] += y[i]*y[j]/den1 - t[i]*t[j]/den2;
+
+ }
+
+ }
+
+ }
+
+ }
+
+ return;
+
+ }
+
+
+/**
+*
+*<p>
+*The sndofd_f77 method finds second order forward finite difference
+*approximations to the Hessian. For optimization use this
+*method to estimate the Hessian of the optimization function
+*if no analytical user function has been supplied for either
+*the gradient or the Hessian, and the optimization function
+*is inexpensive to evaluate.
+*
+*Translated by Steve Verrill, May 8, 1998.
+*
+*@param n The dimension of the problem
+*@param xpls New iterate
+*@param minclass A class that implements the Uncmin_methods
+* interface (see the definition in
+* Uncmin_methods.java). See UncminTest_f77.java for an
+* example of such a class. The class must define:
+* 1.) a method, f_to_minimize, to minimize.
+* f_to_minimize must have the form
+*
+* public static double f_to_minimize(double x[])
+*
+* where x is the vector of arguments to the function
+* and the return value is the value of the function
+* evaluated at x.
+* 2.) a method, gradient, that has the form
+*
+* public static void gradient(double x[],
+* double g[])
+*
+* where g is the gradient of f evaluated at x. This
+* method will have an empty body if the user
+* does not wish to provide an analytic estimate
+* of the gradient.
+* 3.) a method, hessian, that has the form
+*
+* public static void hessian(double x[],
+* double h[][])
+* where h is the Hessian of f evaluated at x. This
+* method will have an empty body if the user
+* does not wish to provide an analytic estimate
+* of the Hessian. If the user wants Uncmin
+* to check the Hessian, then the hessian method
+* should only fill the lower triangle (and diagonal)
+* of h.
+*@param fpls Function value at the new iterate
+*@param a Finite difference approximation to the Hessian. Only
+* the lower triangular matrix and diagonal are returned.
+*@param sx Scaling vector for x
+*@param rnoise Relative noise in the function to be minimized
+*@param stepsz Workspace (stepsize in i-th component direction)
+*@param anbr Workspace (neighbor in i-th direction)
+*
+*/
+
+ public static void sndofd_f77(int n, double xpls[], Uncmin_methods minclass,
+ double fpls[], double a[][], double sx[],
+ double rnoise, double stepsz[], double anbr[]) {
+
+/*
+
+Here is a copy of the sndofd FORTRAN documentation.
+
+ SUBROUTINE SNDOFD(NR,N,XPLS,FCN,FPLS,A,SX,RNOISE,STEPSZ,ANBR)
+
+C PURPOSE
+C -------
+C FIND SECOND ORDER FORWARD FINITE DIFFERENCE APPROXIMATION "A"
+C TO THE SECOND DERIVATIVE (HESSIAN) OF THE FUNCTION DEFINED BY THE SUBP
+C "FCN" EVALUATED AT THE NEW ITERATE "XPLS"
+C
+C FOR OPTIMIZATION USE THIS ROUTINE TO ESTIMATE
+C 1) THE SECOND DERIVATIVE (HESSIAN) OF THE OPTIMIZATION FUNCTION
+C IF NO ANALYTICAL USER FUNCTION HAS BEEN SUPPLIED FOR EITHER
+C THE GRADIENT OR THE HESSIAN AND IF THE OPTIMIZATION FUNCTION
+C "FCN" IS INEXPENSIVE TO EVALUATE.
+C
+C PARAMETERS
+C ----------
+C NR --> ROW DIMENSION OF MATRIX
+C N --> DIMENSION OF PROBLEM
+C XPLS(N) --> NEW ITERATE: X[K]
+C FCN --> NAME OF SUBROUTINE TO EVALUATE FUNCTION
+C FPLS --> FUNCTION VALUE AT NEW ITERATE, F(XPLS)
+C A(N,N) <-- FINITE DIFFERENCE APPROXIMATION TO HESSIAN
+C ONLY LOWER TRIANGULAR MATRIX AND DIAGONAL
+C ARE RETURNED
+C SX(N) --> DIAGONAL SCALING MATRIX FOR X
+C RNOISE --> RELATIVE NOISE IN FNAME [F(X)]
+C STEPSZ(N) --> WORKSPACE (STEPSIZE IN I-TH COMPONENT DIRECTION)
+C ANBR(N) --> WORKSPACE (NEIGHBOR IN I-TH DIRECTION)
+C
+C
+
+*/
+
+ double xmult,xtmpi,xtmpj,fhat;
+ int i,j;
+
+// FIND I-TH STEPSIZE AND EVALUATE NEIGHBOR IN DIRECTION
+// OF I-TH UNIT VECTOR
+
+ xmult = Math.pow(rnoise,1.0/3.0);
+
+ for (i = 1; i <= n; i++) {
+
+ stepsz[i] = xmult*Math.max(Math.abs(xpls[i]),1.0/sx[i]);
+ xtmpi = xpls[i];
+ xpls[i] = xtmpi + stepsz[i];
+
+ anbr[i] = minclass.f_to_minimize(xpls);
+ xpls[i] = xtmpi;
+
+ }
+
+// CALCULATE COLUMN I OF A
+
+ for (i = 1; i <= n; i++) {
+
+ xtmpi = xpls[i];
+ xpls[i] = xtmpi + 2.0*stepsz[i];
+ fhat = minclass.f_to_minimize(xpls);
+ a[i][i] = ((fpls[1] - anbr[i]) + (fhat - anbr[i]))/
+ (stepsz[i]*stepsz[i]);
+
+// CALCULATE SUB-DIAGONAL ELEMENTS OF COLUMN
+
+ if (i != n) {
+
+ xpls[i] = xtmpi + stepsz[i];
+
+ for (j = i+1; j <= n; j++) {
+
+ xtmpj = xpls[j];
+ xpls[j] = xtmpj + stepsz[j];
+ fhat = minclass.f_to_minimize(xpls);
+ a[j][i] = ((fpls[1] - anbr[i]) + (fhat - anbr[j]))/
+ (stepsz[i]*stepsz[j]);
+ xpls[j] = xtmpj;
+
+ }
+
+ }
+
+ xpls[i] = xtmpi;
+
+ }
+
+ return;
+
+ }
+
+
+
+
+/**
+*
+*<p>
+*The tregup_f77 method decides whether to accept xpls = x + sc as the next
+*iterate and update the trust region dlt.
+*
+*Translated by Steve Verrill, May 11, 1998.
+*
+*@param n The dimension of the problem
+*@param x Old iterate
+*@param f Function value at old iterate
+*@param g Gradient or approximation at old iterate
+*@param a Cholesky decomposition of Hessian in
+* lower triangular part and diagonal.
+* Hessian or approximation in upper triangular part.
+*@param minclass A class that implements the Uncmin_methods
+* interface (see the definition in
+* Uncmin_methods.java). See UncminTest_f77.java for an
+* example of such a class. The class must define:
+* 1.) a method, f_to_minimize, to minimize.
+* f_to_minimize must have the form
+*
+* public static double f_to_minimize(double x[])
+*
+* where x is the vector of arguments to the function
+* and the return value is the value of the function
+* evaluated at x.
+* 2.) a method, gradient, that has the form
+*
+* public static void gradient(double x[],
+* double g[])
+*
+* where g is the gradient of f evaluated at x. This
+* method will have an empty body if the user
+* does not wish to provide an analytic estimate
+* of the gradient.
+* 3.) a method, hessian, that has the form
+*
+* public static void hessian(double x[],
+* double h[][])
+* where h is the Hessian of f evaluated at x. This
+* method will have an empty body if the user
+* does not wish to provide an analytic estimate
+* of the Hessian. If the user wants Uncmin
+* to check the Hessian, then the hessian method
+* should only fill the lower triangle (and diagonal)
+* of h.
+*@param sc Current step
+*@param sx Scaling vector for x
+*@param nwtake Boolean, = true if Newton step taken
+*@param stepmx Maximum allowable step size
+*@param steptl Relative step size at which successive iterates
+* are considered close enough to terminate
+* the algorithm
+*@param dlt Trust region radius
+*@param iretcd Return code
+* = 0 xpls accepted as next iterate, dlt is the
+* trust region radius for the next iteration
+* = 1 xpls unsatisfactory but accepted as next
+* iterate because xpls - x is less than the
+* smallest allowable step length
+* = 2 f(xpls) too large. Continue current
+* iteration with new reduced dlt.
+* = 3 f(xpls) sufficiently small, but quadratic
+* model predicts f(xpls) sufficiently
+* well to continue current iteration
+* with new doubled dlt.
+*@param xplsp Workspace (value needs to be retained between
+* successive calls of k-th global step)
+*@param fplsp Retain between successive calls
+*@param xpls New iterate
+*@param fpls Function value at new iterate
+*@param mxtake Boolean flag indicating step of maximum length used
+*@param method Algorithm to use to solve minimization problem
+* = 1 Line search
+* = 2 Double dogleg
+* = 3 More-Hebdon
+*@param udiag Diagonal of Hessian in a
+*
+*/
+
+
+ public static void tregup_f77(int n, double x[], double f[], double g[],
+ double a[][], Uncmin_methods minclass,
+ double sc[], double sx[], boolean nwtake[],
+ double stepmx[], double steptl[], double dlt[],
+ int iretcd[], double xplsp[], double fplsp[],
+ double xpls[], double fpls[], boolean mxtake[],
+ int method, double udiag[]) {
+
+/*
+
+Here is a copy of the tregup FORTRAN documentation.
+
+
+ SUBROUTINE TREGUP(NR,N,X,F,G,A,FCN,SC,SX,NWTAKE,STEPMX,STEPTL,
+ + DLT,IRETCD,XPLSP,FPLSP,XPLS,FPLS,MXTAKE,IPR,METHOD,UDIAG)
+C
+C PURPOSE
+C -------
+C DECIDE WHETHER TO ACCEPT XPLS=X+SC AS THE NEXT ITERATE AND UPDATE THE
+C TRUST REGION DLT.
+C
+C PARAMETERS
+C ----------
+C NR --> ROW DIMENSION OF MATRIX
+C N --> DIMENSION OF PROBLEM
+C X(N) --> OLD ITERATE X[K-1]
+C F --> FUNCTION VALUE AT OLD ITERATE, F(X)
+C G(N) --> GRADIENT AT OLD ITERATE, G(X), OR APPROXIMATE
+C A(N,N) --> CHOLESKY DECOMPOSITION OF HESSIAN IN
+C LOWER TRIANGULAR PART AND DIAGONAL.
+C HESSIAN OR APPROX IN UPPER TRIANGULAR PART
+C FCN --> NAME OF SUBROUTINE TO EVALUATE FUNCTION
+C SC(N) --> CURRENT STEP
+C SX(N) --> DIAGONAL SCALING MATRIX FOR X
+C NWTAKE --> BOOLEAN, =.TRUE. IF NEWTON STEP TAKEN
+C STEPMX --> MAXIMUM ALLOWABLE STEP SIZE
+C STEPTL --> RELATIVE STEP SIZE AT WHICH SUCCESSIVE ITERATES
+C CONSIDERED CLOSE ENOUGH TO TERMINATE ALGORITHM
+C DLT <--> TRUST REGION RADIUS
+C IRETCD <--> RETURN CODE
+C =0 XPLS ACCEPTED AS NEXT ITERATE;
+C DLT TRUST REGION FOR NEXT ITERATION.
+C =1 XPLS UNSATISFACTORY BUT ACCEPTED AS NEXT ITERATE
+C BECAUSE XPLS-X .LT. SMALLEST ALLOWABLE
+C STEP LENGTH.
+C =2 F(XPLS) TOO LARGE. CONTINUE CURRENT ITERATION
+C WITH NEW REDUCED DLT.
+C =3 F(XPLS) SUFFICIENTLY SMALL, BUT QUADRATIC MODEL
+C PREDICTS F(XPLS) SUFFICIENTLY WELL TO CONTINUE
+C CURRENT ITERATION WITH NEW DOUBLED DLT.
+C XPLSP(N) <--> WORKSPACE [VALUE NEEDS TO BE RETAINED BETWEEN
+C SUCCESSIVE CALLS OF K-TH GLOBAL STEP]
+C FPLSP <--> [RETAIN VALUE BETWEEN SUCCESSIVE CALLS]
+C XPLS(N) <-- NEW ITERATE X[K]
+C FPLS <-- FUNCTION VALUE AT NEW ITERATE, F(XPLS)
+C MXTAKE <-- BOOLEAN FLAG INDICATING STEP OF MAXIMUM LENGTH USED
+C IPR --> DEVICE TO WHICH TO SEND OUTPUT
+C METHOD --> ALGORITHM TO USE TO SOLVE MINIMIZATION PROBLEM
+C =1 LINE SEARCH
+C =2 DOUBLE DOGLEG
+C =3 MORE-HEBDON
+C UDIAG(N) --> DIAGONAL OF HESSIAN IN A(.,.)
+C
+
+*/
+
+ int i,j;
+ double rln,temp,dltf,slp,dltmp,dltfp;
+
+ mxtake[1] = false;
+
+ for (i = 1; i <= n; i++) {
+
+ xpls[i] = x[i] + sc[i];
+
+ }
+
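+// Evaluate f at the trial point xpls = x + sc.  dltf is the actual change
+// in f; slp = g'sc is the change predicted by the linear (slope) term.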
+ fpls[1] = minclass.f_to_minimize(xpls);
+
+ dltf = fpls[1] - f[1];
+
+ slp = Blas_f77.ddot_f77(n,g,1,sc,1);
+
+ if (iretcd[1] == 4) fplsp[1] = 0.0;
+
+ if ((iretcd[1] == 3) && ((fpls[1] >= fplsp[1]) ||
+ (dltf > .0001*slp))) {
+
+// RESET XPLS TO XPLSP AND TERMINATE GLOBAL STEP
+
+ iretcd[1] = 0;
+
+ for (i = 1; i <= n; i++) {
+
+ xpls[i] = xplsp[i];
+
+ }
+
+ fpls[1] = fplsp[1];
+ dlt[1] *= .5;
+
+ } else {
+
+// FPLS TOO LARGE
+
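+// Sufficient-decrease test: the trial point fails when the actual change
+// dltf exceeds 1.e-4 times the predicted linear change slp.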
+ if (dltf > .0001*slp) {
+
+ rln = 0.0;
+
+ for (i = 1; i <= n; i++) {
+
+ rln = Math.max(rln,Math.abs(sc[i])/
+ Math.max(Math.abs(xpls[i]),1.0/sx[i]));
+
+ }
+
+ if (rln < steptl[1]) {
+
+// CANNOT FIND SATISFACTORY XPLS SUFFICIENTLY DISTINCT FROM X
+
+ iretcd[1] = 1;
+
+ } else {
+
+// REDUCE TRUST REGION AND CONTINUE GLOBAL STEP
+
+ iretcd[1] = 2;
+
+ dltmp = -slp*dlt[1]/(2.0*(dltf - slp));
+
+ if (dltmp < .1*dlt[1]) {
+
+ dlt[1] *= .1;
+
+ } else {
+
+ dlt[1] = dltmp;
+
+ }
+
+ }
+
+ } else {
+
+// FPLS SUFFICIENTLY SMALL
+
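+// Accumulate sc'H sc, using the Cholesky factor stored in the lower
+// triangle of a when method = 2, and udiag plus the upper triangle of a
+// otherwise.  dltfp then becomes the change predicted by the quadratic model.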
+ dltfp = 0.0;
+
+ if (method == 2) {
+
+ for (i = 1; i <= n; i++) {
+
+ temp = 0.0;
+
+ for (j = i; j <= n; j++) {
+
+ temp += a[j][i]*sc[j];
+
+ }
+
+ dltfp += temp*temp;
+
+ }
+
+ } else {
+
+ for (i = 1; i <= n; i++) {
+
+ dltfp += udiag[i]*sc[i]*sc[i];
+ temp = 0.0;
+
+ for (j = i+1; j <= n; j++) {
+
+ temp += a[i][j]*sc[i]*sc[j];
+
+ }
+
+ dltfp += 2.0*temp;
+
+ }
+
+ }
+
+ dltfp = slp + dltfp/2.0;
+
+ if ((iretcd[1] != 2) && (Math.abs(dltfp - dltf) <=
+ .1*Math.abs(dltf)) && (!nwtake[1]) &&
+ (dlt[1] <= .99*stepmx[1])) {
+
+// DOUBLE TRUST REGION AND CONTINUE GLOBAL STEP
+
+ iretcd[1] = 3;
+
+ for (i = 1; i <= n; i++) {
+
+ xplsp[i] = xpls[i];
+
+ }
+
+ fplsp[1] = fpls[1];
+ dlt[1] = Math.min(2.0*dlt[1],stepmx[1]);
+
+ } else {
+
+// ACCEPT XPLS AS NEXT ITERATE. CHOOSE NEW TRUST REGION.
+
+ iretcd[1] = 0;
+
+ if (dlt[1] > .99*stepmx[1]) mxtake[1] = true;
+
+ if (dltf >= .1*dltfp) {
+
+// DECREASE TRUST REGION FOR NEXT ITERATION
+
+ dlt[1] *= .5;
+
+ } else {
+
+// CHECK WHETHER TO INCREASE TRUST REGION FOR NEXT ITERATION
+
+ if (dltf <= .75*dltfp) dlt[1] = Math.min(2.0*dlt[1],stepmx[1]);
+
+ }
+
+ }
+
+ }
+
+ }
+
+ return;
+
+ }
+
+}
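
To make the return codes concrete, here is a hedged sketch of a single call to
tregup_f77 on the quadratic f(x) = x1*x1 + x2*x2.  Everything in it -- the class
name TregupDemo, the starting point, the trial step, and the tuning constants --
is illustrative rather than taken from the upstream driver routines; the sketch
only assumes that Uncmin_f77, Uncmin_methods, and Blas_f77 are compiled in their
optimization and linear_algebra packages.

    package optimization;

    // Hypothetical demonstration class; not part of the upstream sources.
    class TregupDemo implements Uncmin_methods {

       public double f_to_minimize(double x[]) {
          return x[1]*x[1] + x[2]*x[2];
       }

       public void gradient(double x[], double g[]) {
          g[1] = 2.0*x[1];
          g[2] = 2.0*x[2];
       }

       public void hessian(double x[], double h[][]) {
          h[1][1] = 2.0;  h[1][2] = 0.0;
          h[2][1] = 0.0;  h[2][2] = 2.0;
       }

       public static void main(String args[]) {

          int n = 2;
          TregupDemo fcn = new TregupDemo();

          // FORTRAN-style 1-based arrays: element 0 is unused, and
          // scalar in/out values live in element 1.
          double x[]       = {0.0, 1.0, 1.0};              // old iterate
          double f[]       = {0.0, fcn.f_to_minimize(x)};  // f at old iterate
          double g[]       = new double[n+1];
          fcn.gradient(x,g);

          double a[][]     = new double[n+1][n+1];  // upper-triangle Hessian (zero off-diagonal here)
          double udiag[]   = {0.0, 2.0, 2.0};       // Hessian diagonal
          double sc[]      = {0.0, -0.5, -0.5};     // trial step
          double sx[]      = {0.0, 1.0, 1.0};       // scaling vector
          boolean nwtake[] = {false, false};        // not a full Newton step
          double stepmx[]  = {0.0, 10.0};
          double steptl[]  = {0.0, 1.0e-8};
          double dlt[]     = {0.0, Math.sqrt(0.5)}; // trust-region radius = |sc|
          int iretcd[]     = {0, 4};                // 4 marks the first call of this global step

          double xplsp[]   = new double[n+1];
          double fplsp[]   = new double[2];
          double xpls[]    = new double[n+1];
          double fpls[]    = new double[2];
          boolean mxtake[] = {false, false};

          Uncmin_f77.tregup_f77(n,x,f,g,a,fcn,sc,sx,nwtake,stepmx,steptl,
                                dlt,iretcd,xplsp,fplsp,xpls,fpls,mxtake,
                                3,udiag);           // method 3 (More-Hebdon path)

          System.out.println("iretcd = " + iretcd[1] +
                             ", dlt = " + dlt[1] +
                             ", f(xpls) = " + fpls[1]);
       }

    }

Because the quadratic model matches this f exactly, the call returns iretcd = 3
and doubles dlt; per the documentation above, a driver would then recompute the
step with the larger radius and call tregup_f77 again, repeating until iretcd
comes back as 0 or 1.
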
diff --git a/Uncmin_methods.java b/Uncmin_methods.java
new file mode 100644
index 0000000..3ee8dae
--- /dev/null
+++ b/Uncmin_methods.java
@@ -0,0 +1,11 @@
+package optimization;
+
+public interface Uncmin_methods {
+
+ double f_to_minimize(double x[]);
+
+ void gradient(double x[], double g[]);
+
+ void hessian(double x[], double h[][]);
+
+}
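
For readers new to the package, here is a minimal sketch of a class that
satisfies this interface for the two-dimensional Rosenbrock function.  The
class name Rosenbrock_methods is hypothetical and the example is not part of
the upstream sources; it only assumes the FORTRAN-style 1-based indexing that
the _f77 routines use (arguments in x[1]..x[n], element 0 unused).

    import optimization.Uncmin_methods;

    // Hypothetical example class; not part of the upstream sources.
    public class Rosenbrock_methods implements Uncmin_methods {

       // f(x) = 100*(x2 - x1^2)^2 + (1 - x1)^2
       public double f_to_minimize(double x[]) {

          double t1 = x[2] - x[1]*x[1];
          double t2 = 1.0 - x[1];

          return 100.0*t1*t1 + t2*t2;

       }

       // Analytic gradient.  The body could be left empty if the
       // user preferred a finite-difference gradient.
       public void gradient(double x[], double g[]) {

          g[1] = -400.0*x[1]*(x[2] - x[1]*x[1]) - 2.0*(1.0 - x[1]);
          g[2] = 200.0*(x[2] - x[1]*x[1]);

       }

       // Analytic Hessian, filled symmetrically.  (Per the Uncmin_f77
       // documentation, only the lower triangle and diagonal are needed
       // if Uncmin is asked to check the Hessian; the body could also
       // be left empty.)
       public void hessian(double x[], double h[][]) {

          h[1][1] = 1200.0*x[1]*x[1] - 400.0*x[2] + 2.0;
          h[1][2] = -400.0*x[1];
          h[2][1] = -400.0*x[1];
          h[2][2] = 200.0;

       }

    }

An instance of such a class is what tregup_f77 above (and the rest of
Uncmin_f77) receives as its Uncmin_methods argument.
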
diff --git a/copyright b/copyright
new file mode 100644
index 0000000..26bf7bc
--- /dev/null
+++ b/copyright
@@ -0,0 +1,34 @@
+ Package optimization copyright claim:
+
+ These classes were either developed by a US Government employee
+ on official time, or they were translated from public domain
+ sources by a US Government employee on official time. Thus
+ they are in the public domain.
+
+ These programs are distributed in the hope that they will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+ The author/translator's mail address is
+
+ Steve Verrill
+ USDA Forest Products Laboratory
+ 1 Gifford Pinchot Drive
+ Madison, Wisconsin
+ 53705
+
+ The author/translator's e-mail address is
+
+ steve at ws10.fpl.fs.fed.us
+
+
+************************************************************************
+
+ The Console.java code was not written by Steve Verrill
+ and is under a separate copyright. Please see the listing
+ of the Console.java code for details.
+
+************************************************************************
+
+
+
diff --git a/disclaimer b/disclaimer
new file mode 100644
index 0000000..925aae6
--- /dev/null
+++ b/disclaimer
@@ -0,0 +1,15 @@
+
+DISCLAIMER OF WARRANTIES
+
+THIS SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND.
+THE AUTHOR/TRANSLATOR DOES NOT WARRANT, GUARANTEE OR MAKE ANY
+REPRESENTATIONS REGARDING THE SOFTWARE OR DOCUMENTATION IN TERMS
+OF THEIR CORRECTNESS, RELIABILITY, CURRENTNESS, OR OTHERWISE.
+THE ENTIRE RISK AS TO THE RESULTS AND PERFORMANCE OF THE SOFTWARE
+IS ASSUMED BY YOU. IN NO CASE WILL ANY PARTY INVOLVED WITH THE
+CREATION OR DISTRIBUTION OF THE SOFTWARE BE LIABLE FOR ANY DAMAGE
+THAT MAY RESULT FROM THE USE OF THIS SOFTWARE.
+
+Sorry about that.
+
+
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/pkg-java/liboptimization-java.git